morpheus_network/driver/virtio/tx.rs
//! VirtIO TX logic.
//!
//! Fire-and-forget transmit - never wait for completion!
//!
//! # Reference
//! NETWORK_IMPL_GUIDE.md §4.6

use crate::dma::BufferPool;
use crate::driver::traits::TxError;
use crate::types::{VirtioNetHdr, VirtqueueState};

/// Maximum frame size including VirtIO header.
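/// (1514 bytes = 14-byte Ethernet header + 1500-byte MTU payload; the FCS is
/// not included at this layer.)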
pub const MAX_TX_FRAME_SIZE: usize = VirtioNetHdr::SIZE + 1514;

/// Transmit a packet via VirtIO.
///
/// # Arguments
/// - `tx_state`: TX virtqueue state
/// - `tx_pool`: TX buffer pool
/// - `frame`: Ethernet frame (without VirtIO header)
///
/// # Returns
/// - `Ok(())`: Frame queued (fire-and-forget)
/// - `Err(TxError)`: Transmission failed
///
/// # Contract
/// - MUST return immediately (no completion wait)
/// - Caller should call `collect_completions` periodically
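///
/// # Example
///
/// A minimal sketch of the intended call pattern, assuming driver-owned
/// `tx_state`/`tx_pool` and a `frame` built elsewhere (names here are
/// illustrative, not a fixed API):
///
/// ```ignore
/// // Phase 4: queue the frame - returns immediately, never blocks.
/// match transmit(&mut tx_state, &mut tx_pool, &frame) {
///     Ok(()) => {}                             // queued; buffer reclaimed later
///     Err(TxError::QueueFull) => { /* drop or retry next iteration */ }
///     Err(e) => return Err(e),                 // e.g. FrameTooLarge
/// }
/// // Phase 5 (end of iteration): batched notify + buffer reclaim.
/// collect_completions(&mut tx_state, &mut tx_pool);
/// ```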
#[cfg(target_arch = "x86_64")]
pub fn transmit(
    tx_state: &mut VirtqueueState,
    tx_pool: &mut BufferPool,
    frame: &[u8],
) -> Result<(), TxError> {
    use crate::asm::drivers::virtio::tx as asm_tx;

    // Reject frames that would overflow a TX buffer.
    let total_len = VirtioNetHdr::SIZE + frame.len();
    if total_len > MAX_TX_FRAME_SIZE {
        return Err(TxError::FrameTooLarge);
    }

    // Collect any pending completions first (reclaims buffers).
    collect_completions(tx_state, tx_pool);

    // Allocate a TX buffer. Capture its pool index now: once submitted, the
    // buffer is device-owned and may only be referenced by index.
    let buf = tx_pool.alloc().ok_or(TxError::QueueFull)?;
    let buf_idx = buf.index();

    // Write the VirtIO header (12 bytes, all zeros: no checksum offload, no GSO).
    let hdr = VirtioNetHdr::zeroed();
    buf.as_mut_slice()[..VirtioNetHdr::SIZE].copy_from_slice(hdr.as_bytes());

    // Copy the frame in after the header.
    buf.as_mut_slice()[VirtioNetHdr::SIZE..total_len].copy_from_slice(frame);

    // Mark the buffer device-owned BEFORE submitting.
    unsafe {
        buf.mark_device_owned();
    }

    // Submit via ASM (includes the required memory barriers).
    let success = asm_tx::submit(tx_state, buf_idx, total_len as u16);
    if !success {
        // Queue was full (shouldn't happen right after collect, but handle it):
        // take ownership back and return the buffer to the pool.
        if let Some(buf) = tx_pool.get_mut(buf_idx) {
            unsafe {
                buf.mark_driver_owned();
            }
        }
        tx_pool.free(buf_idx);
        return Err(TxError::QueueFull);
    }

    // NOTE: Device notification is deferred to collect_completions() so that
    // all submissions in one main-loop iteration share a single batched
    // notify (Phase 5).
    // *** DO NOT WAIT FOR COMPLETION *** - completions are reclaimed in Phase 5.
    Ok(())
}

/// Collect TX completions and notify device of any pending submissions.
///
/// Call this periodically (main-loop Phase 5) to reclaim TX buffers. It also
/// sends a batched notification for any TX submissions made since the last call.
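///
/// # Example
///
/// A sketch of the TX half of one main-loop iteration; `outbound` is a
/// hypothetical queue of frames produced during this iteration:
///
/// ```ignore
/// for frame in outbound.drain(..) {
///     let _ = transmit(&mut tx_state, &mut tx_pool, &frame); // Phase 4
/// }
/// // One batched notify covers every submission above, then completed
/// // buffers are reclaimed from the used ring.
/// collect_completions(&mut tx_state, &mut tx_pool);          // Phase 5
/// ```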
#[cfg(target_arch = "x86_64")]
pub fn collect_completions(tx_state: &mut VirtqueueState, tx_pool: &mut BufferPool) {
    use crate::asm::drivers::virtio::{notify, tx as asm_tx};

    // First, notify the device of any pending TX submissions (batched).
    // This is safe even when nothing new was submitted - the device ignores
    // redundant notifies.
    notify::notify(tx_state);

    // Then drain the used ring, reclaiming each completed buffer.
    while let Some(buf_idx) = asm_tx::poll_complete(tx_state) {
        if let Some(buf) = tx_pool.get_mut(buf_idx) {
            unsafe {
                buf.mark_driver_owned();
            }
            tx_pool.free(buf_idx);
        }
    }
}

// Stubs for non-x86_64 platforms.
#[cfg(not(target_arch = "x86_64"))]
pub fn transmit(
    _tx_state: &mut VirtqueueState,
    _tx_pool: &mut BufferPool,
    _frame: &[u8],
) -> Result<(), TxError> {
    Err(TxError::DeviceNotReady)
}

#[cfg(not(target_arch = "x86_64"))]
pub fn collect_completions(_tx_state: &mut VirtqueueState, _tx_pool: &mut BufferPool) {}