// morpheus_network/dma/region.rs
//! DMA region definition and layout.
//!
//! # Memory Layout (2MB Region)
//! ```text
//! Offset Size Content
//! 0x00000 0x0200 RX Descriptor Table (32 × 16 bytes)
//! 0x00200 0x0048 RX Available Ring
//! 0x00400 0x0108 RX Used Ring
//! 0x00800 0x0200 TX Descriptor Table (32 × 16 bytes)
//! 0x00A00 0x0048 TX Available Ring
//! 0x00C00 0x0108 TX Used Ring
//! 0x01000 0x10000 RX Buffers (32 × 2KB)
//! 0x11000 0x10000 TX Buffers (32 × 2KB)
//! ```
//!
//! # Reference
//! NETWORK_IMPL_GUIDE.md §3.3
/// DMA-capable memory region.
///
/// Contains both the CPU-accessible pointer and the device-visible bus
/// address for one contiguous allocation. The accessor methods expose the
/// sub-areas described by the layout table in the module docs.
#[derive(Debug)]
pub struct DmaRegion {
    /// CPU-accessible pointer to the region.
    pub cpu_ptr: *mut u8,
    /// Device-visible bus address.
    pub bus_addr: u64,
    /// Total size of the region in bytes.
    pub size: usize,
}

impl DmaRegion {
    /// Minimum region size (2MB).
    pub const MIN_SIZE: usize = 2 * 1024 * 1024;
    /// Default queue size (number of descriptors).
    pub const DEFAULT_QUEUE_SIZE: usize = 32;
    /// Default buffer size (2KB each).
    pub const DEFAULT_BUFFER_SIZE: usize = 2048;

    // Layout offsets — must agree with the table in the module docs.
    /// RX descriptor table offset.
    pub const RX_DESC_OFFSET: usize = 0x0000;
    /// RX available ring offset.
    pub const RX_AVAIL_OFFSET: usize = 0x0200;
    /// RX used ring offset.
    pub const RX_USED_OFFSET: usize = 0x0400;
    /// TX descriptor table offset.
    pub const TX_DESC_OFFSET: usize = 0x0800;
    /// TX available ring offset.
    pub const TX_AVAIL_OFFSET: usize = 0x0A00;
    /// TX used ring offset.
    pub const TX_USED_OFFSET: usize = 0x0C00;
    /// RX buffers offset.
    pub const RX_BUFFERS_OFFSET: usize = 0x1000;
    /// TX buffers offset (= RX_BUFFERS_OFFSET + 32 buffers × 2KB).
    pub const TX_BUFFERS_OFFSET: usize = 0x11000;

    /// Create a new DMA region.
    ///
    /// # Safety
    /// - `cpu_ptr` must point to at least `size` bytes of valid, DMA-capable
    ///   memory that stays allocated (and mapped for the device) for the
    ///   lifetime of the returned region
    /// - `bus_addr` must be the device-visible address of that same memory
    /// - Region must be properly aligned (page-aligned preferred)
    pub unsafe fn new(cpu_ptr: *mut u8, bus_addr: u64, size: usize) -> Self {
        debug_assert!(size >= Self::MIN_SIZE, "DMA region too small");
        Self {
            cpu_ptr,
            bus_addr,
            size,
        }
    }

    /// CPU pointer `offset` bytes into the region (internal helper).
    fn cpu_at(&self, offset: usize) -> *mut u8 {
        debug_assert!(offset <= self.size, "offset {:#x} outside region", offset);
        // SAFETY: `new`'s contract guarantees `cpu_ptr` addresses at least
        // `size` (>= MIN_SIZE) valid bytes, and every layout offset passed
        // here is below MIN_SIZE, so the result stays inside the allocation.
        unsafe { self.cpu_ptr.add(offset) }
    }

    /// Bus address `offset` bytes into the region (internal helper).
    fn bus_at(&self, offset: usize) -> u64 {
        self.bus_addr + offset as u64
    }

    /// Get CPU base pointer.
    pub fn cpu_base(&self) -> *mut u8 {
        self.cpu_ptr
    }

    /// Get bus base address.
    pub fn bus_base(&self) -> u64 {
        self.bus_addr
    }

    /// Get total size.
    pub fn size(&self) -> usize {
        self.size
    }

    /// Get CPU pointer for RX descriptor table.
    pub fn rx_desc_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::RX_DESC_OFFSET)
    }

    /// Get bus address for RX descriptor table.
    pub fn rx_desc_bus(&self) -> u64 {
        self.bus_at(Self::RX_DESC_OFFSET)
    }

    /// Get CPU pointer for RX available ring.
    pub fn rx_avail_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::RX_AVAIL_OFFSET)
    }

    /// Get bus address for RX available ring.
    pub fn rx_avail_bus(&self) -> u64 {
        self.bus_at(Self::RX_AVAIL_OFFSET)
    }

    /// Get CPU pointer for RX used ring.
    pub fn rx_used_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::RX_USED_OFFSET)
    }

    /// Get bus address for RX used ring.
    pub fn rx_used_bus(&self) -> u64 {
        self.bus_at(Self::RX_USED_OFFSET)
    }

    /// Get CPU pointer for TX descriptor table.
    pub fn tx_desc_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::TX_DESC_OFFSET)
    }

    /// Get bus address for TX descriptor table.
    pub fn tx_desc_bus(&self) -> u64 {
        self.bus_at(Self::TX_DESC_OFFSET)
    }

    /// Get CPU pointer for TX available ring.
    pub fn tx_avail_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::TX_AVAIL_OFFSET)
    }

    /// Get bus address for TX available ring.
    pub fn tx_avail_bus(&self) -> u64 {
        self.bus_at(Self::TX_AVAIL_OFFSET)
    }

    /// Get CPU pointer for TX used ring.
    pub fn tx_used_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::TX_USED_OFFSET)
    }

    /// Get bus address for TX used ring.
    pub fn tx_used_bus(&self) -> u64 {
        self.bus_at(Self::TX_USED_OFFSET)
    }

    /// Get CPU pointer for RX buffers.
    pub fn rx_buffers_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::RX_BUFFERS_OFFSET)
    }

    /// Get bus address for RX buffers.
    pub fn rx_buffers_bus(&self) -> u64 {
        self.bus_at(Self::RX_BUFFERS_OFFSET)
    }

    /// Get CPU pointer for TX buffers.
    pub fn tx_buffers_cpu(&self) -> *mut u8 {
        self.cpu_at(Self::TX_BUFFERS_OFFSET)
    }

    /// Get bus address for TX buffers.
    pub fn tx_buffers_bus(&self) -> u64 {
        self.bus_at(Self::TX_BUFFERS_OFFSET)
    }

    /// Calculate buffer CPU address by index.
    ///
    /// `offset` selects the buffer area (e.g. [`Self::RX_BUFFERS_OFFSET`]),
    /// `buffer_size` is the per-buffer stride. Debug builds assert the whole
    /// buffer lies inside the region.
    pub fn buffer_cpu(&self, offset: usize, index: usize, buffer_size: usize) -> *mut u8 {
        let pos = offset + index * buffer_size;
        debug_assert!(
            pos + buffer_size <= self.size,
            "buffer {} at {:#x} overruns region of {:#x} bytes",
            index,
            pos,
            self.size
        );
        self.cpu_at(pos)
    }

    /// Calculate buffer bus address by index (same parameters as
    /// [`Self::buffer_cpu`]).
    pub fn buffer_bus(&self, offset: usize, index: usize, buffer_size: usize) -> u64 {
        let pos = offset + index * buffer_size;
        debug_assert!(
            pos + buffer_size <= self.size,
            "buffer {} at {:#x} overruns region of {:#x} bytes",
            index,
            pos,
            self.size
        );
        self.bus_at(pos)
    }
}

// SAFETY: `DmaRegion` is a plain descriptor (pointer + bus address + length);
// the raw pointer carries no thread affinity. Callers remain responsible for
// synchronizing actual accesses to the underlying memory.
unsafe impl Send for DmaRegion {}
// SAFETY: all methods take `&self` and only read the descriptor fields.
unsafe impl Sync for DmaRegion {}