stm32/eth_v2: update to new embassy-net trait, remove PeripheralMutex.

Dario Nieuwenhuis 2022-12-12 02:04:33 +01:00
parent 8f30652109
commit 3005ee0178
4 changed files with 289 additions and 487 deletions
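In short: `Ethernet::new` is no longer `unsafe` and no longer takes a `State`; the caller now passes a statically allocated `PacketQueue<TX, RX>` that owns the DMA descriptors and packet buffers, and the ring sizes move from the `Ethernet` type parameters onto that queue. A rough sketch of the new construction, assembled from the updated examples further down (pin list elided; `singleton!` is the helper macro from those examples):

    // Sketch only: argument names and sizes are taken from the stm32h7 examples in this commit.
    let device = Ethernet::new(
        singleton!(PacketQueue::<16, 16>::new()), // static storage for 16 TX + 16 RX descriptors/buffers
        p.ETH,
        eth_int,
        p.PA1,
        // ... remaining RMII pins, unchanged from the previous API ...
        GenericSMI,
        mac_addr,
        0, // PHY address
    );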

View File

@@ -1,19 +1,10 @@
 use core::sync::atomic::{fence, Ordering};
-use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
 use vcell::VolatileCell;
 
+use crate::eth::{Packet, RX_BUFFER_SIZE, TX_BUFFER_SIZE};
 use crate::pac::ETH;
 
-#[non_exhaustive]
-#[derive(Debug, Copy, Clone)]
-#[cfg_attr(feature = "defmt", derive(defmt::Format))]
-pub enum Error {
-    NoBufferAvailable,
-    // TODO: Break down this error into several others
-    TransmissionError,
-}
-
 /// Transmit and Receive Descriptor fields
 #[allow(dead_code)]
 mod emac_consts {
@@ -41,7 +32,7 @@ use emac_consts::*;
 /// * tdes2: buffer lengths
 /// * tdes3: control and payload/frame length
 #[repr(C)]
-struct TDes {
+pub(crate) struct TDes {
     tdes0: VolatileCell<u32>,
     tdes1: VolatileCell<u32>,
     tdes2: VolatileCell<u32>,
@@ -59,41 +50,26 @@ impl TDes {
     }
 
     /// Return true if this TDes is not currently owned by the DMA
-    pub fn available(&self) -> bool {
+    fn available(&self) -> bool {
         self.tdes3.get() & EMAC_DES3_OWN == 0
     }
 }
 
-pub(crate) struct TDesRing<const N: usize> {
-    td: [TDes; N],
-    buffers: [Option<PacketBuf>; N],
-    tdidx: usize,
+pub(crate) struct TDesRing<'a> {
+    descriptors: &'a mut [TDes],
+    buffers: &'a mut [Packet<TX_BUFFER_SIZE>],
+    index: usize,
 }
 
-impl<const N: usize> TDesRing<N> {
-    pub const fn new() -> Self {
-        const TDES: TDes = TDes::new();
-        const BUFFERS: Option<PacketBuf> = None;
-
-        Self {
-            td: [TDES; N],
-            buffers: [BUFFERS; N],
-            tdidx: 0,
-        }
-    }
-
-    /// Initialise this TDesRing. Assume TDesRing is corrupt
-    ///
-    /// The current memory address of the buffers inside this TDesRing
-    /// will be stored in the descriptors, so ensure the TDesRing is
-    /// not moved after initialisation.
-    pub(crate) fn init(&mut self) {
-        assert!(N > 0);
+impl<'a> TDesRing<'a> {
+    /// Initialise this TDesRing. Assume TDesRing is corrupt.
+    pub fn new(descriptors: &'a mut [TDes], buffers: &'a mut [Packet<TX_BUFFER_SIZE>]) -> Self {
+        assert!(descriptors.len() > 0);
+        assert!(descriptors.len() == buffers.len());
 
-        for td in self.td.iter_mut() {
+        for td in descriptors.iter_mut() {
             *td = TDes::new();
         }
-        self.tdidx = 0;
 
         // Initialize the pointers in the DMA engine. (There will be a memory barrier later
         // before the DMA engine is enabled.)
@@ -101,80 +77,60 @@ impl<const N: usize> TDesRing<N> {
         unsafe {
             let dma = ETH.ethernet_dma();
 
-            dma.dmactx_dlar().write(|w| w.0 = &self.td as *const _ as u32);
-            dma.dmactx_rlr().write(|w| w.set_tdrl((N as u16) - 1));
-            dma.dmactx_dtpr().write(|w| w.0 = &self.td[0] as *const _ as u32);
+            dma.dmactx_dlar().write(|w| w.0 = descriptors.as_mut_ptr() as u32);
+            dma.dmactx_rlr().write(|w| w.set_tdrl((descriptors.len() as u16) - 1));
+            dma.dmactx_dtpr().write(|w| w.0 = 0);
         }
+
+        Self {
+            descriptors,
+            buffers,
+            index: 0,
+        }
     }
 
-    /// Return true if a TDes is available for use
-    pub(crate) fn available(&self) -> bool {
-        self.td[self.tdidx].available()
+    pub(crate) fn len(&self) -> usize {
+        self.descriptors.len()
     }
 
-    pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
-        if !self.available() {
-            return Err(Error::NoBufferAvailable);
-        }
-        let x = self.tdidx;
-        let td = &mut self.td[x];
-
-        let pkt_len = pkt.len();
-        assert!(pkt_len as u32 <= EMAC_TDES2_B1L);
-        let address = pkt.as_ptr() as u32;
+    /// Return the next available packet buffer for transmitting, or None
+    pub(crate) fn available(&mut self) -> Option<&mut [u8]> {
+        let d = &mut self.descriptors[self.index];
+        if d.available() {
+            Some(&mut self.buffers[self.index].0)
+        } else {
+            None
+        }
+    }
+
+    /// Transmit the packet written in a buffer returned by `available`.
+    pub(crate) fn transmit(&mut self, len: usize) {
+        let td = &mut self.descriptors[self.index];
+        assert!(td.available());
+        assert!(len as u32 <= EMAC_TDES2_B1L);
 
         // Read format
-        td.tdes0.set(address);
-        td.tdes2.set(pkt_len as u32 & EMAC_TDES2_B1L | EMAC_TDES2_IOC);
+        td.tdes0.set(self.buffers[self.index].0.as_ptr() as u32);
+        td.tdes2.set(len as u32 & EMAC_TDES2_B1L | EMAC_TDES2_IOC);
 
         // FD: Contains first buffer of packet
         // LD: Contains last buffer of packet
         // Give the DMA engine ownership
         td.tdes3.set(EMAC_DES3_FD | EMAC_DES3_LD | EMAC_DES3_OWN);
 
-        self.buffers[x].replace(pkt);
-
         // Ensure changes to the descriptor are committed before DMA engine sees tail pointer store.
         // This will generate an DMB instruction.
         // "Preceding reads and writes cannot be moved past subsequent writes."
         fence(Ordering::Release);
 
-        // Move the tail pointer (TPR) to the next descriptor
-        let x = (x + 1) % N;
+        self.index = self.index + 1;
+        if self.index == self.descriptors.len() {
+            self.index = 0;
+        }
+
+        // signal DMA it can try again.
         // NOTE(unsafe) Atomic write
-        unsafe {
-            ETH.ethernet_dma()
-                .dmactx_dtpr()
-                .write(|w| w.0 = &self.td[x] as *const _ as u32);
-        }
-        self.tdidx = x;
-        Ok(())
-    }
-
-    pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
-        let previous = (self.tdidx + N - 1) % N;
-        let td = &self.td[previous];
-
-        // DMB to ensure that we are reading an updated value, probably not needed at the hardware
-        // level, but this is also a hint to the compiler that we're syncing on the buffer.
-        fence(Ordering::SeqCst);
-
-        let tdes3 = td.tdes3.get();
-
-        if tdes3 & EMAC_DES3_OWN != 0 {
-            // Transmission isn't done yet, probably a receive interrupt that fired this
-            return Ok(());
-        }
-        assert!(tdes3 & EMAC_DES3_CTXT == 0);
-
-        // Release the buffer
-        self.buffers[previous].take();
-
-        if tdes3 & EMAC_DES3_ES != 0 {
-            Err(Error::TransmissionError)
-        } else {
-            Ok(())
-        }
+        unsafe { ETH.ethernet_dma().dmactx_dtpr().write(|w| w.0 = 0) }
     }
 }
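The TX path is now two steps: `available()` hands out the next free ring buffer, the caller writes the frame into it, and `transmit(len)` gives the descriptor to the DMA and advances the ring. A minimal sketch of a caller inside the crate driving the new `TDesRing` API (the `try_send` helper and its `frame` argument are illustrative, not part of this commit; `frame` is assumed to fit within `TX_BUFFER_SIZE`):

    fn try_send(ring: &mut TDesRing<'_>, frame: &[u8]) -> bool {
        if let Some(buf) = ring.available() {
            // A descriptor is free: copy the frame into the ring's own buffer...
            buf[..frame.len()].copy_from_slice(frame);
            // ...then hand the descriptor to the DMA engine and advance the ring index.
            ring.transmit(frame.len());
            true
        } else {
            // Ring is full: the caller waits for the TX interrupt and retries.
            false
        }
    }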
@@ -185,7 +141,7 @@ impl<const N: usize> TDesRing<N> {
 /// * rdes2:
 /// * rdes3: OWN and Status
 #[repr(C)]
-struct RDes {
+pub(crate) struct RDes {
     rdes0: VolatileCell<u32>,
     rdes1: VolatileCell<u32>,
     rdes2: VolatileCell<u32>,
@@ -204,7 +160,7 @@ impl RDes {
     /// Return true if this RDes is acceptable to us
     #[inline(always)]
-    pub fn valid(&self) -> bool {
+    fn valid(&self) -> bool {
         // Write-back descriptor is valid if:
         //
         // Contains first buffer of packet AND contains last buf of
@@ -215,177 +171,96 @@ impl RDes {
     /// Return true if this RDes is not currently owned by the DMA
     #[inline(always)]
-    pub fn available(&self) -> bool {
+    fn available(&self) -> bool {
         self.rdes3.get() & EMAC_DES3_OWN == 0 // Owned by us
     }
 
     #[inline(always)]
-    pub fn set_ready(&mut self, buf_addr: u32) {
-        self.rdes0.set(buf_addr);
+    fn set_ready(&mut self, buf: *mut u8) {
+        self.rdes0.set(buf as u32);
         self.rdes3.set(EMAC_RDES3_BUF1V | EMAC_RDES3_IOC | EMAC_DES3_OWN);
     }
 }
 
 /// Rx ring of descriptors and packets
-///
-/// This ring has three major locations that work in lock-step. The DMA will never write to the tail
-/// index, so the `read_index` must never pass the tail index. The `next_tail_index` is always 1
-/// slot ahead of the real tail index, and it must never pass the `read_index` or it could overwrite
-/// a packet still to be passed to the application.
-///
-/// nt can't pass r (no alloc)
-/// +---+---+---+---+ Read ok +---+---+---+---+ No Read +---+---+---+---+
-/// | | | | | ------------> | | | | | ------------> | | | | |
-/// +---+---+---+---+ Allocation ok +---+---+---+---+ +---+---+---+---+
-/// ^ ^t ^t ^ ^t ^
-/// |r |r |r
-/// |nt |nt |nt
-///
-///
-/// +---+---+---+---+ Read ok +---+---+---+---+ Can't read +---+---+---+---+
-/// | | | | | ------------> | | | | | ------------> | | | | |
-/// +---+---+---+---+ Allocation fail +---+---+---+---+ Allocation ok +---+---+---+---+
-/// ^ ^t ^ ^t ^ ^ ^ ^t
-/// |r | |r | | |r
-/// |nt |nt |nt
-///
-pub(crate) struct RDesRing<const N: usize> {
-    rd: [RDes; N],
-    buffers: [Option<PacketBox>; N],
-    read_idx: usize,
-    next_tail_idx: usize,
+pub(crate) struct RDesRing<'a> {
+    descriptors: &'a mut [RDes],
+    buffers: &'a mut [Packet<RX_BUFFER_SIZE>],
+    index: usize,
 }
 
-impl<const N: usize> RDesRing<N> {
-    pub const fn new() -> Self {
-        const RDES: RDes = RDes::new();
-        const BUFFERS: Option<PacketBox> = None;
-
-        Self {
-            rd: [RDES; N],
-            buffers: [BUFFERS; N],
-            read_idx: 0,
-            next_tail_idx: 0,
-        }
-    }
-
-    pub(crate) fn init(&mut self) {
-        assert!(N > 1);
+impl<'a> RDesRing<'a> {
+    pub(crate) fn new(descriptors: &'a mut [RDes], buffers: &'a mut [Packet<RX_BUFFER_SIZE>]) -> Self {
+        assert!(descriptors.len() > 1);
+        assert!(descriptors.len() == buffers.len());
 
-        for desc in self.rd.iter_mut() {
+        for (i, desc) in descriptors.iter_mut().enumerate() {
             *desc = RDes::new();
+            desc.set_ready(buffers[i].0.as_mut_ptr());
         }
 
-        let mut last_index = 0;
-        for (index, buf) in self.buffers.iter_mut().enumerate() {
-            let pkt = match PacketBox::new(Packet::new()) {
-                Some(p) => p,
-                None => {
-                    if index == 0 {
-                        panic!("Could not allocate at least one buffer for Ethernet receiving");
-                    } else {
-                        break;
-                    }
-                }
-            };
-            let addr = pkt.as_ptr() as u32;
-            *buf = Some(pkt);
-            self.rd[index].set_ready(addr);
-            last_index = index;
-        }
-        self.next_tail_idx = (last_index + 1) % N;
-
         unsafe {
             let dma = ETH.ethernet_dma();
 
-            dma.dmacrx_dlar().write(|w| w.0 = self.rd.as_ptr() as u32);
-            dma.dmacrx_rlr().write(|w| w.set_rdrl((N as u16) - 1));
+            dma.dmacrx_dlar().write(|w| w.0 = descriptors.as_mut_ptr() as u32);
+            dma.dmacrx_rlr().write(|w| w.set_rdrl((descriptors.len() as u16) - 1));
+            dma.dmacrx_dtpr().write(|w| w.0 = 0);
+        }
 
-            // We manage to allocate all buffers, set the index to the last one, that means
-            // that the DMA won't consider the last one as ready, because it (unfortunately)
-            // stops at the tail ptr and wraps at the end of the ring, which means that we
-            // can't tell it to stop after the last buffer.
-            let tail_ptr = &self.rd[last_index] as *const _ as u32;
-            fence(Ordering::Release);
-
-            dma.dmacrx_dtpr().write(|w| w.0 = tail_ptr);
+        Self {
+            descriptors,
+            buffers,
+            index: 0,
         }
     }
 
-    pub(crate) fn on_interrupt(&mut self) {
-        // XXX: Do we need to do anything here ? Maybe we should try to advance the tail ptr, but it
-        // would soon hit the read ptr anyway, and we will wake smoltcp's stack on the interrupt
-        // which should try to pop a packet...
-    }
-
-    pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
+    /// Get a received packet if any, or None.
+    pub(crate) fn available(&mut self) -> Option<&mut [u8]> {
         // Not sure if the contents of the write buffer on the M7 can affects reads, so we are using
         // a DMB here just in case, it also serves as a hint to the compiler that we're syncing the
         // buffer (I think .-.)
         fence(Ordering::SeqCst);
 
-        let read_available = self.rd[self.read_idx].available();
-        let tail_index = (self.next_tail_idx + N - 1) % N;
-
-        let pkt = if read_available && self.read_idx != tail_index {
-            let pkt = self.buffers[self.read_idx].take();
-            let len = (self.rd[self.read_idx].rdes3.get() & EMAC_RDES3_PKTLEN) as usize;
-
-            assert!(pkt.is_some());
-            let valid = self.rd[self.read_idx].valid();
-
-            self.read_idx = (self.read_idx + 1) % N;
-            if valid {
-                pkt.map(|p| p.slice(0..len))
-            } else {
-                None
+        // We might have to process many packets, in case some have been rx'd but are invalid.
+        loop {
+            let descriptor = &mut self.descriptors[self.index];
+            if !descriptor.available() {
+                return None;
             }
-        } else {
-            None
-        };
 
-        // Try to advance the tail_idx
-        if self.next_tail_idx != self.read_idx {
-            match PacketBox::new(Packet::new()) {
-                Some(b) => {
-                    let addr = b.as_ptr() as u32;
-                    self.buffers[self.next_tail_idx].replace(b);
-                    self.rd[self.next_tail_idx].set_ready(addr);
+            // If packet is invalid, pop it and try again.
+            if !descriptor.valid() {
+                warn!("invalid packet: {:08x}", descriptor.rdes0.get());
+                self.pop_packet();
+                continue;
+            }
 
-                    // "Preceding reads and writes cannot be moved past subsequent writes."
-                    fence(Ordering::Release);
+            break;
+        }
 
-                    // NOTE(unsafe) atomic write
-                    unsafe {
-                        ETH.ethernet_dma()
-                            .dmacrx_dtpr()
-                            .write(|w| w.0 = &self.rd[self.next_tail_idx] as *const _ as u32);
-                    }
-                    self.next_tail_idx = (self.next_tail_idx + 1) % N;
-                }
-                None => {}
-            }
-        }
-        pkt
-    }
-}
+        let descriptor = &mut self.descriptors[self.index];
+        let len = (descriptor.rdes3.get() & EMAC_RDES3_PKTLEN) as usize;
+        return Some(&mut self.buffers[self.index].0[..len]);
+    }
 
-pub struct DescriptorRing<const T: usize, const R: usize> {
-    pub(crate) tx: TDesRing<T>,
-    pub(crate) rx: RDesRing<R>,
-}
+    /// Pop the packet previously returned by `available`.
+    pub(crate) fn pop_packet(&mut self) {
+        let descriptor = &mut self.descriptors[self.index];
+        assert!(descriptor.available());
 
-impl<const T: usize, const R: usize> DescriptorRing<T, R> {
-    pub const fn new() -> Self {
-        Self {
-            tx: TDesRing::new(),
-            rx: RDesRing::new(),
-        }
-    }
+        self.descriptors[self.index].set_ready(self.buffers[self.index].0.as_mut_ptr());
 
-    pub fn init(&mut self) {
-        self.tx.init();
-        self.rx.init();
-    }
-}
+        // "Preceding reads and writes cannot be moved past subsequent writes."
+        fence(Ordering::Release);
+
+        // signal DMA it can try again.
+        // NOTE(unsafe) Atomic write
+        unsafe { ETH.ethernet_dma().dmacrx_dtpr().write(|w| w.0 = 0) }
+
+        // Increment index.
+        self.index += 1;
+        if self.index == self.descriptors.len() {
+            self.index = 0
+        }
    }
 }
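On the receive side there is no allocation any more: `available()` returns a slice into the ring's own buffer for the oldest completed descriptor (skipping and recycling invalid ones), and `pop_packet()` re-arms that descriptor for the DMA and moves on. A minimal polling sketch against the new `RDesRing` API (the `poll_rx` helper and its `handle_frame` callback are illustrative only, not part of this commit):

    fn poll_rx(ring: &mut RDesRing<'_>, mut handle_frame: impl FnMut(&[u8])) {
        // Drain every frame the DMA has already written back to memory.
        while let Some(pkt) = ring.available() {
            handle_frame(pkt);
            // Give the descriptor (and its buffer) back to the DMA for reuse.
            ring.pop_packet();
        }
    }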

View File

@@ -1,35 +1,28 @@
-use core::marker::PhantomData;
+mod descriptors;
+
 use core::sync::atomic::{fence, Ordering};
-use core::task::Waker;
 
-use embassy_cortex_m::peripheral::{PeripheralMutex, PeripheralState, StateStorage};
+use embassy_cortex_m::interrupt::InterruptExt;
 use embassy_hal_common::{into_ref, PeripheralRef};
-use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};
-use embassy_sync::waitqueue::AtomicWaker;
 
+pub(crate) use self::descriptors::{RDes, RDesRing, TDes, TDesRing};
+use super::*;
 use crate::gpio::sealed::{AFType, Pin as _};
 use crate::gpio::{AnyPin, Speed};
 use crate::pac::{ETH, RCC, SYSCFG};
 use crate::Peripheral;
 
-mod descriptors;
-use descriptors::DescriptorRing;
-
-use super::*;
-
-pub struct State<'d, T: Instance, const TX: usize, const RX: usize>(StateStorage<Inner<'d, T, TX, RX>>);
-impl<'d, T: Instance, const TX: usize, const RX: usize> State<'d, T, TX, RX> {
-    pub const fn new() -> Self {
-        Self(StateStorage::new())
-    }
-}
-pub struct Ethernet<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> {
-    state: PeripheralMutex<'d, Inner<'d, T, TX, RX>>,
+const MTU: usize = 1514; // 14 Ethernet header + 1500 IP packet
+
+pub struct Ethernet<'d, T: Instance, P: PHY> {
+    _peri: PeripheralRef<'d, T>,
+    pub(crate) tx: TDesRing<'d>,
+    pub(crate) rx: RDesRing<'d>,
     pins: [PeripheralRef<'d, AnyPin>; 9],
     _phy: P,
     clock_range: u8,
     phy_addr: u8,
-    mac_addr: [u8; 6],
+    pub(crate) mac_addr: [u8; 6],
 }
 
 macro_rules! config_pins {
@@ -44,10 +37,9 @@ macro_rules! config_pins {
     };
 }
 
-impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
-    /// safety: the returned instance is not leak-safe
-    pub unsafe fn new(
-        state: &'d mut State<'d, T, TX, RX>,
+impl<'d, T: Instance, P: PHY> Ethernet<'d, T, P> {
+    pub fn new<const TX: usize, const RX: usize>(
+        queue: &'d mut PacketQueue<TX, RX>,
         peri: impl Peripheral<P = T> + 'd,
         interrupt: impl Peripheral<P = crate::interrupt::ETH> + 'd,
         ref_clk: impl Peripheral<P = impl RefClkPin<T>> + 'd,
@@ -63,8 +55,9 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
         mac_addr: [u8; 6],
         phy_addr: u8,
     ) -> Self {
-        into_ref!(interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);
+        into_ref!(peri, interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);
+
+        unsafe {
         // Enable the necessary Clocks
         // NOTE(unsafe) We have exclusive access to the registers
         critical_section::with(|_| {
@@ -81,9 +74,6 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
         config_pins!(ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);
 
-        // NOTE(unsafe) We are ourselves not leak-safe.
-        let state = PeripheralMutex::new(interrupt, &mut state.0, || Inner::new(peri));
-
         // NOTE(unsafe) We have exclusive access to the registers
         let dma = ETH.ethernet_dma();
         let mac = ETH.ethernet_mac();
@@ -173,7 +163,9 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
         ];
 
         let mut this = Self {
-            state,
+            _peri: peri,
+            tx: TDesRing::new(&mut queue.tx_desc, &mut queue.tx_buf),
+            rx: RDesRing::new(&mut queue.rx_desc, &mut queue.rx_buf),
             pins,
             _phy: phy,
             clock_range,
@@ -181,9 +173,6 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
             mac_addr,
         };
 
-        this.state.with(|s| {
-            s.desc_ring.init();
-
         fence(Ordering::SeqCst);
 
         let mac = ETH.ethernet_mac();
@@ -205,17 +194,37 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, T, P, TX, RX> {
             w.set_rie(true);
             w.set_tie(true);
         });
-        });
 
         P::phy_reset(&mut this);
         P::phy_init(&mut this);
 
+        interrupt.set_handler(Self::on_interrupt);
+        interrupt.enable();
+
         this
     }
+
+    fn on_interrupt(_cx: *mut ()) {
+        WAKER.wake();
+
+        // TODO: Check and clear more flags
+        unsafe {
+            let dma = ETH.ethernet_dma();
+            dma.dmacsr().modify(|w| {
+                w.set_ti(true);
+                w.set_ri(true);
+                w.set_nis(true);
+            });
+            // Delay two peripheral's clock
+            dma.dmacsr().read();
+            dma.dmacsr().read();
+        }
+    }
 }
 
-unsafe impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> StationManagement
-    for Ethernet<'d, T, P, TX, RX>
-{
+unsafe impl<'d, T: Instance, P: PHY> StationManagement for Ethernet<'d, T, P> {
     fn smi_read(&mut self, reg: u8) -> u16 {
         // NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
         unsafe {
@@ -251,44 +260,7 @@ unsafe impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> StationManagement
     }
 }
 
-impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Device for Ethernet<'d, T, P, TX, RX> {
-    fn is_transmit_ready(&mut self) -> bool {
-        self.state.with(|s| s.desc_ring.tx.available())
-    }
-
-    fn transmit(&mut self, pkt: PacketBuf) {
-        self.state.with(|s| unwrap!(s.desc_ring.tx.transmit(pkt)));
-    }
-
-    fn receive(&mut self) -> Option<PacketBuf> {
-        self.state.with(|s| s.desc_ring.rx.pop_packet())
-    }
-
-    fn register_waker(&mut self, waker: &Waker) {
-        WAKER.register(waker);
-    }
-
-    fn capabilities(&self) -> DeviceCapabilities {
-        let mut caps = DeviceCapabilities::default();
-        caps.max_transmission_unit = MTU;
-        caps.max_burst_size = Some(TX.min(RX));
-        caps
-    }
-
-    fn link_state(&mut self) -> LinkState {
-        if P::poll_link(self) {
-            LinkState::Up
-        } else {
-            LinkState::Down
-        }
-    }
-
-    fn ethernet_address(&self) -> [u8; 6] {
-        self.mac_addr
-    }
-}
-
-impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Drop for Ethernet<'d, T, P, TX, RX> {
+impl<'d, T: Instance, P: PHY> Drop for Ethernet<'d, T, P> {
     fn drop(&mut self) {
         // NOTE(unsafe) We have `&mut self` and the interrupt doesn't use this registers
         unsafe {
@@ -325,46 +297,3 @@ impl<'d, T: Instance, P: PHY, const TX: usize, const RX: usize> Drop for Ethernet<'d, T, P, TX, RX> {
         })
     }
 }
-
-//----------------------------------------------------------------------
-
-struct Inner<'d, T: Instance, const TX: usize, const RX: usize> {
-    _peri: PhantomData<&'d mut T>,
-    desc_ring: DescriptorRing<TX, RX>,
-}
-
-impl<'d, T: Instance, const TX: usize, const RX: usize> Inner<'d, T, TX, RX> {
-    pub fn new(_peri: impl Peripheral<P = T> + 'd) -> Self {
-        Self {
-            _peri: PhantomData,
-            desc_ring: DescriptorRing::new(),
-        }
-    }
-}
-
-impl<'d, T: Instance, const TX: usize, const RX: usize> PeripheralState for Inner<'d, T, TX, RX> {
-    type Interrupt = crate::interrupt::ETH;
-
-    fn on_interrupt(&mut self) {
-        unwrap!(self.desc_ring.tx.on_interrupt());
-        self.desc_ring.rx.on_interrupt();
-        WAKER.wake();
-
-        // TODO: Check and clear more flags
-        unsafe {
-            let dma = ETH.ethernet_dma();
-            dma.dmacsr().modify(|w| {
-                w.set_ti(true);
-                w.set_ri(true);
-                w.set_nis(true);
-            });
-            // Delay two peripheral's clock
-            dma.dmacsr().read();
-            dma.dmacsr().read();
-        }
-    }
-}
-
-static WAKER: AtomicWaker = AtomicWaker::new();

View File

@@ -7,7 +7,7 @@ use embassy_executor::Spawner;
 use embassy_net::tcp::TcpSocket;
 use embassy_net::{Ipv4Address, Stack, StackResources};
 use embassy_stm32::eth::generic_smi::GenericSMI;
-use embassy_stm32::eth::{Ethernet, State};
+use embassy_stm32::eth::{Ethernet, PacketQueue};
 use embassy_stm32::peripherals::ETH;
 use embassy_stm32::rng::Rng;
 use embassy_stm32::time::mhz;
@@ -22,11 +22,12 @@ macro_rules! singleton {
     ($val:expr) => {{
         type T = impl Sized;
         static STATIC_CELL: StaticCell<T> = StaticCell::new();
-        STATIC_CELL.init_with(move || $val)
+        let (x,) = STATIC_CELL.init(($val,));
+        x
     }};
 }
 
-type Device = Ethernet<'static, ETH, GenericSMI, 4, 4>;
+type Device = Ethernet<'static, ETH, GenericSMI>;
 
 #[embassy_executor::task]
 async fn net_task(stack: &'static Stack<Device>) -> ! {
@@ -51,9 +52,8 @@ async fn main(spawner: Spawner) -> ! {
     let eth_int = interrupt::take!(ETH);
     let mac_addr = [0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF];
 
-    let device = unsafe {
-        Ethernet::new(
-            singleton!(State::new()),
+    let device = Ethernet::new(
+        singleton!(PacketQueue::<16, 16>::new()),
         p.ETH,
         eth_int,
         p.PA1,
@@ -68,8 +68,7 @@ async fn main(spawner: Spawner) -> ! {
         GenericSMI,
         mac_addr,
         0,
-        )
-    };
+    );
 
     let config = embassy_net::ConfigStrategy::Dhcp;
     //let config = embassy_net::ConfigStrategy::Static(embassy_net::Config {

View File

@@ -7,7 +7,7 @@ use embassy_executor::Spawner;
 use embassy_net::tcp::client::{TcpClient, TcpClientState};
 use embassy_net::{Stack, StackResources};
 use embassy_stm32::eth::generic_smi::GenericSMI;
-use embassy_stm32::eth::{Ethernet, State};
+use embassy_stm32::eth::{Ethernet, PacketQueue};
 use embassy_stm32::peripherals::ETH;
 use embassy_stm32::rng::Rng;
 use embassy_stm32::time::mhz;
@@ -23,11 +23,12 @@ macro_rules! singleton {
     ($val:expr) => {{
         type T = impl Sized;
         static STATIC_CELL: StaticCell<T> = StaticCell::new();
-        STATIC_CELL.init_with(move || $val)
+        let (x,) = STATIC_CELL.init(($val,));
+        x
     }};
 }
 
-type Device = Ethernet<'static, ETH, GenericSMI, 4, 4>;
+type Device = Ethernet<'static, ETH, GenericSMI>;
 
 #[embassy_executor::task]
 async fn net_task(stack: &'static Stack<Device>) -> ! {
@@ -52,9 +53,8 @@ async fn main(spawner: Spawner) -> ! {
     let eth_int = interrupt::take!(ETH);
     let mac_addr = [0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF];
 
-    let device = unsafe {
-        Ethernet::new(
-            singleton!(State::new()),
+    let device = Ethernet::new(
+        singleton!(PacketQueue::<16, 16>::new()),
         p.ETH,
         eth_int,
         p.PA1,
@@ -69,8 +69,7 @@ async fn main(spawner: Spawner) -> ! {
         GenericSMI,
         mac_addr,
         0,
-        )
-    };
+    );
 
     let config = embassy_net::ConfigStrategy::Dhcp;
     //let config = embassy_net::ConfigStrategy::Static(embassy_net::Config {