468: Add v1c ethernet driver for the STM32F7 family. r=Dirbaio a=matoushybl



Co-authored-by: Matous Hybl <hyblmatous@gmail.com>
bors[bot] 2021-11-10 22:07:38 +00:00 committed by GitHub
commit 96e2f0dfc5
8 changed files with 1199 additions and 3 deletions

@@ -1,7 +1,8 @@
#![macro_use]
#[cfg_attr(eth_v1, path = "v1.rs")]
#[cfg_attr(eth_v1c, path = "v1c/mod.rs")]
#[cfg_attr(eth_v2, path = "v2/mod.rs")]
#[cfg_attr(eth_v1, path = "v1.rs")]
mod _version;
pub mod lan8742a;

@@ -0,0 +1,21 @@
use crate::eth::_version::rx_desc::RDesRing;
use crate::eth::_version::tx_desc::TDesRing;
pub struct DescriptorRing<const T: usize, const R: usize> {
pub(crate) tx: TDesRing<T>,
pub(crate) rx: RDesRing<R>,
}
impl<const T: usize, const R: usize> DescriptorRing<T, R> {
pub const fn new() -> Self {
Self {
tx: TDesRing::new(),
rx: RDesRing::new(),
}
}
pub fn init(&mut self) {
self.tx.init();
self.rx.init();
}
}

@@ -0,0 +1,473 @@
// The v1c ethernet driver was ported to embassy from the awesome stm32-eth project (https://github.com/stm32-rs/stm32-eth).
use core::marker::PhantomData;
use core::sync::atomic::{fence, Ordering};
use core::task::Waker;
use embassy::util::Unborrow;
use embassy::waitqueue::AtomicWaker;
use embassy_hal_common::peripheral::{PeripheralMutex, PeripheralState, StateStorage};
use embassy_hal_common::unborrow;
use embassy_net::{Device, DeviceCapabilities, LinkState, PacketBuf, MTU};
use crate::gpio::sealed::Pin as __GpioPin;
use crate::gpio::Pin as GpioPin;
use crate::gpio::{sealed::AFType::OutputPushPull, AnyPin};
use crate::pac::gpio::vals::Ospeedr;
use crate::pac::{ETH, RCC, SYSCFG};
use crate::peripherals;
mod descriptors;
mod rx_desc;
mod tx_desc;
use super::{StationManagement, PHY};
use descriptors::DescriptorRing;
use stm32_metapac::eth::vals::{
Apcs, Cr, Dm, DmaomrSr, Fes, Ftf, Ifg, MbProgress, Mw, Pbl, Rsf, St, Tsf,
};
pub struct State<'d, const TX: usize, const RX: usize>(StateStorage<Inner<'d, TX, RX>>);
impl<'d, const TX: usize, const RX: usize> State<'d, TX, RX> {
pub const fn new() -> Self {
Self(StateStorage::new())
}
}
pub struct Ethernet<'d, P: PHY, const TX: usize, const RX: usize> {
state: PeripheralMutex<'d, Inner<'d, TX, RX>>,
pins: [AnyPin; 9],
_phy: P,
clock_range: Cr,
phy_addr: u8,
mac_addr: [u8; 6],
}
impl<'d, P: PHY, const TX: usize, const RX: usize> Ethernet<'d, P, TX, RX> {
/// Safety: the returned instance is not leak-safe
pub unsafe fn new(
state: &'d mut State<'d, TX, RX>,
peri: impl Unborrow<Target = peripherals::ETH> + 'd,
interrupt: impl Unborrow<Target = crate::interrupt::ETH> + 'd,
ref_clk: impl Unborrow<Target = impl RefClkPin> + 'd,
mdio: impl Unborrow<Target = impl MDIOPin> + 'd,
mdc: impl Unborrow<Target = impl MDCPin> + 'd,
crs: impl Unborrow<Target = impl CRSPin> + 'd,
rx_d0: impl Unborrow<Target = impl RXD0Pin> + 'd,
rx_d1: impl Unborrow<Target = impl RXD1Pin> + 'd,
tx_d0: impl Unborrow<Target = impl TXD0Pin> + 'd,
tx_d1: impl Unborrow<Target = impl TXD1Pin> + 'd,
tx_en: impl Unborrow<Target = impl TXEnPin> + 'd,
phy: P,
mac_addr: [u8; 6],
phy_addr: u8,
) -> Self {
unborrow!(interrupt, ref_clk, mdio, mdc, crs, rx_d0, rx_d1, tx_d0, tx_d1, tx_en);
// Enable the necessary Clocks
// NOTE(unsafe) We have exclusive access to the registers
critical_section::with(|_| {
RCC.apb2enr().modify(|w| w.set_syscfgen(true));
RCC.ahb1enr().modify(|w| {
w.set_ethen(true);
w.set_ethtxen(true);
w.set_ethrxen(true);
});
// RMII (Reduced Media Independent Interface)
SYSCFG.pmc().modify(|w| w.set_mii_rmii_sel(true));
});
ref_clk.configure();
mdio.configure();
mdc.configure();
crs.configure();
rx_d0.configure();
rx_d1.configure();
tx_d0.configure();
tx_d1.configure();
tx_en.configure();
// NOTE(unsafe) We are ourselves not leak-safe.
let state = PeripheralMutex::new_unchecked(interrupt, &mut state.0, || Inner::new(peri));
// NOTE(unsafe) We have exclusive access to the registers
let dma = ETH.ethernet_dma();
let mac = ETH.ethernet_mac();
// Reset and wait
dma.dmabmr().modify(|w| w.set_sr(true));
while dma.dmabmr().read().sr() {}
mac.maccr().modify(|w| {
w.set_ifg(Ifg::IFG96); // inter frame gap 96 bit times
w.set_apcs(Apcs::STRIP); // automatic padding and crc stripping
w.set_fes(Fes::FES100); // fast ethernet speed
w.set_dm(Dm::FULLDUPLEX); // full duplex
// TODO: Carrier sense ? ECRSFD
});
// Note: Writing to LR triggers synchronisation of both LR and HR into the MAC core,
// so the LR write must happen after the HR write.
mac.maca0hr()
.modify(|w| w.set_maca0h(u16::from(mac_addr[4]) | (u16::from(mac_addr[5]) << 8)));
mac.maca0lr().write(|w| {
w.set_maca0l(
u32::from(mac_addr[0])
| (u32::from(mac_addr[1]) << 8)
| (u32::from(mac_addr[2]) << 16)
| (u32::from(mac_addr[3]) << 24),
)
});
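// Worked example (using the MAC address from the stm32f7 example below,
// [0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF]): the two writes above pack the bytes
// little-endian, so MACA0HR's MACA0H field ends up as 0xEFBE and MACA0LR as
// 0xADDE_0000.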
// pause time
mac.macfcr().modify(|w| w.set_pt(0x100));
// Transfer and Forward, Receive and Forward
dma.dmaomr().modify(|w| {
w.set_tsf(Tsf::STOREFORWARD);
w.set_rsf(Rsf::STOREFORWARD);
});
dma.dmabmr().modify(|w| {
w.set_pbl(Pbl::PBL32) // programmable burst length - 32 ?
});
// TODO MTU size setting not found for v1 ethernet, check if correct
// NOTE(unsafe) We got the peripheral singleton, which means that `rcc::init` was called
let hclk = crate::rcc::get_freqs().ahb1;
let hclk_mhz = hclk.0 / 1_000_000;
// Set the MDC clock frequency in the range 1MHz - 2.5MHz
let clock_range = match hclk_mhz {
0..=24 => panic!("Invalid HCLK frequency - should be at least 25 MHz."),
25..=34 => Cr::CR_20_35, // Divide by 16
35..=59 => Cr::CR_35_60, // Divide by 26
60..=99 => Cr::CR_60_100, // Divide by 42
100..=149 => Cr::CR_100_150, // Divide by 62
150..=216 => Cr::CR_150_168, // Divide by 102
_ => {
panic!("HCLK results in MDC clock > 2.5MHz even for the highest CSR clock divider")
}
};
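// Worked example: the stm32f7 example below sets sys_ck to 200 MHz and, assuming the
// default AHB prescaler of 1, HCLK is also 200 MHz. hclk_mhz = 200 falls into 150..=216,
// so the divider is 102 and MDC runs at roughly 200 MHz / 102 ≈ 1.96 MHz, inside the
// required 1 MHz - 2.5 MHz window.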
let pins = [
ref_clk.degrade(),
mdio.degrade(),
mdc.degrade(),
crs.degrade(),
rx_d0.degrade(),
rx_d1.degrade(),
tx_d0.degrade(),
tx_d1.degrade(),
tx_en.degrade(),
];
let mut this = Self {
state,
pins,
_phy: phy,
clock_range,
phy_addr,
mac_addr,
};
this.state.with(|s| {
s.desc_ring.init();
fence(Ordering::SeqCst);
let mac = ETH.ethernet_mac();
let dma = ETH.ethernet_dma();
mac.maccr().modify(|w| {
w.set_re(true);
w.set_te(true);
});
dma.dmaomr().modify(|w| {
w.set_ftf(Ftf::FLUSH); // flush transmit fifo (queue)
w.set_st(St::STARTED); // start transmitting channel
w.set_sr(DmaomrSr::STARTED); // start receiving channel
});
// Enable interrupts
dma.dmaier().modify(|w| {
w.set_nise(true);
w.set_rie(true);
w.set_tie(true);
});
});
P::phy_reset(&mut this);
P::phy_init(&mut this);
this
}
}
unsafe impl<'d, P: PHY, const TX: usize, const RX: usize> StationManagement
for Ethernet<'d, P, TX, RX>
{
fn smi_read(&mut self, reg: u8) -> u16 {
// NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
unsafe {
let mac = ETH.ethernet_mac();
mac.macmiiar().modify(|w| {
w.set_pa(self.phy_addr);
w.set_mr(reg);
w.set_mw(Mw::READ); // read operation
w.set_cr(self.clock_range);
w.set_mb(MbProgress::BUSY); // indicate that operation is in progress
});
while mac.macmiiar().read().mb() == MbProgress::BUSY {}
mac.macmiidr().read().md()
}
}
fn smi_write(&mut self, reg: u8, val: u16) {
// NOTE(unsafe) These registers aren't used in the interrupt and we have `&mut self`
unsafe {
let mac = ETH.ethernet_mac();
mac.macmiidr().write(|w| w.set_md(val));
mac.macmiiar().modify(|w| {
w.set_pa(self.phy_addr);
w.set_mr(reg);
w.set_mw(Mw::WRITE); // write
w.set_cr(self.clock_range);
w.set_mb(MbProgress::BUSY);
});
while mac.macmiiar().read().mb() == MbProgress::BUSY {}
}
}
}
impl<'d, P: PHY, const TX: usize, const RX: usize> Device for Ethernet<'d, P, TX, RX> {
fn is_transmit_ready(&mut self) -> bool {
self.state.with(|s| s.desc_ring.tx.available())
}
fn transmit(&mut self, pkt: PacketBuf) {
self.state.with(|s| unwrap!(s.desc_ring.tx.transmit(pkt)));
}
fn receive(&mut self) -> Option<PacketBuf> {
self.state.with(|s| s.desc_ring.rx.pop_packet())
}
fn register_waker(&mut self, waker: &Waker) {
WAKER.register(waker);
}
fn capabilities(&mut self) -> DeviceCapabilities {
let mut caps = DeviceCapabilities::default();
caps.max_transmission_unit = MTU;
caps.max_burst_size = Some(TX.min(RX));
caps
}
fn link_state(&mut self) -> LinkState {
if P::poll_link(self) {
LinkState::Up
} else {
LinkState::Down
}
}
fn ethernet_address(&mut self) -> [u8; 6] {
self.mac_addr
}
}
impl<'d, P: PHY, const TX: usize, const RX: usize> Drop for Ethernet<'d, P, TX, RX> {
fn drop(&mut self) {
// NOTE(unsafe) We have `&mut self` and the interrupt doesn't use these registers
unsafe {
let dma = ETH.ethernet_dma();
let mac = ETH.ethernet_mac();
// Disable the TX DMA and wait for any previous transmissions to be completed
dma.dmaomr().modify(|w| w.set_st(St::STOPPED));
// Disable MAC transmitter and receiver
mac.maccr().modify(|w| {
w.set_re(false);
w.set_te(false);
});
dma.dmaomr().modify(|w| w.set_sr(DmaomrSr::STOPPED));
}
for pin in self.pins.iter_mut() {
// NOTE(unsafe) Exclusive access to the regs
critical_section::with(|_| unsafe {
pin.set_as_analog();
pin.block()
.ospeedr()
.modify(|w| w.set_ospeedr(pin.pin() as usize, Ospeedr::LOWSPEED));
})
}
}
}
//----------------------------------------------------------------------
struct Inner<'d, const TX: usize, const RX: usize> {
_peri: PhantomData<&'d mut peripherals::ETH>,
desc_ring: DescriptorRing<TX, RX>,
}
impl<'d, const TX: usize, const RX: usize> Inner<'d, TX, RX> {
pub fn new(_peri: impl Unborrow<Target = peripherals::ETH> + 'd) -> Self {
Self {
_peri: PhantomData,
desc_ring: DescriptorRing::new(),
}
}
}
impl<'d, const TX: usize, const RX: usize> PeripheralState for Inner<'d, TX, RX> {
type Interrupt = crate::interrupt::ETH;
fn on_interrupt(&mut self) {
unwrap!(self.desc_ring.tx.on_interrupt());
self.desc_ring.rx.on_interrupt();
WAKER.wake();
// TODO: Check and clear more flags
unsafe {
let dma = ETH.ethernet_dma();
dma.dmasr().modify(|w| {
w.set_ts(true);
w.set_rs(true);
w.set_nis(true);
});
// Delay for two peripheral clock cycles
dma.dmasr().read();
dma.dmasr().read();
}
}
}
mod sealed {
use super::*;
pub trait RefClkPin: GpioPin {
fn configure(&mut self);
}
pub trait MDIOPin: GpioPin {
fn configure(&mut self);
}
pub trait MDCPin: GpioPin {
fn configure(&mut self);
}
pub trait CRSPin: GpioPin {
fn configure(&mut self);
}
pub trait RXD0Pin: GpioPin {
fn configure(&mut self);
}
pub trait RXD1Pin: GpioPin {
fn configure(&mut self);
}
pub trait TXD0Pin: GpioPin {
fn configure(&mut self);
}
pub trait TXD1Pin: GpioPin {
fn configure(&mut self);
}
pub trait TXEnPin: GpioPin {
fn configure(&mut self);
}
}
pub trait RefClkPin: sealed::RefClkPin + 'static {}
pub trait MDIOPin: sealed::MDIOPin + 'static {}
pub trait MDCPin: sealed::MDCPin + 'static {}
pub trait CRSPin: sealed::CRSPin + 'static {}
pub trait RXD0Pin: sealed::RXD0Pin + 'static {}
pub trait RXD1Pin: sealed::RXD1Pin + 'static {}
pub trait TXD0Pin: sealed::TXD0Pin + 'static {}
pub trait TXD1Pin: sealed::TXD1Pin + 'static {}
pub trait TXEnPin: sealed::TXEnPin + 'static {}
static WAKER: AtomicWaker = AtomicWaker::new();
macro_rules! impl_pin {
($pin:ident, $signal:ident, $af:expr) => {
impl sealed::$signal for peripherals::$pin {
fn configure(&mut self) {
// NOTE(unsafe) Exclusive access to the registers
critical_section::with(|_| unsafe {
self.set_as_af($af, OutputPushPull);
self.block()
.ospeedr()
.modify(|w| w.set_ospeedr(self.pin() as usize, Ospeedr::VERYHIGHSPEED));
})
}
}
impl $signal for peripherals::$pin {}
};
}
// impl sealed::RefClkPin for peripherals::PA1 {
// fn configure(&mut self) {
// // NOTE(unsafe) Exclusive access to the registers
// critical_section::with(|_| unsafe {
// self.set_as_af(11, OutputPushPull);
// self.block()
// .ospeedr()
// .modify(|w| w.set_ospeedr(self.pin() as usize, Ospeedr::VERYHIGHSPEED));
// })
// }
// }
// impl RefClkPin for peripherals::PA1 {}
crate::pac::peripheral_pins!(
($inst:ident, eth, ETH, $pin:ident, REF_CLK, $af:expr) => {
impl_pin!($pin, RefClkPin, $af);
};
($inst:ident, eth, ETH, $pin:ident, MDIO, $af:expr) => {
impl_pin!($pin, MDIOPin, $af);
};
($inst:ident, eth, ETH, $pin:ident, MDC, $af:expr) => {
impl_pin!($pin, MDCPin, $af);
};
($inst:ident, eth, ETH, $pin:ident, CRS_DV, $af:expr) => {
impl_pin!($pin, CRSPin, $af);
};
($inst:ident, eth, ETH, $pin:ident, RXD0, $af:expr) => {
impl_pin!($pin, RXD0Pin, $af);
};
($inst:ident, eth, ETH, $pin:ident, RXD1, $af:expr) => {
impl_pin!($pin, RXD1Pin, $af);
};
($inst:ident, eth, ETH, $pin:ident, TXD0, $af:expr) => {
impl_pin!($pin, TXD0Pin, $af);
};
($inst:ident, eth, ETH, $pin:ident, TXD1, $af:expr) => {
impl_pin!($pin, TXD1Pin, $af);
};
($inst:ident, eth, ETH, $pin:ident, TX_EN, $af:expr) => {
impl_pin!($pin, TXEnPin, $af);
};
);

@@ -0,0 +1,309 @@
use core::sync::atomic::{compiler_fence, fence, Ordering};
use embassy_net::{Packet, PacketBox, PacketBoxExt, PacketBuf};
use stm32_metapac::eth::vals::{DmaomrSr, Rpd, Rps};
use vcell::VolatileCell;
use crate::pac::ETH;
mod rx_consts {
/// Owned by DMA engine
pub const RXDESC_0_OWN: u32 = 1 << 31;
/// First descriptor
pub const RXDESC_0_FS: u32 = 1 << 9;
/// Last descriptor
pub const RXDESC_0_LS: u32 = 1 << 8;
/// Error summary
pub const RXDESC_0_ES: u32 = 1 << 15;
/// Frame length
pub const RXDESC_0_FL_MASK: u32 = 0x3FFF;
pub const RXDESC_0_FL_SHIFT: usize = 16;
pub const RXDESC_1_RBS_MASK: u32 = 0x0fff;
/// Second address chained
pub const RXDESC_1_RCH: u32 = 1 << 14;
/// End Of Ring
pub const RXDESC_1_RER: u32 = 1 << 15;
}
use rx_consts::*;
/// Receive Descriptor representation
///
/// * rdes0: OWN and Status
/// * rdes1: allocated buffer length
/// * rdes2: data buffer address
/// * rdes3: next descriptor address
#[repr(C)]
struct RDes {
rdes0: VolatileCell<u32>,
rdes1: VolatileCell<u32>,
rdes2: VolatileCell<u32>,
rdes3: VolatileCell<u32>,
}
impl RDes {
pub const fn new() -> Self {
Self {
rdes0: VolatileCell::new(0),
rdes1: VolatileCell::new(0),
rdes2: VolatileCell::new(0),
rdes3: VolatileCell::new(0),
}
}
/// Return true if this RDes is acceptable to us
#[inline(always)]
pub fn valid(&self) -> bool {
// Write-back descriptor is valid if:
//
// Contains first buffer of packet AND contains last buf of
// packet AND no errors
(self.rdes0.get() & (RXDESC_0_ES | RXDESC_0_FS | RXDESC_0_LS))
== (RXDESC_0_FS | RXDESC_0_LS)
}
/// Return true if this RDes is not currently owned by the DMA
#[inline(always)]
pub fn available(&self) -> bool {
self.rdes0.get() & RXDESC_0_OWN == 0 // Owned by us
}
/// Configures the reception buffer address and length and passes descriptor ownership to the DMA
#[inline(always)]
pub fn set_ready(&mut self, buf_addr: u32, buf_len: usize) {
self.rdes1
.set(self.rdes1.get() | (buf_len as u32) & RXDESC_1_RBS_MASK);
self.rdes2.set(buf_addr);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::Release);
compiler_fence(Ordering::Release);
self.rdes0.set(self.rdes0.get() | RXDESC_0_OWN);
// Used to flush the store buffer as fast as possible to make the buffer available for the
// DMA.
fence(Ordering::SeqCst);
}
// points to next descriptor (RCH)
#[inline(always)]
fn set_buffer2(&mut self, buffer: *const u8) {
self.rdes3.set(buffer as u32);
}
#[inline(always)]
fn set_end_of_ring(&mut self) {
self.rdes1.set(self.rdes1.get() | RXDESC_1_RER);
}
#[inline(always)]
fn packet_len(&self) -> usize {
((self.rdes0.get() >> RXDESC_0_FL_SHIFT) & RXDESC_0_FL_MASK) as usize
}
pub fn setup(&mut self, next: Option<&Self>) {
// Defer this initialization to this function, so the descriptor ring can live in .bss.
self.rdes1.set(self.rdes1.get() | RXDESC_1_RCH);
match next {
Some(next) => self.set_buffer2(next as *const _ as *const u8),
None => {
self.set_buffer2(0 as *const u8);
self.set_end_of_ring();
}
}
}
}
/// Running state of the `RDesRing`
#[derive(PartialEq, Eq, Debug)]
pub enum RunningState {
Unknown,
Stopped,
Running,
}
impl RunningState {
/// Whether self equals `RunningState::Running`
pub fn is_running(&self) -> bool {
*self == RunningState::Running
}
}
/// Rx ring of descriptors and packets
///
/// This ring has three major locations that work in lock-step. The DMA will never write to the tail
/// index, so the `read_index` must never pass the tail index. The `next_tail_index` is always 1
/// slot ahead of the real tail index, and it must never pass the `read_index` or it could overwrite
/// a packet still to be passed to the application.
///
/// nt can't pass r (no alloc)
/// +---+---+---+---+ Read ok +---+---+---+---+ No Read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation ok +---+---+---+---+ +---+---+---+---+
/// ^ ^t ^t ^ ^t ^
/// |r |r |r
/// |nt |nt |nt
///
///
/// +---+---+---+---+ Read ok +---+---+---+---+ Can't read +---+---+---+---+
/// | | | | | ------------> | | | | | ------------> | | | | |
/// +---+---+---+---+ Allocation fail +---+---+---+---+ Allocation ok +---+---+---+---+
/// ^ ^t ^ ^t ^ ^ ^ ^t
/// |r | |r | | |r
/// |nt |nt |nt
///
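/// Restated as the checks used below (illustrative only, using the same names as the
/// fields and locals in this module):
///
/// ```text
/// tail_index = (next_tail_index + N - 1) % N
/// pop ok     only if descriptor[read_index] is not DMA-owned and read_index != tail_index
/// alloc ok   only if next_tail_index != read_index
/// ```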
pub(crate) struct RDesRing<const N: usize> {
descriptors: [RDes; N],
buffers: [Option<PacketBox>; N],
read_index: usize,
next_tail_index: usize,
}
impl<const N: usize> RDesRing<N> {
pub const fn new() -> Self {
const RDES: RDes = RDes::new();
const BUFFERS: Option<PacketBox> = None;
Self {
descriptors: [RDES; N],
buffers: [BUFFERS; N],
read_index: 0,
next_tail_index: 0,
}
}
pub(crate) fn init(&mut self) {
assert!(N > 1);
let mut last_index = 0;
for (index, buf) in self.buffers.iter_mut().enumerate() {
let pkt = match PacketBox::new(Packet::new()) {
Some(p) => p,
None => {
if index == 0 {
panic!("Could not allocate at least one buffer for Ethernet receiving");
} else {
break;
}
}
};
self.descriptors[index].set_ready(pkt.as_ptr() as u32, pkt.len());
*buf = Some(pkt);
last_index = index;
}
self.next_tail_index = (last_index + 1) % N;
// Not sure if this is supposed to span all of the descriptors or just those that contain buffers
{
let mut previous: Option<&mut RDes> = None;
for entry in self.descriptors.iter_mut() {
if let Some(prev) = &mut previous {
prev.setup(Some(entry));
}
previous = Some(entry);
}
if let Some(entry) = &mut previous {
entry.setup(None);
}
}
// Register rx descriptor start
// NOTE (unsafe) Used for atomic writes
unsafe {
ETH.ethernet_dma()
.dmardlar()
.write(|w| w.0 = &self.descriptors as *const _ as u32);
};
// We already have fences in `set_ready`, which is called above
// Start receive
unsafe {
ETH.ethernet_dma()
.dmaomr()
.modify(|w| w.set_sr(DmaomrSr::STARTED))
};
self.demand_poll();
}
fn demand_poll(&self) {
unsafe { ETH.ethernet_dma().dmarpdr().write(|w| w.set_rpd(Rpd::POLL)) };
}
pub(crate) fn on_interrupt(&mut self) {
// XXX: Do we need to do anything here ? Maybe we should try to advance the tail ptr, but it
// would soon hit the read ptr anyway, and we will wake smoltcp's stack on the interrupt
// which should try to pop a packet...
}
/// Get current `RunningState`
fn running_state(&self) -> RunningState {
match unsafe { ETH.ethernet_dma().dmasr().read().rps() } {
// Reset or Stop Receive Command issued
Rps::STOPPED => RunningState::Stopped,
// Fetching receive transfer descriptor
Rps::RUNNINGFETCHING => RunningState::Running,
// Waiting for receive packet
Rps::RUNNINGWAITING => RunningState::Running,
// Receive descriptor unavailable
Rps::SUSPENDED => RunningState::Stopped,
// Closing receive descriptor
Rps(0b101) => RunningState::Running,
// Transferring the receive packet data from receive buffer to host memory
Rps::RUNNINGWRITING => RunningState::Running,
_ => RunningState::Unknown,
}
}
pub(crate) fn pop_packet(&mut self) -> Option<PacketBuf> {
if !self.running_state().is_running() {
self.demand_poll();
}
// Not sure if the contents of the write buffer on the M7 can affect reads, so we are using
// a DMB here just in case; it also serves as a hint to the compiler that we're syncing the
// buffer (I think)
fence(Ordering::SeqCst);
let read_available = self.descriptors[self.read_index].available();
let tail_index = (self.next_tail_index + N - 1) % N;
let pkt = if read_available && self.read_index != tail_index {
let pkt = self.buffers[self.read_index].take();
let len = self.descriptors[self.read_index].packet_len();
assert!(pkt.is_some());
let valid = self.descriptors[self.read_index].valid();
self.read_index = (self.read_index + 1) % N;
if valid {
pkt.map(|p| p.slice(0..len))
} else {
None
}
} else {
None
};
// Try to advance the tail_index
if self.next_tail_index != self.read_index {
match PacketBox::new(Packet::new()) {
Some(b) => {
let addr = b.as_ptr() as u32;
let buffer_len = b.len();
self.buffers[self.next_tail_index].replace(b);
self.descriptors[self.next_tail_index].set_ready(addr, buffer_len);
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::Release);
self.next_tail_index = (self.next_tail_index + 1) % N;
}
None => {}
}
}
pkt
}
}

@@ -0,0 +1,238 @@
use core::sync::atomic::{compiler_fence, fence, Ordering};
use embassy_net::PacketBuf;
use stm32_metapac::eth::vals::St;
use vcell::VolatileCell;
use crate::pac::ETH;
#[non_exhaustive]
#[derive(Debug, Copy, Clone)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
NoBufferAvailable,
// TODO: Break down this error into several others
TransmissionError,
}
/// Transmit and Receive Descriptor fields
#[allow(dead_code)]
mod tx_consts {
pub const TXDESC_0_OWN: u32 = 1 << 31;
pub const TXDESC_0_IOC: u32 = 1 << 30;
// First segment of frame
pub const TXDESC_0_FS: u32 = 1 << 28;
// Last segment of frame
pub const TXDESC_0_LS: u32 = 1 << 29;
// Transmit end of ring
pub const TXDESC_0_TER: u32 = 1 << 21;
// Second address chained
pub const TXDESC_0_TCH: u32 = 1 << 20;
// Error status
pub const TXDESC_0_ES: u32 = 1 << 15;
// Transmit buffer size
pub const TXDESC_1_TBS_SHIFT: usize = 0;
pub const TXDESC_1_TBS_MASK: u32 = 0x0fff << TXDESC_1_TBS_SHIFT;
}
use tx_consts::*;
/// Transmit Descriptor representation
///
/// * tdes0: control
/// * tdes1: buffer lengths
/// * tdes2: data buffer address
/// * tdes3: next descriptor address
#[repr(C)]
struct TDes {
tdes0: VolatileCell<u32>,
tdes1: VolatileCell<u32>,
tdes2: VolatileCell<u32>,
tdes3: VolatileCell<u32>,
}
impl TDes {
pub const fn new() -> Self {
Self {
tdes0: VolatileCell::new(0),
tdes1: VolatileCell::new(0),
tdes2: VolatileCell::new(0),
tdes3: VolatileCell::new(0),
}
}
/// Return true if this TDes is not currently owned by the DMA
pub fn available(&self) -> bool {
(self.tdes0.get() & TXDESC_0_OWN) == 0
}
/// Pass ownership to the DMA engine
fn set_owned(&mut self) {
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::Release);
compiler_fence(Ordering::Release);
self.tdes0.set(self.tdes0.get() | TXDESC_0_OWN);
// Used to flush the store buffer as fast as possible to make the buffer available for the
// DMA.
fence(Ordering::SeqCst);
}
fn set_buffer1(&mut self, buffer: *const u8) {
self.tdes2.set(buffer as u32);
}
fn set_buffer1_len(&mut self, len: usize) {
self.tdes1
.set((self.tdes1.get() & !TXDESC_1_TBS_MASK) | ((len as u32) << TXDESC_1_TBS_SHIFT));
}
// points to next descriptor (TCH)
fn set_buffer2(&mut self, buffer: *const u8) {
self.tdes3.set(buffer as u32);
}
fn set_end_of_ring(&mut self) {
self.tdes0.set(self.tdes0.get() | TXDESC_0_TER);
}
// Set up as a part of the ring buffer - configures the tdes
pub fn setup(&mut self, next: Option<&Self>) {
// Defer this initialization to this function, so the descriptor ring can live in .bss.
self.tdes0
.set(TXDESC_0_TCH | TXDESC_0_IOC | TXDESC_0_FS | TXDESC_0_LS);
match next {
Some(next) => self.set_buffer2(next as *const TDes as *const u8),
None => {
self.set_buffer2(0 as *const u8);
self.set_end_of_ring();
}
}
}
}
pub(crate) struct TDesRing<const N: usize> {
descriptors: [TDes; N],
buffers: [Option<PacketBuf>; N],
next_entry: usize,
}
impl<const N: usize> TDesRing<N> {
pub const fn new() -> Self {
const TDES: TDes = TDes::new();
const BUFFERS: Option<PacketBuf> = None;
Self {
descriptors: [TDES; N],
buffers: [BUFFERS; N],
next_entry: 0,
}
}
/// Initialise this TDesRing. Assume TDesRing is corrupt
///
/// The current memory address of the buffers inside this TDesRing
/// will be stored in the descriptors, so ensure the TDesRing is
/// not moved after initialisation.
pub(crate) fn init(&mut self) {
assert!(N > 0);
{
let mut previous: Option<&mut TDes> = None;
for entry in self.descriptors.iter_mut() {
if let Some(prev) = &mut previous {
prev.setup(Some(entry));
}
previous = Some(entry);
}
if let Some(entry) = &mut previous {
entry.setup(None);
}
}
self.next_entry = 0;
// Register tx descriptor start
// NOTE (unsafe) Used for atomic writes
unsafe {
ETH.ethernet_dma()
.dmatdlar()
.write(|w| w.0 = &self.descriptors as *const _ as u32);
}
// "Preceding reads and writes cannot be moved past subsequent writes."
#[cfg(feature = "fence")]
fence(Ordering::Release);
// We don't need a compiler fence here because all interactions with the descriptors are
// volatile
// Start transmission
unsafe {
ETH.ethernet_dma()
.dmaomr()
.modify(|w| w.set_st(St::STARTED))
};
}
/// Return true if a TDes is available for use
pub(crate) fn available(&self) -> bool {
self.descriptors[self.next_entry].available()
}
pub(crate) fn transmit(&mut self, pkt: PacketBuf) -> Result<(), Error> {
if !self.available() {
return Err(Error::NoBufferAvailable);
}
let descriptor = &mut self.descriptors[self.next_entry];
let pkt_len = pkt.len();
let address = pkt.as_ptr() as *const u8;
descriptor.set_buffer1(address);
descriptor.set_buffer1_len(pkt_len);
self.buffers[self.next_entry].replace(pkt);
descriptor.set_owned();
// Ensure changes to the descriptor are committed before the DMA engine sees the tail pointer store.
// This will generate a DMB instruction.
// "Preceding reads and writes cannot be moved past subsequent writes."
fence(Ordering::Release);
// Move the tail pointer (TPR) to the next descriptor
self.next_entry = (self.next_entry + 1) % N;
// Request the DMA engine to poll the latest tx descriptor
unsafe { ETH.ethernet_dma().dmatpdr().modify(|w| w.0 = 1) }
Ok(())
}
pub(crate) fn on_interrupt(&mut self) -> Result<(), Error> {
let previous = (self.next_entry + N - 1) % N;
let td = &self.descriptors[previous];
// DMB to ensure that we are reading an updated value, probably not needed at the hardware
// level, but this is also a hint to the compiler that we're syncing on the buffer.
fence(Ordering::SeqCst);
let tdes0 = td.tdes0.get();
if tdes0 & TXDESC_0_OWN != 0 {
// Transmission isn't done yet, probably a receive interrupt that fired this
return Ok(());
}
// Release the buffer
self.buffers[previous].take();
if tdes0 & TXDESC_0_ES != 0 {
Err(Error::TransmissionError)
} else {
Ok(())
}
}
}

@@ -19,8 +19,10 @@ defmt-error = []
[dependencies]
embassy = { version = "0.1.0", path = "../../embassy", features = ["defmt", "defmt-trace"] }
embassy-traits = { version = "0.1.0", path = "../../embassy-traits", features = ["defmt"] }
embassy-stm32 = { version = "0.1.0", path = "../../embassy-stm32", features = ["defmt", "defmt-trace", "stm32f767zi", "unstable-pac", "time-driver-tim2"] }
embassy-stm32 = { version = "0.1.0", path = "../../embassy-stm32", features = ["defmt", "defmt-trace", "net", "stm32f767zi", "unstable-pac", "time-driver-tim2"] }
embassy-hal-common = {version = "0.1.0", path = "../../embassy-hal-common" }
embassy-net = { path = "../../embassy-net", default-features = false, features = ["defmt-debug", "defmt", "tcp", "medium-ethernet", "pool-16"] }
embassy-macros = { path = "../../embassy-macros" }
defmt = "0.2.3"
defmt-rtt = "0.2.0"
@@ -33,3 +35,17 @@ futures = { version = "0.3.17", default-features = false, features = ["async-awa
rtt-target = { version = "0.3.1", features = ["cortex-m"] }
heapless = { version = "0.7.5", default-features = false }
nb = "1.0.0"
rand_core = "0.6.3"
critical-section = "0.2.3"
[dependencies.smoltcp]
git = "https://github.com/smoltcp-rs/smoltcp"
rev = "e4241510337e095b9d21136c5f58b2eaa1b78479"
default-features = false
features = [
"proto-ipv4",
"socket",
"async",
"defmt",
]

@@ -0,0 +1,128 @@
#![no_std]
#![no_main]
#![feature(type_alias_impl_trait)]
#[path = "../example_common.rs"]
mod example_common;
use example_common::config;
use cortex_m_rt::entry;
use defmt::{info, unwrap};
use defmt_rtt as _; // global logger
use embassy::executor::{Executor, Spawner};
use embassy::io::AsyncWriteExt;
use embassy::time::{Duration, Timer};
use embassy::util::Forever;
use embassy_macros::interrupt_take;
use embassy_net::{
Config as NetConfig, Ipv4Address, Ipv4Cidr, StackResources, StaticConfigurator, TcpSocket,
};
use embassy_stm32::eth::lan8742a::LAN8742A;
use embassy_stm32::eth::{Ethernet, State};
use embassy_stm32::rng::Rng;
use embassy_stm32::{interrupt, peripherals};
use heapless::Vec;
use panic_probe as _;
use peripherals::RNG;
#[embassy::task]
async fn main_task(
device: &'static mut Ethernet<'static, LAN8742A, 4, 4>,
config: &'static mut StaticConfigurator,
spawner: Spawner,
) {
let net_resources = NET_RESOURCES.put(StackResources::new());
// Init network stack
embassy_net::init(device, config, net_resources);
// Launch network task
unwrap!(spawner.spawn(net_task()));
info!("Network task initialized");
// Then we can use it!
let mut rx_buffer = [0; 1024];
let mut tx_buffer = [0; 1024];
let mut socket = TcpSocket::new(&mut rx_buffer, &mut tx_buffer);
socket.set_timeout(Some(embassy_net::SmolDuration::from_secs(10)));
let remote_endpoint = (Ipv4Address::new(192, 168, 0, 10), 8000);
let r = socket.connect(remote_endpoint).await;
if let Err(e) = r {
info!("connect error: {:?}", e);
return;
}
info!("connected!");
loop {
let r = socket.write_all(b"Hello\n").await;
if let Err(e) = r {
info!("write error: {:?}", e);
return;
}
Timer::after(Duration::from_secs(1)).await;
}
}
#[embassy::task]
async fn net_task() {
embassy_net::run().await
}
#[no_mangle]
fn _embassy_rand(buf: &mut [u8]) {
use rand_core::RngCore;
critical_section::with(|_| unsafe {
unwrap!(RNG_INST.as_mut()).fill_bytes(buf);
});
}
static mut RNG_INST: Option<Rng<RNG>> = None;
static EXECUTOR: Forever<Executor> = Forever::new();
static STATE: Forever<State<'static, 4, 4>> = Forever::new();
static ETH: Forever<Ethernet<'static, LAN8742A, 4, 4>> = Forever::new();
static CONFIG: Forever<StaticConfigurator> = Forever::new();
static NET_RESOURCES: Forever<StackResources<1, 2, 8>> = Forever::new();
#[entry]
fn main() -> ! {
info!("Hello World!");
info!("Setup RCC...");
let p = embassy_stm32::init(config());
let rng = Rng::new(p.RNG);
unsafe {
RNG_INST.replace(rng);
}
let eth_int = interrupt_take!(ETH);
let mac_addr = [0x00, 0x00, 0xDE, 0xAD, 0xBE, 0xEF];
let state = STATE.put(State::new());
let eth = unsafe {
ETH.put(Ethernet::new(
state, p.ETH, eth_int, p.PA1, p.PA2, p.PC1, p.PA7, p.PC4, p.PC5, p.PG13, p.PB13,
p.PG11, LAN8742A, mac_addr, 0,
))
};
let config = StaticConfigurator::new(NetConfig {
address: Ipv4Cidr::new(Ipv4Address::new(192, 168, 0, 61), 24),
dns_servers: Vec::new(),
gateway: Some(Ipv4Address::new(192, 168, 0, 1)),
});
let config = CONFIG.put(config);
let executor = EXECUTOR.put(Executor::new());
executor.run(move |spawner| {
unwrap!(spawner.spawn(main_task(eth, config, spawner)));
})
}

@@ -1,6 +1,9 @@
#![macro_use]
use defmt_rtt as _; // global logger
use defmt_rtt as _;
use embassy_stm32::time::U32Ext;
use embassy_stm32::Config;
// global logger
use panic_probe as _;
pub use defmt::*;
@@ -15,3 +18,10 @@ defmt::timestamp! {"{=u64}", {
n as u64
}
}
#[allow(unused)]
pub fn config() -> Config {
let mut config = Config::default();
config.rcc.sys_ck = Some(200.mhz().into());
config
}