From fc268df6f56661d5f43450c7a03850044ae8e136 Mon Sep 17 00:00:00 2001 From: Rasmus Melchior Jacobsen Date: Thu, 27 Apr 2023 10:48:38 +0200 Subject: [PATCH] Support overflow detection for more than one ring-period --- embassy-stm32/src/dma/dma.rs | 115 ++++++++++++----- embassy-stm32/src/dma/ringbuffer.rs | 127 ++++++++----------- embassy-stm32/src/usart/rx_ringbuffered.rs | 121 +++++++++--------- tests/stm32/src/bin/usart_rx_ringbuffered.rs | 19 ++- tests/utils/src/bin/saturate_serial.rs | 15 ++- 5 files changed, 216 insertions(+), 181 deletions(-) diff --git a/embassy-stm32/src/dma/dma.rs b/embassy-stm32/src/dma/dma.rs index 17d82fe2d..10ae20f5c 100644 --- a/embassy-stm32/src/dma/dma.rs +++ b/embassy-stm32/src/dma/dma.rs @@ -4,6 +4,7 @@ use core::pin::Pin; use core::sync::atomic::{fence, Ordering}; use core::task::{Context, Poll, Waker}; +use atomic_polyfill::AtomicUsize; use embassy_cortex_m::interrupt::Priority; use embassy_hal_common::{into_ref, Peripheral, PeripheralRef}; use embassy_sync::waitqueue::AtomicWaker; @@ -129,13 +130,16 @@ impl From for vals::Fth { struct State { ch_wakers: [AtomicWaker; DMA_CHANNEL_COUNT], + complete_count: [AtomicUsize; DMA_CHANNEL_COUNT], } impl State { const fn new() -> Self { + const ZERO: AtomicUsize = AtomicUsize::new(0); const AW: AtomicWaker = AtomicWaker::new(); Self { ch_wakers: [AW; DMA_CHANNEL_COUNT], + complete_count: [ZERO; DMA_CHANNEL_COUNT], } } } @@ -184,13 +188,43 @@ pub(crate) unsafe fn on_irq_inner(dma: pac::dma::Dma, channel_num: usize, index: panic!("DMA: error on DMA@{:08x} channel {}", dma.0 as u32, channel_num); } - if isr.tcif(channel_num % 4) && cr.read().tcie() { - /* acknowledge transfer complete interrupt */ - dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); + let mut wake = false; + + if isr.htif(channel_num % 4) && cr.read().htie() { + // Acknowledge half transfer complete interrupt + dma.ifcr(channel_num / 4).write(|w| w.set_htif(channel_num % 4, true)); + wake = true; + } + + wake |= process_tcif(dma, channel_num, index); + + if wake { STATE.ch_wakers[index].wake(); } } +unsafe fn process_tcif(dma: pac::dma::Dma, channel_num: usize, index: usize) -> bool { + let isr_reg = dma.isr(channel_num / 4); + let cr_reg = dma.st(channel_num).cr(); + + // First, figure out if tcif is set without a cs. + if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() { + // Make tcif test again within a cs to avoid race when incrementing complete_count. 
+ critical_section::with(|_| { + if isr_reg.read().tcif(channel_num % 4) && cr_reg.read().tcie() { + // Acknowledge transfer complete interrupt + dma.ifcr(channel_num / 4).write(|w| w.set_tcif(channel_num % 4, true)); + STATE.complete_count[index].fetch_add(1, Ordering::Release); + true + } else { + false + } + }) + } else { + false + } +} + #[cfg(any(dma_v2, dmamux))] pub type Request = u8; #[cfg(not(any(dma_v2, dmamux)))] @@ -530,6 +564,7 @@ impl<'a, C: Channel, W: Word> DoubleBuffered<'a, C, W> { unsafe { dma.ifcr(isrn).write(|w| { + w.set_htif(isrbit, true); w.set_tcif(isrbit, true); w.set_teif(isrbit, true); }) @@ -593,32 +628,28 @@ impl<'a, C: Channel, W: Word> Drop for DoubleBuffered<'a, C, W> { // ============================== impl DmaCtrl for C { - fn tcif(&self) -> bool { - let channel_number = self.num(); - let dma = self.regs(); - let isrn = channel_number / 4; - let isrbit = channel_number % 4; - - unsafe { dma.isr(isrn).read() }.tcif(isrbit) - } - - fn clear_tcif(&mut self) { - let channel_number = self.num(); - let dma = self.regs(); - let isrn = channel_number / 4; - let isrbit = channel_number % 4; - - unsafe { - dma.ifcr(isrn).write(|w| { - w.set_tcif(isrbit, true); - }) - } - } - fn ndtr(&self) -> usize { let ch = self.regs().st(self.num()); unsafe { ch.ndtr().read() }.ndt() as usize } + + fn get_complete_count(&self) -> usize { + let dma = self.regs(); + let channel_num = self.num(); + let index = self.index(); + // Manually process tcif in case transfer was completed and we are in a higher priority task. + unsafe { process_tcif(dma, channel_num, index) }; + STATE.complete_count[index].load(Ordering::Acquire) + } + + fn reset_complete_count(&mut self) -> usize { + let dma = self.regs(); + let channel_num = self.num(); + let index = self.index(); + // Manually process tcif in case transfer was completed and we are in a higher priority task. 
+ unsafe { process_tcif(dma, channel_num, index) }; + STATE.complete_count[index].swap(0, Ordering::AcqRel) + } } pub struct RingBuffer<'a, C: Channel, W: Word> { @@ -657,7 +688,8 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { w.set_minc(vals::Inc::INCREMENTED); w.set_pinc(vals::Inc::FIXED); w.set_teie(true); - w.set_tcie(false); + w.set_htie(true); + w.set_tcie(true); w.set_circ(vals::Circ::ENABLED); #[cfg(dma_v1)] w.set_trbuff(true); @@ -703,7 +735,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { } pub fn clear(&mut self) { - self.ringbuf.clear(); + self.ringbuf.clear(&mut *self.channel); } /// Read bytes from the ring buffer @@ -712,6 +744,22 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { self.ringbuf.read(&mut *self.channel, buf) } + pub fn is_empty(&self) -> bool { + self.ringbuf.is_empty() + } + + pub fn len(&self) -> usize { + self.ringbuf.len() + } + + pub fn capacity(&self) -> usize { + self.ringbuf.dma_buf.len() + } + + pub fn set_waker(&mut self, waker: &Waker) { + STATE.ch_wakers[self.channel.index()].register(waker); + } + fn clear_irqs(&mut self) { let channel_number = self.channel.num(); let dma = self.channel.regs(); @@ -720,6 +768,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { unsafe { dma.ifcr(isrn).write(|w| { + w.set_htif(isrbit, true); w.set_tcif(isrbit, true); w.set_teif(isrbit, true); }) @@ -733,6 +782,7 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { unsafe { ch.cr().write(|w| { w.set_teie(true); + w.set_htie(true); w.set_tcie(true); }) } @@ -743,15 +793,10 @@ impl<'a, C: Channel, W: Word> RingBuffer<'a, C, W> { unsafe { ch.cr().read() }.en() } - /// Gets the total remaining transfers for the channel - /// Note: this will be zero for transfers that completed without cancellation. - pub fn get_remaining_transfers(&self) -> usize { + /// Synchronize the position of the ring buffer to the actual DMA controller position + pub fn reload_position(&mut self) { let ch = self.channel.regs().st(self.channel.num()); - unsafe { ch.ndtr().read() }.ndt() as usize - } - - pub fn set_ndtr(&mut self, ndtr: usize) { - self.ringbuf.ndtr = ndtr; + self.ringbuf.ndtr = unsafe { ch.ndtr().read() }.ndt() as usize; } } diff --git a/embassy-stm32/src/dma/ringbuffer.rs b/embassy-stm32/src/dma/ringbuffer.rs index f9ace6018..544ec9461 100644 --- a/embassy-stm32/src/dma/ringbuffer.rs +++ b/embassy-stm32/src/dma/ringbuffer.rs @@ -10,14 +10,6 @@ use super::word::Word; /// to the current register value. `ndtr` describes the current position of the DMA /// write. /// -/// # Safety -/// -/// The ring buffer controls the TCIF (transfer completed interrupt flag) to -/// detect buffer overruns, hence this interrupt must be disabled. -/// The buffer can detect overruns up to one period, that is, for a X byte buffer, -/// overruns can be detected if they happen from byte X+1 up to 2X. After this -/// point, overrunds may or may not be detected. -/// /// # Buffer layout /// /// ```text @@ -39,7 +31,6 @@ pub struct DmaRingBuffer<'a, W: Word> { pub(crate) dma_buf: &'a mut [W], first: usize, pub ndtr: usize, - expect_next_read_to_wrap: bool, } #[derive(Debug, PartialEq)] @@ -50,13 +41,13 @@ pub trait DmaCtrl { /// buffer until the dma writer wraps. fn ndtr(&self) -> usize; - /// Read the transfer completed interrupt flag - /// This flag is set by the dma controller when NDTR is reloaded, + /// Get the transfer completed counter. + /// This counter is incremented by the dma controller when NDTR is reloaded, /// i.e. when the writing wraps. 
- fn tcif(&self) -> bool; + fn get_complete_count(&self) -> usize; - /// Clear the transfer completed interrupt flag - fn clear_tcif(&mut self); + /// Reset the transfer completed counter to 0 and return the value just prior to the reset. + fn reset_complete_count(&mut self) -> usize; } impl<'a, W: Word> DmaRingBuffer<'a, W> { @@ -66,15 +57,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { dma_buf, first: 0, ndtr, - expect_next_read_to_wrap: false, } } /// Reset the ring buffer to its initial state - pub fn clear(&mut self) { + pub fn clear(&mut self, dma: &mut impl DmaCtrl) { self.first = 0; self.ndtr = self.dma_buf.len(); - self.expect_next_read_to_wrap = false; + dma.reset_complete_count(); } /// The buffer end position @@ -83,14 +73,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { } /// Returns whether the buffer is empty - #[allow(dead_code)] pub fn is_empty(&self) -> bool { self.first == self.end() } /// The current number of bytes in the buffer /// This may change at any time if dma is currently active - #[allow(dead_code)] pub fn len(&self) -> usize { // Read out a stable end (the dma periheral can change it at anytime) let end = self.end(); @@ -112,27 +100,19 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { if self.first == end { // The buffer is currently empty - if dma.tcif() { - // The dma controller has written such that the ring buffer now wraps - // This is the special case where exactly n*dma_buf.len(), n = 1,2,..., bytes was written, - // but where additional bytes are now written causing the ring buffer to wrap. - // This is only an error if the writing has passed the current unread region. + if dma.get_complete_count() > 0 { + // The DMA has written such that the ring buffer wraps at least once self.ndtr = dma.ndtr(); - if self.end() > self.first { - dma.clear_tcif(); + if self.end() > self.first || dma.get_complete_count() > 1 { return Err(OverrunError); } } - self.expect_next_read_to_wrap = false; Ok(0) } else if self.first < end { // The available, unread portion in the ring buffer DOES NOT wrap - if self.expect_next_read_to_wrap { - // The read was expected to wrap but it did not - - dma.clear_tcif(); + if dma.get_complete_count() > 1 { return Err(OverrunError); } @@ -141,35 +121,39 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { compiler_fence(Ordering::SeqCst); - if dma.tcif() { - // The dma controller has written such that the ring buffer now wraps - - self.ndtr = dma.ndtr(); - if self.end() > self.first { - // The bytes that we have copied out have overflowed - // as the writer has now both wrapped and is currently writing - // within the region that we have just copied out - - // Clear transfer completed interrupt flag - dma.clear_tcif(); + match dma.get_complete_count() { + 0 => { + // The DMA writer has not wrapped before nor after the copy + } + 1 => { + // The DMA writer has written such that the ring buffer now wraps + self.ndtr = dma.ndtr(); + if self.end() > self.first || dma.get_complete_count() > 1 { + // The bytes that we have copied out have overflowed + // as the writer has now both wrapped and is currently writing + // within the region that we have just copied out + return Err(OverrunError); + } + } + _ => { return Err(OverrunError); } } self.first = (self.first + len) % self.dma_buf.len(); - self.expect_next_read_to_wrap = false; Ok(len) } else { // The available, unread portion in the ring buffer DOES wrap - // The dma controller has wrapped since we last read and is currently + // The DMA writer has wrapped since we last read and is currently // writing (or 
the next byte added will be) in the beginning of the ring buffer. - // If the unread portion wraps then the writer must also have wrapped, - // or it has wrapped and we already cleared the TCIF flag - assert!(dma.tcif() || self.expect_next_read_to_wrap); + let complete_count = dma.get_complete_count(); + if complete_count > 1 { + return Err(OverrunError); + } - // Clear transfer completed interrupt flag - dma.clear_tcif(); + // If the unread portion wraps then the writer must also have wrapped + assert!(complete_count == 1); if self.first + buf.len() < self.dma_buf.len() { // The provided read buffer is not large enough to include all bytes from the tail of the dma buffer. @@ -182,13 +166,12 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { // We have now copied out the data from dma_buf // Make sure that the just read part was not overwritten during the copy self.ndtr = dma.ndtr(); - if self.end() > self.first || dma.tcif() { + if self.end() > self.first || dma.get_complete_count() > 1 { // The writer has entered the data that we have just read since we read out `end` in the beginning and until now. return Err(OverrunError); } self.first = (self.first + len) % self.dma_buf.len(); - self.expect_next_read_to_wrap = true; Ok(len) } else { // The provided read buffer is large enough to include all bytes from the tail of the dma buffer, @@ -201,14 +184,14 @@ impl<'a, W: Word> DmaRingBuffer<'a, W> { compiler_fence(Ordering::SeqCst); // We have now copied out the data from dma_buf - // Make sure that the just read part was not overwritten during the copy + // Reset complete counter and make sure that the just read part was not overwritten during the copy self.ndtr = dma.ndtr(); - if self.end() > self.first || dma.tcif() { + let complete_count = dma.reset_complete_count(); + if self.end() > self.first || complete_count > 1 { return Err(OverrunError); } self.first = head; - self.expect_next_read_to_wrap = false; Ok(tail + head) } } @@ -243,14 +226,14 @@ mod tests { struct TestCtrl { next_ndtr: RefCell>, - tcif: bool, + complete_count: usize, } impl TestCtrl { pub const fn new() -> Self { Self { next_ndtr: RefCell::new(None), - tcif: false, + complete_count: 0, } } @@ -264,12 +247,14 @@ mod tests { self.next_ndtr.borrow_mut().unwrap() } - fn tcif(&self) -> bool { - self.tcif + fn get_complete_count(&self) -> usize { + self.complete_count } - fn clear_tcif(&mut self) { - self.tcif = false; + fn reset_complete_count(&mut self) -> usize { + let old = self.complete_count; + self.complete_count = 0; + old } } @@ -320,7 +305,7 @@ mod tests { ringbuf.ndtr = 10; // The dma controller has written 4 + 6 bytes and has reloaded NDTR - ctrl.tcif = true; + ctrl.complete_count = 1; ctrl.set_next_ndtr(10); assert!(!ringbuf.is_empty()); @@ -346,14 +331,14 @@ mod tests { ringbuf.ndtr = 6; // The dma controller has written 6 + 2 bytes and has reloaded NDTR - ctrl.tcif = true; + ctrl.complete_count = 1; ctrl.set_next_ndtr(14); let mut buf = [0; 2]; assert_eq!(2, ringbuf.read(&mut ctrl, &mut buf).unwrap()); assert_eq!([2, 3], buf); - assert_eq!(true, ctrl.tcif); // The interrupt flag IS NOT cleared + assert_eq!(1, ctrl.complete_count); // The interrupt flag IS NOT cleared } #[test] @@ -365,14 +350,14 @@ mod tests { ringbuf.ndtr = 10; // The dma controller has written 6 + 2 bytes and has reloaded NDTR - ctrl.tcif = true; + ctrl.complete_count = 1; ctrl.set_next_ndtr(14); let mut buf = [0; 10]; assert_eq!(10, ringbuf.read(&mut ctrl, &mut buf).unwrap()); assert_eq!([12, 13, 14, 15, 0, 1, 2, 3, 4, 5], buf); - assert_eq!(false, 
ctrl.tcif); // The interrupt flag IS cleared + assert_eq!(0, ctrl.complete_count); // The interrupt flag IS cleared } #[test] @@ -387,12 +372,12 @@ mod tests { assert!(ringbuf.is_empty()); // The ring buffer thinks that it is empty // The dma controller has written exactly 16 bytes - ctrl.tcif = true; + ctrl.complete_count = 1; let mut buf = [0; 2]; assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); - assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared + assert_eq!(1, ctrl.complete_count); // The complete counter is not reset } #[test] @@ -404,13 +389,13 @@ mod tests { ringbuf.ndtr = 6; // The dma controller has written 6 + 3 bytes and has reloaded NDTR - ctrl.tcif = true; + ctrl.complete_count = 1; ctrl.set_next_ndtr(13); let mut buf = [0; 2]; assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); - assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared + assert_eq!(1, ctrl.complete_count); // The complete counter is not reset } #[test] @@ -422,12 +407,12 @@ mod tests { ringbuf.ndtr = 10; // The dma controller has written 6 + 13 bytes and has reloaded NDTR - ctrl.tcif = true; + ctrl.complete_count = 1; ctrl.set_next_ndtr(3); let mut buf = [0; 2]; assert_eq!(Err(OverrunError), ringbuf.read(&mut ctrl, &mut buf)); - assert_eq!(false, ctrl.tcif); // The interrupt flag IS cleared + assert_eq!(1, ctrl.complete_count); // The complete counter is not reset } } diff --git a/embassy-stm32/src/usart/rx_ringbuffered.rs b/embassy-stm32/src/usart/rx_ringbuffered.rs index 0dc90ece7..dc21f557b 100644 --- a/embassy-stm32/src/usart/rx_ringbuffered.rs +++ b/embassy-stm32/src/usart/rx_ringbuffered.rs @@ -4,8 +4,9 @@ use core::task::Poll; use embassy_hal_common::drop::OnDrop; use embassy_hal_common::PeripheralRef; +use futures::future::{select, Either}; -use super::{rdr, sr, BasicInstance, Error, UartRx}; +use super::{clear_interrupt_flags, rdr, sr, BasicInstance, Error, UartRx}; use crate::dma::ringbuffer::OverrunError; use crate::dma::RingBuffer; @@ -98,7 +99,8 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma> RingBufferedUartRx<'d, T, RxD } /// Read bytes that are readily available in the ring buffer. - /// If no bytes are currently available in the buffer the call waits until data are received. + /// If no bytes are currently available in the buffer the call waits until the some + /// bytes are available (at least one byte and at most half the buffer size) /// /// Background receive is started if `start()` has not been previously called. 
/// @@ -107,10 +109,9 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma> RingBufferedUartRx<'d, T, RxD pub async fn read(&mut self, buf: &mut [u8]) -> Result { let r = T::regs(); + // Start background receive if it was not already started // SAFETY: read only let is_started = unsafe { r.cr3().read().dmar() }; - - // Start background receive if it was not already started if !is_started { self.start()?; } @@ -132,8 +133,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma> RingBufferedUartRx<'d, T, RxD } } - let ndtr = self.ring_buf.get_remaining_transfers(); - self.ring_buf.set_ndtr(ndtr); + self.ring_buf.reload_position(); match self.ring_buf.read(buf) { Ok(len) if len == 0 => {} Ok(len) => { @@ -148,28 +148,32 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma> RingBufferedUartRx<'d, T, RxD } } - // Wait for any data since `ndtr` - self.wait_for_data(ndtr).await?; + loop { + self.wait_for_data_or_idle().await?; + + self.ring_buf.reload_position(); + if !self.ring_buf.is_empty() { + break; + } + } - // ndtr is now different than the value provided to `wait_for_data()` - // Re-sample ndtr now when it has changed. - self.ring_buf.set_ndtr(self.ring_buf.get_remaining_transfers()); let len = self.ring_buf.read(buf).map_err(|_err| Error::Overrun)?; assert!(len > 0); + Ok(len) } - /// Wait for uart data - async fn wait_for_data(&mut self, old_ndtr: usize) -> Result<(), Error> { + /// Wait for uart idle or dma half-full or full + async fn wait_for_data_or_idle(&mut self) -> Result<(), Error> { let r = T::regs(); - // make sure USART state is restored to neutral state when this future is dropped - let _drop = OnDrop::new(move || { + // make sure USART state is restored to neutral state + let _on_drop = OnDrop::new(move || { // SAFETY: only clears Rx related flags unsafe { r.cr1().modify(|w| { - // disable RXNE interrupt - w.set_rxneie(false); + // disable idle line interrupt + w.set_idleie(false); }); } }); @@ -177,76 +181,65 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma> RingBufferedUartRx<'d, T, RxD // SAFETY: only sets Rx related flags unsafe { r.cr1().modify(|w| { - // enable RXNE interrupt - w.set_rxneie(true); + // enable idle line interrupt + w.set_idleie(true); }); } - // future which completes when RX "not empty" is detected, - // i.e. 
when there is data in uart rx register - let rxne = poll_fn(|cx| { - let s = T::state(); + compiler_fence(Ordering::SeqCst); - // Register waker to be awaken when RXNE interrupt is received + // Future which completes when there is dma is half full or full + let dma = poll_fn(|cx| { + self.ring_buf.set_waker(cx.waker()); + + compiler_fence(Ordering::SeqCst); + + self.ring_buf.reload_position(); + if !self.ring_buf.is_empty() { + // Some data is now available + Poll::Ready(()) + } else { + Poll::Pending + } + }); + + // Future which completes when idle line is detected + let uart = poll_fn(|cx| { + let s = T::state(); s.rx_waker.register(cx.waker()); compiler_fence(Ordering::SeqCst); // SAFETY: read only and we only use Rx related flags - let s = unsafe { sr(r).read() }; - let has_errors = s.pe() || s.fe() || s.ne() || s.ore(); + let sr = unsafe { sr(r).read() }; + + let has_errors = sr.pe() || sr.fe() || sr.ne() || sr.ore(); if has_errors { - if s.pe() { + if sr.pe() { return Poll::Ready(Err(Error::Parity)); - } else if s.fe() { + } else if sr.fe() { return Poll::Ready(Err(Error::Framing)); - } else if s.ne() { + } else if sr.ne() { return Poll::Ready(Err(Error::Noise)); } else { return Poll::Ready(Err(Error::Overrun)); } } - // Re-sample ndtr and determine if it has changed since we started - // waiting for data. - let new_ndtr = self.ring_buf.get_remaining_transfers(); - if new_ndtr != old_ndtr { - // Some data was received as NDTR has changed + if sr.idle() { + // Idle line is detected Poll::Ready(Ok(())) } else { - // It may be that the DMA controller is currently busy consuming the - // RX data register. We therefore wait register to become empty. - while unsafe { sr(r).read().rxne() } {} - - compiler_fence(Ordering::SeqCst); - - // Re-get again: This time we know that the DMA controller has consumed - // the current read register if it was busy doing so - let new_ndtr = self.ring_buf.get_remaining_transfers(); - if new_ndtr != old_ndtr { - // Some data was received as NDTR has changed - Poll::Ready(Ok(())) - } else { - Poll::Pending - } + Poll::Pending } }); - compiler_fence(Ordering::SeqCst); - - let new_ndtr = self.ring_buf.get_remaining_transfers(); - if new_ndtr != old_ndtr { - // Fast path - NDTR has already changed, no reason to poll - Ok(()) - } else { - // NDTR has not changed since we first read from the ring buffer - // Wait for RXNE interrupt... 
- match rxne.await { - Ok(()) => Ok(()), - Err(e) => { - self.teardown_uart(); - Err(e) - } + match select(dma, uart).await { + Either::Left(((), _)) => Ok(()), + Either::Right((Ok(()), _)) => Ok(()), + Either::Right((Err(e), _)) => { + self.teardown_uart(); + Err(e) } } } diff --git a/tests/stm32/src/bin/usart_rx_ringbuffered.rs b/tests/stm32/src/bin/usart_rx_ringbuffered.rs index 3ea8bfb7b..48dc25b0e 100644 --- a/tests/stm32/src/bin/usart_rx_ringbuffered.rs +++ b/tests/stm32/src/bin/usart_rx_ringbuffered.rs @@ -56,6 +56,7 @@ mod board { } const ONE_BYTE_DURATION_US: u32 = 9_000_000 / 115200; +const DMA_BUF_SIZE: usize = 64; #[embassy_executor::main] async fn main(spawner: Spawner) { @@ -114,7 +115,7 @@ async fn main(spawner: Spawner) { let usart = Uart::new(usart, rx, tx, irq, tx_dma, rx_dma, config); let (tx, rx) = usart.split(); - static mut DMA_BUF: [u8; 64] = [0; 64]; + static mut DMA_BUF: [u8; DMA_BUF_SIZE] = [0; DMA_BUF_SIZE]; let dma_buf = unsafe { DMA_BUF.as_mut() }; let rx = rx.into_ring_buffered(dma_buf); @@ -159,7 +160,14 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx loop { let mut buf = [0; 100]; let max_len = 1 + (rng.next_u32() as usize % (buf.len() - 1)); - let received = rx.read(&mut buf[..max_len]).await.unwrap(); + let received = match rx.read(&mut buf[..max_len]).await { + Ok(r) => r, + Err(e) => { + error!("Test fail! read error: {:?}", e); + cortex_m::asm::bkpt(); + return; + } + }; if expected.is_none() { info!("Test started"); @@ -176,8 +184,11 @@ async fn receive_task(mut rx: RingBufferedUartRx<'static, board::Uart, board::Rx } if received < max_len { - let byte_count = rng.next_u32() % 64; - Timer::after(Duration::from_micros((byte_count * ONE_BYTE_DURATION_US) as _)).await; + let byte_count = rng.next_u32() % (DMA_BUF_SIZE as u32); + let random_delay_us = (byte_count * ONE_BYTE_DURATION_US) as u64; + if random_delay_us > 200 { + Timer::after(Duration::from_micros(random_delay_us - 200)).await; + } } i += 1; diff --git a/tests/utils/src/bin/saturate_serial.rs b/tests/utils/src/bin/saturate_serial.rs index 28480516d..18ca12fb7 100644 --- a/tests/utils/src/bin/saturate_serial.rs +++ b/tests/utils/src/bin/saturate_serial.rs @@ -1,18 +1,19 @@ use std::path::Path; use std::time::Duration; -use std::{env, io, thread}; +use std::{env, io, process, thread}; use rand::random; use serial::SerialPort; pub fn main() { if let Some(port_name) = env::args().nth(1) { - let sleep = env::args().position(|x| x == "--sleep").is_some(); + let idles = env::args().position(|x| x == "--idles").is_some(); println!("Saturating port {:?} with 115200 8N1", port_name); - println!("Sleep: {}", sleep); + println!("Idles: {}", idles); + println!("Process ID: {}", process::id()); let mut port = serial::open(&port_name).unwrap(); - if saturate(&mut port, sleep).is_err() { + if saturate(&mut port, idles).is_err() { eprintln!("Unable to saturate port"); } } else { @@ -23,7 +24,7 @@ pub fn main() { } } -fn saturate(port: &mut T, sleep: bool) -> io::Result<()> { +fn saturate(port: &mut T, idles: bool) -> io::Result<()> { port.reconfigure(&|settings| { settings.set_baud_rate(serial::Baud115200)?; settings.set_char_size(serial::Bits8); @@ -39,7 +40,7 @@ fn saturate(port: &mut T, sleep: bool) -> io::Result<()> { port.write_all(&buf)?; - if sleep { + if idles { let micros = (random::() % 1000) as u64; println!("Sleeping {}us", micros); port.flush().unwrap(); @@ -49,4 +50,4 @@ fn saturate(port: &mut T, sleep: bool) -> io::Result<()> { written += len; 
println!("Written: {}", written); } -} \ No newline at end of file +}