mirror of
https://github.com/embassy-rs/embassy.git
synced 2024-11-21 22:32:29 +00:00
Merge pull request #3336 from liarokapisv/stm32-alternative-ringbuffer-impl
stm32: Ringbuffer rewrite
This commit is contained in:
commit
bcfbaaab95
5
ci.sh
5
ci.sh
@ -305,11 +305,6 @@ rm out/tests/stm32f207zg/eth
|
|||||||
# doesn't work, gives "noise error", no idea why. usart_dma does pass.
|
# doesn't work, gives "noise error", no idea why. usart_dma does pass.
|
||||||
rm out/tests/stm32u5a5zj/usart
|
rm out/tests/stm32u5a5zj/usart
|
||||||
|
|
||||||
# flaky, probably due to bad ringbuffered dma code.
|
|
||||||
rm out/tests/stm32l152re/usart_rx_ringbuffered
|
|
||||||
rm out/tests/stm32f207zg/usart_rx_ringbuffered
|
|
||||||
rm out/tests/stm32wl55jc/usart_rx_ringbuffered
|
|
||||||
|
|
||||||
if [[ -z "${TELEPROBE_TOKEN-}" ]]; then
|
if [[ -z "${TELEPROBE_TOKEN-}" ]]; then
|
||||||
echo No teleprobe token found, skipping running HIL tests
|
echo No teleprobe token found, skipping running HIL tests
|
||||||
exit
|
exit
|
||||||
|
@ -93,6 +93,8 @@ aligned = "0.4.1"
|
|||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
critical-section = { version = "1.1", features = ["std"] }
|
critical-section = { version = "1.1", features = ["std"] }
|
||||||
|
proptest = "1.5.0"
|
||||||
|
proptest-state-machine = "0.3.0"
|
||||||
|
|
||||||
[build-dependencies]
|
[build-dependencies]
|
||||||
proc-macro2 = "1.0.36"
|
proc-macro2 = "1.0.36"
|
||||||
|
@ -6,11 +6,13 @@ use embassy_hal_internal::{into_ref, Peripheral};
|
|||||||
use stm32_metapac::adc::vals::SampleTime;
|
use stm32_metapac::adc::vals::SampleTime;
|
||||||
|
|
||||||
use crate::adc::{Adc, AdcChannel, Instance, RxDma};
|
use crate::adc::{Adc, AdcChannel, Instance, RxDma};
|
||||||
use crate::dma::ringbuffer::OverrunError;
|
|
||||||
use crate::dma::{Priority, ReadableRingBuffer, TransferOptions};
|
use crate::dma::{Priority, ReadableRingBuffer, TransferOptions};
|
||||||
use crate::pac::adc::vals;
|
use crate::pac::adc::vals;
|
||||||
use crate::rcc;
|
use crate::rcc;
|
||||||
|
|
||||||
|
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||||
|
pub struct OverrunError;
|
||||||
|
|
||||||
fn clear_interrupt_flags(r: crate::pac::adc::Adc) {
|
fn clear_interrupt_flags(r: crate::pac::adc::Adc) {
|
||||||
r.sr().modify(|regs| {
|
r.sr().modify(|regs| {
|
||||||
regs.set_eoc(false);
|
regs.set_eoc(false);
|
||||||
@ -226,9 +228,8 @@ impl<'d, T: Instance> RingBufferedAdc<'d, T> {
|
|||||||
|
|
||||||
/// Turns on ADC if it is not already turned on and starts continuous DMA transfer.
|
/// Turns on ADC if it is not already turned on and starts continuous DMA transfer.
|
||||||
pub fn start(&mut self) -> Result<(), OverrunError> {
|
pub fn start(&mut self) -> Result<(), OverrunError> {
|
||||||
self.ring_buf.clear();
|
|
||||||
|
|
||||||
self.setup_adc();
|
self.setup_adc();
|
||||||
|
self.ring_buf.clear();
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
@ -245,7 +246,7 @@ impl<'d, T: Instance> RingBufferedAdc<'d, T> {
|
|||||||
/// [`start`]: #method.start
|
/// [`start`]: #method.start
|
||||||
pub fn teardown_adc(&mut self) {
|
pub fn teardown_adc(&mut self) {
|
||||||
// Stop the DMA transfer
|
// Stop the DMA transfer
|
||||||
self.ring_buf.request_stop();
|
self.ring_buf.request_pause();
|
||||||
|
|
||||||
let r = T::regs();
|
let r = T::regs();
|
||||||
|
|
||||||
|
@ -6,7 +6,7 @@ use core::task::{Context, Poll, Waker};
|
|||||||
use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
|
use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
|
||||||
use embassy_sync::waitqueue::AtomicWaker;
|
use embassy_sync::waitqueue::AtomicWaker;
|
||||||
|
|
||||||
use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
|
use super::ringbuffer::{DmaCtrl, Error, ReadableDmaRingBuffer, WritableDmaRingBuffer};
|
||||||
use super::word::{Word, WordSize};
|
use super::word::{Word, WordSize};
|
||||||
use super::{AnyChannel, Channel, Dir, Request, STATE};
|
use super::{AnyChannel, Channel, Dir, Request, STATE};
|
||||||
use crate::interrupt::typelevel::Interrupt;
|
use crate::interrupt::typelevel::Interrupt;
|
||||||
@ -299,7 +299,6 @@ impl AnyChannel {
|
|||||||
} else {
|
} else {
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
state.waker.wake();
|
state.waker.wake();
|
||||||
}
|
}
|
||||||
#[cfg(bdma)]
|
#[cfg(bdma)]
|
||||||
@ -763,10 +762,6 @@ impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
|
|||||||
self.0.get_remaining_transfers() as _
|
self.0.get_remaining_transfers() as _
|
||||||
}
|
}
|
||||||
|
|
||||||
fn get_complete_count(&self) -> usize {
|
|
||||||
STATE[self.0.id as usize].complete_count.load(Ordering::Acquire)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reset_complete_count(&mut self) -> usize {
|
fn reset_complete_count(&mut self) -> usize {
|
||||||
let state = &STATE[self.0.id as usize];
|
let state = &STATE[self.0.id as usize];
|
||||||
#[cfg(not(armv6m))]
|
#[cfg(not(armv6m))]
|
||||||
@ -832,27 +827,28 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
|
|||||||
///
|
///
|
||||||
/// You must call this after creating it for it to work.
|
/// You must call this after creating it for it to work.
|
||||||
pub fn start(&mut self) {
|
pub fn start(&mut self) {
|
||||||
self.channel.start()
|
self.channel.start();
|
||||||
|
self.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Clear all data in the ring buffer.
|
/// Clear all data in the ring buffer.
|
||||||
pub fn clear(&mut self) {
|
pub fn clear(&mut self) {
|
||||||
self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
|
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read elements from the ring buffer
|
/// Read elements from the ring buffer
|
||||||
/// Return a tuple of the length read and the length remaining in the buffer
|
/// Return a tuple of the length read and the length remaining in the buffer
|
||||||
/// If not all of the elements were read, then there will be some elements in the buffer remaining
|
/// If not all of the elements were read, then there will be some elements in the buffer remaining
|
||||||
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
|
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
/// Error is returned if the portion to be read was overwritten by the DMA controller.
|
||||||
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
|
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), Error> {
|
||||||
self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Read an exact number of elements from the ringbuffer.
|
/// Read an exact number of elements from the ringbuffer.
|
||||||
///
|
///
|
||||||
/// Returns the remaining number of elements available for immediate reading.
|
/// Returns the remaining number of elements available for immediate reading.
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
/// Error is returned if the portion to be read was overwritten by the DMA controller.
|
||||||
///
|
///
|
||||||
/// Async/Wake Behavior:
|
/// Async/Wake Behavior:
|
||||||
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
|
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
|
||||||
@ -860,12 +856,17 @@ impl<'a, W: Word> ReadableRingBuffer<'a, W> {
|
|||||||
/// ring buffer was created with a buffer of size 'N':
|
/// ring buffer was created with a buffer of size 'N':
|
||||||
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
|
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
|
||||||
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
|
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
|
||||||
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
|
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, Error> {
|
||||||
self.ringbuf
|
self.ringbuf
|
||||||
.read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
.read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The current length of the ringbuffer
|
||||||
|
pub fn len(&mut self) -> Result<usize, Error> {
|
||||||
|
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
|
||||||
|
}
|
||||||
|
|
||||||
/// The capacity of the ringbuffer
|
/// The capacity of the ringbuffer
|
||||||
pub const fn capacity(&self) -> usize {
|
pub const fn capacity(&self) -> usize {
|
||||||
self.ringbuf.cap()
|
self.ringbuf.cap()
|
||||||
@ -979,34 +980,40 @@ impl<'a, W: Word> WritableRingBuffer<'a, W> {
|
|||||||
///
|
///
|
||||||
/// You must call this after creating it for it to work.
|
/// You must call this after creating it for it to work.
|
||||||
pub fn start(&mut self) {
|
pub fn start(&mut self) {
|
||||||
self.channel.start()
|
self.channel.start();
|
||||||
|
self.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Clear all data in the ring buffer.
|
/// Clear all data in the ring buffer.
|
||||||
pub fn clear(&mut self) {
|
pub fn clear(&mut self) {
|
||||||
self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
|
self.ringbuf.reset(&mut DmaCtrlImpl(self.channel.reborrow()));
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write elements directly to the raw buffer.
|
/// Write elements directly to the raw buffer.
|
||||||
/// This can be used to fill the buffer before starting the DMA transfer.
|
/// This can be used to fill the buffer before starting the DMA transfer.
|
||||||
#[allow(dead_code)]
|
#[allow(dead_code)]
|
||||||
pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
|
pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
|
||||||
self.ringbuf.write_immediate(buf)
|
self.ringbuf.write_immediate(buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write elements from the ring buffer
|
/// Write elements from the ring buffer
|
||||||
/// Return a tuple of the length written and the length remaining in the buffer
|
/// Return a tuple of the length written and the length remaining in the buffer
|
||||||
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
|
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
|
||||||
self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Write an exact number of elements to the ringbuffer.
|
/// Write an exact number of elements to the ringbuffer.
|
||||||
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
|
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, Error> {
|
||||||
self.ringbuf
|
self.ringbuf
|
||||||
.write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
.write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
||||||
.await
|
.await
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// The current length of the ringbuffer
|
||||||
|
pub fn len(&mut self) -> Result<usize, Error> {
|
||||||
|
Ok(self.ringbuf.len(&mut DmaCtrlImpl(self.channel.reborrow()))?)
|
||||||
|
}
|
||||||
|
|
||||||
/// The capacity of the ringbuffer
|
/// The capacity of the ringbuffer
|
||||||
pub const fn capacity(&self) -> usize {
|
pub const fn capacity(&self) -> usize {
|
||||||
self.ringbuf.cap()
|
self.ringbuf.cap()
|
||||||
|
@ -1,668 +0,0 @@
|
|||||||
#![cfg_attr(gpdma, allow(unused))]
|
|
||||||
|
|
||||||
use core::future::poll_fn;
|
|
||||||
use core::ops::Range;
|
|
||||||
use core::sync::atomic::{compiler_fence, Ordering};
|
|
||||||
use core::task::{Poll, Waker};
|
|
||||||
|
|
||||||
use super::word::Word;
|
|
||||||
|
|
||||||
/// A "read-only" ring-buffer to be used together with the DMA controller which
|
|
||||||
/// writes in a circular way, "uncontrolled" to the buffer.
|
|
||||||
///
|
|
||||||
/// A snapshot of the ring buffer state can be attained by setting the `ndtr` field
|
|
||||||
/// to the current register value. `ndtr` describes the current position of the DMA
|
|
||||||
/// write.
|
|
||||||
///
|
|
||||||
/// # Buffer layout
|
|
||||||
///
|
|
||||||
/// ```text
|
|
||||||
/// Without wraparound: With wraparound:
|
|
||||||
///
|
|
||||||
/// + buf +--- NDTR ---+ + buf +---------- NDTR ----------+
|
|
||||||
/// | | | | | |
|
|
||||||
/// v v v v v v
|
|
||||||
/// +-----------------------------------------+ +-----------------------------------------+
|
|
||||||
/// |oooooooooooXXXXXXXXXXXXXXXXoooooooooooooo| |XXXXXXXXXXXXXooooooooooooXXXXXXXXXXXXXXXX|
|
|
||||||
/// +-----------------------------------------+ +-----------------------------------------+
|
|
||||||
/// ^ ^ ^ ^ ^ ^
|
|
||||||
/// | | | | | |
|
|
||||||
/// +- start --+ | +- end ------+ |
|
|
||||||
/// | | | |
|
|
||||||
/// +- end --------------------+ +- start ----------------+
|
|
||||||
/// ```
|
|
||||||
pub struct ReadableDmaRingBuffer<'a, W: Word> {
|
|
||||||
pub(crate) dma_buf: &'a mut [W],
|
|
||||||
start: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
#[derive(Debug, PartialEq)]
|
|
||||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
|
||||||
pub struct OverrunError;
|
|
||||||
|
|
||||||
pub trait DmaCtrl {
|
|
||||||
/// Get the NDTR register value, i.e. the space left in the underlying
|
|
||||||
/// buffer until the dma writer wraps.
|
|
||||||
fn get_remaining_transfers(&self) -> usize;
|
|
||||||
|
|
||||||
/// Get the transfer completed counter.
|
|
||||||
/// This counter is incremented by the dma controller when NDTR is reloaded,
|
|
||||||
/// i.e. when the writing wraps.
|
|
||||||
fn get_complete_count(&self) -> usize;
|
|
||||||
|
|
||||||
/// Reset the transfer completed counter to 0 and return the value just prior to the reset.
|
|
||||||
fn reset_complete_count(&mut self) -> usize;
|
|
||||||
|
|
||||||
/// Set the waker for a running poll_fn
|
|
||||||
fn set_waker(&mut self, waker: &Waker);
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
|
|
||||||
pub fn new(dma_buf: &'a mut [W]) -> Self {
|
|
||||||
Self { dma_buf, start: 0 }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reset the ring buffer to its initial state
|
|
||||||
pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
|
|
||||||
self.start = 0;
|
|
||||||
dma.reset_complete_count();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The capacity of the ringbuffer
|
|
||||||
pub const fn cap(&self) -> usize {
|
|
||||||
self.dma_buf.len()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The current position of the ringbuffer
|
|
||||||
fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
|
|
||||||
self.cap() - dma.get_remaining_transfers()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read an exact number of elements from the ringbuffer.
|
|
||||||
///
|
|
||||||
/// Returns the remaining number of elements available for immediate reading.
|
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
|
||||||
///
|
|
||||||
/// Async/Wake Behavior:
|
|
||||||
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
|
|
||||||
/// and when it wraps around. This means that when called with a buffer of length 'M', when this
|
|
||||||
/// ring buffer was created with a buffer of size 'N':
|
|
||||||
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
|
|
||||||
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
|
|
||||||
pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, OverrunError> {
|
|
||||||
let mut read_data = 0;
|
|
||||||
let buffer_len = buffer.len();
|
|
||||||
|
|
||||||
poll_fn(|cx| {
|
|
||||||
dma.set_waker(cx.waker());
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
match self.read(dma, &mut buffer[read_data..buffer_len]) {
|
|
||||||
Ok((len, remaining)) => {
|
|
||||||
read_data += len;
|
|
||||||
if read_data == buffer_len {
|
|
||||||
Poll::Ready(Ok(remaining))
|
|
||||||
} else {
|
|
||||||
Poll::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => Poll::Ready(Err(e)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read elements from the ring buffer
|
|
||||||
/// Return a tuple of the length read and the length remaining in the buffer
|
|
||||||
/// If not all of the elements were read, then there will be some elements in the buffer remaining
|
|
||||||
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
|
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
|
||||||
pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
/*
|
|
||||||
This algorithm is optimistic: we assume we haven't overrun more than a full buffer and then check
|
|
||||||
after we've done our work to see we have. This is because on stm32, an interrupt is not guaranteed
|
|
||||||
to fire in the same clock cycle that a register is read, so checking get_complete_count early does
|
|
||||||
not yield relevant information.
|
|
||||||
|
|
||||||
Therefore, the only variable we really need to know is ndtr. If the dma has overrun by more than a full
|
|
||||||
buffer, we will do a bit more work than we have to, but algorithms should not be optimized for error
|
|
||||||
conditions.
|
|
||||||
|
|
||||||
After we've done our work, we confirm that we haven't overrun more than a full buffer, and also that
|
|
||||||
the dma has not overrun within the data we could have copied. We check the data we could have copied
|
|
||||||
rather than the data we actually copied because it costs nothing and confirms an error condition
|
|
||||||
earlier.
|
|
||||||
*/
|
|
||||||
let end = self.pos(dma);
|
|
||||||
if self.start == end && dma.get_complete_count() == 0 {
|
|
||||||
// No elements are available in the buffer
|
|
||||||
Ok((0, self.cap()))
|
|
||||||
} else if self.start < end {
|
|
||||||
// The available, unread portion in the ring buffer DOES NOT wrap
|
|
||||||
// Copy out the elements from the dma buffer
|
|
||||||
let len = self.copy_to(buf, self.start..end);
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
/*
|
|
||||||
first, check if the dma has wrapped at all if it's after end
|
|
||||||
or more than once if it's before start
|
|
||||||
|
|
||||||
this is in a critical section to try to reduce mushy behavior.
|
|
||||||
it's not ideal but it's the best we can do
|
|
||||||
|
|
||||||
then, get the current position of of the dma write and check
|
|
||||||
if it's inside data we could have copied
|
|
||||||
*/
|
|
||||||
let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
|
|
||||||
if (pos >= self.start && pos < end) || (complete_count > 0 && pos >= end) || complete_count > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.start = (self.start + len) % self.cap();
|
|
||||||
|
|
||||||
Ok((len, self.cap() - self.start))
|
|
||||||
}
|
|
||||||
} else if self.start + buf.len() < self.cap() {
|
|
||||||
// The available, unread portion in the ring buffer DOES wrap
|
|
||||||
// The DMA writer has wrapped since we last read and is currently
|
|
||||||
// writing (or the next byte added will be) in the beginning of the ring buffer.
|
|
||||||
|
|
||||||
// The provided read buffer is not large enough to include all elements from the tail of the dma buffer.
|
|
||||||
|
|
||||||
// Copy out from the dma buffer
|
|
||||||
let len = self.copy_to(buf, self.start..self.cap());
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
/*
|
|
||||||
first, check if the dma has wrapped around more than once
|
|
||||||
|
|
||||||
then, get the current position of of the dma write and check
|
|
||||||
if it's inside data we could have copied
|
|
||||||
*/
|
|
||||||
let pos = self.pos(dma);
|
|
||||||
if pos > self.start || pos < end || dma.get_complete_count() > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.start = (self.start + len) % self.cap();
|
|
||||||
|
|
||||||
Ok((len, self.start + end))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// The available, unread portion in the ring buffer DOES wrap
|
|
||||||
// The DMA writer has wrapped since we last read and is currently
|
|
||||||
// writing (or the next byte added will be) in the beginning of the ring buffer.
|
|
||||||
|
|
||||||
// The provided read buffer is large enough to include all elements from the tail of the dma buffer,
|
|
||||||
// so the next read will not have any unread tail elements in the ring buffer.
|
|
||||||
|
|
||||||
// Copy out from the dma buffer
|
|
||||||
let tail = self.copy_to(buf, self.start..self.cap());
|
|
||||||
let head = self.copy_to(&mut buf[tail..], 0..end);
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
/*
|
|
||||||
first, check if the dma has wrapped around more than once
|
|
||||||
|
|
||||||
then, get the current position of of the dma write and check
|
|
||||||
if it's inside data we could have copied
|
|
||||||
*/
|
|
||||||
let pos = self.pos(dma);
|
|
||||||
if pos > self.start || pos < end || dma.reset_complete_count() > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.start = head;
|
|
||||||
Ok((tail + head, self.cap() - self.start))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// Copy from the dma buffer at `data_range` into `buf`
|
|
||||||
fn copy_to(&mut self, buf: &mut [W], data_range: Range<usize>) -> usize {
|
|
||||||
// Limit the number of elements that can be copied
|
|
||||||
let length = usize::min(data_range.len(), buf.len());
|
|
||||||
|
|
||||||
// Copy from dma buffer into read buffer
|
|
||||||
// We need to do it like this instead of a simple copy_from_slice() because
|
|
||||||
// reading from a part of memory that may be simultaneously written to is unsafe
|
|
||||||
unsafe {
|
|
||||||
let dma_buf = self.dma_buf.as_ptr();
|
|
||||||
|
|
||||||
for i in 0..length {
|
|
||||||
buf[i] = core::ptr::read_volatile(dma_buf.offset((data_range.start + i) as isize));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
length
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub struct WritableDmaRingBuffer<'a, W: Word> {
|
|
||||||
pub(crate) dma_buf: &'a mut [W],
|
|
||||||
end: usize,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
|
|
||||||
pub fn new(dma_buf: &'a mut [W]) -> Self {
|
|
||||||
Self { dma_buf, end: 0 }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Reset the ring buffer to its initial state
|
|
||||||
pub fn clear(&mut self, dma: &mut impl DmaCtrl) {
|
|
||||||
self.end = 0;
|
|
||||||
dma.reset_complete_count();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The capacity of the ringbuffer
|
|
||||||
pub const fn cap(&self) -> usize {
|
|
||||||
self.dma_buf.len()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The current position of the ringbuffer
|
|
||||||
fn pos(&self, dma: &mut impl DmaCtrl) -> usize {
|
|
||||||
self.cap() - dma.get_remaining_transfers()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write elements directly to the buffer. This must be done before the DMA is started
|
|
||||||
/// or after the buffer has been cleared using `clear()`.
|
|
||||||
pub fn write_immediate(&mut self, buffer: &[W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
if self.end != 0 {
|
|
||||||
return Err(OverrunError);
|
|
||||||
}
|
|
||||||
let written = self.copy_from(buffer, 0..self.cap());
|
|
||||||
self.end = written % self.cap();
|
|
||||||
Ok((written, self.cap() - written))
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write an exact number of elements to the ringbuffer.
|
|
||||||
pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, OverrunError> {
|
|
||||||
let mut written_data = 0;
|
|
||||||
let buffer_len = buffer.len();
|
|
||||||
|
|
||||||
poll_fn(|cx| {
|
|
||||||
dma.set_waker(cx.waker());
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
match self.write(dma, &buffer[written_data..buffer_len]) {
|
|
||||||
Ok((len, remaining)) => {
|
|
||||||
written_data += len;
|
|
||||||
if written_data == buffer_len {
|
|
||||||
Poll::Ready(Ok(remaining))
|
|
||||||
} else {
|
|
||||||
Poll::Pending
|
|
||||||
}
|
|
||||||
}
|
|
||||||
Err(e) => Poll::Ready(Err(e)),
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write elements from the ring buffer
|
|
||||||
/// Return a tuple of the length written and the capacity remaining to be written in the buffer
|
|
||||||
pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
let start = self.pos(dma);
|
|
||||||
if start > self.end {
|
|
||||||
// The occupied portion in the ring buffer DOES wrap
|
|
||||||
let len = self.copy_from(buf, self.end..start);
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
// Confirm that the DMA is not inside data we could have written
|
|
||||||
let (pos, complete_count) = critical_section::with(|_| (self.pos(dma), dma.get_complete_count()));
|
|
||||||
if (pos >= self.end && pos < start) || (complete_count > 0 && pos >= start) || complete_count > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.end = (self.end + len) % self.cap();
|
|
||||||
|
|
||||||
Ok((len, self.cap() - (start - self.end)))
|
|
||||||
}
|
|
||||||
} else if start == self.end && dma.get_complete_count() == 0 {
|
|
||||||
Ok((0, 0))
|
|
||||||
} else if start <= self.end && self.end + buf.len() < self.cap() {
|
|
||||||
// The occupied portion in the ring buffer DOES NOT wrap
|
|
||||||
// and copying elements into the buffer WILL NOT cause it to
|
|
||||||
|
|
||||||
// Copy into the dma buffer
|
|
||||||
let len = self.copy_from(buf, self.end..self.cap());
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
// Confirm that the DMA is not inside data we could have written
|
|
||||||
let pos = self.pos(dma);
|
|
||||||
if pos > self.end || pos < start || dma.get_complete_count() > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.end = (self.end + len) % self.cap();
|
|
||||||
|
|
||||||
Ok((len, self.cap() - (self.end - start)))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// The occupied portion in the ring buffer DOES NOT wrap
|
|
||||||
// and copying elements into the buffer WILL cause it to
|
|
||||||
|
|
||||||
let tail = self.copy_from(buf, self.end..self.cap());
|
|
||||||
let head = self.copy_from(&buf[tail..], 0..start);
|
|
||||||
|
|
||||||
compiler_fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
// Confirm that the DMA is not inside data we could have written
|
|
||||||
let pos = self.pos(dma);
|
|
||||||
if pos > self.end || pos < start || dma.reset_complete_count() > 1 {
|
|
||||||
Err(OverrunError)
|
|
||||||
} else {
|
|
||||||
self.end = head;
|
|
||||||
|
|
||||||
Ok((tail + head, self.cap() - (start - self.end)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
/// Copy into the dma buffer at `data_range` from `buf`
|
|
||||||
fn copy_from(&mut self, buf: &[W], data_range: Range<usize>) -> usize {
|
|
||||||
// Limit the number of elements that can be copied
|
|
||||||
let length = usize::min(data_range.len(), buf.len());
|
|
||||||
|
|
||||||
// Copy into dma buffer from read buffer
|
|
||||||
// We need to do it like this instead of a simple copy_from_slice() because
|
|
||||||
// reading from a part of memory that may be simultaneously written to is unsafe
|
|
||||||
unsafe {
|
|
||||||
let dma_buf = self.dma_buf.as_mut_ptr();
|
|
||||||
|
|
||||||
for i in 0..length {
|
|
||||||
core::ptr::write_volatile(dma_buf.offset((data_range.start + i) as isize), buf[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
length
|
|
||||||
}
|
|
||||||
}
|
|
||||||
#[cfg(test)]
|
|
||||||
mod tests {
|
|
||||||
use core::array;
|
|
||||||
use std::{cell, vec};
|
|
||||||
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
#[allow(dead_code)]
|
|
||||||
#[derive(PartialEq, Debug)]
|
|
||||||
enum TestCircularTransferRequest {
|
|
||||||
GetCompleteCount(usize),
|
|
||||||
ResetCompleteCount(usize),
|
|
||||||
PositionRequest(usize),
|
|
||||||
}
|
|
||||||
|
|
||||||
struct TestCircularTransfer {
|
|
||||||
len: usize,
|
|
||||||
requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl DmaCtrl for TestCircularTransfer {
|
|
||||||
fn get_remaining_transfers(&self) -> usize {
|
|
||||||
match self.requests.borrow_mut().pop().unwrap() {
|
|
||||||
TestCircularTransferRequest::PositionRequest(pos) => {
|
|
||||||
let len = self.len;
|
|
||||||
|
|
||||||
assert!(len >= pos);
|
|
||||||
|
|
||||||
len - pos
|
|
||||||
}
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_complete_count(&self) -> usize {
|
|
||||||
match self.requests.borrow_mut().pop().unwrap() {
|
|
||||||
TestCircularTransferRequest::GetCompleteCount(complete_count) => complete_count,
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reset_complete_count(&mut self) -> usize {
|
|
||||||
match self.requests.get_mut().pop().unwrap() {
|
|
||||||
TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
|
|
||||||
_ => unreachable!(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn set_waker(&mut self, waker: &Waker) {}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl TestCircularTransfer {
|
|
||||||
pub fn new(len: usize) -> Self {
|
|
||||||
Self {
|
|
||||||
requests: cell::RefCell::new(vec![]),
|
|
||||||
len,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
|
|
||||||
requests.reverse();
|
|
||||||
self.requests.replace(requests);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn empty_and_read_not_started() {
|
|
||||||
let mut dma_buf = [0u8; 16];
|
|
||||||
let ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);
|
|
||||||
|
|
||||||
assert_eq!(0, ringbuf.start);
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
fn can_read() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    // DMA has advanced from 8 to 10 with no wrap: read 2 bytes.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(8),
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 2];
    assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!([0, 1], buf);
    assert_eq!(2, ringbuf.start);

    // DMA advances 10 -> 12: the next 2 bytes become readable.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::PositionRequest(12),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 2];
    assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!([2, 3], buf);
    assert_eq!(4, ringbuf.start);

    // DMA advances 12 -> 14: an 8-byte read drains up to the DMA position.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(12),
        TestCircularTransferRequest::PositionRequest(14),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 8];
    assert_eq!(8, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!([4, 5, 6, 7, 8, 9], buf[..6]);
    assert_eq!(12, ringbuf.start);
}
|
|
||||||
|
|
||||||
#[test]
fn can_read_with_wrap() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    /*
       Read to close to the end of the buffer
    */
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(14),
        TestCircularTransferRequest::PositionRequest(16),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 14];
    assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(14, ringbuf.start);

    /*
       Now, read around the buffer
    */
    // DMA has wrapped once (ResetCompleteCount(1)); a 6-byte read crosses
    // the end of the buffer and the start index wraps to 4.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::PositionRequest(8),
        TestCircularTransferRequest::ResetCompleteCount(1),
    ]);
    let mut buf = [0; 6];
    assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(4, ringbuf.start);
}
|
|
||||||
|
|
||||||
#[test]
fn can_read_when_dma_writer_is_wrapped_and_read_does_not_wrap() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    /*
       Read to close to the end of the buffer
    */
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(14),
        TestCircularTransferRequest::PositionRequest(16),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 14];
    assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(14, ringbuf.start);

    /*
       Now, read to the end of the buffer
    */
    // DMA has wrapped, but this 2-byte read stops exactly at the end of
    // the buffer, so `start` wraps to 0 without the read itself wrapping.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::PositionRequest(8),
        TestCircularTransferRequest::ResetCompleteCount(1),
    ]);
    let mut buf = [0; 2];
    assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(0, ringbuf.start);
}
|
|
||||||
|
|
||||||
#[test]
fn can_read_when_dma_writer_wraps_once_with_same_ndtr() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    /*
       Read to about the middle of the buffer
    */
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 6];
    assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(6, ringbuf.start);

    /*
       Now, wrap the DMA controller around
    */
    // NDTR reads the same (position 6) before and after, but the complete
    // count of 1 tells the ring buffer a full wrap happened in between.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::GetCompleteCount(1),
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::GetCompleteCount(1),
    ]);
    let mut buf = [0; 6];
    assert_eq!(6, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(12, ringbuf.start);
}
|
|
||||||
|
|
||||||
#[test]
fn cannot_read_when_dma_writer_overwrites_during_not_wrapping_read() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    /*
       Read a few bytes
    */
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(2),
        TestCircularTransferRequest::PositionRequest(2),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 6];
    assert_eq!(2, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(2, ringbuf.start);

    /*
       Now, overtake the reader
    */
    // A full wrap (complete count 1) while the reader sits at 2 means the
    // unread region was overwritten: the read must report an overrun.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(4),
        TestCircularTransferRequest::PositionRequest(6),
        TestCircularTransferRequest::GetCompleteCount(1),
    ]);
    let mut buf = [0; 6];
    assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
}
|
|
||||||
|
|
||||||
#[test]
fn cannot_read_when_dma_writer_overwrites_during_wrapping_read() {
    let mut dma = TestCircularTransfer::new(16);

    let mut dma_buf: [u8; 16] = array::from_fn(|idx| idx as u8); // 0, 1, ..., 15
    let mut ringbuf = ReadableDmaRingBuffer::new(&mut dma_buf);

    assert_eq!(0, ringbuf.start);
    assert_eq!(16, ringbuf.cap());

    /*
       Read to close to the end of the buffer
    */
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(14),
        TestCircularTransferRequest::PositionRequest(16),
        TestCircularTransferRequest::GetCompleteCount(0),
    ]);
    let mut buf = [0; 14];
    assert_eq!(14, ringbuf.read(&mut dma, &mut buf).unwrap().0);
    assert_eq!(14, ringbuf.start);

    /*
       Now, overtake the reader
    */
    // Two full wraps while the reader sits near the end of the buffer:
    // the pending wrapping read must fail with an overrun.
    dma.setup(vec![
        TestCircularTransferRequest::PositionRequest(8),
        TestCircularTransferRequest::PositionRequest(10),
        TestCircularTransferRequest::ResetCompleteCount(2),
    ]);
    let mut buf = [0; 6];
    assert_eq!(OverrunError, ringbuf.read(&mut dma, &mut buf).unwrap_err());
}
|
|
||||||
}
|
|
305
embassy-stm32/src/dma/ringbuffer/mod.rs
Normal file
305
embassy-stm32/src/dma/ringbuffer/mod.rs
Normal file
@ -0,0 +1,305 @@
|
|||||||
|
#![cfg_attr(gpdma, allow(unused))]
|
||||||
|
|
||||||
|
use core::future::poll_fn;
|
||||||
|
use core::task::{Poll, Waker};
|
||||||
|
|
||||||
|
use crate::dma::word::Word;
|
||||||
|
|
||||||
|
/// Minimal view of the DMA hardware state that the ring buffers need:
/// current transfer position, wrap count, and a waker hook for async use.
pub trait DmaCtrl {
    /// Get the NDTR register value, i.e. the space left in the underlying
    /// buffer until the dma writer wraps.
    fn get_remaining_transfers(&self) -> usize;

    /// Reset the transfer completed counter to 0 and return the value just prior to the reset.
    fn reset_complete_count(&mut self) -> usize;

    /// Set the waker for a running poll_fn
    fn set_waker(&mut self, waker: &Waker);
}
|
||||||
|
|
||||||
|
/// Errors reported by the DMA ring buffers.
#[derive(Debug, PartialEq)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub enum Error {
    /// The DMA lapped the CPU: unread data was overwritten (reader) or
    /// unwritten data was consumed (writer).
    Overrun,
    /// The tracked indices violated an internal invariant; this indicates
    /// a synchronization bug rather than an expected runtime condition.
    DmaUnsynced,
}
|
||||||
|
|
||||||
|
/// Logical position of one party (DMA or CPU) in the circular buffer,
/// tracked as a wrap counter plus an in-buffer offset so two positions can
/// be compared even across buffer wrap-arounds.
#[derive(Debug, Clone, Copy, Default)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
struct DmaIndex {
    // Number of completed passes over the buffer.
    complete_count: usize,
    // Offset within the current pass.
    pos: usize,
}
|
||||||
|
|
||||||
|
impl DmaIndex {
    /// Rewind to the start of the buffer with zero completed wraps.
    fn reset(&mut self) {
        self.pos = 0;
        self.complete_count = 0;
    }

    /// Translate this index plus `offset` into a concrete buffer slot.
    fn as_index(&self, cap: usize, offset: usize) -> usize {
        (self.pos + offset) % cap
    }

    /// Re-synchronize this index with the DMA hardware's current position.
    fn dma_sync(&mut self, cap: usize, dma: &mut impl DmaCtrl) {
        // Important!
        // The ordering of the first two lines matters!
        // If changed, the code will detect a wrong +capacity
        // jump at wrap-around.
        let count_diff = dma.reset_complete_count();
        let pos = cap - dma.get_remaining_transfers();
        // If the position moved backwards but no wrap was recorded, the
        // wrap happened between the two hardware reads above; clamp to the
        // last slot instead of crediting a phantom extra pass.
        self.pos = if pos < self.pos && count_diff == 0 {
            cap - 1
        } else {
            pos
        };

        self.complete_count += count_diff;
    }

    /// Move forward by `steps` slots, carrying overflow into the wrap count.
    fn advance(&mut self, cap: usize, steps: usize) {
        let next = self.pos + steps;
        self.complete_count += next / cap;
        self.pos = next % cap;
    }

    /// Subtract the common wrap count from both indices so their
    /// `complete_count` fields stay small and cannot overflow over time.
    fn normalize(lhs: &mut DmaIndex, rhs: &mut DmaIndex) {
        let min_count = lhs.complete_count.min(rhs.complete_count);
        lhs.complete_count -= min_count;
        rhs.complete_count -= min_count;
    }

    /// Signed distance in slots from `rhs` to `self`.
    fn diff(&self, cap: usize, rhs: &DmaIndex) -> isize {
        (self.complete_count * cap + self.pos) as isize - (rhs.complete_count * cap + rhs.pos) as isize
    }
}
|
||||||
|
|
||||||
|
/// Ring buffer the DMA peripheral writes into and the CPU reads out of.
pub struct ReadableDmaRingBuffer<'a, W: Word> {
    // Backing storage the DMA writes into in circular mode.
    dma_buf: &'a mut [W],
    // Producer position (the DMA's), refreshed via `dma_sync`.
    write_index: DmaIndex,
    // Consumer position (the CPU's).
    read_index: DmaIndex,
}
|
||||||
|
|
||||||
|
impl<'a, W: Word> ReadableDmaRingBuffer<'a, W> {
    /// Construct an empty buffer.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        Self {
            dma_buf,
            write_index: Default::default(),
            read_index: Default::default(),
        }
    }

    /// Reset the ring buffer to its initial state.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.write_index.reset();
        self.write_index.dma_sync(self.cap(), dma);
        // Start reading exactly where the DMA currently is, i.e. empty.
        self.read_index = self.write_index;
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Get the available readable dma samples.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.write_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.write_index, &mut self.read_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            // Reader ahead of the DMA writer: broken invariant.
            Err(Error::DmaUnsynced)
        } else if diff > self.cap() as isize {
            // DMA lapped the reader and overwrote unread data.
            Err(Error::Overrun)
        } else {
            Ok(diff as usize)
        }
    }

    /// Read elements from the ring buffer.
    ///
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// Error is returned if the portion to be read was overwritten by the DMA controller,
    /// in which case the ringbuffer will automatically reset itself.
    pub fn read(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        self.read_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// Error is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &mut [W]) -> Result<usize, Error> {
        let mut read_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            // Keep draining into the unfilled tail of `buffer` until it is
            // full; any error propagates out immediately.
            match self.read(dma, &mut buffer[read_data..buffer_len]) {
                Ok((len, remaining)) => {
                    read_data += len;
                    if read_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    // Copy out up to `buf.len()` readable words, then advance the read
    // index. The second `len` call rechecks for an overrun that may have
    // happened *during* the copy.
    fn read_raw(&mut self, dma: &mut impl DmaCtrl, buf: &mut [W]) -> Result<(usize, usize), Error> {
        let readable = self.len(dma)?.min(buf.len());
        for i in 0..readable {
            buf[i] = self.read_buf(i);
        }
        let available = self.len(dma)?;
        self.read_index.advance(self.cap(), readable);
        Ok((readable, available - readable))
    }

    // Volatile read of the slot `offset` words past the read index; the DMA
    // mutates this memory concurrently, so a plain read must not be cached.
    fn read_buf(&self, offset: usize) -> W {
        unsafe {
            core::ptr::read_volatile(
                self.dma_buf
                    .as_ptr()
                    .offset(self.read_index.as_index(self.cap(), offset) as isize),
            )
        }
    }
}
|
||||||
|
|
||||||
|
/// Ring buffer the CPU writes into and the DMA peripheral reads out of.
pub struct WritableDmaRingBuffer<'a, W: Word> {
    // Backing storage the DMA reads from in circular mode.
    dma_buf: &'a mut [W],
    // Consumer position (the DMA's), refreshed via `dma_sync`.
    read_index: DmaIndex,
    // Producer position (the CPU's).
    write_index: DmaIndex,
}
|
||||||
|
|
||||||
|
impl<'a, W: Word> WritableDmaRingBuffer<'a, W> {
    /// Construct a ringbuffer filled with the given buffer data.
    pub fn new(dma_buf: &'a mut [W]) -> Self {
        let len = dma_buf.len();
        Self {
            dma_buf,
            read_index: Default::default(),
            // The writer starts a full buffer ahead of the DMA reader, so
            // the buffer initially counts as completely full.
            write_index: DmaIndex {
                complete_count: 0,
                pos: len,
            },
        }
    }

    /// Reset the ring buffer to its initial state. The buffer after the reset will be full.
    pub fn reset(&mut self, dma: &mut impl DmaCtrl) {
        dma.reset_complete_count();
        self.read_index.reset();
        self.read_index.dma_sync(self.cap(), dma);
        self.write_index = self.read_index;
        // Re-establish the "full buffer" lead over the DMA reader.
        self.write_index.advance(self.cap(), self.cap());
    }

    /// Get the remaining writable dma samples.
    pub fn len(&mut self, dma: &mut impl DmaCtrl) -> Result<usize, Error> {
        self.read_index.dma_sync(self.cap(), dma);
        DmaIndex::normalize(&mut self.read_index, &mut self.write_index);

        let diff = self.write_index.diff(self.cap(), &self.read_index);

        if diff < 0 {
            // DMA read past the writer: it consumed data never written.
            Err(Error::Overrun)
        } else if diff > self.cap() as isize {
            // Writer more than a full buffer ahead: broken invariant.
            Err(Error::DmaUnsynced)
        } else {
            Ok(self.cap().saturating_sub(diff as usize))
        }
    }

    /// Get the full ringbuffer capacity.
    pub const fn cap(&self) -> usize {
        self.dma_buf.len()
    }

    /// Append data to the ring buffer.
    /// Returns a tuple of the data written and the remaining write capacity in the buffer.
    /// Error is returned if the portion to be written was previously read by the DMA controller.
    /// In this case, the ringbuffer will automatically reset itself, giving a full buffer worth of
    /// leeway between the write index and the DMA.
    pub fn write(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        self.write_raw(dma, buf).inspect_err(|_e| {
            self.reset(dma);
        })
    }

    /// Write elements directly to the buffer.
    // NOTE(review): if `buf.len() > cap()` the earliest elements are
    // overwritten by later, wrapped ones while `written` is clamped to
    // cap() — confirm callers never pass more than a buffer's worth.
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), Error> {
        for (i, data) in buf.iter().enumerate() {
            self.write_buf(i, *data)
        }
        let written = buf.len().min(self.cap());
        Ok((written, self.cap() - written))
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, dma: &mut impl DmaCtrl, buffer: &[W]) -> Result<usize, Error> {
        let mut written_data = 0;
        let buffer_len = buffer.len();

        poll_fn(|cx| {
            dma.set_waker(cx.waker());

            // Keep pushing the unwritten tail of `buffer` until all of it
            // has been accepted; any error propagates out immediately.
            match self.write(dma, &buffer[written_data..buffer_len]) {
                Ok((len, remaining)) => {
                    written_data += len;
                    if written_data == buffer_len {
                        Poll::Ready(Ok(remaining))
                    } else {
                        Poll::Pending
                    }
                }
                Err(e) => Poll::Ready(Err(e)),
            }
        })
        .await
    }

    // Copy in up to `buf.len()` words of free space, then advance the write
    // index. The second `len` call rechecks for an error that may have
    // developed *during* the copy.
    fn write_raw(&mut self, dma: &mut impl DmaCtrl, buf: &[W]) -> Result<(usize, usize), Error> {
        let writable = self.len(dma)?.min(buf.len());
        for i in 0..writable {
            self.write_buf(i, buf[i]);
        }
        let available = self.len(dma)?;
        self.write_index.advance(self.cap(), writable);
        Ok((writable, available - writable))
    }

    // Volatile write of the slot `offset` words past the write index; the
    // DMA reads this memory concurrently, so the store must not be elided.
    fn write_buf(&mut self, offset: usize, value: W) {
        unsafe {
            core::ptr::write_volatile(
                self.dma_buf
                    .as_mut_ptr()
                    .offset(self.write_index.as_index(self.cap(), offset) as isize),
                value,
            )
        }
    }
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests;
|
90
embassy-stm32/src/dma/ringbuffer/tests/mod.rs
Normal file
90
embassy-stm32/src/dma/ringbuffer/tests/mod.rs
Normal file
@ -0,0 +1,90 @@
|
|||||||
|
use std::{cell, vec};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// One scripted response the mock DMA hands back per `DmaCtrl` call.
#[allow(unused)]
#[derive(PartialEq, Debug)]
enum TestCircularTransferRequest {
    /// Value that `reset_complete_count` should return.
    ResetCompleteCount(usize),
    /// Absolute DMA position that `get_remaining_transfers` should report.
    PositionRequest(usize),
}
|
||||||
|
|
||||||
|
/// Mock DMA transfer that replays a scripted sequence of responses.
#[allow(unused)]
struct TestCircularTransfer {
    // Size of the simulated circular buffer.
    len: usize,
    // Pending responses, stored reversed so `pop()` yields FIFO order.
    requests: cell::RefCell<vec::Vec<TestCircularTransferRequest>>,
}
|
||||||
|
|
||||||
|
impl DmaCtrl for TestCircularTransfer {
    fn get_remaining_transfers(&self) -> usize {
        // Pop the next scripted position and convert the absolute position
        // into the NDTR-style "remaining transfers until wrap" value.
        match self.requests.borrow_mut().pop().unwrap() {
            TestCircularTransferRequest::PositionRequest(pos) => {
                let len = self.len;

                assert!(len >= pos);

                len - pos
            }
            _ => unreachable!(),
        }
    }

    fn reset_complete_count(&mut self) -> usize {
        // The script must have queued a ResetCompleteCount entry here,
        // otherwise the DMA call order differs from the test's expectation.
        match self.requests.get_mut().pop().unwrap() {
            TestCircularTransferRequest::ResetCompleteCount(complete_count) => complete_count,
            _ => unreachable!(),
        }
    }

    // No-op: these synchronous unit tests never park a waker.
    fn set_waker(&mut self, _waker: &Waker) {}
}
|
||||||
|
|
||||||
|
impl TestCircularTransfer {
    /// Create a mock transfer over a circular buffer of `len` words with an
    /// empty request script.
    #[allow(unused)]
    pub fn new(len: usize) -> Self {
        Self {
            requests: cell::RefCell::new(vec![]),
            len,
        }
    }

    /// Queue the scripted DMA responses for the next ring-buffer operation.
    /// The list is reversed so that `pop()` hands them out in FIFO order.
    #[allow(unused)]
    pub fn setup(&self, mut requests: vec::Vec<TestCircularTransferRequest>) {
        requests.reverse();
        self.requests.replace(requests);
    }
}
|
||||||
|
|
||||||
|
const CAP: usize = 16;
|
||||||
|
|
||||||
|
#[test]
fn dma_index_as_index_returns_index_mod_cap_by_default() {
    // A default index sits at position 0, so as_index is just offset % CAP.
    let index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);
    assert_eq!(index.as_index(CAP, 1), 1);
    assert_eq!(index.as_index(CAP, 2), 2);
    assert_eq!(index.as_index(CAP, 3), 3);
    assert_eq!(index.as_index(CAP, 4), 4);
    assert_eq!(index.as_index(CAP, CAP), 0);
    assert_eq!(index.as_index(CAP, CAP + 1), 1);
}
|
||||||
|
|
||||||
|
#[test]
fn dma_index_advancing_increases_as_index() {
    // Each advance moves the base position; crossing CAP wraps back to 0.
    let mut index = DmaIndex::default();
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 2);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 3);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 4);
    index.advance(CAP, CAP - 4);
    assert_eq!(index.as_index(CAP, 0), 0);
    index.advance(CAP, 1);
    assert_eq!(index.as_index(CAP, 0), 1);
}
|
||||||
|
|
||||||
|
mod prop_test;
|
50
embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs
Normal file
50
embassy-stm32/src/dma/ringbuffer/tests/prop_test/mod.rs
Normal file
@ -0,0 +1,50 @@
|
|||||||
|
use std::task::Waker;
|
||||||
|
|
||||||
|
use proptest::prop_oneof;
|
||||||
|
use proptest::strategy::{self, BoxedStrategy, Strategy as _};
|
||||||
|
use proptest_state_machine::{prop_state_machine, ReferenceStateMachine, StateMachineTest};
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
const CAP: usize = 128;
|
||||||
|
|
||||||
|
/// Straight-line mock of the DMA engine used by the property tests.
#[derive(Debug, Default)]
struct DmaMock {
    // Current position within the CAP-sized buffer.
    pos: usize,
    // Wrap-arounds accumulated since the last `reset_complete_count`.
    wraps: usize,
}
|
||||||
|
|
||||||
|
impl DmaMock {
|
||||||
|
pub fn advance(&mut self, steps: usize) {
|
||||||
|
let next = self.pos + steps;
|
||||||
|
self.pos = next % CAP;
|
||||||
|
self.wraps += next / CAP;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl DmaCtrl for DmaMock {
    fn get_remaining_transfers(&self) -> usize {
        // NDTR-style value: counts down toward the end of the buffer.
        CAP - self.pos
    }

    fn reset_complete_count(&mut self) -> usize {
        // Hand out the accumulated wrap count and clear it in one step.
        core::mem::replace(&mut self.wraps, 0)
    }

    // No-op: the property tests drive everything synchronously.
    fn set_waker(&mut self, _waker: &Waker) {}
}
|
||||||
|
|
||||||
|
/// Model state shared by the reader and writer reference state machines.
#[derive(Debug, Clone)]
enum Status {
    /// Number of elements the next operation is expected to transfer.
    Available(usize),
    /// An overrun occurred; the next operation must error and reset.
    Failed,
}
|
||||||
|
|
||||||
|
impl Status {
|
||||||
|
pub fn new(capacity: usize) -> Self {
|
||||||
|
Self::Available(capacity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
mod reader;
|
||||||
|
mod writer;
|
123
embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs
Normal file
123
embassy-stm32/src/dma/ringbuffer/tests/prop_test/reader.rs
Normal file
@ -0,0 +1,123 @@
|
|||||||
|
use core::fmt::Debug;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// Transitions the reader property test can apply.
#[derive(Debug, Clone)]
enum ReaderTransition {
    /// The DMA produces this many new elements.
    Write(usize),
    /// Reset the ring buffer (empties it).
    Reset,
    /// The CPU attempts to read up to this many elements.
    ReadUpTo(usize),
}
|
||||||
|
|
||||||
|
/// Reference state machine modelling the reader's expected behavior.
struct ReaderSM;
|
||||||
|
|
||||||
|
impl ReferenceStateMachine for ReaderSM {
    type State = Status;
    type Transition = ReaderTransition;

    fn init_state() -> BoxedStrategy<Self::State> {
        // The reader starts with an empty buffer: nothing available.
        strategy::Just(Status::new(0)).boxed()
    }

    fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
        prop_oneof![
            (1..50_usize).prop_map(ReaderTransition::Write),
            (1..50_usize).prop_map(ReaderTransition::ReadUpTo),
            strategy::Just(ReaderTransition::Reset),
        ]
        .boxed()
    }

    fn apply(status: Self::State, transition: &Self::Transition) -> Self::State {
        match (status, transition) {
            // Reset always yields an empty buffer.
            (_, ReaderTransition::Reset) => Status::Available(0),
            (Status::Available(x), ReaderTransition::Write(y)) => {
                // Writing past capacity overruns the (unread) data.
                if x + y > CAP {
                    Status::Failed
                } else {
                    Status::Available(x + y)
                }
            }
            (Status::Failed, ReaderTransition::Write(_)) => Status::Failed,
            (Status::Available(x), ReaderTransition::ReadUpTo(y)) => Status::Available(x.saturating_sub(*y)),
            // A read after an overrun errors and self-resets to empty.
            (Status::Failed, ReaderTransition::ReadUpTo(_)) => Status::Available(0),
        }
    }
}
|
||||||
|
|
||||||
|
/// System under test: a readable ring buffer plus its backing storage.
struct ReaderSut {
    // Model state as of the previous step, used to predict `read` results.
    status: Status,
    // Owned backing storage kept as a raw pointer so the ring buffer can
    // borrow it for 'static; reclaimed in `teardown`.
    buffer: *mut [u8],
    producer: DmaMock,
    consumer: ReadableDmaRingBuffer<'static, u8>,
}
|
||||||
|
|
||||||
|
impl Debug for ReaderSut {
    // Only the mock DMA's state is useful in proptest failure output.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        <DmaMock as Debug>::fmt(&self.producer, f)
    }
}
|
||||||
|
|
||||||
|
/// Glue type implementing the proptest state-machine harness for the reader.
struct ReaderTest;
|
||||||
|
|
||||||
|
impl StateMachineTest for ReaderTest {
    type SystemUnderTest = ReaderSut;
    type Reference = ReaderSM;

    fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest {
        // Leak a heap buffer so the ring buffer can borrow it as 'static;
        // `teardown` reclaims it.
        let buffer = Box::into_raw(Box::new([0; CAP]));
        ReaderSut {
            status: ref_status.clone(),
            buffer,
            producer: DmaMock::default(),
            consumer: ReadableDmaRingBuffer::new(unsafe { &mut *buffer }),
        }
    }

    fn teardown(state: Self::SystemUnderTest) {
        // SAFETY: `buffer` came from Box::into_raw in `init_test` and is
        // not referenced after the SUT is dropped here.
        unsafe {
            let _ = Box::from_raw(state.buffer);
        };
    }

    fn apply(
        mut sut: Self::SystemUnderTest,
        ref_state: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        match transition {
            ReaderTransition::Write(x) => sut.producer.advance(x),
            ReaderTransition::Reset => {
                sut.consumer.reset(&mut sut.producer);
            }
            ReaderTransition::ReadUpTo(x) => {
                let status = sut.status;
                let ReaderSut {
                    ref mut producer,
                    ref mut consumer,
                    ..
                } = sut;
                let mut buf = vec![0; x];
                let res = consumer.read(producer, &mut buf);
                match status {
                    Status::Available(n) => {
                        // At most what the model says is available.
                        let readable = x.min(n);

                        assert_eq!(res.unwrap().0, readable);
                    }
                    Status::Failed => assert!(res.is_err()),
                }
            }
        }

        // Carry the updated model state into the next step.
        ReaderSut {
            status: ref_state.clone(),
            ..sut
        }
    }
}
|
||||||
|
|
||||||
|
// Run the reader model against the real ring buffer with random
// sequences of 1..20 transitions.
prop_state_machine! {
    #[test]
    fn reader_state_test(sequential 1..20 => ReaderTest);
}
|
122
embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs
Normal file
122
embassy-stm32/src/dma/ringbuffer/tests/prop_test/writer.rs
Normal file
@ -0,0 +1,122 @@
|
|||||||
|
use core::fmt::Debug;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
/// Transitions the writer property test can apply.
#[derive(Debug, Clone)]
enum WriterTransition {
    /// The DMA consumes this many elements.
    Read(usize),
    /// The CPU attempts to write up to this many elements.
    WriteUpTo(usize),
    /// Reset the ring buffer (refills it).
    Reset,
}
|
||||||
|
|
||||||
|
/// Reference state machine modelling the writer's expected behavior.
struct WriterSM;
|
||||||
|
|
||||||
|
impl ReferenceStateMachine for WriterSM {
    type State = Status;
    type Transition = WriterTransition;

    fn init_state() -> BoxedStrategy<Self::State> {
        // The writable ring buffer starts completely full.
        strategy::Just(Status::new(CAP)).boxed()
    }

    fn transitions(_state: &Self::State) -> BoxedStrategy<Self::Transition> {
        prop_oneof![
            (1..50_usize).prop_map(WriterTransition::Read),
            (1..50_usize).prop_map(WriterTransition::WriteUpTo),
            strategy::Just(WriterTransition::Reset),
        ]
        .boxed()
    }

    fn apply(status: Self::State, transition: &Self::Transition) -> Self::State {
        match (status, transition) {
            // Reset always yields a full buffer.
            (_, WriterTransition::Reset) => Status::Available(CAP),
            (Status::Available(x), WriterTransition::Read(y)) => {
                // The DMA reading more than was written is an overrun.
                if x < *y {
                    Status::Failed
                } else {
                    Status::Available(x - y)
                }
            }
            (Status::Failed, WriterTransition::Read(_)) => Status::Failed,
            // Writing tops the buffer up, capped at its capacity.
            (Status::Available(x), WriterTransition::WriteUpTo(y)) => Status::Available((x + *y).min(CAP)),
            // A write after an overrun errors and self-resets to full.
            (Status::Failed, WriterTransition::WriteUpTo(_)) => Status::Available(CAP),
        }
    }
}
|
||||||
|
|
||||||
|
/// System under test: a writable ring buffer plus its backing storage.
struct WriterSut {
    // Model state as of the previous step, used to predict `write` results.
    status: Status,
    // Owned backing storage kept as a raw pointer so the ring buffer can
    // borrow it for 'static; reclaimed in `teardown`.
    buffer: *mut [u8],
    producer: WritableDmaRingBuffer<'static, u8>,
    consumer: DmaMock,
}
|
||||||
|
|
||||||
|
impl Debug for WriterSut {
    // Only the mock DMA's state is useful in proptest failure output.
    fn fmt(&self, f: &mut core::fmt::Formatter) -> core::fmt::Result {
        <DmaMock as Debug>::fmt(&self.consumer, f)
    }
}
|
||||||
|
|
||||||
|
/// Glue type implementing the proptest state-machine harness for the writer.
struct WriterTest;
|
||||||
|
|
||||||
|
impl StateMachineTest for WriterTest {
    type SystemUnderTest = WriterSut;
    type Reference = WriterSM;

    fn init_test(ref_status: &<Self::Reference as ReferenceStateMachine>::State) -> Self::SystemUnderTest {
        // Leak a heap buffer so the ring buffer can borrow it as 'static;
        // `teardown` reclaims it.
        let buffer = Box::into_raw(Box::new([0; CAP]));
        WriterSut {
            status: ref_status.clone(),
            buffer,
            producer: WritableDmaRingBuffer::new(unsafe { &mut *buffer }),
            consumer: DmaMock::default(),
        }
    }

    fn teardown(state: Self::SystemUnderTest) {
        // SAFETY: `buffer` came from Box::into_raw in `init_test` and is
        // not referenced after the SUT is dropped here.
        unsafe {
            let _ = Box::from_raw(state.buffer);
        };
    }

    fn apply(
        mut sut: Self::SystemUnderTest,
        ref_status: &<Self::Reference as ReferenceStateMachine>::State,
        transition: <Self::Reference as ReferenceStateMachine>::Transition,
    ) -> Self::SystemUnderTest {
        match transition {
            WriterTransition::Read(x) => sut.consumer.advance(x),
            WriterTransition::Reset => {
                sut.producer.reset(&mut sut.consumer);
            }
            WriterTransition::WriteUpTo(x) => {
                let status = sut.status;
                let WriterSut {
                    ref mut producer,
                    ref mut consumer,
                    ..
                } = sut;
                let mut buf = vec![0; x];
                let res = producer.write(consumer, &mut buf);
                match status {
                    Status::Available(n) => {
                        // Writable space is the capacity minus what the
                        // model says is already in the buffer.
                        let writable = x.min(CAP - n.min(CAP));
                        assert_eq!(res.unwrap().0, writable);
                    }
                    Status::Failed => assert!(res.is_err()),
                }
            }
        }

        // Carry the updated model state into the next step.
        WriterSut {
            status: ref_status.clone(),
            ..sut
        }
    }
}
|
||||||
|
|
||||||
|
// Run the writer model against the real ring buffer with random
// sequences of 1..20 transitions.
prop_state_machine! {
    #[test]
    fn writer_state_test(sequential 1..20 => WriterTest);
}
|
@ -296,6 +296,9 @@ mod dual_core {
|
|||||||
/// It cannot be initialized by the user. The intended use is:
|
/// It cannot be initialized by the user. The intended use is:
|
||||||
///
|
///
|
||||||
/// ```
|
/// ```
|
||||||
|
/// use core::mem::MaybeUninit;
|
||||||
|
/// use embassy_stm32::{init_secondary, SharedData};
|
||||||
|
///
|
||||||
/// #[link_section = ".ram_d3"]
|
/// #[link_section = ".ram_d3"]
|
||||||
/// static SHARED_DATA: MaybeUninit<SharedData> = MaybeUninit::uninit();
|
/// static SHARED_DATA: MaybeUninit<SharedData> = MaybeUninit::uninit();
|
||||||
///
|
///
|
||||||
|
@ -27,8 +27,14 @@ pub enum Error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
#[cfg(not(gpdma))]
|
#[cfg(not(gpdma))]
|
||||||
impl From<ringbuffer::OverrunError> for Error {
|
impl From<ringbuffer::Error> for Error {
|
||||||
fn from(_: ringbuffer::OverrunError) -> Self {
|
fn from(#[allow(unused)] err: ringbuffer::Error) -> Self {
|
||||||
|
#[cfg(feature = "defmt")]
|
||||||
|
{
|
||||||
|
if err == ringbuffer::Error::DmaUnsynced {
|
||||||
|
defmt::error!("Ringbuffer broken invariants detected!");
|
||||||
|
}
|
||||||
|
}
|
||||||
Self::Overrun
|
Self::Overrun
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -83,7 +83,6 @@ impl<'d> RingBufferedUartRx<'d> {
|
|||||||
// Clear the buffer so that it is ready to receive data
|
// Clear the buffer so that it is ready to receive data
|
||||||
compiler_fence(Ordering::SeqCst);
|
compiler_fence(Ordering::SeqCst);
|
||||||
self.ring_buf.start();
|
self.ring_buf.start();
|
||||||
self.ring_buf.clear();
|
|
||||||
|
|
||||||
let r = self.info.regs;
|
let r = self.info.regs;
|
||||||
// clear all interrupts and DMA Rx Request
|
// clear all interrupts and DMA Rx Request
|
||||||
|
Loading…
Reference in New Issue
Block a user