stm32/dma: add AnyChannel, add support for BDMA on H7.

parent f77d59500e
commit e67dfcb04f
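At a high level, this change replaces the per-flavor DMA drivers (separate dma/bdma/gpdma channel counts and IRQ tables) with a single type-erased channel. The sketch below is illustrative only, a simplification of the `AnyChannel`/`ChannelInfo`/`DmaInfo` items that appear in the new `dma_bdma.rs` further down; names and module paths outside this diff (e.g. `crate::_generated::DMA_CHANNELS`) are assumptions:

    // Illustrative sketch, not the exact code from this commit.
    // A channel is now identified by a plain index into a build-time table.
    pub struct AnyChannel { id: u8 }

    pub(crate) enum DmaInfo {
        Dma(pac::dma::Dma),   // DMA1/DMA2 instance
        Bdma(pac::bdma::Dma), // BDMA instance (now usable on H7)
    }

    pub(crate) struct ChannelInfo {
        pub(crate) dma: DmaInfo, // which controller this channel belongs to
        pub(crate) num: usize,   // channel/stream number within that controller
    }

    impl AnyChannel {
        fn info(&self) -> &'static ChannelInfo {
            // DMA_CHANNELS is emitted by the build script (see the hunks below).
            &crate::_generated::DMA_CHANNELS[self.id as usize]
        }
    }

Drivers then hold a `PeripheralRef<'a, AnyChannel>` instead of a generic `C: Channel`, so one `Transfer` type covers both DMA and BDMA.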
@@ -353,50 +353,6 @@ fn main() {

     g.extend(quote! { pub mod flash_regions { #flash_regions } });

-    // ========
-    // Generate DMA IRQs.
-
-    let mut dma_irqs: BTreeMap<&str, Vec<(&str, &str, &str)>> = BTreeMap::new();
-
-    for p in METADATA.peripherals {
-        if let Some(r) = &p.registers {
-            if r.kind == "dma" || r.kind == "bdma" || r.kind == "gpdma" {
-                if p.name == "BDMA1" {
-                    // BDMA1 in H7 doesn't use DMAMUX, which breaks
-                    continue;
-                }
-                for irq in p.interrupts {
-                    dma_irqs
-                        .entry(irq.interrupt)
-                        .or_default()
-                        .push((r.kind, p.name, irq.signal));
-                }
-            }
-        }
-    }
-
-    let dma_irqs: TokenStream = dma_irqs
-        .iter()
-        .map(|(irq, channels)| {
-            let irq = format_ident!("{}", irq);
-
-            let xdma = format_ident!("{}", channels[0].0);
-            let channels = channels.iter().map(|(_, dma, ch)| format_ident!("{}_{}", dma, ch));
-
-            quote! {
-                #[cfg(feature = "rt")]
-                #[crate::interrupt]
-                unsafe fn #irq () {
-                    #(
-                        <crate::peripherals::#channels as crate::dma::#xdma::sealed::Channel>::on_irq();
-                    )*
-                }
-            }
-        })
-        .collect();
-
-    g.extend(dma_irqs);
-
     // ========
     // Extract the rcc registers
     let rcc_registers = METADATA
@@ -664,7 +620,7 @@ fn main() {

     #[rustfmt::skip]
     let signals: HashMap<_, _> = [
         // (kind, signal) => trait
         (("usart", "TX"), quote!(crate::usart::TxPin)),
         (("usart", "RX"), quote!(crate::usart::RxPin)),
         (("usart", "CTS"), quote!(crate::usart::CtsPin)),
@@ -897,7 +853,7 @@ fn main() {
         (("quadspi", "BK2_IO3"), quote!(crate::qspi::BK2D3Pin)),
         (("quadspi", "BK2_NCS"), quote!(crate::qspi::BK2NSSPin)),
         (("quadspi", "CLK"), quote!(crate::qspi::SckPin)),
     ].into();

     for p in METADATA.peripherals {
         if let Some(regs) = &p.registers {
@@ -959,7 +915,7 @@ fn main() {
             };
             if let Some(ch) = ch {
                 g.extend(quote! {
                     impl_adc_pin!( #peri, #pin_name, #ch);
                 })
             }
         }
@@ -991,7 +947,7 @@ fn main() {
             let ch: u8 = pin.signal.strip_prefix("OUT").unwrap().parse().unwrap();

             g.extend(quote! {
                 impl_dac_pin!( #peri, #pin_name, #ch);
             })
         }
     }
@@ -1189,7 +1145,6 @@ fn main() {
     let mut interrupts_table: Vec<Vec<String>> = Vec::new();
     let mut peripherals_table: Vec<Vec<String>> = Vec::new();
     let mut pins_table: Vec<Vec<String>> = Vec::new();
-    let mut dma_channels_table: Vec<Vec<String>> = Vec::new();
     let mut adc_common_table: Vec<Vec<String>> = Vec::new();

     /*
@@ -1283,51 +1238,108 @@ fn main() {
         }
     }

-    let mut dma_channel_count: usize = 0;
-    let mut bdma_channel_count: usize = 0;
-    let mut gpdma_channel_count: usize = 0;
-
-    for ch in METADATA.dma_channels {
-        let mut row = Vec::new();
+    let mut dmas = TokenStream::new();
+    let has_dmamux = METADATA
+        .peripherals
+        .iter()
+        .flat_map(|p| &p.registers)
+        .any(|p| p.kind == "dmamux");
+
+    for (ch_idx, ch) in METADATA.dma_channels.iter().enumerate() {
+        // Some H7 chips have BDMA1 hardcoded for DFSDM, ie no DMAMUX. It's unsupported, skip it.
+        if has_dmamux && ch.dmamux.is_none() {
+            continue;
+        }
+
+        let name = format_ident!("{}", ch.name);
+        let idx = ch_idx as u8;
+        g.extend(quote!(dma_channel_impl!(#name, #idx);));
+
+        let dma = format_ident!("{}", ch.dma);
+        let ch_num = ch.channel as usize;
+
         let dma_peri = METADATA.peripherals.iter().find(|p| p.name == ch.dma).unwrap();
         let bi = dma_peri.registers.as_ref().unwrap();

-        let num;
-        match bi.kind {
-            "dma" => {
-                num = dma_channel_count;
-                dma_channel_count += 1;
-            }
-            "bdma" => {
-                num = bdma_channel_count;
-                bdma_channel_count += 1;
-            }
-            "gpdma" => {
-                num = gpdma_channel_count;
-                gpdma_channel_count += 1;
-            }
+        let dma_info = match bi.kind {
+            "dma" => quote!(crate::dma::DmaInfo::Dma(crate::pac::#dma)),
+            "bdma" => quote!(crate::dma::DmaInfo::Bdma(crate::pac::#dma)),
+            "gpdma" => quote!(crate::pac::#dma),
             _ => panic!("bad dma channel kind {}", bi.kind),
-        }
+        };

-        row.push(ch.name.to_string());
-        row.push(ch.dma.to_string());
-        row.push(bi.kind.to_string());
-        row.push(ch.channel.to_string());
-        row.push(num.to_string());
-        if let Some(dmamux) = &ch.dmamux {
-            let dmamux_channel = ch.dmamux_channel.unwrap();
-            row.push(format!("{{dmamux: {}, dmamux_channel: {}}}", dmamux, dmamux_channel));
-        } else {
-            row.push("{}".to_string());
-        }
-
-        dma_channels_table.push(row);
+        let dmamux = match &ch.dmamux {
+            Some(dmamux) => {
+                let dmamux = format_ident!("{}", dmamux);
+                let num = ch.dmamux_channel.unwrap() as usize;
+
+                g.extend(quote!(dmamux_channel_impl!(#name, #dmamux);));
+
+                quote! {
+                    dmamux: crate::dma::DmamuxInfo {
+                        mux: crate::pac::#dmamux,
+                        num: #num,
+                    },
+                }
+            }
+            None => quote!(),
+        };
+
+        dmas.extend(quote! {
+            crate::dma::ChannelInfo {
+                dma: #dma_info,
+                num: #ch_num,
+                #dmamux
+            },
+        });
     }

+    // ========
+    // Generate DMA IRQs.
+
+    let mut dma_irqs: BTreeMap<&str, Vec<String>> = BTreeMap::new();
+
+    for p in METADATA.peripherals {
+        if let Some(r) = &p.registers {
+            if r.kind == "dma" || r.kind == "bdma" || r.kind == "gpdma" {
+                for irq in p.interrupts {
+                    let ch_name = format!("{}_{}", p.name, irq.signal);
+                    let ch = METADATA.dma_channels.iter().find(|c| c.name == ch_name).unwrap();
+
+                    // Some H7 chips have BDMA1 hardcoded for DFSDM, ie no DMAMUX. It's unsupported, skip it.
+                    if has_dmamux && ch.dmamux.is_none() {
+                        continue;
+                    }
+
+                    dma_irqs.entry(irq.interrupt).or_default().push(ch_name);
+                }
+            }
+        }
+    }
+
+    let dma_irqs: TokenStream = dma_irqs
+        .iter()
+        .map(|(irq, channels)| {
+            let irq = format_ident!("{}", irq);
+
+            let channels = channels.iter().map(|c| format_ident!("{}", c));
+
+            quote! {
+                #[cfg(feature = "rt")]
+                #[crate::interrupt]
+                unsafe fn #irq () {
+                    #(
+                        <crate::peripherals::#channels as crate::dma::sealed::ChannelInterrupt>::on_irq();
+                    )*
+                }
+            }
+        })
+        .collect();
+
+    g.extend(dma_irqs);
+
     g.extend(quote! {
-        pub(crate) const DMA_CHANNEL_COUNT: usize = #dma_channel_count;
-        pub(crate) const BDMA_CHANNEL_COUNT: usize = #bdma_channel_count;
-        pub(crate) const GPDMA_CHANNEL_COUNT: usize = #gpdma_channel_count;
+        pub(crate) const DMA_CHANNELS: &[crate::dma::ChannelInfo] = &[#dmas];
     });

     for irq in METADATA.interrupts {
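For a concrete sense of what this generation step emits, the expansion for a hypothetical part with one DMA channel and one shared interrupt would look roughly like the following. Channel, peripheral, and interrupt names here are invented for illustration, and the dmamux field is omitted; only the macro and type names come from the hunk above:

    dma_channel_impl!(DMA1_CH1, 0);

    pub(crate) const DMA_CHANNELS: &[crate::dma::ChannelInfo] = &[
        crate::dma::ChannelInfo {
            dma: crate::dma::DmaInfo::Dma(crate::pac::DMA1),
            num: 0,
        },
    ];

    #[cfg(feature = "rt")]
    #[crate::interrupt]
    unsafe fn DMA1_CHANNEL1() {
        <crate::peripherals::DMA1_CH1 as crate::dma::sealed::ChannelInterrupt>::on_irq();
    }

Note that the IRQ handlers are now keyed by channel name alone; the old generator also carried the controller kind so it could pick between the `crate::dma::dma`, `crate::dma::bdma`, and `crate::dma::gpdma` trait paths.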
@@ -1347,7 +1359,6 @@ fn main() {
     make_table(&mut m, "foreach_interrupt", &interrupts_table);
     make_table(&mut m, "foreach_peripheral", &peripherals_table);
     make_table(&mut m, "foreach_pin", &pins_table);
-    make_table(&mut m, "foreach_dma_channel", &dma_channels_table);
     make_table(&mut m, "foreach_adc", &adc_common_table);

     let out_dir = &PathBuf::from(env::var_os("OUT_DIR").unwrap());
@@ -394,19 +394,7 @@ where

     /// This method starts the capture and finishes when both the dma transfer and DCMI finish the frame transfer.
     /// The implication is that the input buffer size must be exactly the size of the captured frame.
-    ///
-    /// Note that when `buffer.len() > 0xffff` the capture future requires some real-time guarantees to be upheld
-    /// (must be polled fast enough so the buffers get switched before data is overwritten).
-    /// It is therefore recommended that it is run on higher priority executor.
     pub async fn capture(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
-        if buffer.len() <= 0xffff {
-            return self.capture_small(buffer).await;
-        } else {
-            return self.capture_giant(buffer).await;
-        }
-    }
-
-    async fn capture_small(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
         let r = self.inner.regs();
         let src = r.dr().as_ptr() as *mut u32;
         let request = self.dma.request();
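Since `capture_giant` is deleted in the next hunk, `capture` is now just the former `capture_small` body: the whole frame goes through a single DMA transfer, and the transfer constructors assert `len <= 0xFFFF` words. A minimal call site might look like this (buffer size and surrounding camera setup are illustrative, not taken from this diff):

    // The frame buffer must match the captured frame size exactly and fit in one transfer.
    let mut frame = [0u32; 320 * 240 / 2];
    dcmi.capture(&mut frame).await?;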
@@ -441,116 +429,6 @@ where

         result
     }
-
-    #[cfg(not(dma))]
-    async fn capture_giant(&mut self, _buffer: &mut [u32]) -> Result<(), Error> {
-        panic!("capturing to buffers larger than 0xffff is only supported on DMA for now, not on BDMA or GPDMA.");
-    }
-
-    #[cfg(dma)]
-    async fn capture_giant(&mut self, buffer: &mut [u32]) -> Result<(), Error> {
-        use crate::dma::TransferOptions;
-
-        let data_len = buffer.len();
-        let chunk_estimate = data_len / 0xffff;
-
-        let mut chunks = chunk_estimate + 1;
-        while data_len % chunks != 0 {
-            chunks += 1;
-        }
-
-        let chunk_size = data_len / chunks;
-
-        let mut remaining_chunks = chunks - 2;
-
-        let mut m0ar = buffer.as_mut_ptr();
-        let mut m1ar = unsafe { buffer.as_mut_ptr().add(chunk_size) };
-
-        let channel = &mut self.dma;
-        let request = channel.request();
-
-        let r = self.inner.regs();
-        let src = r.dr().as_ptr() as *mut u32;
-
-        let mut transfer = unsafe {
-            crate::dma::DoubleBuffered::new_read(
-                &mut self.dma,
-                request,
-                src,
-                m0ar,
-                m1ar,
-                chunk_size,
-                TransferOptions::default(),
-            )
-        };
-
-        let mut last_chunk_set_for_transfer = false;
-        let mut buffer0_last_accessible = false;
-        let dma_result = poll_fn(|cx| {
-            transfer.set_waker(cx.waker());
-
-            let buffer0_currently_accessible = transfer.is_buffer0_accessible();
-
-            // check if the accessible buffer changed since last poll
-            if buffer0_last_accessible == buffer0_currently_accessible {
-                return Poll::Pending;
-            }
-            buffer0_last_accessible = !buffer0_last_accessible;
-
-            if remaining_chunks != 0 {
-                if remaining_chunks % 2 == 0 && buffer0_currently_accessible {
-                    m0ar = unsafe { m0ar.add(2 * chunk_size) };
-                    unsafe { transfer.set_buffer0(m0ar) }
-                    remaining_chunks -= 1;
-                } else if !buffer0_currently_accessible {
-                    m1ar = unsafe { m1ar.add(2 * chunk_size) };
-                    unsafe { transfer.set_buffer1(m1ar) };
-                    remaining_chunks -= 1;
-                }
-            } else {
-                if buffer0_currently_accessible {
-                    unsafe { transfer.set_buffer0(buffer.as_mut_ptr()) }
-                } else {
-                    unsafe { transfer.set_buffer1(buffer.as_mut_ptr()) }
-                }
-                if last_chunk_set_for_transfer {
-                    transfer.request_stop();
-                    return Poll::Ready(());
-                }
-                last_chunk_set_for_transfer = true;
-            }
-            Poll::Pending
-        });
-
-        Self::clear_interrupt_flags();
-        Self::enable_irqs();
-
-        let result = poll_fn(|cx| {
-            STATE.waker.register(cx.waker());
-
-            let ris = crate::pac::DCMI.ris().read();
-            if ris.err_ris() {
-                crate::pac::DCMI.icr().write(|r| r.set_err_isc(true));
-                Poll::Ready(Err(Error::PeripheralError))
-            } else if ris.ovr_ris() {
-                crate::pac::DCMI.icr().write(|r| r.set_ovr_isc(true));
-                Poll::Ready(Err(Error::Overrun))
-            } else if ris.frame_ris() {
-                crate::pac::DCMI.icr().write(|r| r.set_frame_isc(true));
-                Poll::Ready(Ok(()))
-            } else {
-                Poll::Pending
-            }
-        });
-
-        Self::toggle(true);
-
-        let (_, result) = embassy_futures::join::join(dma_result, result).await;
-
-        Self::toggle(false);
-
-        result
-    }
 }

 mod sealed {
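For reference, the chunking math in the code removed above split the frame into equal chunks small enough for one double-buffered transfer each. A worked example with an illustrative buffer size:

    // data_len = 76_800 words (e.g. a 320x240 frame at one u32 per pixel)
    // chunk_estimate = 76_800 / 0xffff = 1
    // chunks = 2              // smallest value > chunk_estimate that divides data_len evenly
    // chunk_size = 38_400     // each half is ping-ponged through DoubleBuffered::new_read
    // remaining_chunks = 0    // chunks - 2: both halves are already programmed up front

This is the real-time-sensitive path the removed doc comment warned about; it no longer exists after this commit.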
@ -1,740 +0,0 @@
|
|||||||
//! Basic Direct Memory Acccess (BDMA)
|
|
||||||
|
|
||||||
use core::future::Future;
|
|
||||||
use core::pin::Pin;
|
|
||||||
use core::sync::atomic::{fence, AtomicUsize, Ordering};
|
|
||||||
use core::task::{Context, Poll, Waker};
|
|
||||||
|
|
||||||
use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
|
|
||||||
use embassy_sync::waitqueue::AtomicWaker;
|
|
||||||
|
|
||||||
use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
|
|
||||||
use super::word::{Word, WordSize};
|
|
||||||
use super::Dir;
|
|
||||||
use crate::_generated::BDMA_CHANNEL_COUNT;
|
|
||||||
use crate::interrupt::typelevel::Interrupt;
|
|
||||||
use crate::interrupt::Priority;
|
|
||||||
use crate::pac;
|
|
||||||
use crate::pac::bdma::{regs, vals};
|
|
||||||
|
|
||||||
/// BDMA transfer options.
|
|
||||||
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
|
||||||
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
|
||||||
#[non_exhaustive]
|
|
||||||
pub struct TransferOptions {
|
|
||||||
/// Enable circular DMA
|
|
||||||
///
|
|
||||||
/// Note:
|
|
||||||
/// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
|
|
||||||
/// Since DMA in circular mode need manually stop, `.await` in current task would block the task forever.
|
|
||||||
pub circular: bool,
|
|
||||||
/// Enable half transfer interrupt
|
|
||||||
pub half_transfer_ir: bool,
|
|
||||||
/// Enable transfer complete interrupt
|
|
||||||
pub complete_transfer_ir: bool,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for TransferOptions {
|
|
||||||
fn default() -> Self {
|
|
||||||
Self {
|
|
||||||
circular: false,
|
|
||||||
half_transfer_ir: false,
|
|
||||||
complete_transfer_ir: true,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<WordSize> for vals::Size {
|
|
||||||
fn from(raw: WordSize) -> Self {
|
|
||||||
match raw {
|
|
||||||
WordSize::OneByte => Self::BITS8,
|
|
||||||
WordSize::TwoBytes => Self::BITS16,
|
|
||||||
WordSize::FourBytes => Self::BITS32,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl From<Dir> for vals::Dir {
|
|
||||||
fn from(raw: Dir) -> Self {
|
|
||||||
match raw {
|
|
||||||
Dir::MemoryToPeripheral => Self::FROMMEMORY,
|
|
||||||
Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
struct State {
|
|
||||||
ch_wakers: [AtomicWaker; BDMA_CHANNEL_COUNT],
|
|
||||||
complete_count: [AtomicUsize; BDMA_CHANNEL_COUNT],
|
|
||||||
}
|
|
||||||
|
|
||||||
impl State {
|
|
||||||
const fn new() -> Self {
|
|
||||||
const ZERO: AtomicUsize = AtomicUsize::new(0);
|
|
||||||
const AW: AtomicWaker = AtomicWaker::new();
|
|
||||||
Self {
|
|
||||||
ch_wakers: [AW; BDMA_CHANNEL_COUNT],
|
|
||||||
complete_count: [ZERO; BDMA_CHANNEL_COUNT],
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static STATE: State = State::new();
|
|
||||||
|
|
||||||
/// safety: must be called only once
|
|
||||||
pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
|
|
||||||
foreach_interrupt! {
|
|
||||||
($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
|
|
||||||
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, irq_priority);
|
|
||||||
crate::interrupt::typelevel::$irq::enable();
|
|
||||||
};
|
|
||||||
}
|
|
||||||
crate::_generated::init_bdma();
|
|
||||||
}
|
|
||||||
|
|
||||||
foreach_dma_channel! {
|
|
||||||
($channel_peri:ident, BDMA1, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
|
|
||||||
// BDMA1 in H7 doesn't use DMAMUX, which breaks
|
|
||||||
};
|
|
||||||
($channel_peri:ident, $dma_peri:ident, bdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
|
|
||||||
impl sealed::Channel for crate::peripherals::$channel_peri {
|
|
||||||
fn regs(&self) -> pac::bdma::Dma {
|
|
||||||
pac::$dma_peri
|
|
||||||
}
|
|
||||||
fn num(&self) -> usize {
|
|
||||||
$channel_num
|
|
||||||
}
|
|
||||||
fn index(&self) -> usize {
|
|
||||||
$index
|
|
||||||
}
|
|
||||||
fn on_irq() {
|
|
||||||
unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Channel for crate::peripherals::$channel_peri {}
|
|
||||||
};
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Safety: Must be called with a matching set of parameters for a valid dma channel
|
|
||||||
pub(crate) unsafe fn on_irq_inner(dma: pac::bdma::Dma, channel_num: usize, index: usize) {
|
|
||||||
let isr = dma.isr().read();
|
|
||||||
let cr = dma.ch(channel_num).cr();
|
|
||||||
|
|
||||||
if isr.teif(channel_num) {
|
|
||||||
panic!("DMA: error on BDMA@{:08x} channel {}", dma.as_ptr() as u32, channel_num);
|
|
||||||
}
|
|
||||||
|
|
||||||
if isr.htif(channel_num) && cr.read().htie() {
|
|
||||||
// Acknowledge half transfer complete interrupt
|
|
||||||
dma.ifcr().write(|w| w.set_htif(channel_num, true));
|
|
||||||
} else if isr.tcif(channel_num) && cr.read().tcie() {
|
|
||||||
// Acknowledge transfer complete interrupt
|
|
||||||
dma.ifcr().write(|w| w.set_tcif(channel_num, true));
|
|
||||||
#[cfg(not(armv6m))]
|
|
||||||
STATE.complete_count[index].fetch_add(1, Ordering::Release);
|
|
||||||
#[cfg(armv6m)]
|
|
||||||
critical_section::with(|_| {
|
|
||||||
let x = STATE.complete_count[index].load(Ordering::Relaxed);
|
|
||||||
STATE.complete_count[index].store(x + 1, Ordering::Release);
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
STATE.ch_wakers[index].wake();
|
|
||||||
}
|
|
||||||
|
|
||||||
/// DMA request type alias.
|
|
||||||
#[cfg(any(bdma_v2, dmamux))]
|
|
||||||
pub type Request = u8;
|
|
||||||
/// DMA request type alias.
|
|
||||||
#[cfg(not(any(bdma_v2, dmamux)))]
|
|
||||||
pub type Request = ();
|
|
||||||
|
|
||||||
/// DMA channel.
|
|
||||||
#[cfg(dmamux)]
|
|
||||||
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
|
|
||||||
/// DMA channel.
|
|
||||||
#[cfg(not(dmamux))]
|
|
||||||
pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
|
|
||||||
|
|
||||||
pub(crate) mod sealed {
|
|
||||||
use super::*;
|
|
||||||
|
|
||||||
pub trait Channel {
|
|
||||||
fn regs(&self) -> pac::bdma::Dma;
|
|
||||||
fn num(&self) -> usize;
|
|
||||||
fn index(&self) -> usize;
|
|
||||||
fn on_irq();
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// DMA transfer.
|
|
||||||
#[must_use = "futures do nothing unless you `.await` or poll them"]
|
|
||||||
pub struct Transfer<'a, C: Channel> {
|
|
||||||
channel: PeripheralRef<'a, C>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel> Transfer<'a, C> {
|
|
||||||
/// Create a new read DMA transfer (peripheral to memory).
|
|
||||||
pub unsafe fn new_read<W: Word>(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
request: Request,
|
|
||||||
peri_addr: *mut W,
|
|
||||||
buf: &'a mut [W],
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
Self::new_read_raw(channel, request, peri_addr, buf, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new read DMA transfer (peripheral to memory), using raw pointers.
|
|
||||||
pub unsafe fn new_read_raw<W: Word>(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
request: Request,
|
|
||||||
peri_addr: *mut W,
|
|
||||||
buf: *mut [W],
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
into_ref!(channel);
|
|
||||||
|
|
||||||
let (ptr, len) = super::slice_ptr_parts_mut(buf);
|
|
||||||
assert!(len > 0 && len <= 0xFFFF);
|
|
||||||
|
|
||||||
Self::new_inner(
|
|
||||||
channel,
|
|
||||||
request,
|
|
||||||
Dir::PeripheralToMemory,
|
|
||||||
peri_addr as *const u32,
|
|
||||||
ptr as *mut u32,
|
|
||||||
len,
|
|
||||||
true,
|
|
||||||
W::size(),
|
|
||||||
options,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new write DMA transfer (memory to peripheral).
|
|
||||||
pub unsafe fn new_write<W: Word>(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
request: Request,
|
|
||||||
buf: &'a [W],
|
|
||||||
peri_addr: *mut W,
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
Self::new_write_raw(channel, request, buf, peri_addr, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new write DMA transfer (memory to peripheral), using raw pointers.
|
|
||||||
pub unsafe fn new_write_raw<W: Word>(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
request: Request,
|
|
||||||
buf: *const [W],
|
|
||||||
peri_addr: *mut W,
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
into_ref!(channel);
|
|
||||||
|
|
||||||
let (ptr, len) = super::slice_ptr_parts(buf);
|
|
||||||
assert!(len > 0 && len <= 0xFFFF);
|
|
||||||
|
|
||||||
Self::new_inner(
|
|
||||||
channel,
|
|
||||||
request,
|
|
||||||
Dir::MemoryToPeripheral,
|
|
||||||
peri_addr as *const u32,
|
|
||||||
ptr as *mut u32,
|
|
||||||
len,
|
|
||||||
true,
|
|
||||||
W::size(),
|
|
||||||
options,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
|
|
||||||
pub unsafe fn new_write_repeated<W: Word>(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
request: Request,
|
|
||||||
repeated: &'a W,
|
|
||||||
count: usize,
|
|
||||||
peri_addr: *mut W,
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
into_ref!(channel);
|
|
||||||
|
|
||||||
Self::new_inner(
|
|
||||||
channel,
|
|
||||||
request,
|
|
||||||
Dir::MemoryToPeripheral,
|
|
||||||
peri_addr as *const u32,
|
|
||||||
repeated as *const W as *mut u32,
|
|
||||||
count,
|
|
||||||
false,
|
|
||||||
W::size(),
|
|
||||||
options,
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
unsafe fn new_inner(
|
|
||||||
channel: PeripheralRef<'a, C>,
|
|
||||||
_request: Request,
|
|
||||||
dir: Dir,
|
|
||||||
peri_addr: *const u32,
|
|
||||||
mem_addr: *mut u32,
|
|
||||||
mem_len: usize,
|
|
||||||
incr_mem: bool,
|
|
||||||
data_size: WordSize,
|
|
||||||
options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
let ch = channel.regs().ch(channel.num());
|
|
||||||
|
|
||||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
#[cfg(bdma_v2)]
|
|
||||||
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
|
|
||||||
|
|
||||||
let mut this = Self { channel };
|
|
||||||
this.clear_irqs();
|
|
||||||
STATE.complete_count[this.channel.index()].store(0, Ordering::Release);
|
|
||||||
|
|
||||||
#[cfg(dmamux)]
|
|
||||||
super::dmamux::configure_dmamux(&*this.channel, _request);
|
|
||||||
|
|
||||||
ch.par().write_value(peri_addr as u32);
|
|
||||||
ch.mar().write_value(mem_addr as u32);
|
|
||||||
ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
|
|
||||||
ch.cr().write(|w| {
|
|
||||||
w.set_psize(data_size.into());
|
|
||||||
w.set_msize(data_size.into());
|
|
||||||
w.set_minc(incr_mem);
|
|
||||||
w.set_dir(dir.into());
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_tcie(options.complete_transfer_ir);
|
|
||||||
w.set_htie(options.half_transfer_ir);
|
|
||||||
w.set_circ(options.circular);
|
|
||||||
if options.circular {
|
|
||||||
debug!("Setting circular mode");
|
|
||||||
}
|
|
||||||
w.set_pl(vals::Pl::VERYHIGH);
|
|
||||||
w.set_en(true);
|
|
||||||
});
|
|
||||||
|
|
||||||
this
|
|
||||||
}
|
|
||||||
|
|
||||||
fn clear_irqs(&mut self) {
|
|
||||||
self.channel.regs().ifcr().write(|w| {
|
|
||||||
w.set_tcif(self.channel.num(), true);
|
|
||||||
w.set_teif(self.channel.num(), true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Request the transfer to stop.
|
|
||||||
///
|
|
||||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
|
||||||
pub fn request_stop(&mut self) {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
|
|
||||||
// Disable the channel. Keep the IEs enabled so the irqs still fire.
|
|
||||||
ch.cr().write(|w| {
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_tcie(true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return whether this transfer is still running.
|
|
||||||
///
|
|
||||||
/// If this returns `false`, it can be because either the transfer finished, or
|
|
||||||
/// it was requested to stop early with [`request_stop`](Self::request_stop).
|
|
||||||
pub fn is_running(&mut self) -> bool {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
let en = ch.cr().read().en();
|
|
||||||
let circular = ch.cr().read().circ();
|
|
||||||
let tcif = STATE.complete_count[self.channel.index()].load(Ordering::Acquire) != 0;
|
|
||||||
en && (circular || !tcif)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Get the total remaining transfers for the channel.
|
|
||||||
///
|
|
||||||
/// This will be zero for transfers that completed instead of being canceled with [`request_stop`](Self::request_stop).
|
|
||||||
pub fn get_remaining_transfers(&self) -> u16 {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
ch.ndtr().read().ndt()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Blocking wait until the transfer finishes.
|
|
||||||
pub fn blocking_wait(mut self) {
|
|
||||||
while self.is_running() {}
|
|
||||||
self.request_stop();
|
|
||||||
|
|
||||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
core::mem::forget(self);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel> Drop for Transfer<'a, C> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.request_stop();
|
|
||||||
while self.is_running() {}
|
|
||||||
|
|
||||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
|
|
||||||
impl<'a, C: Channel> Future for Transfer<'a, C> {
|
|
||||||
type Output = ();
|
|
||||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
|
||||||
STATE.ch_wakers[self.channel.index()].register(cx.waker());
|
|
||||||
|
|
||||||
if self.is_running() {
|
|
||||||
Poll::Pending
|
|
||||||
} else {
|
|
||||||
Poll::Ready(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ==============================
|
|
||||||
|
|
||||||
struct DmaCtrlImpl<'a, C: Channel>(PeripheralRef<'a, C>);
|
|
||||||
|
|
||||||
impl<'a, C: Channel> DmaCtrl for DmaCtrlImpl<'a, C> {
|
|
||||||
fn get_remaining_transfers(&self) -> usize {
|
|
||||||
let ch = self.0.regs().ch(self.0.num());
|
|
||||||
ch.ndtr().read().ndt() as usize
|
|
||||||
}
|
|
||||||
|
|
||||||
fn get_complete_count(&self) -> usize {
|
|
||||||
STATE.complete_count[self.0.index()].load(Ordering::Acquire)
|
|
||||||
}
|
|
||||||
|
|
||||||
fn reset_complete_count(&mut self) -> usize {
|
|
||||||
#[cfg(not(armv6m))]
|
|
||||||
return STATE.complete_count[self.0.index()].swap(0, Ordering::AcqRel);
|
|
||||||
#[cfg(armv6m)]
|
|
||||||
return critical_section::with(|_| {
|
|
||||||
let x = STATE.complete_count[self.0.index()].load(Ordering::Acquire);
|
|
||||||
STATE.complete_count[self.0.index()].store(0, Ordering::Release);
|
|
||||||
x
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
fn set_waker(&mut self, waker: &Waker) {
|
|
||||||
STATE.ch_wakers[self.0.index()].register(waker);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Ringbuffer for reading data using DMA circular mode.
|
|
||||||
pub struct ReadableRingBuffer<'a, C: Channel, W: Word> {
|
|
||||||
cr: regs::Cr,
|
|
||||||
channel: PeripheralRef<'a, C>,
|
|
||||||
ringbuf: ReadableDmaRingBuffer<'a, W>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel, W: Word> ReadableRingBuffer<'a, C, W> {
|
|
||||||
/// Create a new ring buffer.
|
|
||||||
pub unsafe fn new(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
_request: Request,
|
|
||||||
peri_addr: *mut W,
|
|
||||||
buffer: &'a mut [W],
|
|
||||||
_options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
into_ref!(channel);
|
|
||||||
|
|
||||||
let len = buffer.len();
|
|
||||||
assert!(len > 0 && len <= 0xFFFF);
|
|
||||||
|
|
||||||
let dir = Dir::PeripheralToMemory;
|
|
||||||
let data_size = W::size();
|
|
||||||
|
|
||||||
let channel_number = channel.num();
|
|
||||||
let dma = channel.regs();
|
|
||||||
|
|
||||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
#[cfg(bdma_v2)]
|
|
||||||
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
|
|
||||||
|
|
||||||
let mut w = regs::Cr(0);
|
|
||||||
w.set_psize(data_size.into());
|
|
||||||
w.set_msize(data_size.into());
|
|
||||||
w.set_minc(true);
|
|
||||||
w.set_dir(dir.into());
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_htie(true);
|
|
||||||
w.set_tcie(true);
|
|
||||||
w.set_circ(true);
|
|
||||||
w.set_pl(vals::Pl::VERYHIGH);
|
|
||||||
w.set_en(true);
|
|
||||||
|
|
||||||
let buffer_ptr = buffer.as_mut_ptr();
|
|
||||||
let mut this = Self {
|
|
||||||
channel,
|
|
||||||
cr: w,
|
|
||||||
ringbuf: ReadableDmaRingBuffer::new(buffer),
|
|
||||||
};
|
|
||||||
this.clear_irqs();
|
|
||||||
|
|
||||||
#[cfg(dmamux)]
|
|
||||||
super::dmamux::configure_dmamux(&*this.channel, _request);
|
|
||||||
|
|
||||||
let ch = dma.ch(channel_number);
|
|
||||||
ch.par().write_value(peri_addr as u32);
|
|
||||||
ch.mar().write_value(buffer_ptr as u32);
|
|
||||||
ch.ndtr().write(|w| w.set_ndt(len as u16));
|
|
||||||
|
|
||||||
this
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Start the ring buffer operation.
|
|
||||||
///
|
|
||||||
/// You must call this after creating it for it to work.
|
|
||||||
pub fn start(&mut self) {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
ch.cr().write_value(self.cr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Clear all data in the ring buffer.
|
|
||||||
pub fn clear(&mut self) {
|
|
||||||
self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read elements from the ring buffer
|
|
||||||
/// Return a tuple of the length read and the length remaining in the buffer
|
|
||||||
/// If not all of the elements were read, then there will be some elements in the buffer remaining
|
|
||||||
/// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
|
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
|
||||||
pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Read an exact number of elements from the ringbuffer.
|
|
||||||
///
|
|
||||||
/// Returns the remaining number of elements available for immediate reading.
|
|
||||||
/// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
|
|
||||||
///
|
|
||||||
/// Async/Wake Behavior:
|
|
||||||
/// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
|
|
||||||
/// and when it wraps around. This means that when called with a buffer of length 'M', when this
|
|
||||||
/// ring buffer was created with a buffer of size 'N':
|
|
||||||
/// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
|
|
||||||
/// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
|
|
||||||
pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
|
|
||||||
self.ringbuf
|
|
||||||
.read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The capacity of the ringbuffer.
|
|
||||||
pub const fn capacity(&self) -> usize {
|
|
||||||
self.ringbuf.cap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set a waker to be woken when at least one byte is received.
|
|
||||||
pub fn set_waker(&mut self, waker: &Waker) {
|
|
||||||
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn clear_irqs(&mut self) {
|
|
||||||
let dma = self.channel.regs();
|
|
||||||
dma.ifcr().write(|w| {
|
|
||||||
w.set_htif(self.channel.num(), true);
|
|
||||||
w.set_tcif(self.channel.num(), true);
|
|
||||||
w.set_teif(self.channel.num(), true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Request DMA to stop.
|
|
||||||
///
|
|
||||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
|
||||||
pub fn request_stop(&mut self) {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
|
|
||||||
// Disable the channel. Keep the IEs enabled so the irqs still fire.
|
|
||||||
// If the channel is enabled and transfer is not completed, we need to perform
|
|
||||||
// two separate write access to the CR register to disable the channel.
|
|
||||||
ch.cr().write(|w| {
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_htie(true);
|
|
||||||
w.set_tcie(true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return whether DMA is still running.
|
|
||||||
///
|
|
||||||
/// If this returns `false`, it can be because either the transfer finished, or
|
|
||||||
/// it was requested to stop early with [`request_stop`](Self::request_stop).
|
|
||||||
pub fn is_running(&mut self) -> bool {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
ch.cr().read().en()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel, W: Word> Drop for ReadableRingBuffer<'a, C, W> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.request_stop();
|
|
||||||
while self.is_running() {}
|
|
||||||
|
|
||||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Ringbuffer for writing data using DMA circular mode.
|
|
||||||
pub struct WritableRingBuffer<'a, C: Channel, W: Word> {
|
|
||||||
cr: regs::Cr,
|
|
||||||
channel: PeripheralRef<'a, C>,
|
|
||||||
ringbuf: WritableDmaRingBuffer<'a, W>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel, W: Word> WritableRingBuffer<'a, C, W> {
|
|
||||||
/// Create a new ring buffer.
|
|
||||||
pub unsafe fn new(
|
|
||||||
channel: impl Peripheral<P = C> + 'a,
|
|
||||||
_request: Request,
|
|
||||||
peri_addr: *mut W,
|
|
||||||
buffer: &'a mut [W],
|
|
||||||
_options: TransferOptions,
|
|
||||||
) -> Self {
|
|
||||||
into_ref!(channel);
|
|
||||||
|
|
||||||
let len = buffer.len();
|
|
||||||
assert!(len > 0 && len <= 0xFFFF);
|
|
||||||
|
|
||||||
let dir = Dir::MemoryToPeripheral;
|
|
||||||
let data_size = W::size();
|
|
||||||
|
|
||||||
let channel_number = channel.num();
|
|
||||||
let dma = channel.regs();
|
|
||||||
|
|
||||||
// "Preceding reads and writes cannot be moved past subsequent writes."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
|
|
||||||
#[cfg(bdma_v2)]
|
|
||||||
critical_section::with(|_| channel.regs().cselr().modify(|w| w.set_cs(channel.num(), _request)));
|
|
||||||
|
|
||||||
let mut w = regs::Cr(0);
|
|
||||||
w.set_psize(data_size.into());
|
|
||||||
w.set_msize(data_size.into());
|
|
||||||
w.set_minc(true);
|
|
||||||
w.set_dir(dir.into());
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_htie(true);
|
|
||||||
w.set_tcie(true);
|
|
||||||
w.set_circ(true);
|
|
||||||
w.set_pl(vals::Pl::VERYHIGH);
|
|
||||||
w.set_en(true);
|
|
||||||
|
|
||||||
let buffer_ptr = buffer.as_mut_ptr();
|
|
||||||
let mut this = Self {
|
|
||||||
channel,
|
|
||||||
cr: w,
|
|
||||||
ringbuf: WritableDmaRingBuffer::new(buffer),
|
|
||||||
};
|
|
||||||
this.clear_irqs();
|
|
||||||
|
|
||||||
#[cfg(dmamux)]
|
|
||||||
super::dmamux::configure_dmamux(&*this.channel, _request);
|
|
||||||
|
|
||||||
let ch = dma.ch(channel_number);
|
|
||||||
ch.par().write_value(peri_addr as u32);
|
|
||||||
ch.mar().write_value(buffer_ptr as u32);
|
|
||||||
ch.ndtr().write(|w| w.set_ndt(len as u16));
|
|
||||||
|
|
||||||
this
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Start the ring buffer operation.
|
|
||||||
///
|
|
||||||
/// You must call this after creating it for it to work.
|
|
||||||
pub fn start(&mut self) {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
ch.cr().write_value(self.cr)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Clear all data in the ring buffer.
|
|
||||||
pub fn clear(&mut self) {
|
|
||||||
self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write elements directly to the raw buffer.
|
|
||||||
/// This can be used to fill the buffer before starting the DMA transfer.
|
|
||||||
#[allow(dead_code)]
|
|
||||||
pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
self.ringbuf.write_immediate(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write elements to the ring buffer
|
|
||||||
/// Return a tuple of the length written and the length remaining in the buffer
|
|
||||||
pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
|
|
||||||
self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Write an exact number of elements to the ringbuffer.
|
|
||||||
pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
|
|
||||||
self.ringbuf
|
|
||||||
.write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
|
|
||||||
.await
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The capacity of the ringbuffer.
|
|
||||||
pub const fn capacity(&self) -> usize {
|
|
||||||
self.ringbuf.cap()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Set a waker to be woken when at least one byte is sent.
|
|
||||||
pub fn set_waker(&mut self, waker: &Waker) {
|
|
||||||
DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
|
|
||||||
}
|
|
||||||
|
|
||||||
fn clear_irqs(&mut self) {
|
|
||||||
let dma = self.channel.regs();
|
|
||||||
dma.ifcr().write(|w| {
|
|
||||||
w.set_htif(self.channel.num(), true);
|
|
||||||
w.set_tcif(self.channel.num(), true);
|
|
||||||
w.set_teif(self.channel.num(), true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Request DMA to stop.
|
|
||||||
///
|
|
||||||
/// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
|
|
||||||
pub fn request_stop(&mut self) {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
|
|
||||||
// Disable the channel. Keep the IEs enabled so the irqs still fire.
|
|
||||||
// If the channel is enabled and transfer is not completed, we need to perform
|
|
||||||
// two separate write access to the CR register to disable the channel.
|
|
||||||
ch.cr().write(|w| {
|
|
||||||
w.set_teie(true);
|
|
||||||
w.set_htie(true);
|
|
||||||
w.set_tcie(true);
|
|
||||||
});
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Return whether DMA is still running.
|
|
||||||
///
|
|
||||||
/// If this returns `false`, it can be because either the transfer finished, or
|
|
||||||
/// it was requested to stop early with [`request_stop`](Self::request_stop).
|
|
||||||
pub fn is_running(&mut self) -> bool {
|
|
||||||
let ch = self.channel.regs().ch(self.channel.num());
|
|
||||||
ch.cr().read().en()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl<'a, C: Channel, W: Word> Drop for WritableRingBuffer<'a, C, W> {
|
|
||||||
fn drop(&mut self) {
|
|
||||||
self.request_stop();
|
|
||||||
while self.is_running() {}
|
|
||||||
|
|
||||||
// "Subsequent reads and writes cannot be moved ahead of preceding reads."
|
|
||||||
fence(Ordering::SeqCst);
|
|
||||||
}
|
|
||||||
}
|
|
(File diff suppressed because it is too large.)

embassy-stm32/src/dma/dma_bdma.rs — new file, 913 lines

@@ -0,0 +1,913 @@
|
|||||||
|
use core::future::Future;
|
||||||
|
use core::pin::Pin;
|
||||||
|
use core::sync::atomic::{fence, AtomicUsize, Ordering};
|
||||||
|
use core::task::{Context, Poll, Waker};
|
||||||
|
|
||||||
|
use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
|
||||||
|
use embassy_sync::waitqueue::AtomicWaker;
|
||||||
|
|
||||||
|
use super::ringbuffer::{DmaCtrl, OverrunError, ReadableDmaRingBuffer, WritableDmaRingBuffer};
|
||||||
|
use super::word::{Word, WordSize};
|
||||||
|
use super::{AnyChannel, Channel, Dir, Request, STATE};
|
||||||
|
use crate::interrupt::typelevel::Interrupt;
|
||||||
|
use crate::interrupt::Priority;
|
||||||
|
use crate::pac;
|
||||||
|
|
||||||
|
pub(crate) struct ChannelInfo {
|
||||||
|
pub(crate) dma: DmaInfo,
|
||||||
|
pub(crate) num: usize,
|
||||||
|
#[cfg(dmamux)]
|
||||||
|
pub(crate) dmamux: super::DmamuxInfo,
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone, Copy)]
|
||||||
|
pub(crate) enum DmaInfo {
|
||||||
|
#[cfg(dma)]
|
||||||
|
Dma(pac::dma::Dma),
|
||||||
|
#[cfg(bdma)]
|
||||||
|
Bdma(pac::bdma::Dma),
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DMA transfer options.
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||||
|
#[non_exhaustive]
|
||||||
|
pub struct TransferOptions {
|
||||||
|
/// Peripheral burst transfer configuration
|
||||||
|
#[cfg(dma)]
|
||||||
|
pub pburst: Burst,
|
||||||
|
/// Memory burst transfer configuration
|
||||||
|
#[cfg(dma)]
|
||||||
|
pub mburst: Burst,
|
||||||
|
/// Flow control configuration
|
||||||
|
#[cfg(dma)]
|
||||||
|
pub flow_ctrl: FlowControl,
|
||||||
|
/// FIFO threshold for DMA FIFO mode. If none, direct mode is used.
|
||||||
|
#[cfg(dma)]
|
||||||
|
pub fifo_threshold: Option<FifoThreshold>,
|
||||||
|
/// Enable circular DMA
|
||||||
|
///
|
||||||
|
/// Note:
|
||||||
|
/// If you enable circular mode manually, you may want to build and `.await` the `Transfer` in a separate task.
|
||||||
|
/// Since DMA in circular mode need manually stop, `.await` in current task would block the task forever.
|
||||||
|
pub circular: bool,
|
||||||
|
/// Enable half transfer interrupt
|
||||||
|
pub half_transfer_ir: bool,
|
||||||
|
/// Enable transfer complete interrupt
|
||||||
|
pub complete_transfer_ir: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Default for TransferOptions {
|
||||||
|
fn default() -> Self {
|
||||||
|
Self {
|
||||||
|
#[cfg(dma)]
|
||||||
|
pburst: Burst::Single,
|
||||||
|
#[cfg(dma)]
|
||||||
|
mburst: Burst::Single,
|
||||||
|
#[cfg(dma)]
|
||||||
|
flow_ctrl: FlowControl::Dma,
|
||||||
|
#[cfg(dma)]
|
||||||
|
fifo_threshold: None,
|
||||||
|
circular: false,
|
||||||
|
half_transfer_ir: false,
|
||||||
|
complete_transfer_ir: true,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(dma)]
|
||||||
|
pub use dma_only::*;
|
||||||
|
#[cfg(dma)]
|
||||||
|
mod dma_only {
|
||||||
|
use pac::dma::vals;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
impl From<WordSize> for vals::Size {
|
||||||
|
fn from(raw: WordSize) -> Self {
|
||||||
|
match raw {
|
||||||
|
WordSize::OneByte => Self::BITS8,
|
||||||
|
WordSize::TwoBytes => Self::BITS16,
|
||||||
|
WordSize::FourBytes => Self::BITS32,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Dir> for vals::Dir {
|
||||||
|
fn from(raw: Dir) -> Self {
|
||||||
|
match raw {
|
||||||
|
Dir::MemoryToPeripheral => Self::MEMORYTOPERIPHERAL,
|
||||||
|
Dir::PeripheralToMemory => Self::PERIPHERALTOMEMORY,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DMA transfer burst setting.
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||||
|
pub enum Burst {
|
||||||
|
/// Single transfer
|
||||||
|
Single,
|
||||||
|
/// Incremental burst of 4 beats
|
||||||
|
Incr4,
|
||||||
|
/// Incremental burst of 8 beats
|
||||||
|
Incr8,
|
||||||
|
/// Incremental burst of 16 beats
|
||||||
|
Incr16,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Burst> for vals::Burst {
|
||||||
|
fn from(burst: Burst) -> Self {
|
||||||
|
match burst {
|
||||||
|
Burst::Single => vals::Burst::SINGLE,
|
||||||
|
Burst::Incr4 => vals::Burst::INCR4,
|
||||||
|
Burst::Incr8 => vals::Burst::INCR8,
|
||||||
|
Burst::Incr16 => vals::Burst::INCR16,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DMA flow control setting.
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||||
|
pub enum FlowControl {
|
||||||
|
/// Flow control by DMA
|
||||||
|
Dma,
|
||||||
|
/// Flow control by peripheral
|
||||||
|
Peripheral,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<FlowControl> for vals::Pfctrl {
|
||||||
|
fn from(flow: FlowControl) -> Self {
|
||||||
|
match flow {
|
||||||
|
FlowControl::Dma => vals::Pfctrl::DMA,
|
||||||
|
FlowControl::Peripheral => vals::Pfctrl::PERIPHERAL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// DMA FIFO threshold.
|
||||||
|
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
|
||||||
|
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
|
||||||
|
pub enum FifoThreshold {
|
||||||
|
/// 1/4 full FIFO
|
||||||
|
Quarter,
|
||||||
|
/// 1/2 full FIFO
|
||||||
|
Half,
|
||||||
|
/// 3/4 full FIFO
|
||||||
|
ThreeQuarters,
|
||||||
|
/// Full FIFO
|
||||||
|
Full,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<FifoThreshold> for vals::Fth {
|
||||||
|
fn from(value: FifoThreshold) -> Self {
|
||||||
|
match value {
|
||||||
|
FifoThreshold::Quarter => vals::Fth::QUARTER,
|
||||||
|
FifoThreshold::Half => vals::Fth::HALF,
|
||||||
|
FifoThreshold::ThreeQuarters => vals::Fth::THREEQUARTERS,
|
||||||
|
FifoThreshold::Full => vals::Fth::FULL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(bdma)]
|
||||||
|
mod bdma_only {
|
||||||
|
use pac::bdma::vals;
|
||||||
|
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
impl From<WordSize> for vals::Size {
|
||||||
|
fn from(raw: WordSize) -> Self {
|
||||||
|
match raw {
|
||||||
|
WordSize::OneByte => Self::BITS8,
|
||||||
|
WordSize::TwoBytes => Self::BITS16,
|
||||||
|
WordSize::FourBytes => Self::BITS32,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Dir> for vals::Dir {
|
||||||
|
fn from(raw: Dir) -> Self {
|
||||||
|
match raw {
|
||||||
|
Dir::MemoryToPeripheral => Self::FROMMEMORY,
|
||||||
|
Dir::PeripheralToMemory => Self::FROMPERIPHERAL,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct ChannelState {
|
||||||
|
waker: AtomicWaker,
|
||||||
|
complete_count: AtomicUsize,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl ChannelState {
|
||||||
|
pub(crate) const NEW: Self = Self {
|
||||||
|
waker: AtomicWaker::new(),
|
||||||
|
complete_count: AtomicUsize::new(0),
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
/// safety: must be called only once
|
||||||
|
pub(crate) unsafe fn init(
|
||||||
|
cs: critical_section::CriticalSection,
|
||||||
|
#[cfg(dma)] dma_priority: Priority,
|
||||||
|
#[cfg(bdma)] bdma_priority: Priority,
|
||||||
|
) {
|
||||||
|
foreach_interrupt! {
|
||||||
|
($peri:ident, dma, $block:ident, $signal_name:ident, $irq:ident) => {
|
||||||
|
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, dma_priority);
|
||||||
|
crate::interrupt::typelevel::$irq::enable();
|
||||||
|
};
|
||||||
|
($peri:ident, bdma, $block:ident, $signal_name:ident, $irq:ident) => {
|
||||||
|
crate::interrupt::typelevel::$irq::set_priority_with_cs(cs, bdma_priority);
|
||||||
|
crate::interrupt::typelevel::$irq::enable();
|
||||||
|
};
|
||||||
|
}
|
||||||
|
crate::_generated::init_dma();
|
||||||
|
crate::_generated::init_bdma();
|
||||||
|
}
|
||||||
|
|
||||||
|
impl AnyChannel {
|
||||||
|
/// Safety: Must be called with a matching set of parameters for a valid dma channel
|
||||||
|
pub(crate) unsafe fn on_irq(&self) {
|
||||||
|
let info = self.info();
|
||||||
|
let state = &STATE[self.id as usize];
|
||||||
|
match self.info().dma {
|
||||||
|
#[cfg(dma)]
|
||||||
|
DmaInfo::Dma(r) => {
|
||||||
|
let cr = r.st(info.num).cr();
|
||||||
|
let isr = r.isr(info.num / 4).read();
|
||||||
|
|
||||||
|
if isr.teif(info.num % 4) {
|
||||||
|
panic!("DMA: error on DMA@{:08x} channel {}", r.as_ptr() as u32, info.num);
|
||||||
|
}
|
||||||
|
|
||||||
|
if isr.htif(info.num % 4) && cr.read().htie() {
|
||||||
|
// Acknowledge half transfer complete interrupt
|
||||||
|
r.ifcr(info.num / 4).write(|w| w.set_htif(info.num % 4, true));
|
||||||
|
} else if isr.tcif(info.num % 4) && cr.read().tcie() {
|
||||||
|
// Acknowledge transfer complete interrupt
|
||||||
|
r.ifcr(info.num / 4).write(|w| w.set_tcif(info.num % 4, true));
|
||||||
|
state.complete_count.fetch_add(1, Ordering::Release);
|
||||||
|
} else {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
state.waker.wake();
|
||||||
|
}
|
||||||
|
#[cfg(bdma)]
|
||||||
|
DmaInfo::Bdma(r) => {
|
||||||
|
let isr = r.isr().read();
|
||||||
|
let cr = r.ch(info.num).cr();
|
||||||
|
|
||||||
|
if isr.teif(info.num) {
|
||||||
|
panic!("DMA: error on BDMA@{:08x} channel {}", r.as_ptr() as u32, info.num);
|
||||||
|
}
|
||||||
|
|
||||||
|
if isr.htif(info.num) && cr.read().htie() {
|
||||||
|
// Acknowledge half transfer complete interrupt
|
||||||
|
r.ifcr().write(|w| w.set_htif(info.num, true));
|
||||||
|
            } else if isr.tcif(info.num) && cr.read().tcie() {
                // Acknowledge transfer complete interrupt
                r.ifcr().write(|w| w.set_tcif(info.num, true));
                #[cfg(not(armv6m))]
                state.complete_count.fetch_add(1, Ordering::Release);
                #[cfg(armv6m)]
                critical_section::with(|_| {
                    let x = state.complete_count.load(Ordering::Relaxed);
                    state.complete_count.store(x + 1, Ordering::Release);
                })
            } else {
                return;
            }

            state.waker.wake();
            }
        }
    }

    unsafe fn configure(
        &self,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) {
        let info = self.info();

        #[cfg(dmamux)]
        super::dmamux::configure_dmamux(&info.dmamux, _request);

        assert!(mem_len > 0 && mem_len <= 0xFFFF);

        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);

                // "Preceding reads and writes cannot be moved past subsequent writes."
                fence(Ordering::SeqCst);

                self.clear_irqs();

                ch.par().write_value(peri_addr as u32);
                ch.m0ar().write_value(mem_addr as u32);
                ch.ndtr().write_value(pac::dma::regs::Ndtr(mem_len as _));
                ch.fcr().write(|w| {
                    if let Some(fth) = options.fifo_threshold {
                        // FIFO mode
                        w.set_dmdis(pac::dma::vals::Dmdis::DISABLED);
                        w.set_fth(fth.into());
                    } else {
                        // Direct mode
                        w.set_dmdis(pac::dma::vals::Dmdis::ENABLED);
                    }
                });
                ch.cr().write(|w| {
                    w.set_dir(dir.into());
                    w.set_msize(data_size.into());
                    w.set_psize(data_size.into());
                    w.set_pl(pac::dma::vals::Pl::VERYHIGH);
                    w.set_minc(incr_mem);
                    w.set_pinc(false);
                    w.set_teie(true);
                    w.set_htie(options.half_transfer_ir);
                    w.set_tcie(options.complete_transfer_ir);
                    w.set_circ(options.circular);
                    #[cfg(dma_v1)]
                    w.set_trbuff(true);
                    #[cfg(dma_v2)]
                    w.set_chsel(_request);
                    w.set_pburst(options.pburst.into());
                    w.set_mburst(options.mburst.into());
                    w.set_pfctrl(options.flow_ctrl.into());
                    w.set_en(false); // don't start yet
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                #[cfg(bdma_v2)]
                critical_section::with(|_| r.cselr().modify(|w| w.set_cs(info.num, _request)));

                let state: &ChannelState = &STATE[self.id as usize];
                let ch = r.ch(info.num);

                state.complete_count.store(0, Ordering::Release);
                self.clear_irqs();

                ch.par().write_value(peri_addr as u32);
                ch.mar().write_value(mem_addr as u32);
                ch.ndtr().write(|w| w.set_ndt(mem_len as u16));
                ch.cr().write(|w| {
                    w.set_psize(data_size.into());
                    w.set_msize(data_size.into());
                    w.set_minc(incr_mem);
                    w.set_dir(dir.into());
                    w.set_teie(true);
                    w.set_tcie(options.complete_transfer_ir);
                    w.set_htie(options.half_transfer_ir);
                    w.set_circ(options.circular);
                    w.set_pl(pac::bdma::vals::Pl::VERYHIGH);
                    w.set_en(false); // don't start yet
                });
            }
        }
    }

    fn start(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                let ch = r.st(info.num);
                ch.cr().modify(|w| w.set_en(true))
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                let ch = r.ch(info.num);
                ch.cr().modify(|w| w.set_en(true));
            }
        }
    }

    fn clear_irqs(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                let isrn = info.num / 4;
                let isrbit = info.num % 4;

                r.ifcr(isrn).write(|w| {
                    w.set_htif(isrbit, true);
                    w.set_tcif(isrbit, true);
                    w.set_teif(isrbit, true);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                r.ifcr().write(|w| {
                    w.set_htif(info.num, true);
                    w.set_tcif(info.num, true);
                    w.set_teif(info.num, true);
                });
            }
        }
    }

    fn request_stop(&self) {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => {
                // Disable the channel. Keep the IEs enabled so the irqs still fire.
                r.st(info.num).cr().write(|w| {
                    w.set_teie(true);
                    w.set_tcie(true);
                });
            }
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                // Disable the channel. Keep the IEs enabled so the irqs still fire.
                r.ch(info.num).cr().write(|w| {
                    w.set_teie(true);
                    w.set_tcie(true);
                });
            }
        }
    }

    fn is_running(&self) -> bool {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => r.st(info.num).cr().read().en(),
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => {
                let state: &ChannelState = &STATE[self.id as usize];
                let ch = r.ch(info.num);
                let en = ch.cr().read().en();
                let circular = ch.cr().read().circ();
                let tcif = state.complete_count.load(Ordering::Acquire) != 0;
                en && (circular || !tcif)
            }
        }
    }

    fn get_remaining_transfers(&self) -> u16 {
        let info = self.info();
        match self.info().dma {
            #[cfg(dma)]
            DmaInfo::Dma(r) => r.st(info.num).ndtr().read().ndt(),
            #[cfg(bdma)]
            DmaInfo::Bdma(r) => r.ch(info.num).ndtr().read().ndt(),
        }
    }
}

/// DMA transfer.
#[must_use = "futures do nothing unless you `.await` or poll them"]
pub struct Transfer<'a> {
    channel: PeripheralRef<'a, AnyChannel>,
}

impl<'a> Transfer<'a> {
    /// Create a new read DMA transfer (peripheral to memory).
    pub unsafe fn new_read<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: &'a mut [W],
        options: TransferOptions,
    ) -> Self {
        Self::new_read_raw(channel, request, peri_addr, buf, options)
    }

    /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
    pub unsafe fn new_read_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        peri_addr: *mut W,
        buf: *mut [W],
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts_mut(buf);
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::PeripheralToMemory,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral).
    pub unsafe fn new_write<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: &'a [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        Self::new_write_raw(channel, request, buf, peri_addr, options)
    }

    /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
    pub unsafe fn new_write_raw<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        buf: *const [W],
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        let (ptr, len) = super::slice_ptr_parts(buf);
        assert!(len > 0 && len <= 0xFFFF);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            ptr as *mut u32,
            len,
            true,
            W::size(),
            options,
        )
    }

    /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
    pub unsafe fn new_write_repeated<W: Word>(
        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
        repeated: &'a W,
        count: usize,
        peri_addr: *mut W,
        options: TransferOptions,
    ) -> Self {
        into_ref!(channel);

        Self::new_inner(
            channel.map_into(),
            request,
            Dir::MemoryToPeripheral,
            peri_addr as *const u32,
            repeated as *const W as *mut u32,
            count,
            false,
            W::size(),
            options,
        )
    }

    unsafe fn new_inner(
        channel: PeripheralRef<'a, AnyChannel>,
        _request: Request,
        dir: Dir,
        peri_addr: *const u32,
        mem_addr: *mut u32,
        mem_len: usize,
        incr_mem: bool,
        data_size: WordSize,
        options: TransferOptions,
    ) -> Self {
        channel.configure(
            _request, dir, peri_addr, mem_addr, mem_len, incr_mem, data_size, options,
        );
        channel.start();

        Self { channel }
    }

    /// Request the transfer to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether this transfer is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }

    /// Gets the total remaining transfers for the channel
    /// Note: this will be zero for transfers that completed without cancellation.
    pub fn get_remaining_transfers(&self) -> u16 {
        self.channel.get_remaining_transfers()
    }

    /// Blocking wait until the transfer finishes.
    pub fn blocking_wait(mut self) {
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);

        core::mem::forget(self);
    }
}

impl<'a> Drop for Transfer<'a> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}

impl<'a> Unpin for Transfer<'a> {}
impl<'a> Future for Transfer<'a> {
    type Output = ();
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let state: &ChannelState = &STATE[self.channel.id as usize];

        state.waker.register(cx.waker());

        if self.is_running() {
            Poll::Pending
        } else {
            Poll::Ready(())
        }
    }
}
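
// Illustrative sketch, not part of this commit: how a driver would build and await a
// `Transfer` using the API above, written as if inside this module. `request`,
// `periph_dr` and `options` are hypothetical inputs the calling driver already owns.
async fn example_read_block<'a>(
    ch: impl Peripheral<P = impl Channel> + 'a,
    request: Request,
    periph_dr: *mut u8,
    buf: &'a mut [u8],
    options: TransferOptions,
) {
    // SAFETY: `periph_dr` must point at the peripheral data register that drives
    // the DMA request line named by `request`, and must stay valid for the transfer.
    let transfer = unsafe { Transfer::new_read(ch, request, periph_dr, buf, options) };
    // The future completes once the transfer-complete interrupt bumps the channel
    // state and wakes the waker registered in `poll`.
    transfer.await;
}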

// ==============================

struct DmaCtrlImpl<'a>(PeripheralRef<'a, AnyChannel>);

impl<'a> DmaCtrl for DmaCtrlImpl<'a> {
    fn get_remaining_transfers(&self) -> usize {
        self.0.get_remaining_transfers() as _
    }

    fn get_complete_count(&self) -> usize {
        STATE[self.0.id as usize].complete_count.load(Ordering::Acquire)
    }

    fn reset_complete_count(&mut self) -> usize {
        let state = &STATE[self.0.id as usize];
        #[cfg(not(armv6m))]
        return state.complete_count.swap(0, Ordering::AcqRel);
        #[cfg(armv6m)]
        return critical_section::with(|_| {
            let x = state.complete_count.load(Ordering::Acquire);
            state.complete_count.store(0, Ordering::Release);
            x
        });
    }

    fn set_waker(&mut self, waker: &Waker) {
        STATE[self.0.id as usize].waker.register(waker);
    }
}

/// Ringbuffer for receiving data using DMA circular mode.
pub struct ReadableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    ringbuf: ReadableDmaRingBuffer<'a, W>,
}

impl<'a, W: Word> ReadableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let buffer_ptr = buffer.as_mut_ptr();
        let len = buffer.len();
        let dir = Dir::PeripheralToMemory;
        let data_size = W::size();

        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: ReadableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start()
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Read elements from the ring buffer
    /// Return a tuple of the length read and the length remaining in the buffer
    /// If not all of the elements were read, then there will be some elements in the buffer remaining
    /// The length remaining is the capacity, ring_buf.len(), less the elements remaining after the read
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    pub fn read(&mut self, buf: &mut [W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.read(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Read an exact number of elements from the ringbuffer.
    ///
    /// Returns the remaining number of elements available for immediate reading.
    /// OverrunError is returned if the portion to be read was overwritten by the DMA controller.
    ///
    /// Async/Wake Behavior:
    /// The underlying DMA peripheral only can wake us when its buffer pointer has reached the halfway point,
    /// and when it wraps around. This means that when called with a buffer of length 'M', when this
    /// ring buffer was created with a buffer of size 'N':
    /// - If M equals N/2 or N/2 divides evenly into M, this function will return every N/2 elements read on the DMA source.
    /// - Otherwise, this function may need up to N/2 extra elements to arrive before returning.
    pub async fn read_exact(&mut self, buffer: &mut [W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .read_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}

impl<'a, W: Word> Drop for ReadableRingBuffer<'a, W> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}
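
// Illustrative sketch, not part of this commit: the intended `ReadableRingBuffer` flow,
// written as if inside this module. `request`, `periph_dr` and `options` are placeholders
// supplied by a real driver. Picking a chunk equal to half the DMA buffer lines the
// wake-ups up with the half-transfer/transfer-complete interrupts described above.
async fn example_drain<'a>(
    ch: impl Peripheral<P = impl Channel> + 'a,
    request: Request,
    periph_dr: *mut u8,
    dma_buf: &'a mut [u8], // e.g. 256 bytes owned by the DMA for its circular buffer
    options: TransferOptions,
) -> Result<(), OverrunError> {
    // SAFETY: `periph_dr` must be the peripheral data register matching `request`.
    let mut ring = unsafe { ReadableRingBuffer::new(ch, request, periph_dr, dma_buf, options) };
    ring.start();

    let mut chunk = [0u8; 128]; // N/2 of a 256-byte ring: returns on every half/full wrap
    loop {
        // Fails with OverrunError if the DMA lapped the reader before we caught up.
        ring.read_exact(&mut chunk).await?;
        // ... process `chunk` here ...
    }
}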

/// Ringbuffer for writing data using DMA circular mode.
pub struct WritableRingBuffer<'a, W: Word> {
    channel: PeripheralRef<'a, AnyChannel>,
    ringbuf: WritableDmaRingBuffer<'a, W>,
}

impl<'a, W: Word> WritableRingBuffer<'a, W> {
    /// Create a new ring buffer.
    pub unsafe fn new(
        channel: impl Peripheral<P = impl Channel> + 'a,
        _request: Request,
        peri_addr: *mut W,
        buffer: &'a mut [W],
        mut options: TransferOptions,
    ) -> Self {
        into_ref!(channel);
        let channel: PeripheralRef<'a, AnyChannel> = channel.map_into();

        let len = buffer.len();
        let dir = Dir::MemoryToPeripheral;
        let data_size = W::size();
        let buffer_ptr = buffer.as_mut_ptr();

        options.complete_transfer_ir = true;
        options.circular = true;

        channel.configure(
            _request,
            dir,
            peri_addr as *mut u32,
            buffer_ptr as *mut u32,
            len,
            true,
            data_size,
            options,
        );

        Self {
            channel,
            ringbuf: WritableDmaRingBuffer::new(buffer),
        }
    }

    /// Start the ring buffer operation.
    ///
    /// You must call this after creating it for it to work.
    pub fn start(&mut self) {
        self.channel.start()
    }

    /// Clear all data in the ring buffer.
    pub fn clear(&mut self) {
        self.ringbuf.clear(&mut DmaCtrlImpl(self.channel.reborrow()));
    }

    /// Write elements directly to the raw buffer.
    /// This can be used to fill the buffer before starting the DMA transfer.
    #[allow(dead_code)]
    pub fn write_immediate(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.write_immediate(buf)
    }

    /// Write elements from the ring buffer
    /// Return a tuple of the length written and the length remaining in the buffer
    pub fn write(&mut self, buf: &[W]) -> Result<(usize, usize), OverrunError> {
        self.ringbuf.write(&mut DmaCtrlImpl(self.channel.reborrow()), buf)
    }

    /// Write an exact number of elements to the ringbuffer.
    pub async fn write_exact(&mut self, buffer: &[W]) -> Result<usize, OverrunError> {
        self.ringbuf
            .write_exact(&mut DmaCtrlImpl(self.channel.reborrow()), buffer)
            .await
    }

    /// The capacity of the ringbuffer
    pub const fn capacity(&self) -> usize {
        self.ringbuf.cap()
    }

    /// Set a waker to be woken when at least one byte is received.
    pub fn set_waker(&mut self, waker: &Waker) {
        DmaCtrlImpl(self.channel.reborrow()).set_waker(waker);
    }

    /// Request DMA to stop.
    ///
    /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
    pub fn request_stop(&mut self) {
        self.channel.request_stop()
    }

    /// Return whether DMA is still running.
    ///
    /// If this returns `false`, it can be because either the transfer finished, or
    /// it was requested to stop early with [`request_stop`](Self::request_stop).
    pub fn is_running(&mut self) -> bool {
        self.channel.is_running()
    }
}

impl<'a, W: Word> Drop for WritableRingBuffer<'a, W> {
    fn drop(&mut self) {
        self.request_stop();
        while self.is_running() {}

        // "Subsequent reads and writes cannot be moved ahead of preceding reads."
        fence(Ordering::SeqCst);
    }
}

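// Illustrative sketch, not part of this commit: the mirror-image `WritableRingBuffer` flow,
// with placeholder inputs. `write_immediate` pre-fills the DMA buffer so the peripheral has
// data the moment the channel is started, as its doc comment above describes.
async fn example_stream_out<'a>(
    ch: impl Peripheral<P = impl Channel> + 'a,
    request: Request,
    periph_dr: *mut u8,
    dma_buf: &'a mut [u8],
    options: TransferOptions,
    data: &[u8],
) -> Result<(), OverrunError> {
    // SAFETY: `periph_dr` must be the peripheral data register matching `request`.
    let mut ring = unsafe { WritableRingBuffer::new(ch, request, periph_dr, dma_buf, options) };
    ring.write_immediate(data)?; // fill before starting so the first DMA reads are valid
    ring.start();
    ring.write_exact(data).await?; // then keep topping the ring up as the DMA drains it
    Ok(())
}
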
@@ -1,9 +1,14 @@
 #![macro_use]
 
-use crate::{pac, peripherals};
+use crate::pac;
 
-pub(crate) fn configure_dmamux<M: MuxChannel>(channel: &M, request: u8) {
-    let ch_mux_regs = channel.mux_regs().ccr(channel.mux_num());
+pub(crate) struct DmamuxInfo {
+    pub(crate) mux: pac::dmamux::Dmamux,
+    pub(crate) num: usize,
+}
+
+pub(crate) fn configure_dmamux(info: &DmamuxInfo, request: u8) {
+    let ch_mux_regs = info.mux.ccr(info.num);
     ch_mux_regs.write(|reg| {
         reg.set_nbreq(0);
         reg.set_dmareq_id(request);
@@ -15,11 +20,7 @@ pub(crate) fn configure_dmamux<M: MuxChannel>(channel: &M, request: u8) {
 }
 
 pub(crate) mod dmamux_sealed {
-    use super::*;
-    pub trait MuxChannel {
-        fn mux_regs(&self) -> pac::dmamux::Dmamux;
-        fn mux_num(&self) -> usize;
-    }
+    pub trait MuxChannel {}
 }
 
 /// DMAMUX1 instance.
@@ -34,18 +35,11 @@ pub trait MuxChannel: dmamux_sealed::MuxChannel {
     type Mux;
 }
 
-foreach_dma_channel! {
-    ($channel_peri:ident, $dma_peri:ident, $version:ident, $channel_num:expr, $index:expr, {dmamux: $dmamux:ident, dmamux_channel: $dmamux_channel:expr}) => {
-        impl dmamux_sealed::MuxChannel for peripherals::$channel_peri {
-            fn mux_regs(&self) -> pac::dmamux::Dmamux {
-                pac::$dmamux
-            }
-            fn mux_num(&self) -> usize {
-                $dmamux_channel
-            }
-        }
-        impl MuxChannel for peripherals::$channel_peri {
-            type Mux = $dmamux;
+macro_rules! dmamux_channel_impl {
+    ($channel_peri:ident, $dmamux:ident) => {
+        impl crate::dma::dmamux_sealed::MuxChannel for crate::peripherals::$channel_peri {}
+        impl crate::dma::MuxChannel for crate::peripherals::$channel_peri {
+            type Mux = crate::dma::$dmamux;
         }
     };
 }
@@ -9,13 +9,17 @@ use embassy_hal_internal::{into_ref, Peripheral, PeripheralRef};
 use embassy_sync::waitqueue::AtomicWaker;
 
 use super::word::{Word, WordSize};
-use super::Dir;
-use crate::_generated::GPDMA_CHANNEL_COUNT;
+use super::{AnyChannel, Channel, Dir, Request, STATE};
 use crate::interrupt::typelevel::Interrupt;
 use crate::interrupt::Priority;
 use crate::pac;
 use crate::pac::gpdma::vals;
 
+pub(crate) struct ChannelInfo {
+    pub(crate) dma: pac::gpdma::Gpdma,
+    pub(crate) num: usize,
+}
+
 /// GPDMA transfer options.
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
 #[cfg_attr(feature = "defmt", derive(defmt::Format))]
@@ -38,21 +42,16 @@ impl From<WordSize> for vals::ChTr1Dw {
     }
 }
 
-struct State {
-    ch_wakers: [AtomicWaker; GPDMA_CHANNEL_COUNT],
+pub(crate) struct ChannelState {
+    waker: AtomicWaker,
 }
 
-impl State {
-    const fn new() -> Self {
-        const AW: AtomicWaker = AtomicWaker::new();
-        Self {
-            ch_wakers: [AW; GPDMA_CHANNEL_COUNT],
-        }
-    }
+impl ChannelState {
+    pub(crate) const NEW: Self = Self {
+        waker: AtomicWaker::new(),
+    };
 }
 
-static STATE: State = State::new();
-
 /// safety: must be called only once
 pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
     foreach_interrupt! {
@@ -64,87 +63,50 @@ pub(crate) unsafe fn init(cs: critical_section::CriticalSection, irq_priority: Priority) {
     crate::_generated::init_gpdma();
 }
 
-foreach_dma_channel! {
-    ($channel_peri:ident, $dma_peri:ident, gpdma, $channel_num:expr, $index:expr, $dmamux:tt) => {
-        impl sealed::Channel for crate::peripherals::$channel_peri {
-            fn regs(&self) -> pac::gpdma::Gpdma {
-                pac::$dma_peri
-            }
-            fn num(&self) -> usize {
-                $channel_num
-            }
-            fn index(&self) -> usize {
-                $index
-            }
-            fn on_irq() {
-                unsafe { on_irq_inner(pac::$dma_peri, $channel_num, $index) }
-            }
-        }
-
-        impl Channel for crate::peripherals::$channel_peri {}
-    };
-}
-
-/// Safety: Must be called with a matching set of parameters for a valid dma channel
-pub(crate) unsafe fn on_irq_inner(dma: pac::gpdma::Gpdma, channel_num: usize, index: usize) {
-    let ch = dma.ch(channel_num);
-    let sr = ch.sr().read();
-
-    if sr.dtef() {
-        panic!(
-            "DMA: data transfer error on DMA@{:08x} channel {}",
-            dma.as_ptr() as u32,
-            channel_num
-        );
-    }
-    if sr.usef() {
-        panic!(
-            "DMA: user settings error on DMA@{:08x} channel {}",
-            dma.as_ptr() as u32,
-            channel_num
-        );
-    }
-
-    if sr.suspf() || sr.tcf() {
-        // disable all xxIEs to prevent the irq from firing again.
-        ch.cr().write(|_| {});
-
-        // Wake the future. It'll look at tcf and see it's set.
-        STATE.ch_wakers[index].wake();
-    }
-}
-
-/// DMA request type alias. (also known as DMA channel number in some chips)
-pub type Request = u8;
-
-/// DMA channel.
-#[cfg(dmamux)]
-pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static + super::dmamux::MuxChannel {}
-/// DMA channel.
-#[cfg(not(dmamux))]
-pub trait Channel: sealed::Channel + Peripheral<P = Self> + 'static {}
-
-pub(crate) mod sealed {
-    use super::*;
-
-    pub trait Channel {
-        fn regs(&self) -> pac::gpdma::Gpdma;
-        fn num(&self) -> usize;
-        fn index(&self) -> usize;
-        fn on_irq();
+impl AnyChannel {
+    /// Safety: Must be called with a matching set of parameters for a valid dma channel
+    pub(crate) unsafe fn on_irq(&self) {
+        let info = self.info();
+        let state = &STATE[self.id as usize];
+
+        let ch = info.dma.ch(info.num);
+        let sr = ch.sr().read();
+
+        if sr.dtef() {
+            panic!(
+                "DMA: data transfer error on DMA@{:08x} channel {}",
+                info.dma.as_ptr() as u32,
+                info.num
+            );
+        }
+        if sr.usef() {
+            panic!(
+                "DMA: user settings error on DMA@{:08x} channel {}",
+                info.dma.as_ptr() as u32,
+                info.num
+            );
+        }
+
+        if sr.suspf() || sr.tcf() {
+            // disable all xxIEs to prevent the irq from firing again.
+            ch.cr().write(|_| {});
+
+            // Wake the future. It'll look at tcf and see it's set.
+            state.waker.wake();
+        }
     }
 }
 
 /// DMA transfer.
 #[must_use = "futures do nothing unless you `.await` or poll them"]
-pub struct Transfer<'a, C: Channel> {
-    channel: PeripheralRef<'a, C>,
+pub struct Transfer<'a> {
+    channel: PeripheralRef<'a, AnyChannel>,
 }
 
-impl<'a, C: Channel> Transfer<'a, C> {
+impl<'a> Transfer<'a> {
     /// Create a new read DMA transfer (peripheral to memory).
     pub unsafe fn new_read<W: Word>(
-        channel: impl Peripheral<P = C> + 'a,
+        channel: impl Peripheral<P = impl Channel> + 'a,
         request: Request,
         peri_addr: *mut W,
         buf: &'a mut [W],
@@ -155,7 +117,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
     /// Create a new read DMA transfer (peripheral to memory), using raw pointers.
     pub unsafe fn new_read_raw<W: Word>(
-        channel: impl Peripheral<P = C> + 'a,
+        channel: impl Peripheral<P = impl Channel> + 'a,
         request: Request,
         peri_addr: *mut W,
         buf: *mut [W],
@@ -167,7 +129,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
         assert!(len > 0 && len <= 0xFFFF);
 
         Self::new_inner(
-            channel,
+            channel.map_into(),
             request,
             Dir::PeripheralToMemory,
             peri_addr as *const u32,
@@ -181,7 +143,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
     /// Create a new write DMA transfer (memory to peripheral).
     pub unsafe fn new_write<W: Word>(
-        channel: impl Peripheral<P = C> + 'a,
+        channel: impl Peripheral<P = impl Channel> + 'a,
        request: Request,
         buf: &'a [W],
         peri_addr: *mut W,
@@ -192,7 +154,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
     /// Create a new write DMA transfer (memory to peripheral), using raw pointers.
     pub unsafe fn new_write_raw<W: Word>(
-        channel: impl Peripheral<P = C> + 'a,
+        channel: impl Peripheral<P = impl Channel> + 'a,
         request: Request,
         buf: *const [W],
         peri_addr: *mut W,
@@ -204,7 +166,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
         assert!(len > 0 && len <= 0xFFFF);
 
         Self::new_inner(
-            channel,
+            channel.map_into(),
             request,
             Dir::MemoryToPeripheral,
             peri_addr as *const u32,
@@ -218,7 +180,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
 
     /// Create a new write DMA transfer (memory to peripheral), writing the same value repeatedly.
     pub unsafe fn new_write_repeated<W: Word>(
-        channel: impl Peripheral<P = C> + 'a,
+        channel: impl Peripheral<P = impl Channel> + 'a,
         request: Request,
         repeated: &'a W,
         count: usize,
@@ -228,7 +190,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
         into_ref!(channel);
 
         Self::new_inner(
-            channel,
+            channel.map_into(),
             request,
             Dir::MemoryToPeripheral,
             peri_addr as *const u32,
@@ -241,7 +203,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
     }
 
     unsafe fn new_inner(
-        channel: PeripheralRef<'a, C>,
+        channel: PeripheralRef<'a, AnyChannel>,
         request: Request,
         dir: Dir,
         peri_addr: *const u32,
@@ -251,7 +213,8 @@ impl<'a, C: Channel> Transfer<'a, C> {
         data_size: WordSize,
         _options: TransferOptions,
     ) -> Self {
-        let ch = channel.regs().ch(channel.num());
+        let info = channel.info();
+        let ch = info.dma.ch(info.num);
 
         // "Preceding reads and writes cannot be moved past subsequent writes."
         fence(Ordering::SeqCst);
@@ -311,10 +274,10 @@ impl<'a, C: Channel> Transfer<'a, C> {
     ///
     /// This doesn't immediately stop the transfer, you have to wait until [`is_running`](Self::is_running) returns false.
     pub fn request_stop(&mut self) {
-        let ch = self.channel.regs().ch(self.channel.num());
-        ch.cr().modify(|w| {
-            w.set_susp(true);
-        })
+        let info = self.channel.info();
+        let ch = info.dma.ch(info.num);
+
+        ch.cr().modify(|w| w.set_susp(true))
     }
 
     /// Return whether this transfer is still running.
@@ -322,7 +285,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
     /// If this returns `false`, it can be because either the transfer finished, or
     /// it was requested to stop early with [`request_stop`](Self::request_stop).
     pub fn is_running(&mut self) -> bool {
-        let ch = self.channel.regs().ch(self.channel.num());
+        let info = self.channel.info();
+        let ch = info.dma.ch(info.num);
+
         let sr = ch.sr().read();
         !sr.tcf() && !sr.suspf()
     }
@@ -330,7 +295,9 @@ impl<'a, C: Channel> Transfer<'a, C> {
     /// Gets the total remaining transfers for the channel
     /// Note: this will be zero for transfers that completed without cancellation.
     pub fn get_remaining_transfers(&self) -> u16 {
-        let ch = self.channel.regs().ch(self.channel.num());
+        let info = self.channel.info();
+        let ch = info.dma.ch(info.num);
+
         ch.br1().read().bndt()
     }
 
@@ -345,7 +312,7 @@ impl<'a, C: Channel> Transfer<'a, C> {
     }
 }
 
-impl<'a, C: Channel> Drop for Transfer<'a, C> {
+impl<'a> Drop for Transfer<'a> {
     fn drop(&mut self) {
         self.request_stop();
         while self.is_running() {}
@@ -355,11 +322,12 @@ impl<'a, C: Channel> Drop for Transfer<'a, C> {
     }
 }
 
-impl<'a, C: Channel> Unpin for Transfer<'a, C> {}
-impl<'a, C: Channel> Future for Transfer<'a, C> {
+impl<'a> Unpin for Transfer<'a> {}
+impl<'a> Future for Transfer<'a> {
     type Output = ();
     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
-        STATE.ch_wakers[self.channel.index()].register(cx.waker());
+        let state = &STATE[self.channel.id as usize];
+        state.waker.register(cx.waker());
+
         if self.is_running() {
             Poll::Pending
@@ -1,19 +1,10 @@
 //! Direct Memory Access (DMA)
+#![macro_use]
 
-#[cfg(dma)]
-pub(crate) mod dma;
-#[cfg(dma)]
-pub use dma::*;
+#[cfg(any(bdma, dma))]
+mod dma_bdma;
+#[cfg(any(bdma, dma))]
+pub use dma_bdma::*;
 
-// stm32h7 has both dma and bdma. In that case, we export dma as "main" dma,
-// and bdma as "secondary", under `embassy_stm32::dma::bdma`.
-#[cfg(all(bdma, dma))]
-pub mod bdma;
-
-#[cfg(all(bdma, not(dma)))]
-pub(crate) mod bdma;
-#[cfg(all(bdma, not(dma)))]
-pub use bdma::*;
-
 #[cfg(gpdma)]
 pub(crate) mod gpdma;
@@ -22,16 +13,16 @@ pub use gpdma::*;
 
 #[cfg(dmamux)]
 mod dmamux;
+#[cfg(dmamux)]
+pub use dmamux::*;
 
 pub(crate) mod ringbuffer;
 pub mod word;
 
 use core::mem;
 
-use embassy_hal_internal::impl_peripheral;
+use embassy_hal_internal::{impl_peripheral, Peripheral};
 
-#[cfg(dmamux)]
-pub use self::dmamux::*;
 use crate::interrupt::Priority;
 
 #[derive(Debug, Copy, Clone, PartialEq, Eq)]
@@ -41,6 +32,73 @@ enum Dir {
     PeripheralToMemory,
 }
 
+/// DMA request type alias. (also known as DMA channel number in some chips)
+#[cfg(any(dma_v2, bdma_v2, gpdma, dmamux))]
+pub type Request = u8;
+/// DMA request type alias. (also known as DMA channel number in some chips)
+#[cfg(not(any(dma_v2, bdma_v2, gpdma, dmamux)))]
+pub type Request = ();
+
+pub(crate) mod sealed {
+    pub trait Channel {
+        fn id(&self) -> u8;
+    }
+    pub trait ChannelInterrupt {
+        unsafe fn on_irq();
+    }
+}
+
+/// DMA channel.
+pub trait Channel: sealed::Channel + Peripheral<P = Self> + Into<AnyChannel> + 'static {
+    /// Type-erase (degrade) this pin into an `AnyChannel`.
+    ///
+    /// This converts DMA channel singletons (`DMA1_CH3`, `DMA2_CH1`, ...), which
+    /// are all different types, into the same type. It is useful for
+    /// creating arrays of channels, or avoiding generics.
+    #[inline]
+    fn degrade(self) -> AnyChannel {
+        AnyChannel { id: self.id() }
+    }
+}
+
+macro_rules! dma_channel_impl {
+    ($channel_peri:ident, $index:expr) => {
+        impl crate::dma::sealed::Channel for crate::peripherals::$channel_peri {
+            fn id(&self) -> u8 {
+                $index
+            }
+        }
+        impl crate::dma::sealed::ChannelInterrupt for crate::peripherals::$channel_peri {
+            unsafe fn on_irq() {
+                crate::dma::AnyChannel { id: $index }.on_irq();
+            }
+        }
+
+        impl crate::dma::Channel for crate::peripherals::$channel_peri {}
+
+        impl From<crate::peripherals::$channel_peri> for crate::dma::AnyChannel {
+            fn from(x: crate::peripherals::$channel_peri) -> Self {
+                crate::dma::Channel::degrade(x)
+            }
+        }
+    };
+}
+
+/// Type-erased DMA channel.
+pub struct AnyChannel {
+    pub(crate) id: u8,
+}
+impl_peripheral!(AnyChannel);
+
+impl AnyChannel {
+    fn info(&self) -> &ChannelInfo {
+        &crate::_generated::DMA_CHANNELS[self.id as usize]
+    }
+}
+
+const CHANNEL_COUNT: usize = crate::_generated::DMA_CHANNELS.len();
+static STATE: [ChannelState; CHANNEL_COUNT] = [ChannelState::NEW; CHANNEL_COUNT];
+
 /// "No DMA" placeholder.
 ///
 /// You may pass this in place of a real DMA channel when creating a driver
@@ -70,10 +128,14 @@ pub(crate) unsafe fn init(
     #[cfg(dma)] dma_priority: Priority,
     #[cfg(gpdma)] gpdma_priority: Priority,
 ) {
-    #[cfg(bdma)]
-    bdma::init(cs, bdma_priority);
-    #[cfg(dma)]
-    dma::init(cs, dma_priority);
+    #[cfg(any(dma, bdma))]
+    dma_bdma::init(
+        cs,
+        #[cfg(dma)]
+        dma_priority,
+        #[cfg(bdma)]
+        bdma_priority,
+    );
     #[cfg(gpdma)]
     gpdma::init(cs, gpdma_priority);
     #[cfg(dmamux)]
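// Illustrative sketch, not part of this diff: what the `degrade`/`Into<AnyChannel>` machinery
// buys a user. The channel names are the ones the doc comment above uses as examples and
// exist only on chips that actually have those channels; any two channel singletons work.
fn example_erase(ch_a: crate::peripherals::DMA1_CH3, ch_b: crate::peripherals::DMA2_CH1) -> [crate::dma::AnyChannel; 2] {
    // Both singletons collapse into the same runtime type, so they can share one array
    // (or a non-generic struct field) instead of carrying a type parameter per channel.
    [ch_a.into(), ch_b.into()]
}
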
@@ -501,9 +501,9 @@ impl Config {
     }
 }
 
-enum RingBuffer<'d, C: Channel, W: word::Word> {
-    Writable(WritableRingBuffer<'d, C, W>),
-    Readable(ReadableRingBuffer<'d, C, W>),
+enum RingBuffer<'d, W: word::Word> {
+    Writable(WritableRingBuffer<'d, W>),
+    Readable(ReadableRingBuffer<'d, W>),
 }
 
 #[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))]
@@ -528,13 +528,13 @@ fn get_af_types(mode: Mode, tx_rx: TxRx) -> (AFType, AFType) {
     )
 }
 
-fn get_ring_buffer<'d, T: Instance, C: Channel, W: word::Word>(
-    dma: impl Peripheral<P = C> + 'd,
+fn get_ring_buffer<'d, T: Instance, W: word::Word>(
+    dma: impl Peripheral<P = impl Channel> + 'd,
     dma_buf: &'d mut [W],
     request: Request,
     sub_block: WhichSubBlock,
     tx_rx: TxRx,
-) -> RingBuffer<'d, C, W> {
+) -> RingBuffer<'d, W> {
     let opts = TransferOptions {
         half_transfer_ir: true,
         //the new_write() and new_read() always use circular mode
@@ -593,17 +593,17 @@ pub fn split_subblocks<'d, T: Instance>(peri: impl Peripheral<P = T> + 'd) -> (S
 }
 
 /// SAI sub-block driver.
-pub struct Sai<'d, T: Instance, C: Channel, W: word::Word> {
+pub struct Sai<'d, T: Instance, W: word::Word> {
     _peri: PeripheralRef<'d, T>,
     sd: Option<PeripheralRef<'d, AnyPin>>,
     fs: Option<PeripheralRef<'d, AnyPin>>,
     sck: Option<PeripheralRef<'d, AnyPin>>,
     mclk: Option<PeripheralRef<'d, AnyPin>>,
-    ring_buffer: RingBuffer<'d, C, W>,
+    ring_buffer: RingBuffer<'d, W>,
     sub_block: WhichSubBlock,
 }
 
-impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
+impl<'d, T: Instance, W: word::Word> Sai<'d, T, W> {
     /// Create a new SAI driver in asynchronous mode with MCLK.
     ///
     /// You can obtain the [`SubBlock`] with [`split_subblocks`].
@@ -613,13 +613,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
         sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
         fs: impl Peripheral<P = impl FsPin<T, S>> + 'd,
         mclk: impl Peripheral<P = impl MclkPin<T, S>> + 'd,
-        dma: impl Peripheral<P = C> + 'd,
+        dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
         dma_buf: &'d mut [W],
         mut config: Config,
-    ) -> Self
-    where
-        C: Channel + Dma<T, S>,
-    {
+    ) -> Self {
         into_ref!(mclk);
 
         let (_sd_af_type, ck_af_type) = get_af_types(config.mode, config.tx_rx);
@@ -642,13 +639,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
         sck: impl Peripheral<P = impl SckPin<T, S>> + 'd,
         sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
         fs: impl Peripheral<P = impl FsPin<T, S>> + 'd,
-        dma: impl Peripheral<P = C> + 'd,
+        dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
         dma_buf: &'d mut [W],
         config: Config,
-    ) -> Self
-    where
-        C: Channel + Dma<T, S>,
-    {
+    ) -> Self {
         let peri = peri.peri;
         into_ref!(peri, dma, sck, sd, fs);
 
@@ -671,7 +665,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
             None,
             Some(sd.map_into()),
             Some(fs.map_into()),
-            get_ring_buffer::<T, C, W>(dma, dma_buf, request, sub_block, config.tx_rx),
+            get_ring_buffer::<T, W>(dma, dma_buf, request, sub_block, config.tx_rx),
             config,
         )
     }
@@ -682,13 +676,10 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
     pub fn new_synchronous<S: SubBlockInstance>(
         peri: SubBlock<'d, T, S>,
         sd: impl Peripheral<P = impl SdPin<T, S>> + 'd,
-        dma: impl Peripheral<P = C> + 'd,
+        dma: impl Peripheral<P = impl Channel + Dma<T, S>> + 'd,
         dma_buf: &'d mut [W],
         mut config: Config,
-    ) -> Self
-    where
-        C: Channel + Dma<T, S>,
-    {
+    ) -> Self {
         update_synchronous_config(&mut config);
 
         let peri = peri.peri;
@@ -709,7 +700,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
             None,
             Some(sd.map_into()),
             None,
-            get_ring_buffer::<T, C, W>(dma, dma_buf, request, sub_block, config.tx_rx),
+            get_ring_buffer::<T, W>(dma, dma_buf, request, sub_block, config.tx_rx),
             config,
         )
     }
@@ -721,7 +712,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
         mclk: Option<PeripheralRef<'d, AnyPin>>,
         sd: Option<PeripheralRef<'d, AnyPin>>,
         fs: Option<PeripheralRef<'d, AnyPin>>,
-        ring_buffer: RingBuffer<'d, C, W>,
+        ring_buffer: RingBuffer<'d, W>,
         config: Config,
     ) -> Self {
         #[cfg(any(sai_v1, sai_v2, sai_v3, sai_v4))]
@@ -830,7 +821,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
         }
     }
 
-    fn is_transmitter(ring_buffer: &RingBuffer<C, W>) -> bool {
+    fn is_transmitter(ring_buffer: &RingBuffer<W>) -> bool {
         match ring_buffer {
             RingBuffer::Writable(_) => true,
             _ => false,
@@ -889,7 +880,7 @@ impl<'d, T: Instance, C: Channel, W: word::Word> Sai<'d, T, C, W> {
        }
    }
 
-impl<'d, T: Instance, C: Channel, W: word::Word> Drop for Sai<'d, T, C, W> {
+impl<'d, T: Instance, W: word::Word> Drop for Sai<'d, T, W> {
     fn drop(&mut self) {
         let ch = T::REGS.ch(self.sub_block as usize);
         ch.cr1().modify(|w| w.set_saien(false));
@@ -228,10 +228,10 @@ fn clk_div(ker_ck: Hertz, sdmmc_ck: u32) -> Result<(bool, u16, Hertz), Error> {
 }
 
 #[cfg(sdmmc_v1)]
-type Transfer<'a, C> = crate::dma::Transfer<'a, C>;
+type Transfer<'a> = crate::dma::Transfer<'a>;
 #[cfg(sdmmc_v2)]
-struct Transfer<'a, C> {
-    _dummy: core::marker::PhantomData<&'a mut C>,
+struct Transfer<'a> {
+    _dummy: PhantomData<&'a ()>,
 }
 
 #[cfg(all(sdmmc_v1, dma))]
@@ -548,7 +548,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
         buffer: &'a mut [u32],
         length_bytes: u32,
         block_size: u8,
-    ) -> Transfer<'a, Dma> {
+    ) -> Transfer<'a> {
         assert!(block_size <= 14, "Block size up to 2^14 bytes");
         let regs = T::regs();
 
@@ -596,12 +596,7 @@ impl<'d, T: Instance, Dma: SdmmcDma<T> + 'd> Sdmmc<'d, T, Dma> {
     /// # Safety
     ///
     /// `buffer` must be valid for the whole transfer and word aligned
-    fn prepare_datapath_write<'a>(
-        &'a mut self,
-        buffer: &'a [u32],
-        length_bytes: u32,
-        block_size: u8,
-    ) -> Transfer<'a, Dma> {
+    fn prepare_datapath_write<'a>(&'a mut self, buffer: &'a [u32], length_bytes: u32, block_size: u8) -> Transfer<'a> {
         assert!(block_size <= 14, "Block size up to 2^14 bytes");
         let regs = T::regs();
 
@@ -7,19 +7,19 @@ use embassy_embedded_hal::SetConfig;
 use embassy_hal_internal::PeripheralRef;
 use futures::future::{select, Either};
 
-use super::{clear_interrupt_flags, rdr, reconfigure, sr, BasicInstance, Config, ConfigError, Error, RxDma, UartRx};
+use super::{clear_interrupt_flags, rdr, reconfigure, sr, BasicInstance, Config, ConfigError, Error, UartRx};
 use crate::dma::ReadableRingBuffer;
 use crate::usart::{Regs, Sr};
 
 /// Rx-only Ring-buffered UART Driver
 ///
 /// Created with [UartRx::into_ring_buffered]
-pub struct RingBufferedUartRx<'d, T: BasicInstance, RxDma: super::RxDma<T>> {
+pub struct RingBufferedUartRx<'d, T: BasicInstance> {
     _peri: PeripheralRef<'d, T>,
-    ring_buf: ReadableRingBuffer<'d, RxDma, u8>,
+    ring_buf: ReadableRingBuffer<'d, u8>,
 }
 
-impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> SetConfig for RingBufferedUartRx<'d, T, RxDma> {
+impl<'d, T: BasicInstance> SetConfig for RingBufferedUartRx<'d, T> {
     type Config = Config;
     type ConfigError = ConfigError;
 
@@ -32,7 +32,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
     /// Turn the `UartRx` into a buffered uart which can continously receive in the background
     /// without the possibility of losing bytes. The `dma_buf` is a buffer registered to the
     /// DMA controller, and must be large enough to prevent overflows.
-    pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T, RxDma> {
+    pub fn into_ring_buffered(self, dma_buf: &'d mut [u8]) -> RingBufferedUartRx<'d, T> {
         assert!(!dma_buf.is_empty() && dma_buf.len() <= 0xFFFF);
 
         let request = self.rx_dma.request();
@@ -51,7 +51,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> UartRx<'d, T, RxDma> {
     }
 }
 
-impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxDma> {
+impl<'d, T: BasicInstance> RingBufferedUartRx<'d, T> {
     /// Clear the ring buffer and start receiving in the background
     pub fn start(&mut self) -> Result<(), Error> {
         // Clear the ring buffer so that it is ready to receive data
@@ -208,7 +208,7 @@ impl<'d, T: BasicInstance, RxDma: super::RxDma<T>> RingBufferedUartRx<'d, T, RxD
     }
 }
 
-impl<T: BasicInstance, RxDma: super::RxDma<T>> Drop for RingBufferedUartRx<'_, T, RxDma> {
+impl<T: BasicInstance> Drop for RingBufferedUartRx<'_, T> {
     fn drop(&mut self) {
         self.teardown_uart();
 
@@ -245,18 +245,16 @@ fn clear_idle_flag(r: Regs) -> Sr {
     sr
 }
 
-impl<T, Rx> embedded_io_async::ErrorType for RingBufferedUartRx<'_, T, Rx>
+impl<T> embedded_io_async::ErrorType for RingBufferedUartRx<'_, T>
 where
     T: BasicInstance,
-    Rx: RxDma<T>,
 {
     type Error = Error;
 }
 
-impl<T, Rx> embedded_io_async::Read for RingBufferedUartRx<'_, T, Rx>
+impl<T> embedded_io_async::Read for RingBufferedUartRx<'_, T>
 where
     T: BasicInstance,
-    Rx: RxDma<T>,
 {
     async fn read(&mut self, buf: &mut [u8]) -> Result<usize, Self::Error> {
         self.read(buf).await
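// Illustrative sketch, not part of this diff: driving the ring-buffered RX half after this
// change. Constructing the `UartRx` (pins, interrupts, DMA channel) is chip-specific and
// omitted; note that the DMA channel generic is gone from `RingBufferedUartRx`, so only the
// UART instance type remains. `start` and `read` are the methods shown above.
async fn example_pump<T: BasicInstance>(mut rx: RingBufferedUartRx<'static, T>) -> Result<(), Error> {
    rx.start()?; // clear the ring buffer and start background reception
    let mut buf = [0u8; 64];
    loop {
        // Returns as soon as at least one byte is available, without losing bytes
        // received while the previous chunk was being processed.
        let n = rx.read(&mut buf).await?;
        let _received = &buf[..n];
        // ... handle `_received` here ...
    }
}
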
@@ -74,7 +74,7 @@ async fn transmit_task(mut tx: UartTx<'static, peris::UART, peris::UART_TX_DMA>) {
 }
 
 #[embassy_executor::task]
-async fn receive_task(mut rx: RingBufferedUartRx<'static, peris::UART, peris::UART_RX_DMA>) {
+async fn receive_task(mut rx: RingBufferedUartRx<'static, peris::UART>) {
     info!("Ready to receive...");
 
     let mut rng = ChaCha8Rng::seed_from_u64(1337);