finish implementing high level interface, polish, fix warnings and bugs

This commit is contained in:
Hans Josephsen 2024-09-25 22:37:41 +02:00
parent 819ff1b7e3
commit c2119705e9
20 changed files with 870 additions and 355 deletions

View File

@ -1,3 +1,4 @@
#![allow(dead_code)]
use embassy_sync::channel::{DynamicReceiver, DynamicSender};
use super::enums::*;

View File

@ -1,5 +1,10 @@
//! Enums shared between CAN controller types.
/// Marker type indicating a Bus Off error condition.
#[derive(Debug, Clone, Copy)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub struct BusOff;
/// Bus error
#[derive(Debug)]
#[cfg_attr(feature = "defmt", derive(defmt::Format))]

View File

@ -125,7 +125,14 @@ pub struct TxConfig {
pub data_field_size: DataFieldSize,
}
/// Configuration for Message RAM layout.
///
/// The default layout (`CAN_FD_DEFAULT`) is as follows:
/// * 32 standard and 32 extended filters
/// * 6 x 64 byte elements in each of Rx FIFO 0 and 1
/// * 4 x 64 byte dedicated Rx buffers
/// * 12 x 64 byte elements in the Tx queue
/// * 4 x 64 byte dedicated Tx buffers
#[derive(Clone, Copy, Debug)]
pub struct MessageRamConfig {
/// 0-128: Number of standard Message ID filter elements
@ -146,24 +153,60 @@ pub struct MessageRamConfig {
pub tx: TxConfig,
}
impl MessageRamConfig {
/// Contains a sane default Message RAM config for CAN Classic only frames.
pub const CAN_CLASSIC_DEFAULT: MessageRamConfig = MessageRamConfig {
standard_id_filter_size: 32,
extended_id_filter_size: 32,
rx_fifo_0: RxFifoConfig {
watermark_interrupt_level: 0,
fifo_size: 16,
data_field_size: DataFieldSize::B8,
},
rx_fifo_1: RxFifoConfig {
watermark_interrupt_level: 0,
fifo_size: 16,
data_field_size: DataFieldSize::B8,
},
rx_buffer: RxBufferConfig {
size: 16,
data_field_size: DataFieldSize::B8,
},
tx: TxConfig {
queue_size: 24,
dedicated_size: 8,
data_field_size: DataFieldSize::B8,
},
};
/// Contains a sane default message RAM config supporting CAN FD frames.
pub const CAN_FD_DEFAULT: MessageRamConfig = MessageRamConfig {
standard_id_filter_size: 32,
extended_id_filter_size: 32,
rx_fifo_0: RxFifoConfig {
watermark_interrupt_level: 0,
fifo_size: 6,
data_field_size: DataFieldSize::B64,
},
rx_fifo_1: RxFifoConfig {
watermark_interrupt_level: 0,
fifo_size: 6,
data_field_size: DataFieldSize::B64,
},
rx_buffer: RxBufferConfig {
size: 4,
data_field_size: DataFieldSize::B64,
},
tx: TxConfig {
queue_size: 12,
dedicated_size: 4,
data_field_size: DataFieldSize::B64,
},
};
}
impl Default for MessageRamConfig {
fn default() -> Self {
Self::CAN_FD_DEFAULT
MessageRamConfig {
standard_id_filter_size: 28,
extended_id_filter_size: 8,
rx_fifo_0: RxFifoConfig {
watermark_interrupt_level: 3,
fifo_size: 3,
data_field_size: DataFieldSize::B64,
},
rx_fifo_1: RxFifoConfig::DISABLED,
rx_buffer: RxBufferConfig::DISABLED,
tx: TxConfig {
queue_size: 3,
dedicated_size: 0,
data_field_size: DataFieldSize::B64,
},
}
}
}
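For illustration, a minimal usage sketch of picking one of these presets (hypothetical user code, not part of this diff; assumes the `set_message_ram_config` builder added later in this commit, which is only available on H7-class parts with configurable Message RAM):
// Hypothetical usage sketch:
let config = FdCanConfig::default()
    // Classic CAN only: 8 byte data fields leave room for more elements.
    .set_message_ram_config(MessageRamConfig::CAN_CLASSIC_DEFAULT);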

View File

@ -324,6 +324,51 @@ pub enum CanFdMode {
AllowFdCanAndBRS,
}
/// Error handling mode for the driver.
/// Indicates how BusOff errors should be handled.
#[derive(Clone, Copy, Debug)]
pub enum ErrorHandlingMode {
/// Bus Off conditions will be automatically recovered from.
/// This is usually not what you want.
///
/// In this mode, if you attempt transmission of a frame it will be
/// retried forever. This has implications for both the bus and your code:
/// * The bus will be spammed with retries for the frame until it is ACKed
/// * If you are waiting on frame transmission, or the Tx buffers are full,
/// your code will block forever.
///
/// If there are no other devices on the bus, no ACK will ever occur.
Auto,
/// Error handling happens at every call site.
///
/// If bus off condition occurs while a task is waiting on transmission or
/// reception, the error will be returned in that task.
///
/// If multiple tasks are waiting for tx/rx, they will all receive the
/// error. This should be kept in mind when attempting to recover the bus,
/// but most recovery related functions are idempotent and can be called
/// multiple times from several tasks without harm.
Local,
/// Same as `Local`, but only reception will return errors.
///
/// The rationale here is that Tx may be retried at will transparently when
/// the bus has recovered, whereas Rx has the added implication of potential
/// missed messages.
LocalRx,
/// Error handling happens centrally.
///
/// If an error condition occurs while a task is waiting on tx or rx, that
/// task will block until the operation eventually succeeds.
///
/// You are expected to handle error conditions in a central location by
/// calling the dedicated error management functions.
///
/// If the error condition is not handled centrally by a task, transmitters and
/// receivers will block forever. Make sure you implement an error handling
/// task.
Central,
}
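As a sketch of how a mode is selected (hypothetical user code; the `error_handling_mode` field is public, so it can be assigned directly on the config struct):
// Hypothetical usage sketch: opt in to central error handling.
let mut config = FdCanConfig::default(); // defaults to ErrorHandlingMode::Local
config.error_handling_mode = ErrorHandlingMode::Central;
// A dedicated task is then expected to call wait_bus_off()/recover_bus_off().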
/// FdCan Config Struct
#[derive(Clone, Copy, Debug)]
pub struct FdCanConfig {
@ -357,6 +402,9 @@ pub struct FdCanConfig {
/// TX buffer mode (FIFO or priority queue)
pub tx_buffer_mode: TxBufferMode,
/// Error handling mode for the CAN driver
pub error_handling_mode: ErrorHandlingMode,
/// Configures whether the peripheral uses CAN FD as specified by the
/// Bosch CAN FD Specification V1.0 or if it is limited to regular classic
/// ISO CAN.
@ -459,6 +507,10 @@ impl FdCanConfig {
self
}
/// The FDCAN/M_CAN peripheral uses a RAM region called `message_ram` to
/// store data for Rx/Tx and more.
/// The layout of this memory region is customizable depending on your needs.
#[cfg(can_fdcan_h7)]
#[inline]
pub const fn set_message_ram_config(mut self, config: MessageRamConfig) -> Self {
self.message_ram_config = config;
@ -480,6 +532,7 @@ impl Default for FdCanConfig {
timestamp_source: TimestampSource::None,
global_filter: GlobalFilter::default(),
tx_buffer_mode: TxBufferMode::Priority,
error_handling_mode: ErrorHandlingMode::Local,
can_fd_mode: CanFdMode::ClassicCanOnly,
message_ram_config: MessageRamConfig::default(),
}

View File

@ -1,7 +1,11 @@
use core::marker::PhantomData;
use core::sync::atomic::Ordering;
use bit_field::BitField;
use embassy_hal_internal::into_ref;
use embedded_can::{ExtendedId, StandardId};
use crate::can::config::ErrorHandlingMode;
use crate::can::{Classic, Fd};
use crate::interrupt::typelevel::Interrupt;
use crate::{
@ -16,6 +20,7 @@ use crate::{
};
use super::config::CanFdMode;
use super::filter::Filter;
use super::{calc_ns_per_timer_tick, IT0InterruptHandler, IT1InterruptHandler, Info, State};
/// FDCAN Configuration instance
@ -74,7 +79,7 @@ impl<'d> CanConfigurator<'d> {
config,
info: T::info(),
state: T::state(),
properties: Properties::new(T::state()),
periph_clock: T::frequency(),
}
}
@ -156,16 +161,31 @@ impl<'d> CanConfigurator<'d> {
}
});
let mut settings_flags = 0;
match self.config.error_handling_mode {
ErrorHandlingMode::Auto => {
settings_flags.set_bit(State::FLAG_AUTO_RECOVER_BUS_OFF, true);
}
ErrorHandlingMode::Local => {
settings_flags.set_bit(State::FLAG_PROPAGATE_ERRORS_TO_RX, true);
settings_flags.set_bit(State::FLAG_PROPAGATE_ERRORS_TO_TX, true);
}
ErrorHandlingMode::LocalRx => {
settings_flags.set_bit(State::FLAG_PROPAGATE_ERRORS_TO_RX, true);
}
ErrorHandlingMode::Central => (),
};
self.state.settings_flags.store(settings_flags, Ordering::Relaxed);
self.info.low.apply_config(&self.config);
self.info.low.into_mode(mode);
Can {
_phantom: PhantomData,
config: self.config,
info: self.info,
state: self.state,
_mode: mode,
properties: Properties::new(self.state),
}
}
@ -200,48 +220,92 @@ impl<'d> CanConfigurator<'d> {
}
}
trait SealedIdType: core::fmt::Debug + Clone + Copy {
type Storage: core::fmt::Debug + Clone + Copy;
fn alloc_slot(state: &'static State) -> Option<u8>;
fn dealloc_slot(state: &'static State, slot_idx: u8);
fn set_slot(state: &'static State, slot: u8, filter: Filter<Self, Self::Storage>);
}
/// A CAN ID type (standard or extended) that can be used for filter allocation.
#[allow(private_bounds)]
pub trait IdType: SealedIdType {}
impl SealedIdType for StandardId {
type Storage = u16;
fn alloc_slot(state: &'static State) -> Option<u8> {
state.standard_filter_alloc.allocate().map(|v| v as u8)
}
fn dealloc_slot(state: &'static State, slot_idx: u8) {
state.standard_filter_alloc.deallocate(slot_idx as usize);
}
fn set_slot(state: &'static State, slot: u8, filter: StandardFilter) {
state.info.low.set_standard_filter(slot, filter);
}
}
impl IdType for StandardId {}
impl SealedIdType for ExtendedId {
type Storage = u32;
fn alloc_slot(state: &'static State) -> Option<u8> {
state.extended_filter_alloc.allocate().map(|v| v as u8)
}
fn dealloc_slot(state: &'static State, slot_idx: u8) {
state.extended_filter_alloc.deallocate(slot_idx as usize);
}
fn set_slot(state: &'static State, slot: u8, filter: ExtendedFilter) {
state.info.low.set_extended_filter(slot, filter);
}
}
impl IdType for ExtendedId {}
/// Handle to an allocated filter slot.
///
/// The underlying slot is freed again when this handle is dropped.
pub struct FilterSlot<I: IdType> {
_phantom: PhantomData<I>,
state: &'static State,
slot_idx: u8,
}
impl<I: IdType> FilterSlot<I> {
/// Sets the filter slot to a given filter.
#[allow(private_interfaces)]
pub fn set(&self, filter: Filter<I, I::Storage>) {
I::set_slot(self.state, self.slot_idx, filter);
}
}
impl<I: IdType> Drop for FilterSlot<I> {
fn drop(&mut self) {
I::dealloc_slot(self.state, self.slot_idx);
}
}
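A short sketch of how these slots are meant to be used through the `Properties` allocation API introduced just below (hypothetical user code):
// Hypothetical usage sketch: reserve a slot, then program a filter into it.
let slot = can.properties().alloc_standard_filter().expect("no free standard filter slots");
slot.set(StandardFilter::accept_all_into_fifo0());
// The slot is handed back to the allocator when `slot` is dropped.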
/// Common driver properties, including filters and error counters
pub struct Properties {
info: &'static Info,
state: &'static State,
}
impl Properties {
fn new(state: &'static State) -> Self {
Self {
info: state.info,
state,
}
}
/// Allocate a free filter slot for the given ID type, if one is available.
pub fn alloc_filter<I: IdType>(&self) -> Option<FilterSlot<I>> {
I::alloc_slot(self.state).map(|slot_idx| FilterSlot {
_phantom: PhantomData,
state: self.state,
slot_idx,
})
}
/// Allocate a free standard ID filter slot, if one is available.
pub fn alloc_standard_filter(&self) -> Option<FilterSlot<StandardId>> {
self.alloc_filter()
}
/// Allocate a free extended ID filter slot, if one is available.
pub fn alloc_extended_filter(&self) -> Option<FilterSlot<ExtendedId>> {
self.alloc_filter()
}
/// Get the CAN RX error counter
pub fn rx_error_count(&self) -> u8 {

View File

@ -5,10 +5,6 @@ use embedded_can::{ExtendedId, StandardId};
use super::low_level::message_ram;
// TODO dynamicisize this
pub const STANDARD_FILTER_MAX: usize = 38;
pub const EXTENDED_FILTER_MAX: usize = 8;
/// A Standard Filter
pub type StandardFilter = Filter<StandardId, u16>;
/// An Extended Filter
@ -28,7 +24,7 @@ impl Default for ExtendedFilter {
impl StandardFilter {
/// Accept all messages in FIFO 0
pub fn accept_all_into_fifo0() -> StandardFilter {
StandardFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::StoreInFifo0,
}
@ -36,7 +32,7 @@ impl StandardFilter {
/// Accept all messages in FIFO 1
pub fn accept_all_into_fifo1() -> StandardFilter {
StandardFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::StoreInFifo1,
}
@ -44,7 +40,7 @@ impl StandardFilter {
/// Reject all messages
pub fn reject_all() -> StandardFilter {
StandardFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::Reject,
}
@ -52,7 +48,7 @@ impl StandardFilter {
/// Disable the filter
pub fn disable() -> StandardFilter {
StandardFilter::Queue {
filter: FilterType::Disabled,
action: Action::Disable,
}
@ -62,7 +58,7 @@ impl StandardFilter {
impl ExtendedFilter {
/// Accept all messages in FIFO 0
pub fn accept_all_into_fifo0() -> ExtendedFilter {
ExtendedFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::StoreInFifo0,
}
@ -70,7 +66,7 @@ impl ExtendedFilter {
/// Accept all messages in FIFO 1
pub fn accept_all_into_fifo1() -> ExtendedFilter {
ExtendedFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::StoreInFifo1,
}
@ -78,7 +74,7 @@ impl ExtendedFilter {
/// Reject all messages
pub fn reject_all() -> ExtendedFilter {
ExtendedFilter::Queue {
filter: FilterType::BitMask { filter: 0x0, mask: 0x0 },
action: Action::Reject,
}
@ -86,7 +82,7 @@ impl ExtendedFilter {
/// Disable the filter
pub fn disable() -> ExtendedFilter {
ExtendedFilter::Queue {
filter: FilterType::Disabled,
action: Action::Disable,
}
@ -169,17 +165,43 @@ impl From<Action> for message_ram::FilterElementConfig {
}
}
/// The destination for a buffer type filter.
#[derive(Clone, Copy, Debug)]
pub enum BufferDestination {
/// Message into the given dedicated Rx buffer index
DedicatedBuffer(u8),
// /// Debug message A
// DebugA,
// /// Debug message B
// DebugB,
// /// Debug message C
// DebugC,
}
/// Filter
#[derive(Clone, Copy, Debug)]
pub enum Filter<ID, UNIT>
where
ID: Copy + Clone + core::fmt::Debug,
UNIT: Copy + Clone + core::fmt::Debug,
{
/// Filter which directs the message to a FIFO based on
/// filter logic.
Queue {
/// How to match an incoming message
filter: FilterType<ID, UNIT>,
/// What to do with a matching message
action: Action,
},
/// Filter which directs a specific message ID to a dedicated
/// Rx buffer.
DedicatedBuffer {
/// The CAN ID of the message to match
id: ID,
/// The destination buffer index
destination: BufferDestination,
// Don't support TTCAN for now, we don't support event pin stuff
},
}
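For example, a filter routing a single standard ID into dedicated Rx buffer 0 could look like this (hypothetical values):
// Hypothetical usage sketch: route ID 0x123 into dedicated Rx buffer 0.
let filter: StandardFilter = Filter::DedicatedBuffer {
    id: StandardId::new(0x123).unwrap(),
    destination: BufferDestination::DedicatedBuffer(0),
};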
/// Enum over both Standard and Extended Filter IDs
@ -202,49 +224,90 @@ where
impl ActivateFilter<StandardId, u16> for message_ram::StandardFilter {
fn activate(&mut self, f: Filter<StandardId, u16>) {
match f {
Filter::Queue { filter, action } => {
let sft = filter.into();
let (sfid1, sfid2) = match filter {
FilterType::Range { to, from } => (to.as_raw(), from.as_raw()),
FilterType::DedicatedSingle(id) => (id.as_raw(), id.as_raw()),
FilterType::DedicatedDual(id1, id2) => (id1.as_raw(), id2.as_raw()),
FilterType::BitMask { filter, mask } => (filter, mask),
FilterType::Disabled => (0x0, 0x0),
};
let sfec = action.into();
self.write(|w| {
unsafe { w.sfid1().bits(sfid1).sfid2().bits(sfid2) }
.sft()
.set_filter_type(sft)
.sfec()
.set_filter_element_config(sfec)
});
}
Filter::DedicatedBuffer { id, destination } => {
self.write(|w| {
w.sfec()
.set_filter_element_config(message_ram::FilterElementConfig::StoreInRxBuffer);
// Filter type is ignored for dedicated buffer filters.
w.sft().set_filter_type(message_ram::FilterType::RangeFilter);
unsafe { w.sfid1().bits(id.as_raw()) };
let (buf_idx, store_flag) = match destination {
BufferDestination::DedicatedBuffer(buf_idx) => (buf_idx, 0),
// BufferDestination::DebugA => (0, 1),
// BufferDestination::DebugB => (0, 2),
// BufferDestination::DebugC => (0, 3),
};
let sfid2_bits = (buf_idx as u16 & 0b111111) | (store_flag << 9);
unsafe { w.sfid2().bits(sfid2_bits) };
w
});
}
}
}
}
impl ActivateFilter<ExtendedId, u32> for message_ram::ExtendedFilter {
fn activate(&mut self, f: Filter<ExtendedId, u32>) {
match f {
Filter::Queue { filter, action } => {
let eft = filter.into();
let (efid1, efid2) = match filter {
FilterType::Range { to, from } => (to.as_raw(), from.as_raw()),
FilterType::DedicatedSingle(id) => (id.as_raw(), id.as_raw()),
FilterType::DedicatedDual(id1, id2) => (id1.as_raw(), id2.as_raw()),
FilterType::BitMask { filter, mask } => (filter, mask),
FilterType::Disabled => (0x0, 0x0),
};
let efec = action.into();
self.write(|w| {
unsafe { w.efid1().bits(efid1).efid2().bits(efid2) }
.eft()
.set_filter_type(eft)
.efec()
.set_filter_element_config(efec)
});
}
Filter::DedicatedBuffer { id, destination } => {
self.write(|w| {
w.efec()
.set_filter_element_config(message_ram::FilterElementConfig::StoreInRxBuffer);
// Filter type is ignored for dedicated buffer filters.
w.eft().set_filter_type(message_ram::FilterType::RangeFilter);
unsafe { w.efid1().bits(id.as_raw()) };
let (buf_idx, store_flag) = match destination {
BufferDestination::DedicatedBuffer(buf_idx) => (buf_idx, 0),
// BufferDestination::DebugA => (0, 1),
// BufferDestination::DebugB => (0, 2),
// BufferDestination::DebugC => (0, 3),
};
let efid2_bits = (buf_idx as u32 & 0b111111) | (store_flag << 9);
unsafe { w.efid2().bits(efid2_bits) };
w
});
}
}
}
}

View File

@ -1,8 +1,11 @@
use core::{marker::PhantomData, sync::atomic::Ordering};
use bit_field::BitField;
use stm32_metapac::can::regs::Ir;
use crate::{can::Instance, interrupt};
use super::State;
/// Interrupt handler channel 0.
pub struct IT0InterruptHandler<T: Instance> {
@ -59,40 +62,69 @@ impl<T: Instance> interrupt::typelevel::Handler<T::IT0Interrupt> for IT0Interrup
let regs = T::info().low.regs;
let ir = regs.ir().read();
let mut ir_clear: Ir = Ir(0);
// Sync status + wake on either Tx complete or Tx cancel.
if ir.tc() || ir.tcf() {
ir_clear.set_tc(true);
ir_clear.set_tcf(true);
T::state().sync_tx_status();
T::state().tx_done_waker.wake();
}
// TX event FIFO new element
if ir.tefn() {
ir_clear.set_tefn(true);
// TODO wake
}
// RX FIFO new element
if ir.rfn(0) {
ir_clear.set_rfn(0, true);
T::state().rx_fifo_waker[0].wake();
}
if ir.rfn(1) {
ir_clear.set_rfn(1, true);
T::state().rx_fifo_waker[1].wake();
}
// RX Dedicated buffer stored
if ir.drx() {
ir_clear.set_drx(true);
T::state().rx_dedicated_waker.wake();
}
// Bus_Off
if ir.bo() {
ir_clear.set_bo(true);
if regs.psr().read().bo() {
let state = T::state();
let settings_flags = state.settings_flags.load(Ordering::Relaxed);
if settings_flags.get_bit(State::FLAG_AUTO_RECOVER_BUS_OFF) {
// Initiate bus-off recovery sequence by resetting CCCR.INIT
regs.cccr().modify(|w| w.set_init(false));
} else {
state
.state_flags
.fetch_or(1 << State::STATE_FLAG_BUS_OFF, Ordering::Relaxed);
}
state.err_waker.wake();
if settings_flags.get_bit(State::FLAG_PROPAGATE_ERRORS_TO_RX) {
state.rx_dedicated_waker.wake();
state.rx_fifo_waker[0].wake();
state.rx_fifo_waker[1].wake();
}
if settings_flags.get_bit(State::FLAG_PROPAGATE_ERRORS_TO_TX) {
state.tx_done_waker.wake();
}
}
// Bus Off status flag cleared by error recovery code.
}
regs.ir().write_value(ir_clear);
}
}

View File

@ -1,2 +0,0 @@
//! Limits for the BOSCH M_CAN peripheral.
//! This is used in the STM32H7 series.

View File

@ -1,2 +0,0 @@
//! Limits for the simplified variant of the M_CAN peripheral.
//! This is used in STM32G4 and others.

View File

@ -1,8 +1,12 @@
use stm32_metapac::can::vals::Tfqm;
use super::{
message_ram::{MessageRam, MessageRamSegment},
CanLowLevel, LoopbackMode, TimestampSource,
};
use crate::can::config::{
CanFdMode, DataBitTiming, FdCanConfig, GlobalFilter, MessageRamConfig, NominalBitTiming, TxBufferMode,
};
/// Configuration.
/// Can only be called when in config mode (CCCR.INIT=1, CCCR.CCE=1).
@ -33,18 +37,21 @@ impl CanLowLevel {
/// Applies the settings of a new FdCanConfig. See [`FdCanConfig`].
pub fn apply_config(&self, config: &FdCanConfig) {
self.set_tx_buffer_mode(config.tx_buffer_mode);
// Does not work for FDCAN, internal timer has inconsistent timebase.
self.set_timestamp_source(TimestampSource::Internal, 0);
// TXBTIE and TXBCIE bits need to be set for each tx element in order for
// interrupts to fire.
self.regs.txbtie().write(|w| w.0 = 0xffff_ffff);
self.regs.txbcie().write(|w| w.0 = 0xffff_ffff);
self.regs.ie().modify(|w| {
w.set_rfne(0, true); // Rx Fifo 0 New Msg
w.set_rfne(1, true); // Rx Fifo 1 New Msg
w.set_drxe(true); // Rx Dedicated Buffer New Msg
w.set_tce(true); // Tx Complete
w.set_tcfe(true); // Tx Cancel Finished
w.set_boe(true); // Bus-Off Status Changed
});
self.regs.ile().modify(|w| {
@ -62,6 +69,15 @@ impl CanLowLevel {
self.set_global_filter(config.global_filter);
}
/// Sets the Tx buffer mode (FIFO or priority queue).
#[inline]
pub fn set_tx_buffer_mode(&self, mode: TxBufferMode) {
let mode = match mode {
TxBufferMode::Fifo => Tfqm::FIFO,
TxBufferMode::Priority => Tfqm::QUEUE,
};
self.regs.txbc().modify(|v| v.set_tfqm(mode));
}
/// Configures the global filter settings
#[inline]
pub fn set_global_filter(&self, filter: GlobalFilter) {
@ -202,11 +218,11 @@ impl CanLowLevel {
self.regs.cccr().modify(|w| w.set_test(enabled));
}
//#[inline]
//pub fn set_clock_stop(&self, enabled: bool) {
// self.regs.cccr().modify(|w| w.set_csr(enabled));
// while self.regs.cccr().read().csa() != enabled {}
//}
#[inline]
pub fn set_restricted_operations(&self, enabled: bool) {

View File

@ -229,5 +229,6 @@ pub enum FilterElementConfig {
SetPriorityAndStoreInFifo0 = 0b101,
/// Flag and store message in FIFO 1
SetPriorityAndStoreInFifo1 = 0b110,
/// Store a matching message in Rx Buffer or as debug message
StoreInRxBuffer = 0b111,
}

View File

@ -285,7 +285,7 @@ impl<'a> EFC_W<'a> {
}
}
pub(crate) struct Marker(u8);
impl From<Event> for Marker {
fn from(e: Event) -> Marker {
match e {

View File

@ -1,10 +1,15 @@
// This module contains a more or less full abstraction for message ram,
// we don't want to remove dead code as it might be used in future
// feature implementations.
#![allow(dead_code)]
use core::marker::PhantomData;
use element::tx_event::TxEventElement;
use volatile_register::RW;
mod element;
pub(crate) use element::enums::{DataLength, FilterElementConfig, FilterType, FrameFormat, IdType};
pub(crate) use element::{
filter_extended::ExtendedFilter, filter_standard::StandardFilter, rx_buffer::RxFifoElementHeader,
tx_buffer::TxBufferElementHeader,
@ -347,7 +352,12 @@ impl MessageRamConfig {
let total_size_bytes = total_size_words << 2;
if let Some(avail) = segment.available_space {
assert!(
total_size_bytes <= avail,
"CAN RAM config exceeded available space! ({} allocated, {} available)",
total_size_bytes,
avail
);
}
// Standard ID filter config // Standard ID filter config

View File

@ -1,20 +1,14 @@
use core::convert::Infallible;
use cfg_if::cfg_if;
use message_ram::{HeaderElement, RxFifoElementHeader};
use stm32_metapac::can::regs::{Ndat1, Ndat2, Txbcr};
use util::{RxElementData, TxElementData};
use crate::can::frame::Header;
enums::BusError,
frame::{CanHeader, Header},
};
mod configuration;
mod filter;
pub(crate) mod message_ram;
pub(crate) mod util;
/// Loopback Mode
#[derive(Clone, Copy, Debug)]
@ -25,13 +19,22 @@ pub(crate) enum LoopbackMode {
}
#[repr(u8)]
#[allow(dead_code)]
pub(crate) enum TimestampSource {
/// Timestamp always set to 0
Zero = 0b00,
Internal = 0b01,
/// tim3.cnt[0:15] used as source
External = 0b11,
}
#[cfg_attr(feature = "defmt", derive(defmt::Format))]
pub(crate) struct RxLevels {
pub rx_0_level: u8,
pub rx_1_level: u8,
pub buf_mask: u64,
}
pub(crate) struct CanLowLevel {
pub(crate) regs: crate::pac::can::Fdcan,
pub(crate) msgram: crate::pac::fdcanram::Fdcanram,
@ -89,16 +92,18 @@ impl CanLowLevel {
.put(element);
}
#[allow(dead_code)]
pub fn tx_buffer_add(&self, idx: u8, header: &Header, data: &[u8], before_add: impl FnOnce(u8)) -> Option<u8> {
// TODO validate that only dedicated buffers are passed.
// TODO change to bit mask
if self.regs.txbrp().read().trp(idx as usize) {
// Transmit already pending for this buffer
return None;
}
before_add(idx);
// Write message to message RAM
self.tx_element_set(idx, header, data);
@ -108,7 +113,8 @@ impl CanLowLevel {
Some(idx)
}
#[allow(dead_code)]
pub fn tx_queue_add(&self, header: &Header, data: &[u8], before_add: impl FnOnce(u8)) -> Option<u8> {
// We could use TXFQS.TFQPI here (same as for FIFO mode), but this
// causes us to lose slots on TX cancellations.
// Instead we find the first slot in the queue and insert the message
@ -126,6 +132,8 @@ impl CanLowLevel {
return None;
}
before_add(free_idx);
// Write message to message RAM
self.tx_element_set(free_idx, header, data);
@ -135,7 +143,7 @@ impl CanLowLevel {
Some(free_idx)
}
pub fn tx_fifo_add(&self, header: &Header, data: &[u8], before_add: impl FnOnce(u8)) -> Option<u8> {
let status = self.regs.txfqs().read();
if status.tfqf() {
@ -145,6 +153,8 @@ impl CanLowLevel {
let free_idx = status.tfqpi();
before_add(free_idx);
// Write message to message RAM
self.tx_element_set(free_idx, header, data);
@ -166,31 +176,26 @@ impl CanLowLevel {
}
impl CanLowLevel {
pub fn rx_buffer_read(&self, buffer_mask: u64) -> Option<(u8, RxElementData)> {
let ndat = self.get_ndat_mask();
let available = ndat & buffer_mask;
if available == 0 {
return None;
}
// Pick the lowest-indexed dedicated buffer which has new data.
let buffer_idx = available.trailing_zeros();
let element = self.message_ram.rx_buffer.get_mut(buffer_idx as usize);
let ret = RxElementData::extract(element);
// Clear the "new data" flag for the buffer we just read.
match buffer_idx {
idx if idx < 32 => self.regs.ndat1().write_value(Ndat1(1 << idx)),
idx if idx < 64 => self.regs.ndat2().write_value(Ndat2(1 << (idx - 32))),
_ => panic!(),
};
Some((buffer_idx as u8, ret))
}
pub fn rx_fifo_read(&self, fifo_num: u8) -> Option<RxElementData> {
@ -209,44 +214,58 @@ impl CanLowLevel {
Some(ret)
}
#[inline]
pub fn get_ndat_mask(&self) -> u64 {
(self.regs.ndat1().read().nd() as u64) | ((self.regs.ndat2().read().nd() as u64) << 32)
}
#[inline]
pub fn get_rx_levels(&self) -> RxLevels {
RxLevels {
rx_0_level: self.regs.rxfs(0).read().ffl(),
rx_1_level: self.regs.rxfs(1).read().ffl(),
buf_mask: self.get_ndat_mask(),
}
}
} }
/// Error stuff
impl CanLowLevel {
//fn reg_to_error(value: u8) -> Option<BusError> {
// match value {
// //0b000 => None,
// 0b001 => Some(BusError::Stuff),
// 0b010 => Some(BusError::Form),
// 0b011 => Some(BusError::Acknowledge),
// 0b100 => Some(BusError::BitRecessive),
// 0b101 => Some(BusError::BitDominant),
// 0b110 => Some(BusError::Crc),
// //0b111 => Some(BusError::NoError),
// _ => None,
// }
//}
//pub fn curr_error(&self) -> Option<BusError> {
// let err = { self.regs.psr().read() };
// if err.bo() {
// return Some(BusError::BusOff);
// } else if err.ep() {
// return Some(BusError::BusPassive);
// } else if err.ew() {
// return Some(BusError::BusWarning);
// } else {
// cfg_if! {
// if #[cfg(can_fdcan_h7)] {
// let lec = err.lec();
// } else {
// let lec = err.lec().to_bits();
// }
// }
// if let Some(err) = Self::reg_to_error(lec) {
// return Some(err);
// }
// }
// None
//}
}

View File

@ -1,12 +1,6 @@
use core::slice;
use volatile_register::RW;
use crate::can::frame::Header;
use super::message_ram::{DataLength, FrameFormat, HeaderElement, IdType, RxFifoElementHeader, TxBufferElementHeader};
DataLength, Event, FrameFormat, HeaderElement, IdType, RxFifoElementHeader, TxBufferElementHeader,
};
fn make_id(id: u32, extended: bool) -> embedded_can::Id {
if extended {
@ -17,6 +11,7 @@ fn make_id(id: u32, extended: bool) -> embedded_can::Id {
}
}
#[allow(dead_code)]
pub(crate) struct TxElementData {
pub header: Header,
pub data: [u8; 64],
@ -65,6 +60,7 @@ impl TxElementData {
}
}
#[allow(dead_code)]
pub(crate) struct RxElementData {
pub header: Header,
pub data: [u8; 64],

View File

@ -4,47 +4,48 @@ use core::{
future::poll_fn,
marker::PhantomData,
sync::atomic::{AtomicU32, Ordering},
task::{Poll, Waker},
};
use bit_field::BitField;
use configurator::Properties;
use embassy_sync::waitqueue::AtomicWaker;
use low_level::{util::RxElementData, RxLevels};
use peripheral::Info;
use util::AtomicResourceAllocator;
use crate::can::common::CanMode;
use super::{BaseEnvelope, BaseFrame, BusOff, OperatingMode};
pub mod config;
pub mod filter;
pub(crate) mod configurator;
pub(crate) mod interrupt;
#[cfg_attr(can_fdcan_h7, path = "limits/m_can.rs")]
#[cfg_attr(not(can_fdcan_h7), path = "limits/simplified.rs")]
mod limits;
pub(crate) mod low_level;
pub(crate) mod peripheral;
mod util;
pub(self) use interrupt::{IT0InterruptHandler, IT1InterruptHandler};
pub(self) use peripheral::*;
pub(self) use util::calc_ns_per_timer_tick;
/// FDCAN Instance
pub struct Can<'d, M> {
_phantom: PhantomData<&'d M>,
config: crate::can::fd::config::FdCanConfig,
info: &'static Info,
state: &'static State,
_mode: OperatingMode,
properties: Properties,
}
/// The CANFD/M_CAN peripheral has two configurable Rx FIFOs.
#[derive(Debug, Clone, Copy)]
pub enum RxFifo {
/// FIFO 0
F0 = 0,
/// FIFO 1
F1 = 1,
}
impl<'d, M: CanMode> Can<'d, M> {
@ -53,70 +54,158 @@ impl<'d, M: CanMode> Can<'d, M> {
&self.properties
}
/// Waits until a bus off error occurs.
///
/// Useful in conjunction with ErrorHandlingMode::Central if you
/// have a dedicated bus error handling task.
pub async fn wait_bus_off(&self) -> BusOff {
poll_fn(|cx| {
self.state.err_waker.register(cx.waker());
if self
.state
.state_flags
.load(Ordering::Relaxed)
.get_bit(State::STATE_FLAG_BUS_OFF)
{
Poll::Ready(BusOff)
} else {
Poll::Pending
}
})
.await
}
/// Initiates the Bus Off error recovery procedure
pub fn recover_bus_off(&self) {
self.info.low.regs.cccr().modify(|w| w.set_init(false));
// We only need to wait for the command to make it across to the
// CAN clock domain, busywaiting should be fine here.
while self.info.low.regs.cccr().read().init() {}
self.state
.state_flags
.fetch_and(!(1 << State::STATE_FLAG_BUS_OFF), Ordering::Relaxed);
}
}
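A sketch of the kind of central error handling task these two functions are intended for (hypothetical user code, assuming the driver was configured with `ErrorHandlingMode::Central`):
// Hypothetical usage sketch: dedicated bus-off recovery task.
loop {
    can.wait_bus_off().await;
    // Optionally back off before re-joining the bus.
    embassy_time::Timer::after(embassy_time::Duration::from_millis(100)).await;
    can.recover_bus_off();
}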
/// Rx
impl<'d, M: CanMode> Can<'d, M> {
async fn rx_inner<T>(
&self,
inner: impl Fn(&Waker) -> Option<(T, RxElementData)>,
) -> Result<(T, BaseEnvelope<M>), BusOff> {
poll_fn(|cx| {
let bus_off = self
.state
.state_flags
.load(Ordering::Relaxed)
.get_bit(State::STATE_FLAG_BUS_OFF);
let propagate_rx = self
.state
.settings_flags
.load(Ordering::Relaxed)
.get_bit(State::FLAG_PROPAGATE_ERRORS_TO_RX);
if bus_off && propagate_rx {
return Poll::Ready(Err(BusOff));
}
match inner(cx.waker()) {
Some((passthrough, element)) => {
let ts = self
.info
.calc_timestamp(self.state.ns_per_timer_tick, element.timestamp);
let len = element.header.len() as usize;
let data = &element.data[0..len];
// This should only fail when the data doesn't fit into the `Data` type,
// but we validate this at driver creation time. This should never fail.
let frame = BaseFrame::new(element.header, data).unwrap();
Poll::Ready(Ok((passthrough, BaseEnvelope { ts, frame })))
}
None => Poll::Pending,
}
})
.await
}
/// Dedicated Rx buffers can be used as destinations of simple single ID
/// filters.
pub fn alloc_dedicated_rx(&self) -> Option<u8> {
self.state.dedicated_rx_alloc.allocate().map(|v| v as u8)
}
/// Receive a frame from any buffer or FIFO.
pub async fn rx_any(&self) -> Result<BaseEnvelope<M>, BusOff> {
self.rx_inner(|waker| {
self.state.rx_fifo_waker[0].register(waker);
self.state.rx_fifo_waker[1].register(waker);
self.state.rx_dedicated_waker.register(waker);
match self.info.low.get_rx_levels() {
RxLevels { rx_0_level: v, .. } if v > 0 => self.info.low.rx_fifo_read(0).map(|v| ((), v)),
RxLevels { rx_1_level: v, .. } if v > 0 => self.info.low.rx_fifo_read(1).map(|v| ((), v)),
RxLevels { buf_mask: v, .. } if v != 0 => self.info.low.rx_buffer_read(u64::MAX).map(|v| ((), v.1)),
_ => None,
}
})
.await
.map(|v| v.1)
}
/// Receive a frame from the given FIFO
///
/// This blocks until either:
/// * A frame is available
/// * Depending on the driver error config, a BusOff error occurs.
pub async fn rx_fifo(&self, fifo: RxFifo) -> Result<BaseEnvelope<M>, BusOff> {
self.rx_inner(|waker| {
self.state.rx_fifo_waker[fifo as usize].register(waker);
self.info.low.rx_fifo_read(fifo as u8).map(|e| ((), e))
})
.await
.map(|v| v.1)
}
/// Receive a frame from the first of the available dedicated Rx buffers,
/// selected by `buffer_mask`.
/// Returns the index of the dedicated buffer the frame was pulled from,
/// along with the envelope.
///
/// This blocks until either:
/// * A frame is available
/// * Depending on the driver error config, a BusOff error occurs.
#[cfg(can_fdcan_h7)]
pub async fn rx_buffers(&self, buffer_mask: u64) -> Result<(u8, BaseEnvelope<M>), BusOff> {
self.rx_inner(|waker| {
self.state.rx_dedicated_waker.register(waker);
self.info.low.rx_buffer_read(buffer_mask)
})
.await
}
/// Receive a frame from the indicated dedicated Rx buffer index.
///
/// This blocks until either:
/// * A frame is available
/// * Depending on the driver error config, a BusOff error occurs.
#[cfg(can_fdcan_h7)]
pub async fn rx_buffer(&self, idx: u8) -> Result<BaseEnvelope<M>, BusOff> {
self.rx_inner(|waker| {
self.state.rx_dedicated_waker.register(waker);
self.info.low.rx_buffer_read(1u64 << (idx as usize))
})
.await
.map(|v| v.1)
} }
}
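A sketch of how the receive calls above are used (hypothetical user code; `handle_frame` is a placeholder for application logic):
// Hypothetical usage sketch: drain FIFO 0 and react to bus-off.
match can.rx_fifo(RxFifo::F0).await {
    Ok(envelope) => handle_frame(envelope.frame),
    Err(BusOff) => { /* recover or propagate, depending on the error handling mode */ }
}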
/// Tx
impl<'d, M: CanMode> Can<'d, M> {
/// Queues the message to be sent but exerts backpressure.
/// If the given queue is full, this call will wait asynchronously
@ -125,12 +214,25 @@ impl<'d, M: CanMode> Can<'d, M> {
/// of the transmission.
pub async fn tx_queue(&self, frame: &BaseFrame<M>) -> TxRef {
poll_fn(|cx| {
// This function returns Pending if there are no free slots;
// we register to the Tx Done waker to get notified when slots
// free up.
self.state.tx_done_waker.register(cx.waker());
let before_add = |idx: u8| {
self.state.sync_tx_status();
let prev = self.state.tx_slot_busy_mask.fetch_or(1 << idx, Ordering::Relaxed);
// Debug assertion:
// `idx` from the TX FIFO add logic should always not have a pending TX.
// Therefore, after calling `sync_tx_status`, the bit should never be set.
assert!(prev & (1 << idx) == 0);
};
// TODO call queue function instead if in queue mode, it is more slot efficient.
match self.info.low.tx_fifo_add(frame.header(), frame.data(), before_add) {
// Frame was successfully added to slot.
Some(slot_idx) => {
let generation = self.state.tx_slot_generations[slot_idx as usize].load(Ordering::Relaxed);
Poll::Ready(TxRef {
slot_idx,
@ -147,15 +249,6 @@ impl<'d, M: CanMode> Can<'d, M> {
}
}
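Usage sketch for the Tx path (hypothetical user code): queue a frame, then await the returned `TxRef`:
// Hypothetical usage sketch: send a frame and wait for it to go out on the bus.
let mut tx_ref = can.tx_queue(&frame).await;
// wait() resolves once the frame has been transmitted; with Local error
// handling it returns Err(BusOff) instead of blocking forever.
let _ = tx_ref.wait().await;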
// == TX
// Buffer add req -> TXBAR
// Buffer pending, TXBRP AS, TXBTO DE, TXBCF DE
//
// Buffer transmitted, TXBTO AS, TXBRP DE
//
// Buffer cancel req -> TXBCR
// Buffer failed, TXBCF AS, TXBRP DE
/// Reference to a frame which is in the process of being transmitted.
pub struct TxRef {
slot_idx: u8,
@ -166,10 +259,26 @@ pub struct TxRef {
impl TxRef {
/// Waits for transmission to finish.
pub async fn wait(&mut self) -> Result<(), BusOff> {
poll_fn(|cx| {
if self.poll_done() {
return Poll::Ready(Ok(()));
}
let bus_off = self
.state
.state_flags
.load(Ordering::Relaxed)
.get_bit(State::STATE_FLAG_BUS_OFF);
let propagate_tx = self
.state
.settings_flags
.load(Ordering::Relaxed)
.get_bit(State::FLAG_PROPAGATE_ERRORS_TO_TX);
if bus_off && propagate_tx {
// If we are in BUS_OFF and the Tx propagation bit is set, we return a bus off error.
Poll::Ready(Err(BusOff))
} else {
self.state.tx_done_waker.register(cx.waker());
Poll::Pending
@ -187,7 +296,7 @@ impl TxRef {
// Check the generation for the current slot; if it is different,
// the slot finished.
let current_generation = self.state.tx_slot_generations[self.slot_idx as usize].load(Ordering::Relaxed);
if current_generation != self.generation {
// Store completion state to handle subsequent calls.
self.completed = true;
@ -213,65 +322,117 @@ impl TxRef {
pub(self) struct State {
pub info: &'static Info,
// Wakers
pub rx_dedicated_waker: AtomicWaker,
pub rx_fifo_waker: [AtomicWaker; 2],
pub tx_done_waker: AtomicWaker,
pub err_waker: AtomicWaker,
/// State flags
pub state_flags: AtomicU32,
/// Settings flags
pub settings_flags: AtomicU32,
/// Bitmask where each bit represents one TX slot.
///
/// After transmission is done or cancelled, a bit will be set in
/// either the "Tx Occurred" or "Tx Cancelled" register, and the
/// "Tx Pending" bit will be cleared.
///
/// We need to bump the generation for the Tx slot exactly once,
/// and there is no way to clear the "Tx Occurred" or "Tx Cancelled"
/// registers without transmitting a new message in the slot.
///
/// The `tx_slot_busy` bitmask is set when a Tx slot is populated,
/// and cleared when the status bit for the slot is handled
/// appropriately. This gets us "exactly once" acknowledgement of
/// each slot.
pub tx_slot_busy_mask: AtomicU32,
/// Each Tx slot has a generation number. This generation number is
/// incremented each time a transmission is finished.
///
/// If a TxRef referred to only the slot index, we would have no way
/// of knowing if it is referring to the current message in the Tx
/// slot or if it is referring to a previous usage of it.
///
/// To solve this issue, a TxRef always refers to BOTH the index and
/// the generation.
///
/// ## Access
/// * Incremented on Tx done/cancel
/// * Loaded by `TxRef` to check if generation matches current
#[cfg(can_fdcan_h7)]
pub tx_slot_generations: [AtomicU32; 32],
#[cfg(not(can_fdcan_h7))]
pub tx_slot_generations: [AtomicU32; 3],
pub standard_filter_alloc: AtomicResourceAllocator<128, 4>,
pub extended_filter_alloc: AtomicResourceAllocator<64, 2>,
pub dedicated_rx_alloc: AtomicResourceAllocator<64, 2>,
pub ns_per_timer_tick: u64,
}
impl State {
const FLAG_AUTO_RECOVER_BUS_OFF: usize = 0;
const FLAG_PROPAGATE_ERRORS_TO_TX: usize = 1;
const FLAG_PROPAGATE_ERRORS_TO_RX: usize = 2;
/// Stored in the status flag because reading the error flag clears counters.
/// Maintained by the interrupts.
const STATE_FLAG_BUS_OFF: usize = 0;
const fn new(info: &'static Info) -> Self {
Self {
info,
// TODO set settings
settings_flags: AtomicU32::new(0),
state_flags: AtomicU32::new(0),
rx_dedicated_waker: AtomicWaker::new(),
rx_fifo_waker: [AtomicWaker::new(), AtomicWaker::new()],
tx_done_waker: AtomicWaker::new(),
tx_busy: AtomicU32::new(0),
tx_generations: [
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
AtomicU32::new(0),
],
err_waker: AtomicWaker::new(),
tx_slot_busy_mask: AtomicU32::new(0),
#[cfg(can_fdcan_h7)]
tx_slot_generations: util::new_atomic_u32_array(),
#[cfg(not(can_fdcan_h7))]
tx_slot_generations: util::new_atomic_u32_array(),
standard_filter_alloc: AtomicResourceAllocator::new(),
extended_filter_alloc: AtomicResourceAllocator::new(),
dedicated_rx_alloc: AtomicResourceAllocator::new(),
ns_per_timer_tick: 0,
}
}
/// Can only be called while the state mutex is locked!
fn sync_tx_status(&self) {
critical_section::with(|_| {
// When a slot is transmitted, a bit is set in either of these two registers.
let tx_cancel = self.info.low.regs.txbcf().read().0;
let tx_finish = self.info.low.regs.txbto().read().0;
// Only slots which are currently marked as busy are taken into account.
let tx_busy = self.tx_slot_busy_mask.load(Ordering::Relaxed);
// Slots which have a cancel/finish bit and are marked as busy have not
// been previously acked.
let to_bump = (tx_cancel | tx_finish) & tx_busy;
for n in 0..32 {
if to_bump.get_bit(n) {
self.tx_slot_generations[n].fetch_add(1, Ordering::Relaxed);
}
}
// Generation should only be bumped once per slot finish, clear the bits we handled.
self.tx_slot_busy_mask.fetch_and(!to_bump, Ordering::Relaxed);
});
}
}

View File

@ -1,12 +1,10 @@
use core::sync::atomic::AtomicU32;
use embassy_hal_internal::PeripheralRef;
use embassy_sync::waitqueue::AtomicWaker;
use super::{low_level::CanLowLevel, State};
use crate::interrupt::typelevel::Interrupt;
use crate::{can::Timestamp, peripherals, rcc::RccPeripheral};
#[allow(dead_code)]
pub(super) struct Info {
pub low: CanLowLevel,
pub interrupt0: crate::interrupt::Interrupt,
@ -45,7 +43,6 @@ pub(super) trait SealedInstance {
unsafe fn mut_info() -> &'static mut Info;
fn state() -> &'static State;
unsafe fn mut_state() -> &'static mut State;
fn calc_timestamp(ns_per_timer_tick: u64, ts_val: u16) -> Timestamp;
}
/// Instance trait
@ -95,23 +92,6 @@ macro_rules! impl_fdcan {
unsafe { peripherals::$inst::mut_state() }
}
#[cfg(feature = "time")]
fn calc_timestamp(ns_per_timer_tick: u64, ts_val: u16) -> Timestamp {
let now_embassy = embassy_time::Instant::now();
if ns_per_timer_tick == 0 {
return now_embassy;
}
let cantime = { Self::info().low.regs.tscv().read().tsc() };
let delta = cantime.overflowing_sub(ts_val).0 as u64;
let ns = ns_per_timer_tick * delta as u64;
now_embassy - embassy_time::Duration::from_nanos(ns)
}
#[cfg(not(feature = "time"))]
fn calc_timestamp(_ns_per_timer_tick: u64, ts_val: u16) -> Timestamp {
ts_val
}
}
foreach_interrupt!(

View File

@ -1,3 +1,10 @@
use core::{
marker::PhantomData,
sync::atomic::{AtomicU32, Ordering},
};
use bit_field::BitField;
use super::Info;
pub fn calc_ns_per_timer_tick(
@ -17,3 +24,68 @@ pub fn calc_ns_per_timer_tick(
_ => 0,
}
}
/// There is no good way from what I can tell to initialize a
/// `[AtomicU32; N]` for an N const generic parameter.
/// Zero-initializing is sound here because an atomic has the same
/// in-memory representation as its underlying type, and zero is a
/// valid value for `u32`.
pub const fn new_atomic_u32_array<const N: usize>() -> [AtomicU32; N] {
static_assertions::const_assert!(core::mem::size_of::<AtomicU32>() == 4);
unsafe { core::mem::zeroed() }
}
/// Lock-free allocator for up to `N` resource slots, backed by `S` atomic bitmask words.
pub struct AtomicResourceAllocator<const N: usize, const S: usize> {
_phantom: PhantomData<[(); N]>,
atomics: [AtomicU32; S],
}
impl<const N: usize, const S: usize> AtomicResourceAllocator<N, S> {
pub const fn new() -> Self {
AtomicResourceAllocator {
_phantom: PhantomData,
atomics: new_atomic_u32_array(),
}
}
pub fn allocate(&self) -> Option<usize> {
for n in 0..S {
let mut val = self.atomics[n].load(Ordering::Relaxed);
loop {
// Index of the first free (zero) bit in this word.
let free_idx = val.trailing_ones() as usize;
if free_idx == 32 {
break;
}
let slot_idx = n * 32 + free_idx;
if slot_idx >= N {
break;
}
let mask = 1 << free_idx;
match self.atomics[n].compare_exchange(val, val | mask, Ordering::Relaxed, Ordering::Relaxed) {
Ok(_) => return Some(slot_idx),
// Lost the race; retry against the updated word.
Err(actual) => val = actual,
}
}
}
None
}
pub fn deallocate(&self, index: usize) {
let atomic_idx = index / 32;
let bit_idx = index % 32;
let prev = self.atomics[atomic_idx].fetch_and(!(1 << bit_idx), Ordering::Relaxed);
assert!(
prev.get_bit(bit_idx),
"attempted to deallocate a non allocated resource!"
);
}
}
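Behavioural sketch of the allocator (hypothetical illustration, using the 64-slot/2-word shape used for the extended filter and dedicated Rx tables):
// Hypothetical usage sketch:
let alloc: AtomicResourceAllocator<64, 2> = AtomicResourceAllocator::new();
let a = alloc.allocate(); // Some(0): lowest free slot
let b = alloc.allocate(); // Some(1)
alloc.deallocate(0);
let c = alloc.allocate(); // Some(0) again: freed slots are reused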

View File

@ -14,7 +14,7 @@ pub use super::common::{BufferedCanReceiver, BufferedCanSender};
pub use fd::configurator::CanConfigurator;
pub use fd::interrupt::{IT0InterruptHandler, IT1InterruptHandler};
pub use fd::peripheral::*;
pub use fd::{Can, RxFifo};
/// Timestamp for incoming packets. Use Embassy time when enabled. /// Timestamp for incoming packets. Use Embassy time when enabled.
#[cfg(feature = "time")] #[cfg(feature = "time")]

View File

@ -57,12 +57,14 @@ impl Header {
Header { id, len, flags }
}
/// Sets the CAN FD flags for the header
pub fn set_can_fd(mut self, flag: bool, brs: bool) -> Header {
self.flags.set_bit(Self::FLAG_FDCAN, flag);
self.flags.set_bit(Self::FLAG_BRS, brs);
self
}
/// Sets the error passive indicator bit
pub fn set_esi(mut self, flag: bool) -> Header {
self.flags.set_bit(Self::FLAG_ESI, flag);
self
@ -83,16 +85,17 @@ impl Header {
self.flags.get_bit(Self::FLAG_RTR)
}
/// Has error passive indicator bit set
pub fn esi(&self) -> bool {
self.flags.get_bit(Self::FLAG_ESI)
}
/// Whether this is an FDCAN (CAN FD) frame
pub fn fdcan(&self) -> bool {
self.flags.get_bit(Self::FLAG_FDCAN)
}
/// Whether this frame uses bit rate switching (Flexible Data Rate)
pub fn bit_rate_switching(&self) -> bool {
self.flags.get_bit(Self::FLAG_BRS)
}