stm32/wpan: convert to new ipcc

xoviat 2023-06-17 12:00:33 -05:00
parent b0a2f0c4fe
commit 6b5d55eb29
9 changed files with 330 additions and 335 deletions


@ -1,10 +1,11 @@
use core::mem::MaybeUninit;
use core::ptr;
use embassy_stm32::ipcc::Ipcc;
use crate::cmd::CmdPacket;
use crate::cmd::{Cmd, CmdPacket, CmdSerial};
use crate::consts::TlPacketType;
use crate::evt::EvtBox;
use crate::evt::{EvtBox, EvtPacket};
use crate::tables::BleTable;
use crate::unsafe_linked_list::LinkedListNode;
use crate::{
@ -25,45 +26,26 @@ impl Ble {
phci_acl_data_buffer: HCI_ACL_DATA_BUFFER.as_mut_ptr().cast(),
});
}
Ipcc::c1_set_rx_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL, true);
}
pub(super) fn evt_handler() {
unsafe {
while let Some(node_ptr) = LinkedListNode::remove_head(EVT_QUEUE.as_mut_ptr()) {
let event = EvtBox::new(node_ptr.cast());
EVT_CHANNEL.try_send(event).unwrap();
/// `HW_IPCC_BLE_EvtNot`
pub async fn read() -> EvtBox {
Ipcc::receive(channels::cpu2::IPCC_BLE_EVENT_CHANNEL, || unsafe {
if let Some(node_ptr) = LinkedListNode::remove_head(EVT_QUEUE.as_mut_ptr()) {
Some(EvtBox::new(node_ptr.cast()))
} else {
None
}
}
Ipcc::c1_clear_flag_channel(channels::cpu2::IPCC_BLE_EVENT_CHANNEL);
})
.await
}
pub(super) fn acl_data_handler() {
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_HCI_ACL_DATA_CHANNEL, false);
// TODO: ACL data ack to the user
}
pub fn send_cmd(opcode: u16, payload: &[u8]) {
debug!("writing ble cmd");
unsafe {
/// `TL_BLE_SendCmd`
pub async fn write(opcode: u16, payload: &[u8]) {
Ipcc::send(channels::cpu1::IPCC_BLE_CMD_CHANNEL, || unsafe {
CmdPacket::write_into(BLE_CMD_BUFFER.as_mut_ptr(), TlPacketType::BleCmd, opcode, payload);
}
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_BLE_CMD_CHANNEL);
})
.await;
}
#[allow(dead_code)] // Not used currently but reserved
pub(super) fn ble_send_acl_data() {
let cmd_packet = unsafe { &mut *(*TL_REF_TABLE.assume_init().ble_table).phci_acl_data_buffer };
cmd_packet.acl_data_serial.ty = TlPacketType::AclData as u8;
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_HCI_ACL_DATA_CHANNEL);
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_HCI_ACL_DATA_CHANNEL, true);
}
// TODO: acl commands
}
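A minimal sketch of how a caller can drive the new async BLE transport defined above; the function name and buffer size are illustrative and not part of this commit:

// Sketch only: issue a command and wait for the matching event.
async fn ble_example() {
    // The same reset-style command the updated integration test sends.
    Ble::write(0x0c, &[]).await;
    // Resolves once CPU2 posts an event on the BLE channel.
    let evt = Ble::read().await;
    let mut buf = [0u8; 255]; // buffer size chosen arbitrarily for the sketch
    evt.write(&mut buf).unwrap();
}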


@ -171,6 +171,8 @@ impl EvtBox {
impl Drop for EvtBox {
fn drop(&mut self) {
mm::MemoryManager::evt_drop(self.ptr);
trace!("evt box drop packet");
unsafe { mm::MemoryManager::drop_event_packet(self.ptr) };
}
}


@ -7,11 +7,10 @@ use core::mem::MaybeUninit;
use core::sync::atomic::{compiler_fence, Ordering};
use cmd::CmdPacket;
use embassy_futures::block_on;
use embassy_hal_common::{into_ref, Peripheral, PeripheralRef};
use embassy_stm32::interrupt;
use embassy_stm32::interrupt::typelevel::Interrupt;
use embassy_stm32::ipcc::{Config, Ipcc};
use embassy_stm32::ipcc::{Config, Ipcc, ReceiveInterruptHandler, TransmitInterruptHandler};
use embassy_stm32::peripherals::IPCC;
use embassy_sync::blocking_mutex::raw::CriticalSectionRawMutex;
use embassy_sync::channel::Channel;
@ -29,50 +28,11 @@ pub mod cmd;
pub mod consts;
pub mod evt;
pub mod mm;
pub mod rc;
pub mod shci;
pub mod sys;
pub mod tables;
pub mod unsafe_linked_list;
/// Interrupt handler.
pub struct ReceiveInterruptHandler {}
impl interrupt::typelevel::Handler<interrupt::typelevel::IPCC_C1_RX> for ReceiveInterruptHandler {
unsafe fn on_interrupt() {
if Ipcc::is_rx_pending(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL) {
debug!("RX SYS evt");
sys::Sys::evt_handler();
} else if Ipcc::is_rx_pending(channels::cpu2::IPCC_BLE_EVENT_CHANNEL) {
debug!("RX BLE evt");
ble::Ble::evt_handler();
}
STATE.signal(());
}
}
pub struct TransmitInterruptHandler {}
impl interrupt::typelevel::Handler<interrupt::typelevel::IPCC_C1_TX> for TransmitInterruptHandler {
unsafe fn on_interrupt() {
if Ipcc::is_tx_pending(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL) {
debug!("TX SYS cmd rsp");
let cc = sys::Sys::cmd_evt_handler();
LAST_CC_EVT.signal(cc);
} else if Ipcc::is_tx_pending(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL) {
debug!("TX MM release");
mm::MemoryManager::free_buf_handler();
} else if Ipcc::is_tx_pending(channels::cpu1::IPCC_HCI_ACL_DATA_CHANNEL) {
debug!("TX HCI acl");
ble::Ble::acl_data_handler();
}
STATE.signal(());
}
}
#[link_section = "TL_REF_TABLE"]
pub static mut TL_REF_TABLE: MaybeUninit<RefTable> = MaybeUninit::uninit();
@ -289,22 +249,4 @@ impl<'d> TlMbox<'d> {
None
}
}
/// Picks a single [`EvtBox`] from the internal event queue.
///
/// The internal event queue is populated in the IPCC_RX_IRQ handler.
pub fn dequeue_event(&mut self) -> Option<EvtBox> {
EVT_CHANNEL.try_recv().ok()
}
/// Retrieves the last Command Complete event and removes it from the mailbox.
pub fn pop_last_cc_evt(&mut self) -> Option<CcEvt> {
if LAST_CC_EVT.signaled() {
let cc = block_on(LAST_CC_EVT.wait());
LAST_CC_EVT.reset();
Some(cc)
} else {
None
}
}
}


@ -1,6 +1,11 @@
//! Memory manager routines
use core::future::poll_fn;
use core::task::Poll;
use cortex_m::interrupt;
use embassy_stm32::ipcc::Ipcc;
use embassy_sync::waitqueue::AtomicWaker;
use crate::evt::EvtPacket;
use crate::tables::MemManagerTable;
@ -10,7 +15,9 @@ use crate::{
TL_MEM_MANAGER_TABLE,
};
pub(super) struct MemoryManager;
static MM_WAKER: AtomicWaker = AtomicWaker::new();
pub struct MemoryManager;
impl MemoryManager {
pub fn enable() {
@ -30,37 +37,36 @@ impl MemoryManager {
}
}
pub fn evt_drop(evt: *mut EvtPacket) {
// unsafe {
// let list_node = evt.cast();
//
// LinkedListNode::insert_tail(LOCAL_FREE_BUF_QUEUE.as_mut_ptr(), list_node);
//
// let channel_is_busy = Ipcc::c1_is_active_flag(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
//
// // postpone event buffer freeing to IPCC interrupt handler
// if channel_is_busy {
// Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, true);
// } else {
// Self::send_free_buf();
// Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
// }
// }
/// SAFETY: passing a pointer to something other than an event packet is UB
pub unsafe fn drop_event_packet(evt: *mut EvtPacket) {
interrupt::free(|_| unsafe {
LinkedListNode::insert_head(LOCAL_FREE_BUF_QUEUE.as_mut_ptr(), evt as *mut _);
});
MM_WAKER.wake();
}
/// gives free event buffers back to CPU2 from local buffer queue
pub fn send_free_buf() {
// unsafe {
// while let Some(node_ptr) = LinkedListNode::remove_head(LOCAL_FREE_BUF_QUEUE.as_mut_ptr()) {
// LinkedListNode::insert_head(FREE_BUF_QUEUE.as_mut_ptr(), node_ptr);
// }
// }
}
pub async fn run_queue() {
loop {
poll_fn(|cx| unsafe {
MM_WAKER.register(cx.waker());
if LinkedListNode::is_empty(LOCAL_FREE_BUF_QUEUE.as_mut_ptr()) {
Poll::Pending
} else {
Poll::Ready(())
}
})
.await;
/// free buffer channel interrupt handler
pub fn free_buf_handler() {
// Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, false);
// Self::send_free_buf();
// Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL);
Ipcc::send(channels::cpu1::IPCC_MM_RELEASE_BUFFER_CHANNEL, || {
interrupt::free(|_| unsafe {
// CS required while moving nodes
while let Some(node_ptr) = LinkedListNode::remove_head(LOCAL_FREE_BUF_QUEUE.as_mut_ptr()) {
LinkedListNode::insert_head(FREE_BUF_QUEUE.as_mut_ptr(), node_ptr);
}
})
})
.await;
}
}
}
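The new memory manager is split in two halves: `drop_event_packet` runs from `EvtBox::drop`, pushes the freed node onto `LOCAL_FREE_BUF_QUEUE` inside a critical section and wakes `MM_WAKER`, while `run_queue` is a long-lived future that waits on that waker and hands the nodes back to CPU2 over the MM release channel. It therefore has to be spawned once at startup; a minimal sketch mirroring the updated test:

#[embassy_executor::task]
async fn run_mm_queue() {
    // Without this task, freed event buffers pile up in the local queue and
    // CPU2 eventually runs out of event memory.
    embassy_stm32_wpan::mm::MemoryManager::run_queue().await;
}

// in main(): spawner.spawn(run_mm_queue()).unwrap();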


@ -1,43 +0,0 @@
use crate::ble::Ble;
use crate::consts::TlPacketType;
use crate::{TlMbox, STATE};
pub struct RadioCoprocessor<'d> {
mbox: TlMbox<'d>,
rx_buf: [u8; 500],
}
impl<'d> RadioCoprocessor<'d> {
pub fn new(mbox: TlMbox<'d>) -> Self {
Self {
mbox,
rx_buf: [0u8; 500],
}
}
pub fn write(&self, opcode: u16, buf: &[u8]) {
let cmd_code = buf[0];
let cmd = TlPacketType::try_from(cmd_code).unwrap();
match &cmd {
TlPacketType::BleCmd => Ble::send_cmd(opcode, buf),
_ => todo!(),
}
}
pub async fn read(&mut self) -> &[u8] {
loop {
STATE.wait().await;
while let Some(evt) = self.mbox.dequeue_event() {
evt.write(&mut self.rx_buf).unwrap();
}
if self.mbox.pop_last_cc_evt().is_some() {
continue;
}
return &self.rx_buf;
}
}
}


@ -1,19 +1,18 @@
use core::mem::MaybeUninit;
use core::sync::atomic::{compiler_fence, Ordering};
use core::{mem, ptr};
use embassy_stm32::ipcc::Ipcc;
use crate::cmd::{CmdPacket, CmdSerial};
use crate::cmd::{CmdPacket, CmdSerialStub};
use crate::consts::TlPacketType;
use crate::evt::{CcEvt, EvtBox, EvtSerial};
use crate::shci::{ShciBleInitCmdParam, SCHI_OPCODE_BLE_INIT};
use crate::evt::{CcEvt, EvtBox, EvtPacket, EvtSerial};
use crate::shci::{ShciBleInitCmdPacket, ShciBleInitCmdParam, ShciHeader, SCHI_OPCODE_BLE_INIT};
use crate::tables::SysTable;
use crate::unsafe_linked_list::LinkedListNode;
use crate::{channels, EVT_CHANNEL, SYSTEM_EVT_QUEUE, SYS_CMD_BUF, TL_SYS_TABLE};
use crate::{channels, mm, Ipcc, EVT_CHANNEL, SYSTEM_EVT_QUEUE, SYS_CMD_BUF, TL_SYS_TABLE};
pub struct Sys;
impl Sys {
/// TL_Sys_Init
pub fn enable() {
unsafe {
LinkedListNode::init_head(SYSTEM_EVT_QUEUE.as_mut_ptr());
@ -21,59 +20,47 @@ impl Sys {
TL_SYS_TABLE.as_mut_ptr().write_volatile(SysTable {
pcmd_buffer: SYS_CMD_BUF.as_mut_ptr(),
sys_queue: SYSTEM_EVT_QUEUE.as_ptr(),
})
}
Ipcc::c1_set_rx_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL, true);
}
pub fn cmd_evt_handler() -> CcEvt {
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, false);
// ST's command response data structure is really convoluted.
//
// For command response events on the SYS channel, the header is missing
// and one should:
// 1. Interpret the content of CMD_BUFFER as a CmdPacket
// 2. Access the CmdPacket's cmdserial field and interpret its content as an EvtSerial
// 3. Access the EvtSerial's evt field (as Evt) and interpret its payload as a CcEvt
// 4. The CcEvt type is the actual SHCI response
// 5. Profit
unsafe {
let pcmd: *const CmdPacket = (*TL_SYS_TABLE.as_ptr()).pcmd_buffer;
let cmd_serial: *const CmdSerial = &(*pcmd).cmdserial;
let evt_serial: *const EvtSerial = cmd_serial.cast();
let cc: *const CcEvt = (*evt_serial).evt.payload.as_ptr().cast();
*cc
});
}
}
pub fn evt_handler() {
unsafe {
while let Some(node_ptr) = LinkedListNode::remove_head(SYSTEM_EVT_QUEUE.as_mut_ptr()) {
let event = EvtBox::new(node_ptr.cast());
// pub async fn shci_c2_ble_init(&mut self, param: ShciBleInitCmdParam) -> SchiCommandStatus {
// let command_event = self
// .write_and_get_response(TlPacketType::SysCmd, ShciOpcode::BleInit as u16, param.payload())
// .await;
//
// let payload = command_event.payload[0];
// // info!("payload: {:x}", payload);
//
// payload.try_into().unwrap()
// }
EVT_CHANNEL.try_send(event).unwrap();
}
}
Ipcc::c1_clear_flag_channel(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL);
}
pub fn shci_ble_init(param: ShciBleInitCmdParam) {
debug!("sending SHCI");
Self::send_cmd(SCHI_OPCODE_BLE_INIT, param.payload());
}
pub fn send_cmd(opcode: u16, payload: &[u8]) {
pub fn write(opcode: u16, payload: &[u8]) {
unsafe {
CmdPacket::write_into(SYS_CMD_BUF.as_mut_ptr(), TlPacketType::SysCmd, opcode, payload);
}
}
compiler_fence(Ordering::SeqCst);
pub async fn shci_c2_ble_init(param: ShciBleInitCmdParam) {
debug!("sending SHCI");
Ipcc::c1_set_flag_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL);
Ipcc::c1_set_tx_channel(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, true);
Ipcc::send(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL, || {
Self::write(SCHI_OPCODE_BLE_INIT, param.payload());
})
.await;
Ipcc::flush(channels::cpu1::IPCC_SYSTEM_CMD_RSP_CHANNEL).await;
}
/// `HW_IPCC_SYS_EvtNot`
pub async fn read() -> EvtBox {
Ipcc::receive(channels::cpu2::IPCC_SYSTEM_EVENT_CHANNEL, || unsafe {
if let Some(node_ptr) = LinkedListNode::remove_head(SYSTEM_EVT_QUEUE.as_mut_ptr()) {
Some(EvtBox::new(node_ptr.cast()))
} else {
None
}
})
.await
}
}
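With the system channel converted as well, the startup handshake reduces to a pair of awaits, as the updated integration test shows; a condensed sketch (function name and buffer size are illustrative):

// Sketch of the coprocessor startup sequence used by the test.
async fn start_coprocessor() {
    // `HW_IPCC_SYS_EvtNot`: wait for the CPU2 ready notification.
    let ready = Sys::read().await;
    let mut buf = [0u8; 255];
    ready.write(&mut buf).unwrap();
    // Configure the BLE subsystem over SHCI with default parameters.
    Sys::shci_c2_ble_init(Default::default()).await;
}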


@ -117,9 +117,11 @@ impl LinkedListNode {
/// Remove `node` from the linked list
pub unsafe fn remove_node(mut p_node: *mut LinkedListNode) {
interrupt::free(|_| {
trace!("remove node: {:x}", p_node);
let node = ptr::read_volatile(p_node);
trace!("remove node: prev/next {:x}/{:x}", node.prev, node.next);
// trace!("remove node: {:x}", p_node);
// Apparently linked list nodes are not always aligned; if more hardfaults occur,
// more of these reads may need to be converted to unaligned reads.
let node = ptr::read_unaligned(p_node);
// trace!("remove node: prev/next {:x}/{:x}", node.prev, node.next);
if node.next != node.prev {
let mut node_next = ptr::read_volatile(node.next);
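The switch from `ptr::read_volatile` to `ptr::read_unaligned` trades volatile access semantics for tolerance of misaligned pointers: a normal or volatile read requires the pointer to be properly aligned, and a misaligned access on Cortex-M can hardfault, which matches the observation in the comment above. A self-contained illustration (the struct and helper are hypothetical, not part of this commit):

use core::ptr;

#[repr(C)]
struct RawNode {
    next: *mut RawNode,
    prev: *mut RawNode,
}

// Hypothetical helper: copy a node out of shared memory.
// `ptr::read_volatile(p)` is only sound if `p` is aligned for `RawNode`;
// `ptr::read_unaligned(p)` merely requires the pointer to be valid for reads.
unsafe fn copy_node(p: *const RawNode) -> RawNode {
    ptr::read_unaligned(p)
}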


@ -1,7 +1,77 @@
use core::future::poll_fn;
use core::task::Poll;
use atomic_polyfill::{compiler_fence, Ordering};
use self::sealed::Instance;
use crate::interrupt;
use crate::interrupt::typelevel::Interrupt;
use crate::peripherals::IPCC;
use crate::rcc::sealed::RccPeripheral;
/// Interrupt handler.
pub struct ReceiveInterruptHandler {}
impl interrupt::typelevel::Handler<interrupt::typelevel::IPCC_C1_RX> for ReceiveInterruptHandler {
unsafe fn on_interrupt() {
let regs = IPCC::regs();
let channels = [
IpccChannel::Channel1,
IpccChannel::Channel2,
IpccChannel::Channel3,
IpccChannel::Channel4,
IpccChannel::Channel5,
IpccChannel::Channel6,
];
// Status register gives channel occupied status. For rx, use cpu1.
let sr = unsafe { regs.cpu(1).sr().read() };
regs.cpu(0).mr().modify(|w| {
for channel in channels {
if sr.chf(channel as usize) {
// If bit is set to 1 then interrupt is disabled; we want to disable the interrupt
w.set_chom(channel as usize, true);
// There shouldn't be a race because the channel is masked only if the interrupt has fired
IPCC::state().rx_waker_for(channel).wake();
}
}
})
}
}
pub struct TransmitInterruptHandler {}
impl interrupt::typelevel::Handler<interrupt::typelevel::IPCC_C1_TX> for TransmitInterruptHandler {
unsafe fn on_interrupt() {
let regs = IPCC::regs();
let channels = [
IpccChannel::Channel1,
IpccChannel::Channel2,
IpccChannel::Channel3,
IpccChannel::Channel4,
IpccChannel::Channel5,
IpccChannel::Channel6,
];
// Status register gives channel occupied status. For tx, use cpu0.
let sr = unsafe { regs.cpu(0).sr().read() };
regs.cpu(0).mr().modify(|w| {
for channel in channels {
if !sr.chf(channel as usize) {
// If bit is set to 1 then interrupt is disabled; we want to disable the interrupt
w.set_chfm(channel as usize, true);
// There shouldn't be a race because the channel is masked only if the interrupt has fired
IPCC::state().tx_waker_for(channel).wake();
}
}
});
}
}
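Interrupt dispatch now lives in the HAL and does nothing beyond masking the fired channel's interrupt and waking the corresponding waker; users hook the handlers up with `bind_interrupts!` and pass the resulting struct to the driver, exactly as the updated test at the end of this commit does:

use embassy_stm32::bind_interrupts;
use embassy_stm32::ipcc::{Config, ReceiveInterruptHandler, TransmitInterruptHandler};

bind_interrupts!(struct Irqs {
    IPCC_C1_RX => ReceiveInterruptHandler;
    IPCC_C1_TX => TransmitInterruptHandler;
});

// later: let mbox = TlMbox::init(p.IPCC, Irqs, Config::default());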
#[non_exhaustive]
#[derive(Clone, Copy, Default)]
pub struct Config {
@ -20,13 +90,6 @@ pub enum IpccChannel {
Channel6 = 5,
}
pub mod sealed {
pub trait Instance: crate::rcc::RccPeripheral {
fn regs() -> crate::pac::ipcc::Ipcc;
fn set_cpu2(enabled: bool);
}
}
pub struct Ipcc;
impl Ipcc {
@ -45,115 +108,99 @@ impl Ipcc {
w.set_txfie(true);
})
}
// enable interrupts
crate::interrupt::typelevel::IPCC_C1_RX::unpend();
crate::interrupt::typelevel::IPCC_C1_TX::unpend();
unsafe { crate::interrupt::typelevel::IPCC_C1_RX::enable() };
unsafe { crate::interrupt::typelevel::IPCC_C1_TX::enable() };
}
pub fn c1_set_rx_channel(channel: IpccChannel, enabled: bool) {
/// Send data to an IPCC channel. The closure is called to write the data when appropriate.
pub async fn send(channel: IpccChannel, f: impl FnOnce()) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(0).mr().modify(|w| w.set_chom(channel as usize, !enabled)) }
}
Self::flush(channel).await;
compiler_fence(Ordering::SeqCst);
pub fn c1_get_rx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
f();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(0).mr().read().chom(channel as usize) }
}
#[allow(dead_code)]
pub fn c2_set_rx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(1).mr().modify(|w| w.set_chom(channel as usize, !enabled)) }
}
#[allow(dead_code)]
pub fn c2_get_rx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(1).mr().read().chom(channel as usize) }
}
pub fn c1_set_tx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(0).mr().modify(|w| w.set_chfm(channel as usize, !enabled)) }
}
pub fn c1_get_tx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(0).mr().read().chfm(channel as usize) }
}
#[allow(dead_code)]
pub fn c2_set_tx_channel(channel: IpccChannel, enabled: bool) {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { regs.cpu(1).mr().modify(|w| w.set_chfm(channel as usize, !enabled)) }
}
#[allow(dead_code)]
pub fn c2_get_tx_channel(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
// If bit is set to 1 then interrupt is disabled
unsafe { !regs.cpu(1).mr().read().chfm(channel as usize) }
}
/// clears IPCC receive channel status for CPU1
pub fn c1_clear_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
trace!("ipcc: ch {}: clear rx", channel as u8);
unsafe { regs.cpu(0).scr().write(|w| w.set_chc(channel as usize, true)) }
}
#[allow(dead_code)]
/// clears IPCC receive channel status for CPU2
pub fn c2_clear_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(1).scr().write(|w| w.set_chc(channel as usize, true)) }
}
pub fn c1_set_flag_channel(channel: IpccChannel) {
let regs = IPCC::regs();
compiler_fence(Ordering::SeqCst);
trace!("ipcc: ch {}: send data", channel as u8);
unsafe { regs.cpu(0).scr().write(|w| w.set_chs(channel as usize, true)) }
}
#[allow(dead_code)]
pub fn c2_set_flag_channel(channel: IpccChannel) {
/// Wait for the tx channel to become clear
pub async fn flush(channel: IpccChannel) {
let regs = IPCC::regs();
unsafe { regs.cpu(1).scr().write(|w| w.set_chs(channel as usize, true)) }
// This is a race, but is nice for debugging
if unsafe { regs.cpu(0).sr().read() }.chf(channel as usize) {
trace!("ipcc: ch {}: wait for tx free", channel as u8);
}
poll_fn(|cx| {
IPCC::state().tx_waker_for(channel).register(cx.waker());
// If bit is set to 1 then interrupt is disabled; we want to enable the interrupt
unsafe { regs.cpu(0).mr().modify(|w| w.set_chfm(channel as usize, false)) }
compiler_fence(Ordering::SeqCst);
if !unsafe { regs.cpu(0).sr().read() }.chf(channel as usize) {
// If bit is set to 1 then interrupt is disabled; we want to disable the interrupt
unsafe { regs.cpu(0).mr().modify(|w| w.set_chfm(channel as usize, true)) }
Poll::Ready(())
} else {
Poll::Pending
}
})
.await;
}
pub fn c1_is_active_flag(channel: IpccChannel) -> bool {
/// Receive data from an IPCC channel. The closure is called to read the data when appropriate.
pub async fn receive<R>(channel: IpccChannel, mut f: impl FnMut() -> Option<R>) -> R {
let regs = IPCC::regs();
unsafe { regs.cpu(0).sr().read().chf(channel as usize) }
}
loop {
// This is a race, but is nice for debugging
if !unsafe { regs.cpu(1).sr().read() }.chf(channel as usize) {
trace!("ipcc: ch {}: wait for rx occupied", channel as u8);
}
pub fn c2_is_active_flag(channel: IpccChannel) -> bool {
let regs = IPCC::regs();
poll_fn(|cx| {
IPCC::state().rx_waker_for(channel).register(cx.waker());
// If bit is set to 1 then interrupt is disabled; we want to enable the interrupt
unsafe { regs.cpu(0).mr().modify(|w| w.set_chom(channel as usize, false)) }
unsafe { regs.cpu(1).sr().read().chf(channel as usize) }
}
compiler_fence(Ordering::SeqCst);
pub fn is_tx_pending(channel: IpccChannel) -> bool {
!Self::c1_is_active_flag(channel) && Self::c1_get_tx_channel(channel)
}
if unsafe { regs.cpu(1).sr().read() }.chf(channel as usize) {
// If bit is set to 1 then interrupt is disabled; we want to disable the interrupt
unsafe { regs.cpu(0).mr().modify(|w| w.set_chfm(channel as usize, true)) }
pub fn is_rx_pending(channel: IpccChannel) -> bool {
Self::c2_is_active_flag(channel) && Self::c1_get_rx_channel(channel)
Poll::Ready(())
} else {
Poll::Pending
}
})
.await;
trace!("ipcc: ch {}: read data", channel as u8);
compiler_fence(Ordering::SeqCst);
match f() {
Some(ret) => return ret,
None => {}
}
trace!("ipcc: ch {}: clear rx", channel as u8);
compiler_fence(Ordering::SeqCst);
// The read function returned None: clear the channel flag so CPU2 can post more data, then wait again
unsafe { regs.cpu(0).scr().write(|w| w.set_chc(channel as usize, true)) }
}
}
}
@ -165,9 +212,66 @@ impl sealed::Instance for crate::peripherals::IPCC {
fn set_cpu2(enabled: bool) {
unsafe { crate::pac::PWR.cr4().modify(|w| w.set_c2boot(enabled)) }
}
fn state() -> &'static self::sealed::State {
static STATE: self::sealed::State = self::sealed::State::new();
&STATE
}
}
pub(crate) mod sealed {
use embassy_sync::waitqueue::AtomicWaker;
use super::*;
pub struct State {
rx_wakers: [AtomicWaker; 6],
tx_wakers: [AtomicWaker; 6],
}
impl State {
pub const fn new() -> Self {
const WAKER: AtomicWaker = AtomicWaker::new();
Self {
rx_wakers: [WAKER; 6],
tx_wakers: [WAKER; 6],
}
}
pub fn rx_waker_for(&self, channel: IpccChannel) -> &AtomicWaker {
match channel {
IpccChannel::Channel1 => &self.rx_wakers[0],
IpccChannel::Channel2 => &self.rx_wakers[1],
IpccChannel::Channel3 => &self.rx_wakers[2],
IpccChannel::Channel4 => &self.rx_wakers[3],
IpccChannel::Channel5 => &self.rx_wakers[4],
IpccChannel::Channel6 => &self.rx_wakers[5],
}
}
pub fn tx_waker_for(&self, channel: IpccChannel) -> &AtomicWaker {
match channel {
IpccChannel::Channel1 => &self.tx_wakers[0],
IpccChannel::Channel2 => &self.tx_wakers[1],
IpccChannel::Channel3 => &self.tx_wakers[2],
IpccChannel::Channel4 => &self.tx_wakers[3],
IpccChannel::Channel5 => &self.tx_wakers[4],
IpccChannel::Channel6 => &self.tx_wakers[5],
}
}
}
pub trait Instance: crate::rcc::RccPeripheral {
fn regs() -> crate::pac::ipcc::Ipcc;
fn set_cpu2(enabled: bool);
fn state() -> &'static State;
}
}
unsafe fn _configure_pwr() {
// TODO: move this to RCC
let pwr = crate::pac::PWR;
let rcc = crate::pac::RCC;


@ -8,27 +8,41 @@ mod common;
use common::*;
use embassy_executor::Spawner;
use embassy_futures::poll_once;
use embassy_stm32::bind_interrupts;
use embassy_stm32::ipcc::Config;
use embassy_stm32::ipcc::{Config, ReceiveInterruptHandler, TransmitInterruptHandler};
use embassy_stm32_wpan::ble::Ble;
use embassy_stm32_wpan::rc::RadioCoprocessor;
use embassy_stm32_wpan::sys::Sys;
use embassy_stm32_wpan::TlMbox;
use embassy_stm32_wpan::{mm, TlMbox};
use embassy_time::{Duration, Timer};
bind_interrupts!(struct Irqs{
IPCC_C1_RX => embassy_stm32_wpan::ReceiveInterruptHandler;
IPCC_C1_TX => embassy_stm32_wpan::TransmitInterruptHandler;
IPCC_C1_RX => ReceiveInterruptHandler;
IPCC_C1_TX => TransmitInterruptHandler;
});
#[embassy_executor::task]
async fn run_mm_queue() {
mm::MemoryManager::run_queue().await;
}
#[embassy_executor::main]
async fn main(_spawner: Spawner) {
async fn main(spawner: Spawner) {
let p = embassy_stm32::init(config());
info!("Hello World!");
spawner.spawn(run_mm_queue()).unwrap();
let config = Config::default();
let mbox = TlMbox::init(p.IPCC, Irqs, config);
let mut rx_buf = [0u8; 500];
let ready_event = Sys::read().await;
let _ = poll_once(Sys::read()); // clear rx not
ready_event.write(&mut rx_buf).unwrap();
info!("coprocessor ready {}", rx_buf);
loop {
let wireless_fw_info = mbox.wireless_fw_info();
match wireless_fw_info {
@ -53,19 +67,18 @@ async fn main(_spawner: Spawner) {
Timer::after(Duration::from_millis(50)).await;
}
let mut rc = RadioCoprocessor::new(mbox);
Sys::shci_c2_ble_init(Default::default()).await;
let response = rc.read().await;
info!("coprocessor ready {}", response);
info!("starting ble...");
Ble::write(0x0c, &[]).await;
Sys::shci_ble_init(Default::default());
info!("waiting for ble...");
let ble_event = Ble::read().await;
ble_event.write(&mut rx_buf).unwrap();
// rc.write(&[0x01, 0x03, 0x0c, 0x00, 0x00]);
Ble::send_cmd(0x0c, &[]);
let response = rc.read().await;
info!("ble reset rsp {}", response);
info!("ble event: {}", rx_buf);
// Timer::after(Duration::from_secs(3)).await;
info!("Test OK");
cortex_m::asm::bkpt();
}