diff --git a/esp-hal/CHANGELOG.md b/esp-hal/CHANGELOG.md
index 3521cd95f..b273f6e0a 100644
--- a/esp-hal/CHANGELOG.md
+++ b/esp-hal/CHANGELOG.md
@@ -25,6 +25,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Removed
 
 - uart: Removed `configure_pins` methods (#1592)
+- Removed `DmaError::Exhausted` error by improving the implementation of the `pop` function (#1664)
 
 ## [0.18.0] - 2024-06-04
 
diff --git a/esp-hal/src/dma/gdma.rs b/esp-hal/src/dma/gdma.rs
index d7c42d1e1..c5667682d 100644
--- a/esp-hal/src/dma/gdma.rs
+++ b/esp-hal/src/dma/gdma.rs
@@ -282,10 +282,6 @@ impl RegisterAccess for Channel {
         Self::in_int().raw().read().in_suc_eof().bit()
     }
 
-    fn last_in_dscr_address() -> usize {
-        Self::ch().in_dscr_bf0().read().inlink_dscr_bf0().bits() as _
-    }
-
     fn is_listening_in_eof() -> bool {
         Self::in_int().ena().read().in_suc_eof().bit_is_set()
     }
diff --git a/esp-hal/src/dma/mod.rs b/esp-hal/src/dma/mod.rs
index e9891a38a..0eb86ed34 100644
--- a/esp-hal/src/dma/mod.rs
+++ b/esp-hal/src/dma/mod.rs
@@ -93,10 +93,6 @@ impl DmaDescriptor {
         self.flags.length() as usize
     }
 
-    fn is_empty(&self) -> bool {
-        self.len() == 0
-    }
-
     fn set_suc_eof(&mut self, suc_eof: bool) {
         self.flags.set_suc_eof(suc_eof)
     }
@@ -319,8 +315,6 @@ pub enum DmaError {
     DescriptorError,
     /// The available free buffer is less than the amount of data to push
     Overflow,
-    /// The available amount of data is less than requested
-    Exhausted,
     /// The given buffer is too small
     BufferTooSmall,
     /// Descriptors or buffers are not located in a supported memory region
@@ -620,10 +614,6 @@ where
        R::is_in_done()
    }
 
-    fn last_in_dscr_address(&self) -> usize {
-        R::last_in_dscr_address()
-    }
-
     #[cfg(feature = "async")]
     fn waker() -> &'static embassy_sync::waitqueue::AtomicWaker;
 }
@@ -642,7 +632,7 @@ where
     pub(crate) read_descr_ptr: *mut DmaDescriptor,
     pub(crate) available: usize,
     pub(crate) last_seen_handled_descriptor_ptr: *mut DmaDescriptor,
-    pub(crate) read_buffer_start: *mut u8,
+    pub(crate) read_descriptor_ptr: *mut DmaDescriptor,
     pub(crate) _phantom: PhantomData,
 }
 
@@ -659,7 +649,7 @@ where
             read_descr_ptr: core::ptr::null_mut(),
             available: 0,
             last_seen_handled_descriptor_ptr: core::ptr::null_mut(),
-            read_buffer_start: core::ptr::null_mut(),
+            read_descriptor_ptr: core::ptr::null_mut(),
             _phantom: PhantomData,
         }
     }
@@ -710,7 +700,7 @@ where
         self.available = 0;
         self.read_descr_ptr = self.descriptors.as_mut_ptr();
         self.last_seen_handled_descriptor_ptr = core::ptr::null_mut();
-        self.read_buffer_start = data;
+        self.read_descriptor_ptr = core::ptr::null_mut();
 
         self.rx_impl
             .prepare_transfer_without_start(self.descriptors, circular, peri, data, len)
@@ -750,58 +740,72 @@ where
 
     fn available(&mut self) -> usize {
         if self.last_seen_handled_descriptor_ptr.is_null() {
-            self.last_seen_handled_descriptor_ptr = self.descriptors.as_mut_ptr();
-            return 0;
+            // initially start at last descriptor (so that next will be the first
+            // descriptor)
+            self.last_seen_handled_descriptor_ptr =
+                addr_of_mut!(self.descriptors[self.descriptors.len() - 1]);
         }
 
-        if self.available != 0 {
-            return self.available;
-        }
+        let mut current_in_descr_ptr =
+            unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
+        let mut current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };
 
-        let descr_address = self.last_seen_handled_descriptor_ptr;
-        let mut dw0 = unsafe { descr_address.read_volatile() };
+        while current_in_descr.owner() == Owner::Cpu {
+            self.available += current_in_descr.len();
+            self.last_seen_handled_descriptor_ptr = current_in_descr_ptr;
 
-        if dw0.owner() == Owner::Cpu && !dw0.is_empty() {
-            let descriptor_buffer = dw0.buffer;
-            let next_descriptor = dw0.next;
-
-            self.read_buffer_start = descriptor_buffer;
-            self.available = dw0.len();
-
-            dw0.set_owner(Owner::Dma);
-            dw0.set_length(0);
-            dw0.set_suc_eof(false);
-
-            unsafe {
-                descr_address.write_volatile(dw0);
-            }
-
-            self.last_seen_handled_descriptor_ptr = if next_descriptor.is_null() {
-                self.descriptors.as_mut_ptr()
-            } else {
-                next_descriptor
-            };
+            current_in_descr_ptr =
+                unsafe { self.last_seen_handled_descriptor_ptr.read_volatile() }.next;
+            current_in_descr = unsafe { current_in_descr_ptr.read_volatile() };
         }
 
         self.available
     }
 
     fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
-        let avail = self.available;
+        let len = data.len();
+        let mut avail = self.available;
 
-        if avail < data.len() {
-            return Err(DmaError::Exhausted);
+        if avail > len {
+            return Err(DmaError::BufferTooSmall);
         }
 
-        unsafe {
-            let dst = data.as_mut_ptr();
-            let src = self.read_buffer_start;
-            let count = self.available;
-            core::ptr::copy_nonoverlapping(src, dst, count);
+        let mut remaining_buffer = data;
+        let mut descr_ptr = self.read_descr_ptr;
+
+        if descr_ptr.is_null() {
+            return Ok(0);
         }
 
-        self.available = 0;
-        Ok(data.len())
+        let mut descr = unsafe { descr_ptr.read_volatile() };
+
+        while avail > 0 && !remaining_buffer.is_empty() && remaining_buffer.len() >= descr.len() {
+            unsafe {
+                let dst = remaining_buffer.as_mut_ptr();
+                let src = descr.buffer;
+                let count = descr.len();
+                core::ptr::copy_nonoverlapping(src, dst, count);
+
+                descr.set_owner(Owner::Dma);
+                descr.set_suc_eof(false);
+                descr.set_length(0);
+                descr_ptr.write_volatile(descr);
+
+                remaining_buffer = &mut remaining_buffer[count..];
+                avail -= count;
+                descr_ptr = descr.next;
+            }
+
+            if descr_ptr.is_null() {
+                break;
+            }
+
+            descr = unsafe { descr_ptr.read_volatile() };
+        }
+
+        self.read_descr_ptr = descr_ptr;
+        self.available = avail;
+        Ok(len - remaining_buffer.len())
     }
 
     fn drain_buffer(&mut self, mut dst: &mut [u8]) -> Result<usize, DmaError> {
@@ -1407,7 +1411,6 @@ pub trait RegisterAccess: crate::private::Sealed {
     fn set_in_peripheral(peripheral: u8);
     fn start_in();
     fn is_in_done() -> bool;
-    fn last_in_dscr_address() -> usize;
 
     fn is_listening_in_eof() -> bool;
     fn is_listening_out_eof() -> bool;
@@ -1532,9 +1535,13 @@ pub(crate) mod dma_private {
         ///
         /// After this all data should be processed by the peripheral - i.e. the
         /// peripheral should have processed it's FIFO(s)
+        ///
+        /// Please note: This is called from the transfer's `wait` function
+        /// _and_ from its [Drop] implementation.
         fn peripheral_wait_dma(&mut self, is_tx: bool, is_rx: bool);
 
-        /// Only used by circular DMA transfers
+        /// Only used by circular DMA transfers, from both the `stop` function
+        /// _and_ its [Drop] implementation.
         fn peripheral_dma_stop(&mut self);
     }
 
@@ -1761,12 +1768,22 @@ where
         Self { instance }
     }
 
-    /// Amount of bytes which can be popped
+    /// Amount of bytes which can be popped.
+    ///
+    /// It's expected that you call this before trying to
+    /// [DmaTransferRxCircular::pop] data.
     pub fn available(&mut self) -> usize {
         self.instance.rx().available()
     }
 
-    /// Get available data
+    /// Get available data.
+    ///
+    /// It's expected that the amount of available data is checked beforehand
+    /// by calling [DmaTransferRxCircular::available] and that the given buffer
+    /// can hold all available data.
+    ///
+    /// Fails with [DmaError::BufferTooSmall] if the given buffer is too small
+    /// to hold all available data.
     pub fn pop(&mut self, data: &mut [u8]) -> Result<usize, DmaError> {
         self.instance.rx().pop(data)
     }
diff --git a/esp-hal/src/dma/pdma.rs b/esp-hal/src/dma/pdma.rs
index bb8f0a183..bca45edbb 100644
--- a/esp-hal/src/dma/pdma.rs
+++ b/esp-hal/src/dma/pdma.rs
@@ -231,11 +231,6 @@ macro_rules! ImplSpiChannel {
                 spi.dma_int_raw().read().in_done().bit()
             }
 
-            fn last_in_dscr_address() -> usize {
-                let spi = unsafe { &*crate::peripherals::[<SPI $num>]::PTR };
-                spi.inlink_dscr_bf0().read().dma_inlink_dscr_bf0().bits() as usize
-            }
-
             fn is_listening_in_eof() -> bool {
                 let spi = unsafe { &*crate::peripherals::[<SPI $num>]::PTR };
                 spi.dma_int_ena().read().in_suc_eof().bit_is_set()
@@ -633,11 +628,6 @@ macro_rules! ImplI2sChannel {
                 reg_block.int_raw().read().in_done().bit()
             }
 
-            fn last_in_dscr_address() -> usize {
-                let reg_block = unsafe { &*crate::peripherals::[<$peripheral>]::PTR };
-                reg_block.inlink_dscr_bf0().read().inlink_dscr_bf0().bits() as usize
-            }
-
             fn is_listening_in_eof() -> bool {
                 let reg_block = unsafe { &*crate::peripherals::[<$peripheral>]::PTR };
                 reg_block.int_ena().read().in_suc_eof().bit()
diff --git a/hil-test/tests/i2s.rs b/hil-test/tests/i2s.rs
index 08261fa64..c545045fb 100644
--- a/hil-test/tests/i2s.rs
+++ b/hil-test/tests/i2s.rs
@@ -16,6 +16,7 @@ use defmt_rtt as _;
 use esp_backtrace as _;
 use esp_hal::{
     clock::ClockControl,
+    delay::Delay,
     dma::{Dma, DmaPriority},
     dma_buffers,
     gpio::Io,
@@ -26,9 +27,14 @@ use esp_hal::{
     system::SystemControl,
 };
 
+// choose values so the data pattern DOESN'T restart at every descriptor buffer's start
+const ADD: u8 = 5;
+const CUT_OFF: u8 = 113;
+
 #[cfg(test)]
 #[embedded_test::tests]
 mod tests {
+
     use super::*;
 
     #[init]
@@ -42,17 +48,19 @@ mod tests {
 
         let mut io = Io::new(peripherals.GPIO, peripherals.IO_MUX);
 
+        let delay = Delay::new(&clocks);
+
         let dma = Dma::new(peripherals.DMA);
         let dma_channel = dma.channel0;
 
         let (tx_buffer, mut tx_descriptors, mut rx_buffer, mut rx_descriptors) =
-            dma_buffers!(32000, 32000);
+            dma_buffers!(16000, 16000);
 
         let i2s = I2s::new(
             peripherals.I2S0,
             Standard::Philips,
             DataFormat::Data16Channel16,
-            441000.Hz(),
+            16000.Hz(),
             dma_channel.configure(
                 false,
                 &mut tx_descriptors,
@@ -96,39 +104,72 @@ mod tests {
         let mut i = 0;
         for b in tx_buffer.iter_mut() {
             *b = i;
-            i = i.wrapping_add(1);
+            i = (i + ADD) % CUT_OFF;
         }
 
-        let mut rcv = [0u8; 5000];
-        let mut filler = [0x1u8; 10000];
+        let mut rcv = [0u8; 11000];
+        let mut filler = [0x1u8; 12000];
 
         let mut rx_transfer = i2s_rx.read_dma_circular(&mut rx_buffer).unwrap();
+        // trying to pop data before calling `available` should just do nothing
+        assert_eq!(0, rx_transfer.pop(&mut rcv[..100]).unwrap());
+
+        // no data available yet
+        assert_eq!(0, rx_transfer.available());
+
         let mut tx_transfer = i2s_tx.write_dma_circular(&tx_buffer).unwrap();
 
         'outer: loop {
             let tx_avail = tx_transfer.available();
-            if tx_avail > 0 {
+
+            // make sure there is more than one descriptor buffer's worth of data ready to push
+            if tx_avail > 5000 {
                 for b in &mut filler[0..tx_avail].iter_mut() {
                     *b = i;
-                    i = i.wrapping_add(1);
+                    i = (i + ADD) % CUT_OFF;
                 }
                 tx_transfer.push(&filler[0..tx_avail]).unwrap();
             }
 
+            // test that calling `available` multiple times doesn't break anything
+            rx_transfer.available();
+            rx_transfer.available();
+            rx_transfer.available();
+            rx_transfer.available();
+            rx_transfer.available();
+            rx_transfer.available();
             let rx_avail = rx_transfer.available();
+
+            // make sure there is more than one descriptor buffer's worth of data ready to pop
             if rx_avail > 0 {
-                rx_transfer.pop(&mut rcv[..rx_avail]).unwrap();
-                for &b in &rcv[..rx_avail] {
+                // trying to pop into a buffer smaller than the available data is an error
+                assert_eq!(
+                    Err(esp_hal::dma::DmaError::BufferTooSmall),
+                    rx_transfer.pop(&mut rcv[..rx_avail / 2])
+                );
+
+                rcv.fill(0xff);
+                let len = rx_transfer.pop(&mut rcv).unwrap();
+                assert!(len > 0);
+
+                for &b in &rcv[..len] {
                     if b != check_i {
                         failed = true;
                         break 'outer;
                     }
-                    check_i = check_i.wrapping_add(1);
+                    check_i = (check_i + ADD) % CUT_OFF;
                 }
+                iteration += 1;
+
+                if iteration == 1 {
+                    // delay to make it likely `available` will need to handle more than one
+                    // descriptor next time
+                    delay.delay_millis(160);
+                }
             }
 
-            if iteration > 100 {
+            if iteration > 30 {
                 break;
            }
        }
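A minimal usage sketch (not part of the patch) of how the reworked circular RX API is meant to be driven, following the doc comments added above: call `available()` first, then hand `pop()` a buffer large enough for everything that is available, since `pop` now returns `DmaError::BufferTooSmall` when the destination cannot hold all available data. The names `i2s_rx`, `rx_buffer` and `process` are placeholders assumed for the example.

```rust
// Sketch only: `i2s_rx`, `rx_buffer` and `process` are placeholders; error
// handling is reduced to `unwrap` for brevity.
let mut rx_transfer = i2s_rx.read_dma_circular(&mut rx_buffer).unwrap();

// Scratch buffer sized so it can hold everything `available` is expected to
// report between polls (similar to the test's `rcv` buffer).
let mut scratch = [0u8; 11000];

loop {
    // How many bytes the DMA has handed back to the CPU so far.
    let avail = rx_transfer.available();
    if avail == 0 {
        continue;
    }

    // Passing a buffer smaller than `avail` would return
    // `DmaError::BufferTooSmall`, so pass the whole scratch buffer and use
    // the returned length instead.
    let len = rx_transfer.pop(&mut scratch).unwrap();
    process(&scratch[..len]);
}
```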