mirror of https://github.com/esp-rs/esp-hal.git
synced 2025-10-02 06:40:47 +00:00

Fix Uart::flush_async (#3186)

* Fix Uart::flush_async
* Apply single-byte workaround

parent 1050ce5bb4
commit 502d41f313
@@ -14,6 +14,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

### Fixed

- Full-duplex SPI works when mixed with half-duplex SPI (#3176)
- `Uart::flush_async` should no longer return prematurely (#3186)

### Removed
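For downstream users, the practical effect of this fix is that awaiting `flush_async` now reliably covers the last byte. A minimal sketch of the intended usage, assuming a `UartTx` in async mode; the helper name and message are illustrative and not part of this change:

use esp_hal::uart::{TxError, UartTx};
use esp_hal::Async;

// Illustrative helper: send a message and only return once every byte has
// actually left the TX line, not merely been queued in the FIFO.
async fn send_and_drain(tx: &mut UartTx<'_, Async>, msg: &[u8]) -> Result<(), TxError> {
    // Note: `write_async` may accept fewer bytes than requested; a real caller
    // would loop on the returned count. Kept single-shot here for brevity.
    tx.write_async(msg).await?;
    // After #3186 this no longer resolves early on a stale TX_DONE flag.
    tx.flush_async().await
}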
@@ -632,12 +632,18 @@ where

    #[instability::unstable]
    pub fn flush(&mut self) -> Result<(), TxError> {
        while self.tx_fifo_count() > 0 {}
        self.flush_last_byte();

        Ok(())
    }

    fn flush_last_byte(&mut self) {
        // This function handles an edge case that happens when the TX FIFO count
        // changes to 0. The FSM is in the Idle state for a short while after
        // the last byte is moved out of the FIFO. It is unclear how long this
        // takes, but 10us seems to be a good enough duration to wait, for both
        // fast and slow baud rates.
        crate::rom::ets_delay_us(10);
        while !self.is_tx_idle() {}
    }

    /// Checks if the TX line is idle for this UART instance.
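For context, a rough sketch of how the blocking `flush` is typically used around the workaround above; the helper and the `write` call are assumptions for illustration and are not part of this diff:

use esp_hal::uart::{TxError, UartTx};
use esp_hal::Blocking;

// Hypothetical helper: emit a final message and make sure it is fully on the
// wire before the caller powers the peripheral down or remaps the TX pin.
fn drain_before_shutdown(tx: &mut UartTx<'_, Blocking>) -> Result<(), TxError> {
    // `write` (queueing bytes into the TX FIFO) is assumed here.
    tx.write(b"shutting down\r\n")?;
    // Busy-waits until the FIFO is empty, then waits out the short window in
    // which the FSM still reads Idle even though the last byte is only just
    // being shifted out (the `flush_last_byte` workaround above).
    tx.flush()
}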
@@ -1935,10 +1941,15 @@ impl UartTx<'_, Async> {

    ///
    /// This function is cancellation safe.
    pub async fn flush_async(&mut self) -> Result<(), TxError> {
        // Nothing is guaranteed to clear the Done status, so let's loop here in case Tx
        // was Done before the last write operation that pushed data into the
        // FIFO.
        while self.tx_fifo_count() > 0 {
            UartTxFuture::new(self.uart.reborrow(), TxEvent::Done).await;
        }

        self.flush_last_byte();

        Ok(())
    }
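Since the doc comment promises cancellation safety, a caller can race `flush_async` against a deadline and simply retry later. A minimal sketch, assuming the `embassy-time` crate's `with_timeout` is available; this helper is not part of this change:

use embassy_time::{with_timeout, Duration};
use esp_hal::uart::{TxError, UartTx};
use esp_hal::Async;

// If the deadline is hit, the flush future is dropped; because `flush_async`
// is cancellation safe, the driver stays consistent and the flush can simply
// be retried on the next call.
async fn flush_with_deadline(tx: &mut UartTx<'_, Async>) -> Result<bool, TxError> {
    match with_timeout(Duration::from_millis(5), tx.flush_async()).await {
        Ok(result) => result.map(|()| true), // flushed within the deadline
        Err(_timeout) => Ok(false),          // timed out; safe to retry later
    }
}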
@@ -75,6 +75,43 @@ mod tests {
        }
    }

    #[test]
    async fn flush_async_does_not_return_prematurely(mut ctx: Context) {
        // Force TX_DONE to be set
        ctx.tx.write_async(&[0u8; 10]).await.unwrap();

        let mut read = [0u8; 10];
        ctx.rx.read_exact_async(&mut read).await.unwrap();

        // The flush should not return until the data is actually sent, regardless of
        // previous TX_DONE status.
        for _ in 0..10 {
            ctx.tx.write_async(&[1u8; 10]).await.unwrap();
            ctx.tx.flush_async().await.unwrap();

            let read_count = ctx.rx.read_buffered(&mut read).unwrap();

            assert_eq!(read_count, 10);
            assert_eq!(&read, &[1u8; 10]);
        }
    }

    #[test]
    async fn flush_async_does_not_return_prematurely_even_for_short_writes(mut ctx: Context) {
        let mut read = [0u8; 2];
        // The flush should not return until the data is actually sent, regardless of
        // previous TX_DONE status.
        for i in 0..10 {
            ctx.tx.write_async(&[i as u8]).await.unwrap();
            ctx.tx.flush_async().await.unwrap();

            let read_count = ctx.rx.read_buffered(&mut read).unwrap();

            assert_eq!(read_count, 1);
            assert_eq!(read[0], i as u8);
        }
    }

    #[test]
    async fn rx_overflow_is_detected(mut ctx: Context) {
        let mut to_send: &[u8] = &[0; 250];