Skip to content
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
166 changes: 74 additions & 92 deletions bsp/stm32/libraries/HAL_Drivers/drivers/drv_spi.c
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,8 @@
* 2019-01-03 zylx modify DMA initialization and spixfer function
* 2020-01-15 whj4674672 Porting for stm32h7xx
* 2020-06-18 thread-liu Porting for stm32mp1xx
* 2020-10-14 PeakRacing Porting for stm32wbxx
* 2020-10-14 Dozingfiretruck Porting for stm32wbxx
* 2025-09-22 wdfk_prog Refactor spixfer to fix DMA reception bug, correct timeout calculation.
*/

#include <rtthread.h>
Expand Down Expand Up @@ -286,7 +287,7 @@ static rt_err_t stm32_spi_init(struct stm32_spi *spi_drv, struct rt_spi_configur

static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *message)
{
#define DMA_TRANS_MIN_LEN 10 /* only buffer length >= DMA_TRANS_MIN_LEN will use DMA mode */
#define DMA_TRANS_MIN_LEN 10 /* only buffer length >= DMA_TRANS_MIN_LEN will use DMA mode */

HAL_StatusTypeDef state = HAL_OK;
rt_size_t message_length, already_send_length;
Expand All @@ -300,6 +301,7 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m

struct stm32_spi *spi_drv = rt_container_of(device->bus, struct stm32_spi, spi_bus);
SPI_HandleTypeDef *spi_handle = &spi_drv->handle;

rt_uint64_t total_byte_ms = (rt_uint64_t)message->length * 1000;
rt_uint32_t speed_bytes_per_sec = spi_drv->cfg->usage_freq / 8;
if (speed_bytes_per_sec == 0)
Expand All @@ -323,7 +325,7 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m
}

LOG_D("%s transfer prepare and start", spi_drv->config->bus_name);
LOG_D("%s sendbuf: %X, recvbuf: %X, length: %d",
LOG_D("%s sendbuf: 0x%08x, recvbuf: 0x%08x, length: %d",
spi_drv->config->bus_name,
(uint32_t)message->send_buf,
(uint32_t)message->recv_buf, message->length);
Expand Down Expand Up @@ -357,96 +359,73 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m
recv_buf = (rt_uint8_t *)message->recv_buf + already_send_length;
}

rt_uint32_t* dma_aligned_buffer = RT_NULL;
rt_uint32_t* p_txrx_buffer = RT_NULL;
const rt_uint8_t *dma_send_buf = send_buf;
rt_uint8_t *dma_recv_buf = recv_buf;

rt_uint8_t *aligned_send_buf = RT_NULL;
rt_uint8_t *aligned_recv_buf = RT_NULL;

const rt_bool_t dma_eligible = (send_length >= DMA_TRANS_MIN_LEN);
const rt_bool_t use_tx_dma = dma_eligible && (spi_drv->spi_dma_flag & SPI_USING_TX_DMA_FLAG);
const rt_bool_t use_rx_dma = dma_eligible && (spi_drv->spi_dma_flag & SPI_USING_RX_DMA_FLAG);

if ((spi_drv->spi_dma_flag & SPI_USING_TX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
if (dma_eligible)
{
#if defined(SOC_SERIES_STM32H7) || defined(SOC_SERIES_STM32F7)
if (RT_IS_ALIGN((rt_uint32_t)send_buf, 32) && send_buf != RT_NULL) /* aligned with 32 bytes? */
{
p_txrx_buffer = (rt_uint32_t *)send_buf; /* send_buf aligns with 32 bytes, no more operations */
}
else
{
/* send_buf doesn't align with 32 bytes, so create a cache buffer with 32 bytes aligned */
dma_aligned_buffer = (rt_uint32_t *)rt_malloc_align(send_length, 32);
rt_memcpy(dma_aligned_buffer, send_buf, send_length);
p_txrx_buffer = dma_aligned_buffer;
}
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_aligned_buffer, send_length);
const rt_uint8_t align_size = 32;
#else
if (RT_IS_ALIGN((rt_uint32_t)send_buf, 4) && send_buf != RT_NULL) /* aligned with 4 bytes? */
const rt_uint8_t align_size = 4;
#endif
/* Prepare DMA buffers only if the corresponding DMA will be used */
if (send_buf && use_tx_dma && !RT_IS_ALIGN((rt_uint32_t)send_buf, align_size))
{
p_txrx_buffer = (rt_uint32_t *)send_buf; /* send_buf aligns with 4 bytes, no more operations */
aligned_send_buf = rt_malloc_align(send_length, align_size);
if (aligned_send_buf == RT_NULL)
{
state = HAL_ERROR;
LOG_E("malloc aligned_send_buf failed!");
goto transfer_cleanup;
}
rt_memcpy(aligned_send_buf, send_buf, send_length);
dma_send_buf = aligned_send_buf;
}
else

if (recv_buf && use_rx_dma && !RT_IS_ALIGN((rt_uint32_t)recv_buf, align_size))
{
/* send_buf doesn't align with 4 bytes, so create a cache buffer with 4 bytes aligned */
dma_aligned_buffer = (rt_uint32_t *)rt_malloc(send_length); /* aligned with RT_ALIGN_SIZE (8 bytes by default) */
rt_memcpy(dma_aligned_buffer, send_buf, send_length);
p_txrx_buffer = dma_aligned_buffer;
aligned_recv_buf = rt_malloc_align(send_length, align_size);
if (aligned_recv_buf == RT_NULL)
{
state = HAL_ERROR;
LOG_E("malloc aligned_recv_buf failed!");
goto transfer_cleanup;
}
dma_recv_buf = aligned_recv_buf;
}
#endif /* SOC_SERIES_STM32H7 || SOC_SERIES_STM32F7 */
}
else if ((spi_drv->spi_dma_flag & SPI_USING_RX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
{

#if defined(SOC_SERIES_STM32H7) || defined(SOC_SERIES_STM32F7)
if (RT_IS_ALIGN((rt_uint32_t)recv_buf, 32) && recv_buf != RT_NULL) /* aligned with 32 bytes? */
{
p_txrx_buffer = (rt_uint32_t *)recv_buf; /* recv_buf aligns with 32 bytes, no more operations */
}
else
{
/* recv_buf doesn't align with 32 bytes, so create a cache buffer with 32 bytes aligned */
dma_aligned_buffer = (rt_uint32_t *)rt_malloc_align(send_length, 32);
rt_memcpy(dma_aligned_buffer, recv_buf, send_length);
p_txrx_buffer = dma_aligned_buffer;
}
rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_aligned_buffer, send_length);
#else
if (RT_IS_ALIGN((rt_uint32_t)recv_buf, 4) && recv_buf != RT_NULL) /* aligned with 4 bytes? */
{
p_txrx_buffer = (rt_uint32_t *)recv_buf; /* recv_buf aligns with 4 bytes, no more operations */
}
else
{
/* recv_buf doesn't align with 4 bytes, so create a cache buffer with 4 bytes aligned */
dma_aligned_buffer = (rt_uint32_t *)rt_malloc(send_length); /* aligned with RT_ALIGN_SIZE (8 bytes by default) */
rt_memcpy(dma_aligned_buffer, recv_buf, send_length);
p_txrx_buffer = dma_aligned_buffer;
}
#endif /* SOC_SERIES_STM32H7 || SOC_SERIES_STM32F7 */
// D-Cache maintenance for buffers that will be used by DMA
if (dma_send_buf) rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, (void *)dma_send_buf, send_length);
if (dma_recv_buf) rt_hw_cpu_dcache_ops(RT_HW_CACHE_FLUSH, dma_recv_buf, send_length);
#endif
}

/* start once data exchange in DMA mode */
/* Start data exchange in full-duplex DMA mode. */
if (message->send_buf && message->recv_buf)
{
if ((spi_drv->spi_dma_flag & SPI_USING_TX_DMA_FLAG) && (spi_drv->spi_dma_flag & SPI_USING_RX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
{
state = HAL_SPI_TransmitReceive_DMA(spi_handle, (uint8_t *)p_txrx_buffer, (uint8_t *)p_txrx_buffer, send_length);
}
else if ((spi_drv->spi_dma_flag & SPI_USING_TX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
{
/* same as Tx ONLY. It will not receive SPI data any more. */
state = HAL_SPI_Transmit_DMA(spi_handle, (uint8_t *)p_txrx_buffer, send_length);
}
else if ((spi_drv->spi_dma_flag & SPI_USING_RX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
if (use_tx_dma && use_rx_dma)
{
state = HAL_ERROR;
LOG_E("Both BSP_SPIx_TX_USING_DMA and BSP_SPIx_RX_USING_DMA flags should be enabled if you want to use SPI DMA Rx alone.");
break;
state = HAL_SPI_TransmitReceive_DMA(spi_handle, (uint8_t *)dma_send_buf, dma_recv_buf, send_length);
}
else
{
state = HAL_SPI_TransmitReceive(spi_handle, (uint8_t *)send_buf, (uint8_t *)recv_buf, send_length, timeout_ms);
state = HAL_SPI_TransmitReceive(spi_handle, (uint8_t *)send_buf, recv_buf, send_length, timeout_ms);
}
}
else if (message->send_buf)
{
if ((spi_drv->spi_dma_flag & SPI_USING_TX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
if (use_tx_dma)
{
state = HAL_SPI_Transmit_DMA(spi_handle, (uint8_t *)p_txrx_buffer, send_length);
state = HAL_SPI_Transmit_DMA(spi_handle, (uint8_t *)dma_send_buf, send_length);
}
else
{
Expand All @@ -461,16 +440,16 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m
}
else if(message->recv_buf)
{
rt_memset((uint8_t *)recv_buf, 0xff, send_length);
if ((spi_drv->spi_dma_flag & SPI_USING_RX_DMA_FLAG) && (send_length >= DMA_TRANS_MIN_LEN))
rt_memset(dma_recv_buf, 0xFF, send_length);
if (use_rx_dma)
{
state = HAL_SPI_Receive_DMA(spi_handle, (uint8_t *)p_txrx_buffer, send_length);
state = HAL_SPI_Receive_DMA(spi_handle, dma_recv_buf, send_length);
}
else
{
/* clear the old error flag */
__HAL_SPI_CLEAR_OVRFLAG(spi_handle);
state = HAL_SPI_Receive(spi_handle, (uint8_t *)recv_buf, send_length, timeout_ms);
state = HAL_SPI_Receive(spi_handle, recv_buf, send_length, timeout_ms);
}
}
else
Expand All @@ -484,24 +463,22 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m
LOG_E("SPI transfer error: %d", state);
message->length = 0;
spi_handle->State = HAL_SPI_STATE_READY;
break;
goto transfer_cleanup;
}
else
{
LOG_D("%s transfer done", spi_drv->config->bus_name);
}

/* For simplicity reasons, this example is just waiting till the end of the
transfer, but application may perform other tasks while transfer operation
is ongoing. */
if ((spi_drv->spi_dma_flag & (SPI_USING_TX_DMA_FLAG | SPI_USING_RX_DMA_FLAG)) && (send_length >= DMA_TRANS_MIN_LEN))
if (use_tx_dma || use_rx_dma)
{
/* blocking the thread,and the other tasks can run */
if (rt_completion_wait(&spi_drv->cpt, timeout_tick) != RT_EOK)
{
state = HAL_ERROR;
LOG_E("wait for DMA interrupt overtime!");
break;
HAL_SPI_DMAStop(spi_handle);
goto transfer_cleanup;
}
}
else
Expand All @@ -522,20 +499,26 @@ static rt_ssize_t spixfer(struct rt_spi_device *device, struct rt_spi_message *m
}
}

if(dma_aligned_buffer != RT_NULL) /* re-aligned, so need to copy the data to recv_buf */
transfer_cleanup:
/* Post-transfer processing */
if (state == HAL_OK)
{
if(recv_buf != RT_NULL)
if (aligned_recv_buf != RT_NULL)
{
#if defined(SOC_SERIES_STM32H7) || defined(SOC_SERIES_STM32F7)
rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, p_txrx_buffer, send_length);
#endif /* SOC_SERIES_STM32H7 || SOC_SERIES_STM32F7 */
rt_memcpy(recv_buf, p_txrx_buffer, send_length);
rt_hw_cpu_dcache_ops(RT_HW_CACHE_INVALIDATE, aligned_recv_buf, send_length);
#endif
rt_memcpy(recv_buf, aligned_recv_buf, send_length);
}
#if defined(SOC_SERIES_STM32H7) || defined(SOC_SERIES_STM32F7)
rt_free_align(dma_aligned_buffer);
#else
rt_free(dma_aligned_buffer);
#endif /* SOC_SERIES_STM32H7 || SOC_SERIES_STM32F7 */
}

// Free any temporary buffers that were allocated
if (aligned_send_buf) rt_free_align(aligned_send_buf);
if (aligned_recv_buf) rt_free_align(aligned_recv_buf);

if (state != HAL_OK)
{
break;
}
}

Expand All @@ -562,7 +545,6 @@ static rt_err_t spi_configure(struct rt_spi_device *device,

struct stm32_spi *spi_drv = rt_container_of(device->bus, struct stm32_spi, spi_bus);
spi_drv->cfg = configuration;
rt_kprintf("@spi_configure\n");

return stm32_spi_init(spi_drv, configuration);
}
Expand Down
Loading