libxr  1.0
Want to be the best embedded framework
Loading...
Searching...
No Matches
esp_uart_dma.cpp
1#include "esp_uart.hpp"
2
3#if SOC_GDMA_SUPPORTED && SOC_UHCI_SUPPORTED
4
5#include <algorithm>
6#include <array>
7
8#include "esp_attr.h"
9#include "esp_err.h"
10#include "esp_heap_caps.h"
11#include "esp_memory_utils.h"
12#include "esp_private/periph_ctrl.h"
13#include "hal/uhci_ll.h"
14#include "soc/ext_mem_defs.h"
15
16namespace
17{
// RX uses a circular DMA descriptor ring, similar to STM/CH circular RX DMA
// behavior (continuous receive + software consumer index).
constexpr uint32_t kDmaRxNodeCount = 8;
// One AHB GDMA link item carries a 12-bit byte count, so a single descriptor
// can move at most 4095 bytes (see the dw0.size/dw0.length fields below).
constexpr size_t kDmaMaxBufferSizePerLinkItem = 4095U;

// In-memory view of one GDMA link-list descriptor, patched in place by
// StartDmaTx(). Assumed to mirror the AHB GDMA descriptor layout exactly —
// field widths and order must not change.
struct GdmaLinkItem
{
  struct
  {
    uint32_t size : 12;    // capacity of `buffer`, in bytes
    uint32_t length : 12;  // number of valid payload bytes in `buffer`
    uint32_t reserved24 : 4;
    uint32_t err_eof : 1;  // set by hardware: transfer ended with an error
    uint32_t reserved29 : 1;
    uint32_t suc_eof : 1;  // marks the last descriptor of a frame (EOF)
    uint32_t owner : 1;    // who may touch this descriptor (see constants below)
  } dw0;
  void* buffer;        // data buffer this descriptor points at
  GdmaLinkItem* next;  // next descriptor in the chain; nullptr terminates
};

// Values for GdmaLinkItem::dw0.owner.
constexpr uint32_t kGdmaOwnerCpu = 0U;
constexpr uint32_t kGdmaOwnerDma = 1U;
41
// Round `bytes` up to the next multiple of `granule`.
// Granules of 0 or 1 need no rounding, so the input is returned unchanged.
size_t AlignUp(size_t bytes, size_t granule)
{
  if (granule <= 1)
  {
    return bytes;
  }
  const size_t whole_granules = (bytes + granule - 1) / granule;
  return whole_granules * granule;
}
50
// Translate a cacheable address into its non-cacheable alias so descriptor
// writes bypass the data cache. On targets without a dedicated non-cacheable
// window the address is returned unchanged.
uintptr_t CacheAddrToNonCache(uintptr_t addr)
{
#if SOC_NON_CACHEABLE_OFFSET
  return addr + SOC_NON_CACHEABLE_OFFSET;
#else
  return addr;
#endif
}

// View the descriptor at `head_addr` (as returned by gdma_link_get_head_addr)
// through the non-cacheable alias, so in-place patches reach memory directly.
GdmaLinkItem* LinkItemFromHeadAddr(uintptr_t head_addr)
{
  return reinterpret_cast<GdmaLinkItem*>(CacheAddrToNonCache(head_addr));
}
64
#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE || SOC_PSRAM_DMA_CAPABLE
// Declared locally instead of pulling in the esp_cache.h header; the
// signature must stay in sync with IDF's esp_cache_msync().
extern "C" esp_err_t esp_cache_msync(void* addr, size_t size, int flags);

// Local mirrors of the ESP_CACHE_MSYNC_FLAG_* bit values.
// NOTE(review): these literals must track esp_cache.h — verify on IDF bumps.
constexpr int kCacheSyncFlagUnaligned = (1 << 1);
constexpr int kCacheSyncFlagDirC2M = (1 << 2);  // cache -> memory (writeback)
constexpr int kCacheSyncFlagDirM2C = (1 << 3);  // memory -> cache (invalidate)

// Synchronize a DMA buffer with the data cache. `cache_to_mem` selects
// writeback (true, before TX) vs invalidate (false, after RX).
// Returns true on success or when no sync is required for this region.
bool CacheSyncDmaBuffer(const void* addr, size_t size, bool cache_to_mem)
{
  // Empty or null ranges need no maintenance.
  if ((addr == nullptr) || (size == 0U))
  {
    return true;
  }

#if SOC_PSRAM_DMA_CAPABLE && !SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE
  // On these targets only external (PSRAM) buffers go through the cache.
  if (!esp_ptr_external_ram(addr))
  {
    return true;
  }
#endif

  int flags = cache_to_mem ? kCacheSyncFlagDirC2M : kCacheSyncFlagDirM2C;
  flags |= kCacheSyncFlagUnaligned;

  const esp_err_t ret = esp_cache_msync(const_cast<void*>(addr), size, flags);
  // Non-cacheable regions can return ESP_ERR_INVALID_ARG; treat as no-op success.
  return (ret == ESP_OK) || (ret == ESP_ERR_INVALID_ARG);
}
#endif
94} // namespace
95
96namespace LibXR
97{
98
99bool IRAM_ATTR ESP32UART::DmaTxEofCallback(gdma_channel_handle_t, gdma_event_data_t*,
100 void* user_data)
101{
102 auto* uart = static_cast<ESP32UART*>(user_data);
103 if (uart != nullptr)
104 {
105 uart->OnTxTransferDone(true, ErrorCode::OK);
106 }
107 return false;
108}
109
110bool IRAM_ATTR ESP32UART::DmaTxDescrErrCallback(gdma_channel_handle_t, gdma_event_data_t*,
111 void* user_data)
112{
113 auto* uart = static_cast<ESP32UART*>(user_data);
114 if (uart != nullptr)
115 {
116 uart->HandleDmaTxError();
117 }
118 return false;
119}
120
121bool IRAM_ATTR ESP32UART::DmaRxDoneCallback(gdma_channel_handle_t,
122 gdma_event_data_t* event_data,
123 void* user_data)
124{
125 auto* uart = static_cast<ESP32UART*>(user_data);
126 if (uart != nullptr)
127 {
128 uart->HandleDmaRxDone(event_data);
129 }
130 return false;
131}
132
133bool IRAM_ATTR ESP32UART::DmaRxDescrErrCallback(gdma_channel_handle_t, gdma_event_data_t*,
134 void* user_data)
135{
136 auto* uart = static_cast<ESP32UART*>(user_data);
137 if (uart != nullptr)
138 {
139 uart->HandleDmaRxError();
140 }
141 return false;
142}
143
/// Bring up the UHCI + GDMA backend for this UART: route the UART through
/// UHCI0, allocate and configure one TX and one RX AHB GDMA channel,
/// pre-mount the TX descriptor lists and the circular RX ring, then start
/// continuous RX. Idempotent: returns OK immediately once enabled.
///
/// @return ErrorCode::OK on success, ErrorCode::NO_MEM if the RX ring storage
///         cannot be allocated, ErrorCode::INIT_ERR on any driver failure.
///
/// NOTE(review): on INIT_ERR paths, resources acquired earlier (GDMA
/// channels, link lists, RX storage) are not released, so a retry would leak
/// them. Acceptable only if init failure is treated as fatal — confirm.
ErrorCode ESP32UART::InitDmaBackend()
{
  if (dma_backend_enabled_)
  {
    return ErrorCode::OK;
  }

  // Power up and reset the UHCI block, then bind it to this UART port.
  periph_module_enable(PERIPH_UHCI0_MODULE);
  periph_module_reset(PERIPH_UHCI0_MODULE);

  uhci_hal_init(&uhci_hal_, 0);
  uhci_ll_attach_uart_port(uhci_hal_.dev, uart_num_);

  // No separator-character framing; RX frames end on UART line idle instead.
  uhci_seper_chr_t sep_chr = {};
  sep_chr.sub_chr_en = 0;
  uhci_ll_set_seper_chr(uhci_hal_.dev, &sep_chr);
  uhci_ll_rx_set_eof_mode(uhci_hal_.dev, UHCI_RX_IDLE_EOF);

  // --- TX channel: allocate, connect to UHCI, configure transfers. ---
  gdma_channel_alloc_config_t tx_cfg = {
      .sibling_chan = nullptr,
      .direction = GDMA_CHANNEL_DIRECTION_TX,
      .flags = {},
  };
  if (gdma_new_ahb_channel(&tx_cfg, &tx_dma_channel_) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  if (gdma_connect(tx_dma_channel_, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UHCI, 0)) !=
      ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // Shared with the RX channel below; ext-mem access enabled so PSRAM
  // buffers are legal where the target supports them.
  gdma_transfer_config_t transfer_cfg = {
      .max_data_burst_size = 0,
      .access_ext_mem = true,
  };
  if (gdma_config_transfer(tx_dma_channel_, &transfer_cfg) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // Record the strictest buffer alignment the TX channel demands.
  size_t tx_int_alignment = 1;
  size_t tx_ext_alignment = 1;
  if (gdma_get_alignment_constraints(tx_dma_channel_, &tx_int_alignment,
                                     &tx_ext_alignment) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }
  tx_dma_alignment_ = std::max<size_t>(1, std::max(tx_int_alignment, tx_ext_alignment));

  gdma_strategy_config_t tx_strategy = {
      .owner_check = true,
      .auto_update_desc = true,
      .eof_till_data_popped = true,
  };
  if (gdma_apply_strategy(tx_dma_channel_, &tx_strategy) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // One single-item link list per TX buffer (active + pending); StartDmaTx()
  // later patches the item's length in place instead of re-mounting.
  gdma_link_list_config_t tx_link_cfg = {
      .num_items = 1,
      .item_alignment = 4,
      .flags = {},
  };
  tx_dma_buffer_addr_[0] = tx_active_buffer_;
  tx_dma_buffer_addr_[1] = tx_pending_buffer_;

  for (int i = 0; i < 2; ++i)
  {
    if (gdma_new_link_list(&tx_link_cfg, &tx_dma_links_[i]) != ESP_OK)
    {
      return ErrorCode::INIT_ERR;
    }

    // Placeholder length of 1; the real length is patched per transfer.
    gdma_buffer_mount_config_t tx_mount = {
        .buffer = tx_dma_buffer_addr_[i],
        .buffer_alignment = tx_dma_alignment_,
        .length = 1,
        .flags =
            {
                .mark_eof = 1,
                .mark_final = 1,
                .bypass_buffer_align_check = 0,
            },
    };

    if (gdma_link_mount_buffers(tx_dma_links_[i], 0, &tx_mount, 1, nullptr) != ESP_OK)
    {
      return ErrorCode::INIT_ERR;
    }

    // Cache the head descriptor address so StartDmaTx() can patch it directly.
    tx_dma_head_addr_[i] = gdma_link_get_head_addr(tx_dma_links_[i]);
    if (tx_dma_head_addr_[i] == 0U)
    {
      return ErrorCode::INIT_ERR;
    }
  }

  gdma_tx_event_callbacks_t tx_callbacks = {
      .on_trans_eof = DmaTxEofCallback,
      .on_descr_err = DmaTxDescrErrCallback,
  };
  if (gdma_register_tx_event_callbacks(tx_dma_channel_, &tx_callbacks, this) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // --- RX channel: allocate, connect, configure (same transfer config). ---
  gdma_channel_alloc_config_t rx_cfg = {
      .sibling_chan = nullptr,
      .direction = GDMA_CHANNEL_DIRECTION_RX,
      .flags = {},
  };
  if (gdma_new_ahb_channel(&rx_cfg, &rx_dma_channel_) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  if (gdma_connect(rx_dma_channel_, GDMA_MAKE_TRIGGER(GDMA_TRIG_PERIPH_UHCI, 0)) !=
      ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  if (gdma_config_transfer(rx_dma_channel_, &transfer_cfg) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  size_t rx_int_alignment = 1;
  size_t rx_ext_alignment = 1;
  if (gdma_get_alignment_constraints(rx_dma_channel_, &rx_int_alignment,
                                     &rx_ext_alignment) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }
  rx_dma_alignment_ = std::max<size_t>(1, std::max(rx_int_alignment, rx_ext_alignment));

  // Circular ring of kDmaRxNodeCount descriptors for continuous receive.
  gdma_link_list_config_t rx_link_cfg = {
      .num_items = kDmaRxNodeCount,
      .item_alignment = 4,
      .flags = {},
  };
  if (gdma_new_link_list(&rx_link_cfg, &rx_dma_link_) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // Keep one ring window reasonably large to lower ISR pressure at high baud.
  // Chunk size is clamped to [32, 512] and rounded up to a 4-byte multiple.
  const size_t rx_chunk_target =
      std::min<size_t>(std::max<size_t>(32, rx_isr_buffer_size_ / kDmaRxNodeCount), 512);
  rx_dma_chunk_size_ = std::max<size_t>(AlignUp(rx_chunk_target, 4), 32);
  rx_dma_node_count_ = kDmaRxNodeCount;
  const size_t rx_storage_alignment = std::max<size_t>(4, rx_dma_alignment_);
  const size_t rx_storage_bytes =
      AlignUp(rx_dma_chunk_size_ * rx_dma_node_count_, rx_storage_alignment);

  // Backing storage must be internal, DMA-capable and byte-addressable.
  rx_dma_storage_ = static_cast<uint8_t*>(
      heap_caps_aligned_alloc(rx_storage_alignment, rx_storage_bytes,
                              MALLOC_CAP_INTERNAL | MALLOC_CAP_DMA | MALLOC_CAP_8BIT));
  if (rx_dma_storage_ == nullptr)
  {
    return ErrorCode::NO_MEM;
  }

  // Mount one equal-sized slice of the storage per ring node; no EOF/final
  // marks so the ring keeps receiving indefinitely.
  std::array<gdma_buffer_mount_config_t, kDmaRxNodeCount> rx_mount = {};
  for (uint32_t i = 0; i < kDmaRxNodeCount; ++i)
  {
    rx_mount[i] = gdma_buffer_mount_config_t{
        .buffer = rx_dma_storage_ + (static_cast<size_t>(i) * rx_dma_chunk_size_),
        .buffer_alignment = rx_dma_alignment_,
        .length = rx_dma_chunk_size_,
        .flags =
            {
                .mark_eof = 0,
                .mark_final = 0,
                .bypass_buffer_align_check = 0,
            },
    };
  }

  if (gdma_link_mount_buffers(rx_dma_link_, 0, rx_mount.data(), kDmaRxNodeCount,
                              nullptr) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  gdma_rx_event_callbacks_t rx_callbacks = {
      .on_recv_eof = nullptr,
      .on_descr_err = DmaRxDescrErrCallback,
      .on_recv_done = DmaRxDoneCallback,
  };
  if (gdma_register_rx_event_callbacks(rx_dma_channel_, &rx_callbacks, this) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  if (gdma_reset(rx_dma_channel_) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  // Start continuous RX from the head of the ring.
  if (gdma_start(rx_dma_channel_, gdma_link_get_head_addr(rx_dma_link_)) != ESP_OK)
  {
    return ErrorCode::INIT_ERR;
  }

  rx_dma_node_index_ = 0;
  dma_backend_enabled_ = true;
  return ErrorCode::OK;
}
357
/// Kick off a DMA transmit of the currently active TX buffer.
/// Patches the pre-mounted single-item descriptor in place (through its
/// non-cacheable alias) instead of re-mounting, then hands the list to the
/// GDMA channel. ISR-safe.
///
/// @return true if the transfer was started; false on any precondition
///         failure (no channel, invalid/empty/oversized buffer, unknown
///         buffer address, cache sync failure, or gdma_start error).
bool IRAM_ATTR ESP32UART::StartDmaTx()
{
  if ((tx_dma_channel_ == nullptr) || !tx_active_valid_)
  {
    return false;
  }

  uint8_t* const active_buffer = tx_active_buffer_;
  const size_t active_len = tx_active_length_;
  // A single link item carries at most 4095 bytes (12-bit length field).
  if ((active_buffer == nullptr) || (active_len == 0) ||
      (active_len > kDmaMaxBufferSizePerLinkItem))
  {
    return false;
  }

  // Map the active buffer back to the descriptor mounted for it in init.
  int link_index = -1;
  if (active_buffer == tx_dma_buffer_addr_[0])
  {
    link_index = 0;
  }
  else if (active_buffer == tx_dma_buffer_addr_[1])
  {
    link_index = 1;
  }
  else
  {
    return false;
  }

  if ((tx_dma_links_[link_index] == nullptr) || (tx_dma_head_addr_[link_index] == 0U))
  {
    return false;
  }

  auto* desc = LinkItemFromHeadAddr(tx_dma_head_addr_[link_index]);
  if (desc == nullptr)
  {
    return false;
  }

  // Keep descriptor list pre-mounted and only patch the dynamic transfer length in-place.
  desc->buffer = active_buffer;
  desc->dw0.size = static_cast<uint32_t>(active_len);
  desc->dw0.length = static_cast<uint32_t>(active_len);
  desc->dw0.err_eof = 0U;
  desc->dw0.suc_eof = 1U;
  desc->dw0.owner = kGdmaOwnerDma;
  desc->next = nullptr;
  // Ensure all descriptor writes are visible before the DMA engine can
  // observe the owner bit. NOTE(review): relies on <atomic> being pulled in
  // via esp_uart.hpp — confirm the include chain.
  std::atomic_thread_fence(std::memory_order_release);

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE || SOC_PSRAM_DMA_CAPABLE
  // Write the payload back to memory so the DMA engine reads current bytes.
  if (!CacheSyncDmaBuffer(active_buffer, active_len, true))
  {
    return false;
  }
#endif

  return gdma_start(tx_dma_channel_, tx_dma_head_addr_[link_index]) == ESP_OK;
}
417
/// Drain `recv_size` bytes from the circular RX DMA ring into the RX
/// pipeline, one chunk (ring node) at a time, advancing the software
/// consumer index. The amount is clamped to one full ring window.
/// `in_isr` is forwarded to PushRxBytes for context-correct delivery.
void IRAM_ATTR ESP32UART::PushDmaRxData(size_t recv_size, bool in_isr)
{
  if ((rx_dma_storage_ == nullptr) || (rx_dma_chunk_size_ == 0) ||
      (rx_dma_node_count_ == 0))
  {
    return;
  }

  // Never consume more than the ring can hold in one sweep.
  const size_t max_window = rx_dma_chunk_size_ * rx_dma_node_count_;
  size_t remaining = std::min(recv_size, max_window);

  while (remaining > 0)
  {
    const size_t offset = static_cast<size_t>(rx_dma_node_index_) * rx_dma_chunk_size_;
    // The last chunk may be shorter than a full node (frame ended mid-node).
    const size_t chunk = std::min(remaining, rx_dma_chunk_size_);
    auto* chunk_ptr = rx_dma_storage_ + offset;

#if SOC_CACHE_INTERNAL_MEM_VIA_L1CACHE || SOC_PSRAM_DMA_CAPABLE
    // Invalidate the cache before reading DMA-written bytes through it.
    if (!CacheSyncDmaBuffer(chunk_ptr, chunk, false))
    {
      HandleDmaRxError();
      return;
    }
#endif
    PushRxBytes(chunk_ptr, chunk, in_isr);
    remaining -= chunk;
    // A partially-filled node still advances the consumer index by one;
    // presumably the DMA engine also moves to the next descriptor after an
    // EOF — verify against the GDMA driver behavior.
    rx_dma_node_index_ = (rx_dma_node_index_ + 1U) % rx_dma_node_count_;
  }
}
447
/// GDMA "receive done" handler: determine how many bytes the finished
/// descriptor(s) carry and push them into the RX pipeline. ISR context.
void IRAM_ATTR ESP32UART::HandleDmaRxDone(gdma_event_data_t* event_data)
{
  if ((rx_dma_storage_ == nullptr) || (rx_dma_chunk_size_ == 0) ||
      (rx_dma_node_count_ == 0))
  {
    return;
  }

  // An abnormal EOF means the frame is unusable: resynchronize the ring.
  if ((event_data != nullptr) && event_data->flags.abnormal_eof)
  {
    HandleDmaRxError();
    return;
  }

  // Default: one full node completed without an EOF (descriptor rollover).
  size_t recv_size = rx_dma_chunk_size_;
  if ((event_data != nullptr) && event_data->flags.normal_eof)
  {
    // Idle-EOF frame: count the actual bytes from the current consumer
    // index up to the EOF descriptor (may span several nodes or end
    // mid-node). Fall back to one full node if the count comes back 0.
    const size_t eof_size = gdma_link_count_buffer_size_till_eof(
        rx_dma_link_, static_cast<int>(rx_dma_node_index_));
    if (eof_size > 0)
    {
      recv_size = eof_size;
    }
  }

  PushDmaRxData(recv_size, true);
}
475
/// Recover the RX path after a descriptor error or abnormal EOF: stop and
/// reset the channel, rewind the software consumer index, and restart the
/// circular ring from its head descriptor. ISR context.
/// NOTE(review): bytes already latched in the ring at the moment of the
/// error are discarded, not delivered — confirm that is the intended policy.
void IRAM_ATTR ESP32UART::HandleDmaRxError()
{
  if ((rx_dma_channel_ == nullptr) || (rx_dma_link_ == nullptr))
  {
    return;
  }

  gdma_stop(rx_dma_channel_);
  gdma_reset(rx_dma_channel_);
  rx_dma_node_index_ = 0;
  // Best-effort restart; on failure RX stays stopped until re-init.
  (void)gdma_start(rx_dma_channel_, gdma_link_get_head_addr(rx_dma_link_));
}
488
489void IRAM_ATTR ESP32UART::HandleDmaTxError()
490{
491 if (tx_dma_channel_ != nullptr)
492 {
493 gdma_stop(tx_dma_channel_);
494 gdma_reset(tx_dma_channel_);
495 }
496 OnTxTransferDone(true, ErrorCode::FAILED);
497}
498
499} // namespace LibXR
500
501#endif
LibXR 命名空间 | LibXR namespace
Definition ch32_can.hpp:14
ErrorCode
定义错误码枚举 | Defines the error-code enumeration
Definition libxr_def.hpp:64
@ INIT_ERR
初始化错误 | Initialization error
@ NO_MEM
内存不足 | Insufficient memory
@ FAILED
操作失败 | Operation failed
@ OK
操作成功 | Operation successful