4 #include <debug-uart.h> 10 #include <app_util_platform.h> 12 #ifndef DBG_XMIT_BUFFER_LEN 13 #define DBG_XMIT_BUFFER_LEN (1024) 16 #ifndef DBG_SWO_XMIT_INTERVAL 17 #define DBG_SWO_XMIT_INTERVAL (UWORK_USEC(100)) 20 static void dbg_do_xmit(
struct uwork_t *work);
21 static void dbg_do_semi_xmit(
void);
/*
 * Ring-buffer state for the debug output channel.
 * NOTE(review): extraction artifact — stray original line numbers ("25",
 * "26", ...) are fused into the text below, and the enclosing
 * "struct dbg_state_t {" header is not visible in this chunk.
 */
/* Consumer position: next byte dbg_do_xmit() will push to the ITM port. */
25 uint8_t *
volatile xmit_buffer_head;
/* Producer position: next free slot written by dbg_send_bytes();
 * buffer is empty when head == tail. */
26 uint8_t *
volatile xmit_buffer_tail;
/* Separate consumer position used by dbg_do_semi_xmit() — presumably the
 * semihosting drain pointer; TODO confirm against the missing lines. */
27 uint8_t *
volatile xmit_buffer_phead;
/* Set by dbg_putchar() when a byte could not be queued (buffer full);
 * a "^" marker is emitted once space frees up. */
29 uint8_t write_overrun;
/* Backing storage for the circular transmit buffer. */
32 uint8_t xmit_buffer[DBG_XMIT_BUFFER_LEN];
/* Single static instance holding all debug-output state. */
35 static struct dbg_state_t dbg_state;
/* One past the last byte of the transmit ring buffer (wrap sentinel).
 * NOTE(review): the "42"/"43" statements fused onto and after this line
 * are a fragment of an init routine whose header (original lines ~38-41)
 * is missing from this chunk; they reset the ring to empty (head == tail). */
36 #define XMIT_BUFFER_END &dbg_state.xmit_buffer[DBG_XMIT_BUFFER_LEN] 42 dbg_state.xmit_buffer_head = dbg_state.xmit_buffer;
43 dbg_state.xmit_buffer_tail = dbg_state.xmit_buffer;
/* Enable ITM stimulus port 0 and mark the debug channel usable. */
48 ITM->TER |= (1UL << 0);
49 dbg_state.enabled = 1;
53 void dbg_force_enabled(
void)
55 ITM->TER |= (1UL << 0);
56 dbg_state.enabled = 1;
/*
 * uwork callback: drain queued debug bytes from the ring buffer to ITM
 * stimulus port 0 (SWO), then publish the consumed head position.
 * NOTE(review): original lines 60, 64, 67, 71-72, 75-77 and 80-81 are
 * missing from this chunk; the branch taken when the ITM FIFO reads 0
 * (busy) and the action when more data remains at the end (presumably a
 * reschedule after `interval`) cannot be confirmed from what is visible.
 */
59 static void dbg_do_xmit(
struct uwork_t *work)
/* Snapshot head/tail; tail may advance concurrently from dbg_send_bytes(). */
61 uint8_t *head = dbg_state.xmit_buffer_head;
62 uint8_t *tail = dbg_state.xmit_buffer_tail;
63 unsigned interval = DBG_SWO_XMIT_INTERVAL;
65 while (head != tail) {
/* ITM->PORT[0].u32 reads 0 while the port FIFO is busy — missing line 67
 * presumably bails out / reschedules in that case; TODO confirm. */
66 if (ITM->PORT[0].u32 == 0)
68 ITM->PORT[0].u8 = (uint8_t) *head;
/* Wrap the consumer pointer at the end of the circular buffer. */
69 if (++head == XMIT_BUFFER_END)
70 head = dbg_state.xmit_buffer;
/* Publish the new head atomically with respect to producers. */
73 CRITICAL_REGION_ENTER();
/* Data arrived while draining (missing lines 75-77 — presumably
 * re-schedule this work item to run again). */
74 if (head != dbg_state.xmit_buffer_tail) {
78 dbg_state.xmit_buffer_head = head;
79 CRITICAL_REGION_EXIT();
/*
 * Drain pending debug bytes through the semihosting channel, using the
 * separate phead consumer pointer so SWO and semihosting can coexist.
 * NOTE(review): original lines 86, 88-91, 93-94, 96-103 and 106-107 are
 * missing from this chunk — the actual semihost write call (presumably
 * call_semihost()) and the computation clamping `chunk` to the
 * contiguous region up to `tail` cannot be confirmed here.
 */
82 static void dbg_do_semi_xmit(
void)
84 uint8_t *head = dbg_state.xmit_buffer_phead;
85 uint8_t *tail = dbg_state.xmit_buffer_tail;
87 while (head != tail) {
/* Largest contiguous run before the buffer end (no wrap inside a chunk). */
92 chunk = XMIT_BUFFER_END - head;
/* volatile read of the chunk's last byte — purpose unclear from the
 * visible lines (possibly to force the data out of any write buffer
 * before the semihost call); TODO confirm. */
95 volatile uint8_t *p = &head[chunk - 1];
/* Wrap the semihosting consumer pointer at the buffer end. */
104 if (head == XMIT_BUFFER_END)
105 head = dbg_state.xmit_buffer;
/* Publish the consumed position. */
108 dbg_state.xmit_buffer_phead = head;
/*
 * Queue up to `len` bytes from `seq` into the transmit ring buffer.
 * Returns the number of bytes actually queued (callers treat != len as
 * overrun — see dbg_putchar()). The whole producer path runs inside a
 * critical region so it is safe against the drain work item.
 * NOTE(review): many original lines (112, 114-116, 118, 121-123,
 * 126-128, 131, 135, 137, 140-149, 151-160) are missing from this
 * chunk; in particular the clamping of `len` to `free` and the
 * probable `seq += xfer_len;` at line 131 before the wrapped copy
 * cannot be confirmed from what is visible.
 */
111 unsigned int dbg_send_bytes(
const uint8_t *
seq,
unsigned int len)
113 if (dbg_state.enabled) {
117 CRITICAL_REGION_ENTER();
119 uint8_t *head = dbg_state.xmit_buffer_head;
120 uint8_t *tail = dbg_state.xmit_buffer_tail;
/* tail >= head: data may wrap; contiguous room up to the buffer end. */
124 unsigned int xfer_len = XMIT_BUFFER_END - tail;
/* One slot is kept empty to distinguish full from empty. */
125 unsigned int free = DBG_XMIT_BUFFER_LEN - (tail - head) - 1;
/* Copy in two pieces when the write wraps past the buffer end. */
129 if (xfer_len < len) {
130 memcpy(tail, seq, xfer_len);
/* NOTE(review): missing line 131 presumably advances `seq` by
 * xfer_len before the second copy — otherwise line 133 would
 * re-copy the start of `seq`; TODO confirm. */
132 xfer_len = len - xfer_len;
133 memcpy(dbg_state.xmit_buffer, seq, xfer_len);
134 tail = dbg_state.xmit_buffer + xfer_len;
/* Non-wrapping case: single contiguous copy. */
136 memcpy(tail, seq, len);
138 if (tail == XMIT_BUFFER_END)
139 tail = dbg_state.xmit_buffer;
/* head > tail: free region is the gap between tail and head. */
143 unsigned int free = (head - tail) - 1;
146 memcpy(tail, seq, len);
/* Publish the new producer position (drain side reads it). */
150 dbg_state.xmit_buffer_tail = tail;
155 CRITICAL_REGION_EXIT();
/*
 * Best-effort single-character output. If a previous write overran the
 * buffer, first try to emit a "^" marker; only once the marker fits is
 * the overrun flag cleared. If the character itself cannot be queued,
 * the overrun flag is set so the loss is visible in the output stream.
 * NOTE(review): original lines 162, 165-166 and 170-172 are missing —
 * the path after a failed "^" send presumably returns early without
 * clearing the flag; TODO confirm.
 */
161 void dbg_putchar(
const char ch)
163 if (dbg_state.write_overrun) {
164 if (dbg_send_bytes((
const uint8_t*)
"^", 1) != 1)
167 dbg_state.write_overrun = 0;
168 if (dbg_send_bytes((
const uint8_t*)&ch, 1) != 1) {
169 dbg_state.write_overrun = 1;
/*
 * Blocking single-character output: spins until the pending overrun
 * marker "^" (if any) and then the character itself have been queued.
 * NOTE(review): original lines 174, 177-178 and 181-184 are missing —
 * the bodies of the busy-wait loops (possibly empty, possibly pumping
 * the drain routine) cannot be confirmed from what is visible.
 */
173 void dbg_blocking_putchar(
const char ch)
175 if (dbg_state.write_overrun) {
176 while (dbg_send_bytes((
const uint8_t*)
"^", 1) != 1)
179 dbg_state.write_overrun = 0;
180 while (dbg_send_bytes((
const uint8_t*)&ch, 1) != 1)
/*
 * NOTE(review): fragment of a flush routine — its header (original
 * lines 185-186) is missing from this chunk. It synchronously drains
 * the ring buffer by invoking the xmit work callback until head catches
 * up with tail; line 192 (missing) presumably drains the semihosting
 * side via dbg_do_semi_xmit() when semihosting is active.
 */
187 if (dbg_state.enabled) {
188 CRITICAL_REGION_ENTER();
189 while(dbg_state.xmit_buffer_tail != dbg_state.xmit_buffer_head)
190 dbg_do_xmit(&dbg_state.xmit_work);
191 if (dbg_state.semihosting)
193 CRITICAL_REGION_EXIT();
/*
 * NOTE(review): fragment of a semihosting-enable routine — its header
 * (original lines ~194-198) is missing from this chunk. It starts the
 * semihosting drain pointer at the current tail (skip already-queued
 * data) and turns the semihosting mode flag on.
 */
199 dbg_state.xmit_buffer_phead = dbg_state.xmit_buffer_tail;
200 dbg_state.semihosting =
true;
Header file for the EVE power management framework.
void uwork_schedule(struct uwork_t *work)
static int32_t call_semihost(uint32_t cmd, const void *msg)
static bool uwork_pending(struct uwork_t *work)
Header file for the EVE microsecond-scale work scheduling.
#define UWORK_INIT_TYPED(x, callback)
uwork_time_t uwork_now(void)