/* NOTE(review): this chunk is extraction-garbled — the leading decimal on each
 * line is the original source line number, not code, and several original
 * lines (including the `struct clock_state_t {` header around lines 22-24)
 * are missing from this view.  Code bytes are kept untouched.
 *
 * Clock geometry: RTC1 runs at 32768 / (CLOCK_LFCLK_DIV + 1) = 4096 Hz with a
 * CLOCK_COUNTER_WIDTH (24-bit) counter; the low CLOCK_GUARD_WIDTH bits are a
 * guard band, giving CLOCK_HZ = 1024 software clock ticks per second. */
2 #include <lib/assert.h> 6 #include <debug-uart.h> 12 #include <app_util_platform.h> 15 #define CLOCK_SYNC_GUARD (4) 16 #define CLOCK_GUARD_WIDTH (2) 17 #define CLOCK_COUNTER_WIDTH (24) 18 #define CLOCK_COUNTER_MSB (1 << (CLOCK_COUNTER_WIDTH - 1)) 19 #define CLOCK_SECOND_WIDTH (12) 20 #define CLOCK_LFCLK_DIV (8 - 1) 21 #define CLOCK_HZ (1 << (CLOCK_SECOND_WIDTH - CLOCK_GUARD_WIDTH)) 25 uint32_t seconds_const;
/* Overflow-extended seconds counter used by clock_seconds(); adjusted by
 * clock_set_seconds(), unlike seconds_const which only tracks overflows. */
26 uint32_t seconds_volatile;
/* Absolute counter value of the pending etimer wakeup once all wraps elapse. */
27 uint32_t etimer_wrap_rem;
/* Remaining full counter overflows before the etimer wakeup is programmed. */
28 uint16_t etimer_wrap_cnt;
/* Forward declaration: completion callback for clock_delay_usec(). */
36 clock_delay_usec_done(struct
uwork_t *work);
/* Single driver state instance; volatile because it is shared with the ISR. */
42 static volatile struct clock_state_t clock_state;
/* Bitmask of RTC1 compare interrupts raised in software via NVIC->STIR
 * (see clock_cc_set); consumed atomically by the ISR. */
44 uint32_t rtc1_pending_interrupt;
46 static inline uint32_t current_clock()
48 return (clock_state.seconds_const << (CLOCK_SECOND_WIDTH - CLOCK_GUARD_WIDTH))
49 + (nrf_rtc_counter_get(NRF_RTC1) >> CLOCK_GUARD_WIDTH);
52 static inline uint32_t current_seconds()
54 return clock_state.seconds_volatile
55 + (nrf_rtc_counter_get(NRF_RTC1) >> (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
58 void clock_cc_set(NRF_RTC_Type *rtc, uint32_t ch, uint32_t cc_val)
61 nrf_rtc_cc_set(rtc, ch, cc_val);
63 now = nrf_rtc_counter_get(rtc);
64 if ((cc_val - now - CLOCK_SYNC_GUARD) & CLOCK_COUNTER_MSB) {
66 atomic_or32(&rtc1_pending_interrupt, NRF_RTC_INT_COMPARE0_MASK << ch);
67 NVIC->STIR = RTC1_IRQn;
/* Arm the COMPARE0 wakeup `dist` clock ticks in the future.  Distances longer
 * than one counter span are split into whole-counter "wraps" (counted down in
 * the overflow ISR) plus a remainder compare value.
 * NOTE(review): original lines 78, 80, 82-84, 88-90 and 93 are missing from
 * this view (presumably braces/comments); code kept byte-identical. */
71 static void update_wakeup(uint32_t dist)
73 CRITICAL_REGION_ENTER();
74 uint32_t now = nrf_rtc_counter_get(NRF_RTC1);
/* Number of complete counter overflows the deadline lies beyond. */
75 uint32_t wrap = dist >> (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH);
/* Sign bit of `dist` viewed as an in-range signed distance. */
76 uint32_t negative = dist >> (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH - 1);
/* NOTE(review): a "negative" (already-due) distance with no wraps is clamped
 * to the maximum in-range distance — presumably clock_cc_set's past-deadline
 * software-interrupt path then fires immediately; confirm intent. */
77 if (!wrap && negative)
79 dist = (1 << (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH - 1)) - 1;
/* Absolute counter value of the wakeup (guard bits restored). */
81 uint32_t wup = now + (dist << CLOCK_GUARD_WIDTH);
85 clock_state.etimer_wrap_cnt = wrap;
86 clock_state.etimer_wrap_rem = wup;
87 nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
/* Only the final (non-wrapping) leg programs the compare here; intermediate
 * wraps are re-armed by the overflow branch of the RTC1 ISR. */
91 clock_cc_set(NRF_RTC1, 0, wup);
92 nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
94 CRITICAL_REGION_EXIT();
/* NOTE(review): fragment of the RTC1 interrupt handler — its signature
 * (original lines 95-99) and several body lines are missing from this view;
 * code kept byte-identical. */
/* Atomically claim any software-raised compare interrupts (see clock_cc_set). */
100 uint32_t interrupt =
atomic_xchg32(&rtc1_pending_interrupt, 0);
/* COMPARE1: either a real hardware event or a software-pended one. */
103 if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_COMPARE_1)
104 || (interrupt & NRF_RTC_INT_COMPARE1_MASK))
106 nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_COMPARE_1);
/* OVERFLOW: extend the 24-bit counter in software. */
110 if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_OVERFLOW))
112 nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_OVERFLOW);
/* One overflow = 1 << (COUNTER_WIDTH - SECOND_WIDTH) = 4096 seconds. */
115 clock_state.seconds_const += (1 << (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
116 clock_state.seconds_volatile += (1 << (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
/* Count down pending etimer wraps; on the last one, arm the remainder. */
119 if (clock_state.etimer_wrap_cnt != 0)
121 clock_state.etimer_wrap_cnt--;
122 if (!clock_state.etimer_wrap_cnt)
124 comp = clock_state.etimer_wrap_rem;
/* Keep the compare at least CLOCK_SYNC_GUARD ticks ahead so it latches. */
126 if (comp < CLOCK_SYNC_GUARD)
127 comp = CLOCK_SYNC_GUARD;
129 clock_cc_set(NRF_RTC1, 0, comp);
130 nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
132 clock_state.etimer_wrap_rem = 0;
/* COMPARE0: etimer wakeup (hardware or software-pended). */
137 if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_COMPARE_0)
138 || (interrupt & NRF_RTC_INT_COMPARE0_MASK))
140 nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_COMPARE_0);
/* Wake a clock_wait() spinner, if one is parked — see clock_wait fragment. */
142 if (clock_state.clock_wait)
148 uint32_t clock = current_clock();
/* NOTE(review): fragments of several functions — the enclosing signatures and
 * many lines are missing from this view; code kept byte-identical. */
/* Two separate COMPARE0-disable sites (different callers, originals 153/164). */
153 nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
164 nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
/* RTC1 (re)initialization: stop, then write the prescaler twice — first an
 * off-by-one value, then the real CLOCK_LFCLK_DIV — each time spinning until
 * the write is observed.  NOTE(review): presumably this forces a detectable
 * register transition across the LFCLK domain; confirm against the nRF RTC
 * PRESCALER synchronization rules. */
172 nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_STOP);
175 nrf_rtc_prescaler_set(NRF_RTC1, CLOCK_LFCLK_DIV + 1);
176 }
while (rtc_prescaler_get(NRF_RTC1) != CLOCK_LFCLK_DIV + 1);
179 nrf_rtc_prescaler_set(NRF_RTC1, CLOCK_LFCLK_DIV);
180 }
while (rtc_prescaler_get(NRF_RTC1) != CLOCK_LFCLK_DIV);
/* Restart from zero and track counter overflows from now on. */
181 nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_CLEAR);
182 nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_START);
183 nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_OVERFLOW_MASK);
/* Route RTC1 through the SoftDevice NVIC wrappers at low app priority. */
189 sd_nvic_ClearPendingIRQ(RTC1_IRQn);
190 sd_nvic_SetPriority(RTC1_IRQn, EVE_IRQ_PRIORITY_LOW);
191 sd_nvic_EnableIRQ(RTC1_IRQn);
/* NOTE(review): one-line bodies of the public accessors — enclosing
 * signatures are missing from this view; code kept byte-identical. */
/* clock_time(): monotonic CLOCK_HZ ticks. */
197 return current_clock();
/* clock_seconds(): settable wall-clock seconds. */
203 return current_seconds();
/* clock_set_seconds(sec): bias seconds_volatile so that current_seconds()
 * reads `sec` right now; the in-flight RTC1 counter contribution is
 * subtracted to avoid a step at the next read. */
209 clock_state.seconds_volatile = sec
210 - (nrf_rtc_counter_get(NRF_RTC1) >> (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
/* clock_wait(): flag consumed by the RTC1 ISR (original 142) to wake the
 * spinner; cleared when the wait completes. */
224 clock_state.clock_wait = 1;
233 clock_state.clock_wait = 0;
/* NOTE(review): fragments of clock_delay_usec_done() and arch_update_time() —
 * bodies are mostly missing from this view; code kept byte-identical. */
/* uwork completion callback for clock_delay_usec(). */
238 clock_delay_usec_done(
struct uwork_t *work)
/* arch_update_time(): reprogram the wakeup from the next etimer expiration. */
254 arch_update_time(
void)
256 uint32_t clock = current_clock();
/* Signed distance to the next expiration in clock ticks. */
258 int32_t dist = next_expiration_time - clock;
/* next_expiration_time == 0 means no etimer pending — power-management lock
 * state decides whether to keep the compare armed (lines between are missing). */
260 if (next_expiration_time == 0)
262 if (clock_pm_lock.locked)
270 if (!clock_pm_lock.locked)
Header file for the EVE millisecond-scale work scheduling.
clock_time_t clock_time(void)
#define DECLARE_UWORK(x, callback)
void etimer_request_poll(void)
Make the event timer aware that the clock has changed.
Header file for the EVE power management framework.
void uwork_schedule(struct uwork_t *work)
clock_time_t etimer_next_expiration_time(void)
Get next event timer expiration time.
__attribute__((always_inline)) static inline void swint_enable_indirect_adapter(swint_state_t *state)
uint32_t atomic_or32(volatile uint32_t *p, uint32_t value)
int etimer_pending(void)
Check if there are any non-expired event timers.
static bool uwork_pending(struct uwork_t *work)
Header file for the EVE atomic primitives set.
Header file for the EVE microsecond-scale work scheduling.
uint32_t atomic_xchg32(volatile uint32_t *p, uint32_t value)
#define DECLARE_PM_LOCK(l, pm, cb, d)
void clock_delay_usec(uint16_t dt)
void clock_set_seconds(unsigned long sec)
unsigned long clock_seconds(void)
uwork_time_t uwork_now(void)
void clock_wait(clock_time_t t)