EVE 1.0
clock.c
1 #include <stdio.h>
2 #include <lib/assert.h>
3 #include <sys/clock.h>
4 #include <sys/cc.h>
5 #include <sys/etimer.h>
6 #include <debug-uart.h>
7 #include <core/atomic.h>
8 #include <core/irq-prio.h>
9 #include <core/uwork.h>
10 #include <core/mwork.h>
11 #include <core/pm.h>
12 #include <app_util_platform.h>
13 #include <nrf_nvic.h>
14 
/* RTC compare values must be programmed a few counter ticks ahead of the
 * live counter or the COMPARE event can be missed; clock_cc_set() uses this
 * margin to detect "already in the past" deadlines. */
#define CLOCK_SYNC_GUARD (4)
/* Low-order RTC counter bits discarded when converting to clock ticks. */
#define CLOCK_GUARD_WIDTH (2)
/* NRF_RTC1 COUNTER is 24 bits wide. */
#define CLOCK_COUNTER_WIDTH (24)
/* "Sign" bit of a 24-bit counter difference; set => target already passed. */
#define CLOCK_COUNTER_MSB (1 << (CLOCK_COUNTER_WIDTH - 1))
/* log2(prescaled RTC rate): 4096 Hz => 12 bits per second. */
#define CLOCK_SECOND_WIDTH (12)
#define CLOCK_LFCLK_DIV (8 - 1) /* 4096 Hz */
/* System tick rate seen by clock_time(): 4096 >> 2 = 1024 Hz. */
#define CLOCK_HZ (1 << (CLOCK_SECOND_WIDTH - CLOCK_GUARD_WIDTH))
22 
/* Shared clock state, written from RTC1_IRQHandler and thread context
 * (hence the `volatile` on the single instance below). */
struct clock_state_t
{
  uint32_t seconds_const;    /* overflow accumulator feeding current_clock(); monotonic */
  uint32_t seconds_volatile; /* overflow accumulator feeding current_seconds(); re-based by clock_set_seconds() */
  uint32_t etimer_wrap_rem;  /* CC0 value to program after the final pending overflow */
  uint16_t etimer_wrap_cnt;  /* counter overflows remaining before a long-range etimer deadline */
  uint8_t clock_wait;        /* non-zero while clock_wait() is blocking; makes COMPARE0 call pm_wakeup() */
};
31 
/* RTC1 ISR; also pended from thread context via NVIC->STIR in clock_cc_set(). */
void
RTC1_IRQHandler(void) __attribute__ ((interrupt));

/* uwork callback completing clock_delay_usec(). */
static void
clock_delay_usec_done(struct uwork_t *work);

/* Held while any etimer is pending so power management stays at a level
 * from which the RTC wakeup can fire (see arch_update_time()). */
DECLARE_PM_LOCK(PM_LEVEL_LOWPWR, clock_pm_lock, NULL, 0);

static DECLARE_UWORK(udelay_work, clock_delay_usec_done);

static volatile struct clock_state_t clock_state;

/* Interrupt bits forced from thread context (deadline already in the past);
 * consumed atomically at the top of RTC1_IRQHandler. */
uint32_t rtc1_pending_interrupt;
45 
46 static inline uint32_t current_clock()
47 {
48  return (clock_state.seconds_const << (CLOCK_SECOND_WIDTH - CLOCK_GUARD_WIDTH))
49  + (nrf_rtc_counter_get(NRF_RTC1) >> CLOCK_GUARD_WIDTH);
50 }
51 
52 static inline uint32_t current_seconds()
53 {
54  return clock_state.seconds_volatile
55  + (nrf_rtc_counter_get(NRF_RTC1) >> (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
56 }
57 
/*
 * Program compare channel `ch` of `rtc` to fire at counter value `cc_val`.
 *
 * An RTC compare only matches while the counter is approaching cc_val, so
 * a target that is already in the past -- or closer than CLOCK_SYNC_GUARD
 * ticks (NOTE(review): presumably the hardware setup margin; confirm
 * against the nRF RTC product spec) -- would never generate the event.
 * In that case the wanted interrupt bit is recorded in
 * rtc1_pending_interrupt and RTC1_IRQn is pended in software through
 * NVIC->STIR, so the handler still runs.  Wrap-around of the 24-bit
 * counter is handled by testing the "sign" bit (CLOCK_COUNTER_MSB) of the
 * counter-space distance.
 */
void clock_cc_set(NRF_RTC_Type *rtc, uint32_t ch, uint32_t cc_val)
{
  uint32_t now;
  nrf_rtc_cc_set(rtc, ch, cc_val);

  now = nrf_rtc_counter_get(rtc);
  if ((cc_val - now - CLOCK_SYNC_GUARD) & CLOCK_COUNTER_MSB) {
    /* Work is in the past. Trigger the event */
    atomic_or32(&rtc1_pending_interrupt, NRF_RTC_INT_COMPARE0_MASK << ch);
    NVIC->STIR = RTC1_IRQn;
  }
}
70 
/*
 * Arm the CC0 wakeup `dist` clock ticks (CLOCK_HZ) from now.
 *
 * If the deadline lies beyond the 24-bit counter's overflow horizon, the
 * compare interrupt is disabled and the target is parked in
 * etimer_wrap_cnt / etimer_wrap_rem; the OVRFLW branch of RTC1_IRQHandler
 * counts the overflows down and re-arms CC0 after the last one.
 */
static void update_wakeup(uint32_t dist)
{
  CRITICAL_REGION_ENTER();
  uint32_t now = nrf_rtc_counter_get(NRF_RTC1);
  /* Full counter overflows that occur before the deadline. */
  uint32_t wrap = dist >> (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH);
  /* Top bit of the usable clock-tick range: dist behaves as "negative". */
  uint32_t negative = dist >> (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH - 1);
  if (!wrap && negative)
  {
    /* Clamp a wrapped/"negative" distance to the largest positive one. */
    dist = (1 << (CLOCK_COUNTER_WIDTH - CLOCK_GUARD_WIDTH - 1)) - 1;
  }
  uint32_t wup = now + (dist << CLOCK_GUARD_WIDTH);

  if (wrap)
  {
    /* Too far for the hardware counter: defer to the overflow handler. */
    clock_state.etimer_wrap_cnt = wrap;
    clock_state.etimer_wrap_rem = wup;
    nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
  }
  else
  {
    clock_cc_set(NRF_RTC1, 0, wup);
    nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
  }
  CRITICAL_REGION_EXIT();
}
96 
97 void
98 RTC1_IRQHandler(void)
99 {
100  uint32_t interrupt = atomic_xchg32(&rtc1_pending_interrupt, 0);
101  uint32_t comp;
102 
103  if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_COMPARE_1)
104  || (interrupt & NRF_RTC_INT_COMPARE1_MASK))
105  {
106  nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_COMPARE_1);
107  mwork_timer();
108  }
109 
110  if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_OVERFLOW))
111  {
112  nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_OVERFLOW);
113 
114  /* Update long-term clock */
115  clock_state.seconds_const += (1 << (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
116  clock_state.seconds_volatile += (1 << (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
117 
118  /* Update long-term etimers */
119  if (clock_state.etimer_wrap_cnt != 0)
120  {
121  clock_state.etimer_wrap_cnt--;
122  if (!clock_state.etimer_wrap_cnt)
123  {
124  comp = clock_state.etimer_wrap_rem;
125 
126  if (comp < CLOCK_SYNC_GUARD)
127  comp = CLOCK_SYNC_GUARD;
128 
129  clock_cc_set(NRF_RTC1, 0, comp);
130  nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
131 
132  clock_state.etimer_wrap_rem = 0;
133  }
134  }
135  }
136 
137  if (nrf_rtc_event_pending(NRF_RTC1, NRF_RTC_EVENT_COMPARE_0)
138  || (interrupt & NRF_RTC_INT_COMPARE0_MASK))
139  {
140  nrf_rtc_event_clear(NRF_RTC1, NRF_RTC_EVENT_COMPARE_0);
141 
142  if (clock_state.clock_wait)
143  {
144  pm_wakeup();
145  }
146  if (etimer_pending())
147  {
148  uint32_t clock = current_clock();
149  int32_t dist = etimer_next_expiration_time() - clock;
150 
151  if (dist <= 0)
152  {
153  nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
155  pm_wakeup();
156  }
157  else
158  {
159  update_wakeup(dist);
160  }
161  }
162  else
163  {
164  nrf_rtc_int_disable(NRF_RTC1, NRF_RTC_INT_COMPARE0_MASK);
165  }
166  }
167 }
168 
169 void
171 {
172  nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_STOP);
173  do
174  {
175  nrf_rtc_prescaler_set(NRF_RTC1, CLOCK_LFCLK_DIV + 1);
176  } while (rtc_prescaler_get(NRF_RTC1) != CLOCK_LFCLK_DIV + 1);
177  do
178  {
179  nrf_rtc_prescaler_set(NRF_RTC1, CLOCK_LFCLK_DIV);
180  } while (rtc_prescaler_get(NRF_RTC1) != CLOCK_LFCLK_DIV);
181  nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_CLEAR);
182  nrf_rtc_task_trigger(NRF_RTC1, NRF_RTC_TASK_START);
183  nrf_rtc_int_enable(NRF_RTC1, NRF_RTC_INT_OVERFLOW_MASK);
184 
185 // nrf_rtc_event_enable(NRF_RTC1, RTC_EVTENSET_COMPARE0_Msk
186 // | RTC_EVTENSET_COMPARE1_Msk
187 // | RTC_EVTENSET_OVRFLW_Msk);
188 
189  sd_nvic_ClearPendingIRQ(RTC1_IRQn);
190  sd_nvic_SetPriority(RTC1_IRQn, EVE_IRQ_PRIORITY_LOW);
191  sd_nvic_EnableIRQ(RTC1_IRQn);
192 }
193 
194 clock_time_t
196 {
197  return current_clock();
198 }
199 
/*
 * Contiki clock API: current wall-clock time in whole seconds.
 * NOTE(review): the function-name line was dropped by the source
 * extraction (original line 201 missing); restored from the file's index
 * entry "unsigned long clock_seconds(void) Definition: clock.c:201".
 */
unsigned long
clock_seconds(void)
{
  return current_seconds();
}
205 
206 void
207 clock_set_seconds(unsigned long sec)
208 {
209  clock_state.seconds_volatile = sec
210  - (nrf_rtc_counter_get(NRF_RTC1) >> (CLOCK_COUNTER_WIDTH - CLOCK_SECOND_WIDTH));
211 }
212 
/*
 * Block the caller for `t` clock ticks.
 *
 * Sleeps with pm_relax() between hardware wakeups programmed through
 * update_wakeup().  The clock_wait flag makes RTC1_IRQHandler call
 * pm_wakeup() on COMPARE0, so each wakeup re-evaluates the remaining
 * distance; the PM lock keeps the core at a level the RTC can wake.
 */
void
clock_wait(clock_time_t t)
{
  clock_time_t stop;
  int32_t dist;

  stop = clock_time() + t;
  /* This loop blocks all the running timers until timeout is expired.
   * It's fine, anyway the timers have no chance to be executed before
   * the function is finished */
  PM_LOCK(clock_pm_lock);
  clock_state.clock_wait = 1;
  while (1)
  {
    dist = stop - clock_time();
    if (dist <= 0)
      break;
    update_wakeup(dist);
    pm_relax();
  }
  clock_state.clock_wait = 0;
  PM_UNLOCK(clock_pm_lock);
}
236 
/* uwork callback: the microsecond delay elapsed -- wake the sleeping core
 * so the clock_delay_usec() loop can observe completion. */
static void
clock_delay_usec_done(struct uwork_t *work)
{
  (void)work; /* callback argument unused */
  pm_wakeup();
}
242 
243 void
244 clock_delay_usec(uint16_t dt)
245 {
246  udelay_work.at = uwork_now();
247  udelay_work.at += UWORK_USEC(dt);
248  uwork_schedule(&udelay_work);
249  while (uwork_pending(&udelay_work))
250  pm_relax();
251 }
252 
/*
 * Re-arm the hardware wakeup after the etimer queue changed.
 * NOTE(review): presumably called by the etimer/platform glue whenever
 * timers are added or removed -- confirm with the caller.
 *
 * While any etimer is pending the PM lock is held so the core stays at a
 * power level the RTC compare can wake; with no pending expiration
 * (etimer_next_expiration_time() == 0) the lock is released.
 */
void
arch_update_time(void)
{
  uint32_t clock = current_clock();
  uint32_t next_expiration_time = etimer_next_expiration_time();
  int32_t dist = next_expiration_time - clock;

  if (next_expiration_time == 0)
  {
    /* Nothing scheduled: allow deeper sleep. */
    if (clock_pm_lock.locked)
      PM_UNLOCK(clock_pm_lock);
  }
  else
  {
    /* Already due: program the nearest possible wakeup (1 tick). */
    if (dist <= 0)
      dist = 1;
    update_wakeup(dist);
    if (!clock_pm_lock.locked)
      PM_LOCK(clock_pm_lock);
  }
}
Header file for the EVE millisecond-scale work scheduling.
clock_time_t clock_time(void)
Definition: clock.c:195
#define DECLARE_UWORK(x, callback)
Definition: uwork.h:104
#define PM_UNLOCK(pm)
Definition: pm.h:204
void etimer_request_poll(void)
Make the event timer aware that the clock has changed.
Definition: etimer.c:173
IRQ priorities.
void mwork_timer(void)
Header file for the EVE power management framework.
void uwork_schedule(struct uwork_t *work)
clock_time_t etimer_next_expiration_time(void)
Get next event timer expiration time.
Definition: etimer.c:257
#define PM_LOCK(pm)
Definition: pm.h:203
void clock_init()
Definition: clock.c:170
__attribute__((always_inline)) static inline void swint_enable_indirect_adapter(swint_state_t *state)
Definition: work.h:245
uint32_t atomic_or32(volatile uint32_t *p, uint32_t value)
void pm_relax(void)
int etimer_pending(void)
Check if there are any non-expired event timers.
Definition: etimer.c:251
static bool uwork_pending(struct uwork_t *work)
Definition: uwork.h:207
Header file for the EVE atomic primitives set.
Header file for the EVE microsecond-scale work scheduling.
#define UWORK_USEC(us)
Definition: uwork.h:58
uint32_t atomic_xchg32(volatile uint32_t *p, uint32_t value)
void pm_wakeup(void)
Definition: uwork.h:142
#define DECLARE_PM_LOCK(l, pm, cb, d)
Definition: pm.h:210
void clock_delay_usec(uint16_t dt)
Definition: clock.c:244
void clock_set_seconds(unsigned long sec)
Definition: clock.c:207
unsigned long clock_seconds(void)
Definition: clock.c:201
uwork_time_t uwork_now(void)
void clock_wait(clock_time_t t)
Definition: clock.c:214