sync.h
/*
 * Copyright (c) 2020 Raspberry Pi (Trading) Ltd.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#ifndef _HARDWARE_SYNC_H
#define _HARDWARE_SYNC_H

#include "pico.h"
#include "hardware/address_mapped.h" // provides io_ro_32 and check_hw_size, used below
#include "hardware/regs/sio.h"

#ifdef __cplusplus
extern "C" {
#endif

// PICO_CONFIG: PARAM_ASSERTIONS_ENABLED_SYNC, Enable/disable assertions in the HW sync module, type=bool, default=0, group=hardware_sync
#ifndef PARAM_ASSERTIONS_ENABLED_SYNC
#define PARAM_ASSERTIONS_ENABLED_SYNC 0
#endif

// A spin lock identifier
typedef volatile uint32_t spin_lock_t;

// PICO_CONFIG: PICO_SPINLOCK_ID_IRQ, Spinlock ID for IRQ protection, min=0, max=31, default=9, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_IRQ
#define PICO_SPINLOCK_ID_IRQ 9
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_TIMER, Spinlock ID for Timer protection, min=0, max=31, default=10, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_TIMER
#define PICO_SPINLOCK_ID_TIMER 10
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_HARDWARE_CLAIM, Spinlock ID for Hardware claim protection, min=0, max=31, default=11, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_HARDWARE_CLAIM
#define PICO_SPINLOCK_ID_HARDWARE_CLAIM 11
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS1, First Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=14, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS1
#define PICO_SPINLOCK_ID_OS1 14
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_OS2, Second Spinlock ID reserved for use by low level OS style software, min=0, max=31, default=15, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_OS2
#define PICO_SPINLOCK_ID_OS2 15
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_FIRST, Lowest Spinlock ID in the 'striped' range, min=0, max=31, default=16, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_FIRST
#define PICO_SPINLOCK_ID_STRIPED_FIRST 16
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_STRIPED_LAST, Highest Spinlock ID in the 'striped' range, min=0, max=31, default=23, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_STRIPED_LAST
#define PICO_SPINLOCK_ID_STRIPED_LAST 23
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_FIRST, Lowest Spinlock ID in the 'claim free' range, min=0, max=31, default=24, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_FIRST
#define PICO_SPINLOCK_ID_CLAIM_FREE_FIRST 24
#endif

#ifdef PICO_SPINLOCK_ID_CLAIM_FREE_END
#warning PICO_SPINLOCK_ID_CLAIM_FREE_END has been renamed to PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#endif

// PICO_CONFIG: PICO_SPINLOCK_ID_CLAIM_FREE_LAST, Highest Spinlock ID in the 'claim free' range, min=0, max=31, default=31, group=hardware_sync
#ifndef PICO_SPINLOCK_ID_CLAIM_FREE_LAST
#define PICO_SPINLOCK_ID_CLAIM_FREE_LAST 31
#endif

// Insert a SEV instruction into the code path (sends an event to both cores)
__force_inline static void __sev(void) {
    __asm volatile ("sev");
}

// Insert a WFE (wait for event) instruction into the code path
__force_inline static void __wfe(void) {
    __asm volatile ("wfe");
}

// Insert a WFI (wait for interrupt) instruction into the code path
__force_inline static void __wfi(void) {
    __asm volatile ("wfi");
}

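// Illustrative sketch (not from the SDK sources; `flag` is a hypothetical shared
// variable): one core can sleep with __wfe() until the other signals it with
// __sev(). WFE may also wake spuriously, so always re-check the condition.
//
//   static volatile bool flag;
//
//   // waiting core: low-power wait until the flag is raised
//   while (!flag) {
//       __wfe();
//   }
//
//   // signalling core: publish the flag, then wake any core parked in __wfe()
//   flag = true;
//   __sev();
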
// Insert a DMB (data memory barrier) instruction into the code path
__force_inline static void __dmb(void) {
    __asm volatile ("dmb" : : : "memory");
}

// Insert a DSB (data synchronization barrier) instruction into the code path
__force_inline static void __dsb(void) {
    __asm volatile ("dsb" : : : "memory");
}

// Insert an ISB (instruction synchronization barrier) instruction into the code path
__force_inline static void __isb(void) {
    __asm volatile ("isb");
}

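// Illustrative sketch (a common Cortex-M idiom, not SDK-prescribed; the register
// write is hypothetical): drain a store with __dsb() before sleeping, so the
// wakeup source is really armed before __wfi() is reached.
//
//   hypothetical_hw->irq_enable = 1;  // arm a wakeup interrupt (placeholder register)
//   __dsb();                          // ensure the write has completed
//   __wfi();                          // sleep until an interrupt fires
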
// Acquire a memory fence
__force_inline static void __mem_fence_acquire(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_acquire);
//#else
//    std::atomic_thread_fence(std::memory_order_acquire);
//#endif
}

// Release a memory fence
__force_inline static void __mem_fence_release(void) {
    // the original code below makes it hard for us to be included from C++ via a header
    // which itself is in an extern "C", so just use __dmb instead, which is what
    // is required on Cortex M0+
    __dmb();
//#ifndef __cplusplus
//    atomic_thread_fence(memory_order_release);
//#else
//    std::atomic_thread_fence(std::memory_order_release);
//#endif
}

// Save and disable interrupts
__force_inline static uint32_t save_and_disable_interrupts(void) {
    uint32_t status;
    __asm volatile ("mrs %0, PRIMASK" : "=r" (status)::);
    __asm volatile ("cpsid i");
    return status;
}

// Restore interrupts to a specified state
__force_inline static void restore_interrupts(uint32_t status) {
    __asm volatile ("msr PRIMASK,%0"::"r" (status) : );
}

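// Illustrative sketch (hypothetical `counter`): a short critical section protected
// against interrupts on the calling core. Note this only masks IRQs locally; it
// does not protect against code running on the other core (use a spin lock for that).
//
//   static volatile uint32_t counter;
//
//   void counter_increment(void) {
//       uint32_t save = save_and_disable_interrupts();  // PRIMASK saved, IRQs masked
//       counter++;                                      // safe from local interrupts
//       restore_interrupts(save);                       // previous PRIMASK restored
//   }
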
// Get HW Spinlock instance from number
__force_inline static spin_lock_t *spin_lock_instance(uint lock_num) {
    invalid_params_if(SYNC, lock_num >= NUM_SPIN_LOCKS);
    return (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET + lock_num * 4);
}

// Get HW Spinlock number from instance
__force_inline static uint spin_lock_get_num(spin_lock_t *lock) {
    invalid_params_if(SYNC, (uint) lock < SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            (uint) lock >= NUM_SPIN_LOCKS * sizeof(spin_lock_t) + SIO_BASE + SIO_SPINLOCK0_OFFSET ||
                            ((uint) lock - (SIO_BASE + SIO_SPINLOCK0_OFFSET)) % sizeof(spin_lock_t) != 0);
    return (uint) (lock - (spin_lock_t *) (SIO_BASE + SIO_SPINLOCK0_OFFSET));
}

// Acquire a spin lock without disabling interrupts (hence unsafe)
__force_inline static void spin_lock_unsafe_blocking(spin_lock_t *lock) {
    // Note we don't do a wfe or anything, because by convention these spin_locks are VERY SHORT LIVED and NEVER BLOCK and run
    // with INTERRUPTS disabled (to ensure that)... therefore nothing on our core could be blocking us, so we just need to wait on another core
    // anyway which should be finished soon
    while (__builtin_expect(!*lock, 0));
    __mem_fence_acquire();
}

// Release a spin lock without re-enabling interrupts
__force_inline static void spin_unlock_unsafe(spin_lock_t *lock) {
    __mem_fence_release();
    *lock = 0;
}

// Acquire a spin lock safely: disables interrupts on this core, then takes the lock
__force_inline static uint32_t spin_lock_blocking(spin_lock_t *lock) {
    uint32_t save = save_and_disable_interrupts();
    spin_lock_unsafe_blocking(lock);
    return save;
}

// Check to see if a spinlock is currently acquired elsewhere
inline static bool is_spin_locked(spin_lock_t *lock) {
    check_hw_size(spin_lock_t, 4);
    uint lock_num = spin_lock_get_num(lock);
    return 0 != (*(io_ro_32 *) (SIO_BASE + SIO_SPINLOCK_ST_OFFSET) & (1u << lock_num));
}

// Release a spin lock safely: releases the lock, then restores interrupts
__force_inline static void spin_unlock(spin_lock_t *lock, uint32_t saved_irq) {
    spin_unlock_unsafe(lock);
    restore_interrupts(saved_irq);
}

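// Illustrative sketch (hypothetical lock and data): a cross-core critical section.
// spin_lock_blocking() disables interrupts on this core and then takes the hardware
// lock, so the section is safe against both the other core and local IRQs.
//
//   static spin_lock_t *my_lock;       // e.g. obtained via spin_lock_init() below
//   static uint32_t shared_data;
//
//   void shared_data_set(uint32_t v) {
//       uint32_t save = spin_lock_blocking(my_lock);
//       shared_data = v;               // keep the locked region VERY short
//       spin_unlock(my_lock, save);
//   }
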
// Get the current core number
__force_inline static uint get_core_num(void) {
    return (*(uint32_t *) (SIO_BASE + SIO_CPUID_OFFSET));
}

// Initialise a spin lock (the lock is initially unlocked)
spin_lock_t *spin_lock_init(uint lock_num);

// Release all spin locks
void spin_locks_reset(void);

// Return a spin lock number from the striped range
uint next_striped_spin_lock_num(void);

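// Illustrative sketch: callers that just need "some lock" (and can tolerate
// sharing it) can pick one round-robin from the striped range to spread contention:
//
//   spin_lock_t *lock = spin_lock_instance(next_striped_spin_lock_num());
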
// Mark a spin lock as used
void spin_lock_claim(uint lock_num);

// Mark multiple spin locks as used
void spin_lock_claim_mask(uint32_t lock_num_mask);

// Mark a spin lock as no longer used
void spin_lock_unclaim(uint lock_num);

// Claim a free spin lock; returns the lock number, or -1 if required is false and none are free
int spin_lock_claim_unused(bool required);

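// Illustrative sketch: dynamically claim a free lock at startup and initialise it.
// With required=true the call asserts rather than returning -1 when none are free.
//
//   int lock_num = spin_lock_claim_unused(true);
//   spin_lock_t *lock = spin_lock_init((uint) lock_num);
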
// Determine if a spin lock is claimed
bool spin_lock_is_claimed(uint lock_num);

#define remove_volatile_cast(t, x) ({__mem_fence_acquire(); (t)(x); })

#ifdef __cplusplus
}
#endif

#endif