/*
* Copyright (c) 2019 Carlo Caione <ccaione@baylibre.com>
*
* SPDX-License-Identifier: Apache-2.0
*/
#include <device.h>
#include <drivers/timer/arm_arch_timer.h>
#include <drivers/timer/system_timer.h>
#include <sys_clock.h>
#include <spinlock.h>
#include <arch/cpu.h>
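/*
 * CYC_PER_TICK: timer counter cycles per kernel tick, derived from the
 * hardware cycle rate and the configured tick rate. MAX_TICKS bounds a
 * single timeout to what sys_clock_announce() can express. MIN_DELAY is
 * the minimum distance (in cycles) a new compare value must lie in the
 * future, so the counter cannot race past it before the write takes
 * effect.
 */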
#define CYC_PER_TICK ((uint64_t)sys_clock_hw_cycles_per_sec() \
/ (uint64_t)CONFIG_SYS_CLOCK_TICKS_PER_SEC)
#define MAX_TICKS INT32_MAX
#define MIN_DELAY (1000)
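/*
 * last_cycle records the counter value at the most recently announced
 * tick boundary; it is updated under the spinlock together with the
 * compare register.
 */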
static struct k_spinlock lock;
static uint64_t last_cycle;
static void arm_arch_timer_compare_isr(const void *arg)
{
ARG_UNUSED(arg);
k_spinlock_key_t key = k_spin_lock(&lock);
#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
/*
* Workaround required for Cortex-A9 MPCore erratum 740657
 * cf. ARM Cortex-A9 processors Software Developers Errata Notice,
* ARM document ID032315.
*/
if (!arm_arch_timer_get_int_status()) {
/*
* If the event flag is not set, this is a spurious interrupt.
* DO NOT modify the compare register's value, DO NOT announce
* elapsed ticks!
*/
k_spin_unlock(&lock, key);
return;
}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */
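	/*
	 * Announce every full tick that has elapsed since the previous
	 * announcement and advance last_cycle to the most recent tick
	 * boundary, so that any sub-tick remainder carries over.
	 */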
uint64_t curr_cycle = arm_arch_timer_count();
uint32_t delta_ticks = (uint32_t)((curr_cycle - last_cycle) / CYC_PER_TICK);
last_cycle += delta_ticks * CYC_PER_TICK;
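	/*
	 * In ticked mode, re-arm the compare register one tick ahead (two,
	 * if the next boundary is already too close to hit safely). In
	 * tickless mode, leave the interrupt masked; sys_clock_set_timeout()
	 * will program the next deadline.
	 */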
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
uint64_t next_cycle = last_cycle + CYC_PER_TICK;
if ((uint64_t)(next_cycle - curr_cycle) < MIN_DELAY) {
next_cycle += CYC_PER_TICK;
}
arm_arch_timer_set_compare(next_cycle);
arm_arch_timer_set_irq_mask(false);
} else {
arm_arch_timer_set_irq_mask(true);
#ifdef CONFIG_ARM_ARCH_TIMER_ERRATUM_740657
/*
* In tickless mode, the compare register is normally not
* updated from within the ISR. Yet, to work around the timer's
* erratum, a new value *must* be written while the interrupt
* is being processed before the interrupt is acknowledged
* by the handling interrupt controller.
*/
arm_arch_timer_set_compare(~0ULL);
}
/*
* Clear the event flag so that in case the erratum strikes (the timer's
* vector will still be indicated as pending by the GIC's pending register
* after this ISR has been executed) the error will be detected by the
* check performed upon entry of the ISR -> the event flag is not set,
* therefore, no actual hardware interrupt has occurred.
*/
arm_arch_timer_clear_int_status();
#else
}
#endif /* CONFIG_ARM_ARCH_TIMER_ERRATUM_740657 */
k_spin_unlock(&lock, key);
sys_clock_announce((int32_t)delta_ticks);
}
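/*
 * Program the next timer interrupt to fire after the given number of
 * ticks. Only relevant in tickless mode; in ticked mode the ISR re-arms
 * the compare register itself.
 */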
void sys_clock_set_timeout(int32_t ticks, bool idle)
{
#if defined(CONFIG_TICKLESS_KERNEL)
if (ticks == K_TICKS_FOREVER && idle) {
return;
}
ticks = (ticks == K_TICKS_FOREVER) ? MAX_TICKS :
	MIN(MAX_TICKS, MAX(ticks - 1, 0));
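	/*
	 * Clamp the request to [0, MAX_TICKS]. The "ticks - 1" compensates
	 * for the round-up to the next tick boundary performed below, so
	 * that the deadline does not overshoot by a full tick.
	 */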
k_spinlock_key_t key = k_spin_lock(&lock);
uint64_t curr_cycle = arm_arch_timer_count();
uint64_t req_cycle = ticks * CYC_PER_TICK;
/* Round up to next tick boundary */
req_cycle += (curr_cycle - last_cycle) + (CYC_PER_TICK - 1);
req_cycle = (req_cycle / CYC_PER_TICK) * CYC_PER_TICK;
if ((req_cycle + last_cycle - curr_cycle) < MIN_DELAY) {
req_cycle += CYC_PER_TICK;
}
arm_arch_timer_set_compare(req_cycle + last_cycle);
arm_arch_timer_set_irq_mask(false);
k_spin_unlock(&lock, key);
#else /* CONFIG_TICKLESS_KERNEL */
ARG_UNUSED(ticks);
ARG_UNUSED(idle);
#endif
}
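/* Report the number of full ticks elapsed since the last announcement. */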
uint32_t sys_clock_elapsed(void)
{
if (!IS_ENABLED(CONFIG_TICKLESS_KERNEL)) {
return 0;
}
k_spinlock_key_t key = k_spin_lock(&lock);
uint32_t ret = (uint32_t)((arm_arch_timer_count() - last_cycle)
/ CYC_PER_TICK);
k_spin_unlock(&lock, key);
return ret;
}
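/*
 * The ARM architected timer counter is 64 bits wide; the 32-bit cycle
 * API simply truncates it to the low word.
 */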
uint32_t sys_clock_cycle_get_32(void)
{
return (uint32_t)arm_arch_timer_count();
}
uint64_t sys_clock_cycle_get_64(void)
{
return arm_arch_timer_count();
}
#ifdef CONFIG_ARCH_HAS_CUSTOM_BUSY_WAIT
void arch_busy_wait(uint32_t usec_to_wait)
{
if (usec_to_wait == 0) {
return;
}
uint64_t start_cycles = arm_arch_timer_count();
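	/*
	 * Dividing before multiplying keeps the intermediate result within
	 * 64 bits for large wait times, at the cost of dropping any sub-MHz
	 * remainder of the clock rate.
	 */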
uint64_t cycles_to_wait = sys_clock_hw_cycles_per_sec() / USEC_PER_SEC * usec_to_wait;
for (;;) {
uint64_t current_cycles = arm_arch_timer_count();
		/* unsigned 64-bit arithmetic handles counter rollover correctly */
if ((current_cycles - start_cycles) >= cycles_to_wait) {
break;
}
}
}
#endif
#ifdef CONFIG_SMP
void smp_timer_init(void)
{
	/*
	 * Program the initial compare value and enable the timer
	 * interrupt on each secondary core.
	 */
arm_arch_timer_set_compare(arm_arch_timer_count() + CYC_PER_TICK);
arm_arch_timer_enable(true);
irq_enable(ARM_ARCH_TIMER_IRQ);
arm_arch_timer_set_irq_mask(false);
}
#endif
static int sys_clock_driver_init(const struct device *dev)
{
ARG_UNUSED(dev);
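	/*
	 * Connect the compare ISR, initialize the timer, program the first
	 * tick boundary, then enable the timer and unmask its interrupt.
	 */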
IRQ_CONNECT(ARM_ARCH_TIMER_IRQ, ARM_ARCH_TIMER_PRIO,
arm_arch_timer_compare_isr, NULL, ARM_ARCH_TIMER_FLAGS);
arm_arch_timer_init();
arm_arch_timer_set_compare(arm_arch_timer_count() + CYC_PER_TICK);
arm_arch_timer_enable(true);
irq_enable(ARM_ARCH_TIMER_IRQ);
arm_arch_timer_set_irq_mask(false);
return 0;
}
SYS_INIT(sys_clock_driver_init, PRE_KERNEL_2,
CONFIG_SYSTEM_CLOCK_INIT_PRIORITY);