mirror of
https://github.com/torvalds/linux.git
synced 2026-03-08 01:04:41 +01:00
arm64: Fix sampling the "stable" virtual counter in preemptible section
Ben reports that when running with CONFIG_DEBUG_PREEMPT, using
__arch_counter_get_cntvct_stable() results in well-deserved warnings,
as we access a per-CPU variable without preemption disabled.
Fix the issue by disabling preemption on reading the counter. We can
probably do a lot better by not disabling preemption on systems that
do not require horrible workarounds to return a valid counter value,
but this plugs the issue for the time being.
Fixes: 29cc0f3aa7 ("arm64: Force the use of CNTVCT_EL0 in __delay()")
Reported-by: Ben Horgan <ben.horgan@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/aZw3EGs4rbQvbAzV@e134344.arm.com
Tested-by: Ben Horgan <ben.horgan@arm.com>
Tested-by: André Draszik <andre.draszik@linaro.org>
Signed-off-by: Will Deacon <will@kernel.org>
This commit is contained in:
parent
a8f78680ee
commit
e5cb94ba5f
1 changed file with 5 additions and 1 deletion
|
|
@@ -32,7 +32,11 @@ static inline unsigned long xloops_to_cycles(unsigned long xloops)
 * Note that userspace cannot change the offset behind our back either,
 * as the vcpu mutex is held as long as KVM_RUN is in progress.
 */
-#define __delay_cycles() __arch_counter_get_cntvct_stable()
/*
 * Read the "stable" virtual counter for __delay().
 *
 * The whole read runs with preemption disabled: per the commit message,
 * the stable-counter workaround accesses a per-CPU variable, so being
 * migrated mid-read would touch another CPU's state (and trips
 * CONFIG_DEBUG_PREEMPT warnings otherwise).
 *
 * notrace matches __delay(), which must stay off the tracer's path.
 */
static cycles_t notrace __delay_cycles(void)
{
	cycles_t cnt;

	/* Scoped guard: preemption stays disabled until we return. */
	guard(preempt_notrace)();
	cnt = __arch_counter_get_cntvct_stable();

	return cnt;
}
|
||||
|
||||
void __delay(unsigned long cycles)
|
||||
{
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue