s390/idle: Inline update_timer_idle()

Inline update_timer_idle() again to avoid an extra function call. This
way the generated code is close to the old assembler version again.

Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2026-02-18 15:20:08 +01:00 committed by Vasily Gorbik
parent 00d8b035eb
commit 257c14e5a1
4 changed files with 38 additions and 35 deletions

View file

@ -19,10 +19,11 @@ struct s390_idle_data {
unsigned long mt_cycles_enter[8];
};
DECLARE_PER_CPU(struct s390_idle_data, s390_idle);
extern struct device_attribute dev_attr_idle_count;
extern struct device_attribute dev_attr_idle_time_us;
void psw_idle(struct s390_idle_data *data, unsigned long psw_mask);
void update_timer_idle(void);
#endif /* _S390_IDLE_H */

View file

@ -2,6 +2,12 @@
#ifndef _S390_VTIME_H
#define _S390_VTIME_H
#include <asm/lowcore.h>
#include <asm/cpu_mf.h>
#include <asm/idle.h>
DECLARE_PER_CPU(u64, mt_cycles[8]);
static inline void update_timer_sys(void)
{
struct lowcore *lc = get_lowcore();
@ -20,4 +26,32 @@ static inline void update_timer_mcck(void)
lc->last_update_timer = lc->mcck_enter_timer;
}
/*
 * Account the time spent in idle: fold multithreading diagnostic cycle
 * deltas into the per-CPU mt_cycles counters and advance the lowcore
 * vtime bookkeeping fields past the idle period.
 *
 * Must run on the CPU that just left idle (uses this_cpu_ptr and the
 * per-CPU s390_idle snapshot taken at idle entry). The order of the
 * lowcore updates below matters: last_update_timer is consumed for the
 * system_timer accumulation before it is overwritten.
 */
static inline void update_timer_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
struct lowcore *lc = get_lowcore();
u64 cycles_new[8];
int i, mtid;
/* Number of additional threads per core; 0 when SMT is off. */
mtid = smp_cpu_mtid;
if (mtid) {
/* Store current MT diagnostic counters and accumulate the delta since idle entry. */
stcctm(MT_DIAG, mtid, cycles_new);
for (i = 0; i < mtid; i++)
__this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
}
/*
 * This is a bit subtle: Forward last_update_clock so it excludes idle
 * time. For correct steal time calculation in do_account_vtime() add
 * passed wall time before idle_enter to steal_timer:
 * During the passed wall time before idle_enter CPU time may have
 * been accounted to system, hardirq, softirq, etc. lowcore fields.
 * The accounted CPU times will be subtracted again from steal_timer
 * when accumulated steal time is calculated in do_account_vtime().
 */
lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
lc->last_update_clock = lc->int_clock;
/* CPU timer consumed between idle entry and the wakeup interrupt counts as system time. */
lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
lc->last_update_timer = lc->sys_enter_timer;
}
#endif /* _S390_VTIME_H */

View file

@ -56,8 +56,6 @@ long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
long sys_s390_sthyi(unsigned long function_code, void __user *buffer, u64 __user *return_code, unsigned long flags);
DECLARE_PER_CPU(u64, mt_cycles[8]);
unsigned long stack_alloc(void);
void stack_free(unsigned long stack);

View file

@ -15,41 +15,11 @@
#include <trace/events/power.h>
#include <asm/cpu_mf.h>
#include <asm/cputime.h>
#include <asm/idle.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include "entry.h"
static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
/*
 * Account the time spent in idle: fold multithreading diagnostic cycle
 * deltas into the per-CPU mt_cycles counters and advance the lowcore
 * vtime bookkeeping fields past the idle period.
 *
 * Must run on the CPU that just left idle (uses this_cpu_ptr and the
 * per-CPU s390_idle snapshot taken at idle entry). The order of the
 * lowcore updates below matters: last_update_timer is consumed for the
 * system_timer accumulation before it is overwritten.
 */
void update_timer_idle(void)
{
struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
struct lowcore *lc = get_lowcore();
u64 cycles_new[8];
int i, mtid;
/* Number of additional threads per core; 0 when SMT is off. */
mtid = smp_cpu_mtid;
if (mtid) {
/* Store current MT diagnostic counters and accumulate the delta since idle entry. */
stcctm(MT_DIAG, mtid, cycles_new);
for (i = 0; i < mtid; i++)
__this_cpu_add(mt_cycles[i], cycles_new[i] - idle->mt_cycles_enter[i]);
}
/*
 * This is a bit subtle: Forward last_update_clock so it excludes idle
 * time. For correct steal time calculation in do_account_vtime() add
 * passed wall time before idle_enter to steal_timer:
 * During the passed wall time before idle_enter CPU time may have
 * been accounted to system, hardirq, softirq, etc. lowcore fields.
 * The accounted CPU times will be subtracted again from steal_timer
 * when accumulated steal time is calculated in do_account_vtime().
 */
lc->steal_timer += idle->clock_idle_enter - lc->last_update_clock;
lc->last_update_clock = lc->int_clock;
/* CPU timer consumed between idle entry and the wakeup interrupt counts as system time. */
lc->system_timer += lc->last_update_timer - idle->timer_idle_enter;
lc->last_update_timer = lc->sys_enter_timer;
}
DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
void account_idle_time_irq(void)
{