Index: arch/amd64/include/cpu_full.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu_full.h,v
retrieving revision 1.5
diff -u -p -r1.5 cpu_full.h
--- arch/amd64/include/cpu_full.h	17 May 2019 19:07:47 -0000	1.5
+++ arch/amd64/include/cpu_full.h	19 Jan 2024 04:50:22 -0000
@@ -58,6 +58,7 @@ CTASSERT(_ALIGN(sizeof(struct x86_64_tss
 
 /* verify expected alignment */
 CTASSERT(offsetof(struct cpu_info_full, cif_cpu.ci_PAGEALIGN) % PAGE_SIZE == 0);
+CTASSERT(offsetof(struct cpu_info_full, cif_cpu.ci_mds_tmp) % 32 == 0);
 
 /* verify total size is multiple of page size */
 CTASSERT(sizeof(struct cpu_info_full) % PAGE_SIZE == 0);
Index: dev/kstat.c
===================================================================
RCS file: /cvs/src/sys/dev/kstat.c,v
retrieving revision 1.2
diff -u -p -r1.2 kstat.c
--- dev/kstat.c	31 Jan 2022 05:09:17 -0000	1.2
+++ dev/kstat.c	19 Jan 2024 04:50:24 -0000
@@ -22,6 +22,7 @@
 #include
 #include
 #include
+#include 	/* for kstat_set_cpu */
 
 #include
 
@@ -162,10 +163,16 @@ struct rwlock kstat_default_lock = RWLO
 int	kstat_read(struct kstat *);
 int	kstat_copy(struct kstat *, void *);
 
+static void	kstat_attach_late(void *);
+
+static struct task kstat_attach_task =
+    TASK_INITIALIZER(kstat_attach_late, NULL);
+
 int
 kstatattach(int num)
 {
 	/* XXX install system stats here */
+	task_add(systq, &kstat_attach_task);
 	return (0);
 }
 
@@ -698,4 +705,77 @@ kstat_kv_unit_init(struct kstat_kv *kv,
 	strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? */
 	kv->kv_type = type;
 	kv->kv_unit = unit;
+}
+
+/*
+ * common system level kstats
+ */
+
+static const char *kstat_cpustate_names[CPUSTATES] = {
+	[CP_USER] = "user",
+	[CP_NICE] = "nice",
+	[CP_SYS] = "sys",
+	[CP_SPIN] = "spin",
+	[CP_INTR] = "intr",
+	[CP_IDLE] = "idle",
+};
+
+static int
+kstat_cpustates_read(struct kstat *ks)
+{
+	struct cpu_info *ci = ks->ks_softc;
+	struct kstat_kv *kvs = ks->ks_data;
+	struct schedstate_percpu *spc = &ci->ci_schedstate;
+	unsigned int gen;
+	size_t i;
+
+	pc_cons_enter(&spc->spc_cp_time_lock, &gen);
+	do {
+		for (i = 0; i < CPUSTATES; i++)
+			kstat_kv_u64(&kvs[i]) = spc->spc_cp_time[i];
+	} while (pc_cons_leave(&spc->spc_cp_time_lock, &gen) != 0);
+
+	getnanouptime(&ks->ks_updated);
+
+	return (0);
+}
+
+static void
+kstat_cpustates_attach(struct cpu_info *ci)
+{
+	struct kstat *ks;
+	struct kstat_kv *kvs;
+	size_t i;
+
+	ks = kstat_create(ci->ci_dev->dv_xname, 0, "cpustates", 0,
+	    KSTAT_T_KV, 0);
+	if (ks == NULL) {
+		/* printf oh well */
+		return;
+	}
+
+	kvs = mallocarray(CPUSTATES, sizeof(*kvs), M_DEVBUF,
+	    M_WAITOK | M_ZERO);
+
+	for (i = 0; i < CPUSTATES; i++) {
+		kstat_kv_init(&kvs[i], kstat_cpustate_names[i],
+		    KSTAT_KV_T_COUNTER64);
+	}
+
+	ks->ks_softc = ci;
+	ks->ks_data = kvs;
+	ks->ks_datalen = CPUSTATES * sizeof(struct kstat_kv);
+	ks->ks_read = kstat_cpustates_read;
+
+	kstat_install(ks);
+}
+
+static void
+kstat_attach_late(void *null)
+{
+	CPU_INFO_ITERATOR cii;
+	struct cpu_info *ci;
+
+	CPU_INFO_FOREACH(cii, ci)
+		kstat_cpustates_attach(ci);
 }
Index: kern/kern_clock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_clock.c,v
retrieving revision 1.121
diff -u -p -r1.121 kern_clock.c
--- kern/kern_clock.c	17 Oct 2023 00:04:02 -0000	1.121
+++ kern/kern_clock.c	19 Jan 2024 04:50:27 -0000
@@ -285,6 +285,8 @@ statclock(struct clockrequest *cr, void
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	struct proc *p = curproc;
 	struct process *pr;
+	int cp;
+	unsigned int gen;
 
 	if (statclock_is_randomized) {
 		count = clockrequest_advance_random(cr, statclock_min,
@@ -300,10 +302,7 @@ statclock(struct clockrequest *cr, void
 		 * If this process is being profiled record the tick.
 		 */
 		p->p_uticks += count;
-		if (pr->ps_nice > NZERO)
-			spc->spc_cp_time[CP_NICE] += count;
-		else
-			spc->spc_cp_time[CP_USER] += count;
+		cp = (pr->ps_nice > NZERO) ? CP_NICE : CP_USER;
 	} else {
 		/*
 		 * Came from kernel mode, so we were:
@@ -321,16 +320,20 @@ statclock(struct clockrequest *cr, void
 		if (CLKF_INTR(frame)) {
 			if (p != NULL)
 				p->p_iticks += count;
-			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_INTR] += count;
+			cp = CP_INTR;
 		} else if (p != NULL && p != spc->spc_idleproc) {
 			p->p_sticks += count;
-			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_SYS] += count;
+			cp = CP_SYS;
 		} else
-			spc->spc_cp_time[spc->spc_spinning ?
-			    CP_SPIN : CP_IDLE] += count;
+			cp = CP_IDLE;
+
+		if (spc->spc_spinning)
+			cp = CP_SPIN;
 	}
+
+	gen = pc_sprod_enter(&spc->spc_cp_time_lock);
+	spc->spc_cp_time[cp] += count;
+	pc_sprod_leave(&spc->spc_cp_time_lock, gen);
 
 	if (p != NULL) {
 		p->p_cpticks += count;
Index: kern/kern_lock.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_lock.c,v
retrieving revision 1.72
diff -u -p -r1.72 kern_lock.c
--- kern/kern_lock.c	26 Apr 2022 15:31:14 -0000	1.72
+++ kern/kern_lock.c	19 Jan 2024 04:50:27 -0000
@@ -24,6 +24,7 @@
 #include
 #include
 #include
+#include <sys/pclock.h>
 
 #include
 
@@ -443,3 +444,97 @@ _mtx_init_flags(struct mutex *m, int ipl
 	_mtx_init(m, ipl);
 }
 #endif /* WITNESS */
+
+void
+pc_lock_init(struct pc_lock *pcl)
+{
+	pcl->pcl_gen = 0;
+}
+
+unsigned int
+pc_mprod_enter(struct pc_lock *pcl)
+{
+	unsigned int gen, ngen, ogen;
+
+	gen = pcl->pcl_gen;
+	for (;;) {
+		while (gen & 1) {
+			CPU_BUSY_CYCLE();
+			gen = pcl->pcl_gen;
+		}
+
+		ngen = 1 + gen;
+		ogen = atomic_cas_uint(&pcl->pcl_gen, gen, ngen);
+		if (gen == ogen)
+			break;
+
+		CPU_BUSY_CYCLE();
+		gen = ogen;
+	}
+
+	membar_enter_after_atomic();
+	return (ngen);
+}
+
+void
+pc_mprod_leave(struct pc_lock *pcl, unsigned int gen)
+{
+	membar_exit();
+	pcl->pcl_gen = ++gen;
+}
+
+unsigned int
+pc_sprod_enter(struct pc_lock *pcl)
+{
+	unsigned int gen;
+
+	gen = pcl->pcl_gen;
+	pcl->pcl_gen = ++gen;
+	membar_producer();
+
+	return (gen);
+}
+
+void
+pc_sprod_leave(struct pc_lock *pcl, unsigned int gen)
+{
+	membar_producer();
+	pcl->pcl_gen = ++gen;
+}
+
+void
+pc_cons_enter(struct pc_lock *pcl, unsigned int *genp)
+{
+	unsigned int gen;
+
+	gen = pcl->pcl_gen;
+	while (gen & 1) {
+		CPU_BUSY_CYCLE();
+		gen = pcl->pcl_gen;
+	}
+
+	membar_consumer();
+	*genp = gen;
+}
+
+int
+pc_cons_leave(struct pc_lock *pcl, unsigned int *genp)
+{
+	unsigned int gen;
+
+	membar_consumer();
+
+	gen = pcl->pcl_gen;
+	if (gen & 1) {
+		do {
+			CPU_BUSY_CYCLE();
+			gen = pcl->pcl_gen;
+		} while (gen & 1);
+	} else if (gen == *genp)
+		return (0);
+
+	/* retrying: order this gen load before the caller rereads the data */
+	membar_consumer();
+	*genp = gen;
+	return (EBUSY);
+}
Index: kern/kern_sched.c
===================================================================
RCS file: /cvs/src/sys/kern/kern_sched.c,v
retrieving revision 1.93
diff -u -p -r1.93 kern_sched.c
--- kern/kern_sched.c	24 Oct 2023 13:20:11 -0000	1.93
+++ kern/kern_sched.c	19 Jan 2024 04:50:27 -0000
@@ -83,6 +83,8 @@ sched_init_cpu(struct cpu_info *ci)
 	struct schedstate_percpu *spc = &ci->ci_schedstate;
 	int i;
 
+	pc_lock_init(&spc->spc_cp_time_lock);
+
 	for (i = 0; i < SCHED_NQS; i++)
 		TAILQ_INIT(&spc->spc_qs[i]);
 
Index: sys/pclock.h
===================================================================
RCS file: sys/pclock.h
diff -N sys/pclock.h
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ sys/pclock.h	19 Jan 2024 04:50:28 -0000
@@ -0,0 +1,49 @@
+/*	$OpenBSD$ */
+
+/*
+ * Copyright (c) 2023 David Gwynne <dlg@openbsd.org>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef _SYS_PCLOCK_H
+#define _SYS_PCLOCK_H
+
+#include
+
+struct pc_lock {
+	volatile unsigned int	pcl_gen;
+};
+
+#ifdef _KERNEL
+
+#define PC_LOCK_INITIALIZER()	{ .pcl_gen = 0 }
+
+void		pc_lock_init(struct pc_lock *);
+
+/* single (non-interlocking) producer */
+unsigned int	pc_sprod_enter(struct pc_lock *);
+void		pc_sprod_leave(struct pc_lock *, unsigned int);
+
+/* multiple (interlocking) producers */
+unsigned int	pc_mprod_enter(struct pc_lock *);
+void		pc_mprod_leave(struct pc_lock *, unsigned int);
+
+/* consumer */
+void		pc_cons_enter(struct pc_lock *, unsigned int *);
+__warn_unused_result int
+		pc_cons_leave(struct pc_lock *, unsigned int *);
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_PCLOCK_H */
Index: sys/sched.h
===================================================================
RCS file: /cvs/src/sys/sys/sched.h,v
retrieving revision 1.69
diff -u -p -r1.69 sched.h
--- sys/sched.h	14 Jan 2024 17:23:56 -0000	1.69
+++ sys/sched.h	19 Jan 2024 04:50:28 -0000
@@ -96,6 +96,7 @@ struct cpustats {
 
 #ifdef _KERNEL
 
 #include
+#include <sys/pclock.h>
 
 #define	SCHED_NQS	32			/* 32 run queues. */
@@ -112,6 +113,7 @@ struct schedstate_percpu {
 	struct timespec spc_runtime;	/* time curproc started running */
 	volatile int spc_schedflags;	/* flags; see below */
 	u_int spc_schedticks;		/* ticks for schedclock() */
+	struct pc_lock spc_cp_time_lock;
 	u_int64_t spc_cp_time[CPUSTATES]; /* CPU state statistics */
 	u_char spc_curpriority;		/* usrpri of curproc */
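
A note for reviewers: the diff only shows pc_lock in use from statclock() (the
single-producer side) and kstat_cpustates_read() (the consumer side). Below is
a minimal sketch of the same pattern in isolation. The counter and the stats_*
names are hypothetical; only the pc_* calls and PC_LOCK_INITIALIZER come from
sys/pclock.h above. A producer that could run concurrently on several CPUs
would use pc_mprod_enter/pc_mprod_leave instead of the sprod variants.

#include <sys/types.h>
#include <sys/pclock.h>

struct pc_lock stats_lock = PC_LOCK_INITIALIZER();
uint64_t stats_count;

void
stats_bump(void)
{
	unsigned int gen;

	/* make the generation odd so consumers know an update is in flight */
	gen = pc_sprod_enter(&stats_lock);
	stats_count++;
	/* make the generation even again, publishing the update */
	pc_sprod_leave(&stats_lock, gen);
}

uint64_t
stats_read(void)
{
	unsigned int gen;
	uint64_t v;

	/* wait for an even generation and remember it */
	pc_cons_enter(&stats_lock, &gen);
	do {
		v = stats_count;
		/* retry if a producer moved the generation under us */
	} while (pc_cons_leave(&stats_lock, &gen) != 0);

	return (v);
}

The generation counter doubles as the lock: odd means an update is in
progress, and a consumer only accepts a snapshot if it observed the same even
generation before and after reading, which is exactly how spc_cp_time_lock
protects spc_cp_time in the diff.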