dlg@r6415 ~$ kstat msrs:::0; sleep 1; kstat msrs:::0
msrs:0:msrs-thread:0
 device: cpu0
package: 0
   core: 0
 thread: 0
    tsc: 4692239857644640 cycles
  mperf: 6261759072380 cycles
  aperf: 8133454793675 cycles
msrs:0:msrs-thread:0
 device: cpu0
package: 0
   core: 0
 thread: 0
    tsc: 4692242164638840 cycles
  mperf: 6261813931600 cycles
  aperf: 8133535293792 cycles

Index: amd64/amd64/intr.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/intr.c,v
retrieving revision 1.55
diff -u -p -r1.55 intr.c
--- amd64/amd64/intr.c	28 Dec 2020 14:23:30 -0000	1.55
+++ amd64/amd64/intr.c	9 Nov 2022 09:46:45 -0000
@@ -552,7 +552,10 @@ struct intrhand fake_softclock_intrhand;
 struct intrhand fake_softnet_intrhand;
 struct intrhand fake_softtty_intrhand;
 struct intrhand fake_timer_intrhand;
+#ifdef MULTIPROCESSOR
 struct intrhand fake_ipi_intrhand;
+struct intrhand fake_xcall_intrhand;
+#endif
 #if NXEN > 0
 struct intrhand fake_xen_intrhand;
 #endif
@@ -619,6 +622,17 @@ cpu_intr_init(struct cpu_info *ci)
 	isp->is_handlers = &fake_ipi_intrhand;
 	isp->is_pic = &local_pic;
 	ci->ci_isources[LIR_IPI] = isp;
+
+	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);
+	if (isp == NULL)
+		panic("can't allocate fixed interrupt source");
+	isp->is_recurse = Xxcallintr;
+	isp->is_resume = Xxcallintr;
+	fake_xcall_intrhand.ih_level = IPL_SOFTCLOCK;
+	fake_xcall_intrhand.ih_flags = IPL_MPSAFE;
+	isp->is_handlers = &fake_xcall_intrhand;
+	isp->is_pic = &local_pic;
+	ci->ci_isources[SIR_XCALL] = isp;
 #endif
 #if NXEN > 0
 	isp = malloc(sizeof (struct intrsource), M_DEVBUF, M_NOWAIT|M_ZERO);

Index: amd64/amd64/ipifuncs.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/ipifuncs.c,v
retrieving revision 1.36
diff -u -p -r1.36 ipifuncs.c
--- amd64/amd64/ipifuncs.c	31 Aug 2021 17:40:59 -0000	1.36
+++ amd64/amd64/ipifuncs.c	9 Nov 2022 09:46:45 -0000
@@ -61,6 +61,7 @@
 void x86_64_ipi_nop(struct cpu_info *);
 void x86_64_ipi_halt(struct cpu_info *);
 void x86_64_ipi_wbinvd(struct cpu_info *);
+void x86_64_ipi_xcall(struct cpu_info *);
 
 #if NVMM > 0
 void x86_64_ipi_vmclear_vmm(struct cpu_info *);
@@ -108,6 +109,7 @@ void (*ipifunc[X86_NIPI])(struct cpu_inf
 	NULL,
 #endif
 	x86_64_ipi_wbinvd,
+	x86_64_ipi_xcall,
 };
 
 void
@@ -166,3 +168,13 @@ x86_64_ipi_wbinvd(struct cpu_info *ci)
 {
 	wbinvd();
 }
+
+void
+x86_64_ipi_xcall(struct cpu_info *ci)
+{
+	/*
+	 * this is an inlining of softintr() because we already have
+	 * curcpu() and the SIR_XCALL bit to set.
+	 */
+	x86_atomic_setbits_u64(&ci->ci_ipending, 1UL << SIR_XCALL);
+}

Index: amd64/amd64/mainbus.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/mainbus.c,v
retrieving revision 1.52
diff -u -p -r1.52 mainbus.c
--- amd64/amd64/mainbus.c	21 Feb 2022 11:03:39 -0000	1.52
+++ amd64/amd64/mainbus.c	9 Nov 2022 09:46:45 -0000
@@ -49,6 +49,7 @@
 #include "bios.h"
 #include "mpbios.h"
 #include "vmm.h"
+#include "msrs.h"
 #include "pvbus.h"
 #include "efifb.h"
 
@@ -253,6 +254,13 @@ mainbus_attach(struct device *parent, st
 	if (isa_has_been_seen == 0)
 		config_found(self, &mba_iba, mainbus_print);
 #endif
+
+#if NMSRS > 0
+	{
+		mba.mba_busname = "msrs";
+		config_found(self, &mba.mba_busname, mainbus_print);
+	}
+#endif /* NMSRS > 0 */
 
 #if NVMM > 0
 	if (vmm_enabled()) {

Index: amd64/amd64/msrs.c
===================================================================
RCS file: amd64/amd64/msrs.c
diff -N amd64/amd64/msrs.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ amd64/amd64/msrs.c	9 Nov 2022 09:46:45 -0000
@@ -0,0 +1,445 @@
+/*	$OpenBSD$ */
+/*
+ * Copyright (c) 2022 David Gwynne
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include "kstat.h"
+
+#if NKSTAT == 0
+#error msrs(4) requires kstat(4)
+#endif
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/malloc.h>
+#include <sys/device.h>
+#include <sys/proc.h>
+#include <sys/sched.h>
+#include <sys/atomic.h>
+#include <sys/task.h>
+#include <sys/timeout.h>
+#include <sys/rwlock.h>
+#include <sys/cond.h>
+#include <sys/kstat.h>
+
+#include <machine/intr.h>
+
+#define MSR_RAPL_PWR_UNIT		0xc0010299
+#define MSR_RAPL_PWR_UNIT_ESU_SHIFT	8
+#define MSR_RAPL_PWR_UNIT_ESU_MASK	0x1f
+#define MSR_CORE_ENERGY_STATE		0xc001029a
+#define MSR_PKG_ENERGY_STATE		0xc001029b
+
+struct msrs_kstats_thread {
+	struct kstat_kv	mt_device;
+	struct kstat_kv	mt_package;
+	struct kstat_kv	mt_core;
+	struct kstat_kv	mt_thread;
+
+	struct kstat_kv	mt_tsc;
+	struct kstat_kv	mt_mperf;
+	struct kstat_kv	mt_aperf;
+};
+
+struct msrs_amd17h_energy {
+	struct timeout		tick;
+	struct task		xcall;
+	uint64_t		counter;
+	uint32_t		msr;
+	uint32_t		prev;
+	uint32_t		esu;
+	unsigned int		interval;
+};
+
+struct msrs_kstats_core {
+	struct kstat_kv	mc_raw;
+	struct kstat_kv	mc_energy;
+};
+
+struct msrs_kstats_pkg {
+	struct kstat_kv	mp_energy;
+};
+
+struct msrs_softc;
+
+struct msrs_link {
+	struct cpu_info		*l_ci;
+	struct kstat		*l_ks;
+	struct rwlock		 l_lk;
+	TAILQ_ENTRY(msrs_link)	 l_entry;
+};
+
+TAILQ_HEAD(msrs_links, msrs_link);
+
+struct msrs_softc {
+	struct device		sc_dev;
+	struct task		sc_deferred;
+
+	struct msrs_links	sc_threads;
+	struct msrs_links	sc_cores;
+	struct msrs_links	sc_pkgs;
+};
+
+static int	msrs_match(struct device *, void *, void *);
+static void	msrs_attach(struct device *, struct device *, void *);
+
+struct cfdriver msrs_cd = {
+	NULL, "msrs", DV_DULL, CD_SKIPHIBERNATE
+};
+
+const struct cfattach msrs_ca = {
+	sizeof(struct msrs_softc), msrs_match, msrs_attach, NULL, NULL
+};
+
+static void	msrs_deferred(void *);
+
+#if 0
+static int	msrs_link_cmp(struct msrs_links *, struct cpu_info *ci,
+		    int (*)(struct cpu_info *, struct cpu_info *));
+static int	msrs_link_cmp_core(struct cpu_info *, struct cpu_info *);
+static int	msrs_link_cmp_pkg(struct cpu_info *, struct cpu_info *);
+#endif
+
+static struct kstat *
+		msrs_attach_thread(struct msrs_softc *, struct cpu_info *);
+#if 0
+static struct kstat *
+		msrs_attach_core(struct msrs_softc *, struct cpu_info *);
+static struct kstat *
+		msrs_attach_pkg(struct msrs_softc *, struct cpu_info *);
+#endif
+
+#define msrs_core_exists(_sc, _ci) \
+	msrs_link_cmp(&(_sc)->sc_cores, (_ci), msrs_link_cmp_core)
+
+#define msrs_pkg_exists(_sc, _ci) \
+	msrs_link_cmp(&(_sc)->sc_pkgs, (_ci), msrs_link_cmp_pkg)
+
+static int
+msrs_match(struct device *parent, void *match, void *aux)
+{
+	const char **busname = (const char **)aux;
+
+	if (strcmp(*busname, msrs_cd.cd_name) != 0)
+		return (0);
+
+	return (1);
+}
+
+static void
+msrs_attach(struct device *parent, struct device *self, void *aux)
+{
+	struct msrs_softc *sc = (struct msrs_softc *)self;
+
+	TAILQ_INIT(&sc->sc_threads);
+	TAILQ_INIT(&sc->sc_cores);
+	TAILQ_INIT(&sc->sc_pkgs);
+
+	task_set(&sc->sc_deferred, msrs_deferred, sc);
+	task_add(systqmp, &sc->sc_deferred);
+
+	printf(": tsc, aperf, mperf\n");
+}
+
+static void
+msrs_deferred(void *arg)
+{
+	struct msrs_softc *sc = arg;
+	struct msrs_link *l;
+	struct cpu_info *ci;
+	struct kstat *ks;
+	CPU_INFO_ITERATOR cii;
+
+	CPU_INFO_FOREACH(cii, ci) {
+		sched_peg_curproc(ci);
+
+		l = malloc(sizeof(*l), M_DEVBUF, M_WAITOK);
+		ks = msrs_attach_thread(sc, ci);
+
+		l->l_ci = ci;
+		l->l_ks = ks;
+
+		rw_init(&l->l_lk, "msrslk");
+		kstat_set_wlock(ks, &l->l_lk);
+		ks->ks_ptr = l;
+		kstat_install(ks);
+
+		TAILQ_INSERT_TAIL(&sc->sc_threads, l, l_entry);
+	}
+
+	atomic_clearbits_int(&curproc->p_flag, P_CPUPEG);
+}
+
+struct msrs_xcall {
+	struct kstat	*mx_ks;
+	struct cond	 mx_c;
+};
+
+static void
+msrs_read_thread_xcall(void *arg)
+{
+	struct msrs_xcall *mx = arg;
+	struct kstat *ks = mx->mx_ks;
+	struct msrs_kstats_thread *mt = ks->ks_data;
+	unsigned long s;
+
+	s = intr_disable();
+	kstat_kv_u64(&mt->mt_tsc) = rdtsc_lfence();
+	kstat_kv_u64(&mt->mt_mperf) = rdmsr(0xe7);
+	kstat_kv_u64(&mt->mt_aperf) = rdmsr(0xe8);
+	nanouptime(&ks->ks_updated);
+	intr_restore(s);
+
+	cond_signal(&mx->mx_c);
+}
+
+static int
+msrs_read_thread(struct kstat *ks)
+{
+	struct msrs_link *l = ks->ks_ptr;
+	struct timespec now, diff;
+
+	/* rate limit the updates to roughly twice a second */
+	getnanouptime(&now);
+	timespecsub(&now, &ks->ks_updated, &diff);
+	if (diff.tv_sec > 0 || diff.tv_nsec > 500000000) {
+		struct msrs_xcall mx = { ks, COND_INITIALIZER() };
+		struct task t = TASK_INITIALIZER(msrs_read_thread_xcall, &mx);
+
+		cpu_xcall(l->l_ci, &t);
+
+		cond_wait(&mx.mx_c, "msrsxcall");
+	}
+
+	rw_enter(&l->l_lk, RW_DOWNGRADE);
+
+	return (0);
+}
+
+static struct kstat *
+msrs_attach_thread(struct msrs_softc *sc, struct cpu_info *ci)
+{
+	struct kstat *ks;
+	struct msrs_kstats_thread *mt;
+#if 0
+	unsigned long msr;
+	uint32_t eax, ebx, ecx, edx;
+
+	/* be conservative */
+	if (strcmp(cpu_vendor, "GenuineIntel") != 0 &&
+	    strcmp(cpu_vendor, "AuthenticAMD") != 0)
+		return;
+
+	CPUID(0x06, eax, ebx, ecx, edx);
+	if (!ISSET(ecx, 1U << 0))
+		return;
+
+	msr = rdmsr(0xC0010015);
+	SET(msr, 1UL << 30);
+	wrmsr(0xC0010015, msr);
+#endif
+
+	KASSERT(ci == curcpu());
+
+	ks = kstat_create(msrs_cd.cd_name, 0,
+	    "msrs-thread", ci->ci_dev->dv_unit,
+	    KSTAT_T_KV, 0);
+	if (ks == NULL) {
+		/* XXX printf */
+		return (NULL);
+	}
+
+	mt = malloc(sizeof(*mt), M_DEVBUF, M_WAITOK|M_ZERO);
+
+	kstat_kv_init(&mt->mt_device, "device", KSTAT_KV_T_ISTR);
+	if (strlcpy(kstat_kv_istr(&mt->mt_device), ci->ci_dev->dv_xname,
+	    sizeof(kstat_kv_istr(&mt->mt_device))) >=
+	    sizeof(kstat_kv_istr(&mt->mt_device)))
+		panic("%s: devname too long", __func__);
+
+	kstat_kv_init(&mt->mt_package, "package", KSTAT_KV_T_UINT32);
+	kstat_kv_u32(&mt->mt_package) = ci->ci_pkg_id;
+	kstat_kv_init(&mt->mt_core, "core", KSTAT_KV_T_UINT32);
+	kstat_kv_u32(&mt->mt_core) = ci->ci_core_id;
+	kstat_kv_init(&mt->mt_thread, "thread", KSTAT_KV_T_UINT32);
+	kstat_kv_u32(&mt->mt_thread) = ci->ci_smt_id;
+
+	kstat_kv_unit_init(&mt->mt_tsc, "tsc",
+	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_CYCLES);
+	kstat_kv_unit_init(&mt->mt_mperf, "mperf",
+	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_CYCLES);
+	kstat_kv_unit_init(&mt->mt_aperf, "aperf",
+	    KSTAT_KV_T_COUNTER64, KSTAT_KV_U_CYCLES);
+
+	ks->ks_softc = sc;
+	ks->ks_data = mt;
+	ks->ks_datalen = sizeof(*mt);
+	ks->ks_read = msrs_read_thread;
+
+	return (ks);
+}
+
+#if 0
+static int
+msrs_read_core(struct kstat *ks)
+{
+	struct msrs_kstats_core *mc = ks->ks_data;
+	struct msrs_amd17h_energy *thing = ks->ks_ptr;
+	uint32_t next, diff;
+
+	next = rdmsr(MSR_CORE_ENERGY_STATE);
+	diff = next - thing->prev;
+	thing->prev = next;
+
+	thing->counter += (uint64_t)diff;
+
+	kstat_kv_u64(&mc->mc_raw) = thing->counter;
+	kstat_kv_u64(&mc->mc_energy) =
+	    (1000000ULL * thing->counter) >> thing->esu;
+
+	nanouptime(&ks->ks_updated);
+	return (0);
+}
+
+static struct kstat *
+msrs_attach_core(struct msrs_softc *sc, struct cpu_info *ci)
+{
+	struct kstat *ks;
+	struct msrs_kstats_core *mc;
+	struct msrs_amd17h_energy *thing;
+
+#if 0
+	uint32_t eax, ebx, ecx, edx;
+
+	/* be conservative */
+	if (strcmp(cpu_vendor, "GenuineIntel") != 0 &&
+	    strcmp(cpu_vendor,
"AuthenticAMD") != 0) + return; + + CPUID(0x06, eax, ebx, ecx, edx); + if (!ISSET(ecx, 1U << 0)) + return; +#endif + + ks = kstat_create(msrs_cd.cd_name, 0, + "msrs-core", ci->ci_dev->dv_unit, + KSTAT_T_KV, 0); + if (ks == NULL) { + /* XXX printf */ + return (NULL); + } + + thing = malloc(sizeof(*thing), M_DEVBUF, M_WAITOK); + thing->prev = rdmsr(MSR_CORE_ENERGY_STATE); + thing->counter = 0; + thing->esu = (rdmsr(MSR_RAPL_PWR_UNIT) >> MSR_RAPL_PWR_UNIT_ESU_SHIFT) & + MSR_RAPL_PWR_UNIT_ESU_MASK; + + mc = malloc(sizeof(*mc), M_DEVBUF, M_WAITOK|M_ZERO); + kstat_kv_init(&mc->mc_raw, "raw", KSTAT_KV_T_COUNTER64); + kstat_kv_unit_init(&mc->mc_energy, "energy", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_UJOULES); + + kstat_set_cpu(ks, ci); + ks->ks_softc = sc; + ks->ks_ptr = thing; + ks->ks_data = mc; + ks->ks_datalen = sizeof(*mc); + ks->ks_read = msrs_read_core; + + kstat_install(ks); + + return (ks); +} + +int +msrs_read_pkg(struct kstat *ks) +{ + struct msrs_kstats_pkg *mp = ks->ks_data; + kstat_kv_u32(&mp->mp_energy) = rdmsr(0xc001029b); + nanouptime(&ks->ks_updated); + return (0); +} + +static struct kstat * +msrs_attach_pkg(struct msrs_softc *sc, struct cpu_info *ci) +{ + struct kstat *ks; + struct msrs_kstats_pkg *mp; +#if 0 + uint32_t eax, ebx, ecx, edx; + + /* be conservative */ + if (strcmp(cpu_vendor, "GenuineIntel") != 0 && + strcmp(cpu_vendor, "AuthenticAMD") != 0) + return; + + CPUID(0x06, eax, ebx, ecx, edx); + if (!ISSET(ecx, 1U << 0)) + return; +#endif + + ks = kstat_create(msrs_cd.cd_name, 0, + "msrs-pkg", ci->ci_dev->dv_unit, + KSTAT_T_KV, 0); + if (ks == NULL) { + /* XXX printf */ + return (NULL); + } + + kstat_set_cpu(ks, ci); + + mp = malloc(sizeof(*mp), M_DEVBUF, M_WAITOK|M_ZERO); + + kstat_kv_init(&mp->mp_energy, "energy", KSTAT_KV_T_COUNTER32); + + ks->ks_softc = sc; + ks->ks_data = mp; + ks->ks_datalen = sizeof(*mp); + ks->ks_read = msrs_read_pkg; + + kstat_install(ks); + + return (ks); +} + +static int +msrs_link_cmp_core(struct cpu_info *a, struct cpu_info *b) +{ + return (a->ci_pkg_id == b->ci_pkg_id) && + (a->ci_core_id == b->ci_core_id); +} + +static int +msrs_link_cmp_pkg(struct cpu_info *a, struct cpu_info *b) +{ + return (a->ci_pkg_id == b->ci_pkg_id); +} + +static int +msrs_link_cmp(struct msrs_links *ls, struct cpu_info *ci, + int (*cmp)(struct cpu_info *, struct cpu_info *)) +{ + struct msrs_link *l; + + TAILQ_FOREACH(l, ls, l_entry) { + if ((*cmp)(l->l_ci, ci)) + return (1); + } + + return (0); +} +#endif Index: amd64/amd64/softintr.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/softintr.c,v retrieving revision 1.10 diff -u -p -r1.10 softintr.c --- amd64/amd64/softintr.c 11 Sep 2020 09:27:09 -0000 1.10 +++ amd64/amd64/softintr.c 9 Nov 2022 09:46:45 -0000 @@ -38,6 +38,9 @@ #include #include +#include +#include + #include #include @@ -169,3 +172,58 @@ softintr_disestablish(void *arg) free(sih, M_DEVBUF, sizeof(*sih)); } + +void +#ifdef MULTIPROCESSOR +cpu_xcall_self(struct task *t) +#else +cpu_xcall(struct cpu_info *ci, struct task *t) +#endif +{ + int s = splsoftclock(); + (*t->t_func)(t->t_arg); + splx(s); +} + +#ifdef MULTIPROCESSOR +void +cpu_xcall(struct cpu_info *ci, struct task *t) +{ + size_t i; + + if (ci == curcpu()) { + /* execute the task immediately on the local cpu */ + cpu_xcall_self(t); + return; + } + + for (;;) { + for (i = 0; i < nitems(ci->ci_xcalls); i++) { + if (atomic_cas_ptr(&ci->ci_xcalls[i], NULL, t) != NULL) + continue; + + /* membar_producer(); */ + x86_send_ipi(ci, 
+			    X86_IPI_XCALL);
+			return;
+		}
+
+		CPU_BUSY_CYCLE();
+	}
+}
+
+void
+cpu_xcall_dispatch(void)
+{
+	struct cpu_info *ci = curcpu();
+	struct task *t;
+	size_t i;
+
+	for (i = 0; i < nitems(ci->ci_xcalls); i++) {
+		t = ci->ci_xcalls[i];
+		if (t != NULL) {
+			ci->ci_xcalls[i] = NULL;
+			(*t->t_func)(t->t_arg);
+		}
+	}
+}
+#endif /* MULTIPROCESSOR */

Index: amd64/amd64/vector.S
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/vector.S,v
retrieving revision 1.86
diff -u -p -r1.86 vector.S
--- amd64/amd64/vector.S	4 Sep 2021 22:15:33 -0000	1.86
+++ amd64/amd64/vector.S	9 Nov 2022 09:46:45 -0000
@@ -1445,3 +1445,12 @@ KIDTVEC(softclock)
 	decl	CPUVAR(IDEPTH)
 	jmp	retpoline_r13
 END(Xsoftclock)
+
+KIDTVEC(xcallintr)
+	movl	$IPL_SOFTCLOCK, CPUVAR(ILEVEL)
+	sti
+	incl	CPUVAR(IDEPTH)
+	call	_C_LABEL(cpu_xcall_dispatch)
+	decl	CPUVAR(IDEPTH)
+	jmp	retpoline_r13
+END(Xxcallintr)

Index: amd64/conf/GENERIC
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/GENERIC,v
retrieving revision 1.512
diff -u -p -r1.512 GENERIC
--- amd64/conf/GENERIC	8 Mar 2022 15:08:01 -0000	1.512
+++ amd64/conf/GENERIC	9 Nov 2022 09:46:45 -0000
@@ -35,6 +35,7 @@ isa0	at amdpcib?
 isa0	at tcpcib?
 pci*	at mainbus0
 vmm0	at mainbus0
+msrs0	at mainbus0
 pvbus0	at mainbus0
 
 acpi0	at bios0

Index: amd64/conf/files.amd64
===================================================================
RCS file: /cvs/src/sys/arch/amd64/conf/files.amd64,v
retrieving revision 1.105
diff -u -p -r1.105 files.amd64
--- amd64/conf/files.amd64	9 Feb 2022 23:54:34 -0000	1.105
+++ amd64/conf/files.amd64	9 Nov 2022 09:46:45 -0000
@@ -251,6 +251,13 @@ file	arch/amd64/amd64/vmm.c		vmm needs
 file	arch/amd64/amd64/vmm_support.S	vmm
 
 #
+# MSR kstats
+#
+device msrs {}
+attach msrs at mainbus
+file	arch/amd64/amd64/msrs.c		msrs needs-flag
+
+#
 # Machine-independent SD/MMC drivers
 #
 include "dev/sdmmc/files.sdmmc"

Index: amd64/include/cpu.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/cpu.h,v
retrieving revision 1.143
diff -u -p -r1.143 cpu.h
--- amd64/include/cpu.h	26 Jun 2022 07:14:55 -0000	1.143
+++ amd64/include/cpu.h	9 Nov 2022 09:46:45 -0000
@@ -90,6 +90,11 @@ union vmm_cpu_cap {
 	struct svm vcc_svm;
 };
 
+/*
+ * for xcalls
+ */
+struct task;
+
 struct x86_64_tss;
 struct cpu_info {
 	/*
@@ -192,6 +197,7 @@ struct cpu_info {
 
 #ifdef MULTIPROCESSOR
 	struct srp_hazard ci_srp_hazards[SRP_HAZARD_NUM];
+	struct task *ci_xcalls[4];
 #endif
 
 	struct ksensordev ci_sensordev;

Index: amd64/include/intr.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/intr.h,v
retrieving revision 1.33
diff -u -p -r1.33 intr.h
--- amd64/include/intr.h	14 Dec 2021 18:16:14 -0000	1.33
+++ amd64/include/intr.h	9 Nov 2022 09:46:45 -0000
@@ -207,6 +207,9 @@ void	cpu_intr_init(struct cpu_info *);
 void	intr_printconfig(void);
 void	intr_barrier(void *);
 
+struct task;
+void	cpu_xcall(struct cpu_info *ci, struct task *);
+
 #ifdef MULTIPROCESSOR
 void	x86_send_ipi(struct cpu_info *, int);
 int	x86_fast_ipi(struct cpu_info *, int);
@@ -215,6 +218,8 @@ void	x86_ipi_handler(void);
 void	x86_setperf_ipi(struct cpu_info *);
 
 extern void (*ipifunc[X86_NIPI])(struct cpu_info *);
+
+extern void Xxcallintr(void);
 #endif
 
 #endif /* !_LOCORE */

Index: amd64/include/intrdefs.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/intrdefs.h,v
retrieving revision 1.22
diff -u -p -r1.22 intrdefs.h
--- amd64/include/intrdefs.h	31 Aug 2021 17:40:59 -0000	1.22
+++ amd64/include/intrdefs.h	9 Nov 2022 09:46:45 -0000
@@ -54,9 +54,10 @@
 #define	SIR_CLOCK	61
 #define	SIR_NET		60
 #define	SIR_TTY		59
+#define	SIR_XCALL	58
 
-#define	LIR_XEN		58
-#define	LIR_HYPERV	57
+#define	LIR_XEN		57
+#define	LIR_HYPERV	56
 
 /*
  * Maximum # of interrupt sources per CPU. 64 to fit in one word.
@@ -84,8 +85,9 @@
 #define X86_IPI_START_VMM		0x00000100
 #define X86_IPI_STOP_VMM		0x00000200
 #define X86_IPI_WBINVD			0x00000400
+#define X86_IPI_XCALL			0x00000800
 
-#define X86_NIPI			12
+#define X86_NIPI			13
 
 #define IREENT_MAGIC			0x18041969

Index: amd64/include/specialreg.h
===================================================================
RCS file: /cvs/src/sys/arch/amd64/include/specialreg.h,v
retrieving revision 1.92
diff -u -p -r1.92 specialreg.h
--- amd64/include/specialreg.h	26 Apr 2022 08:35:30 -0000	1.92
+++ amd64/include/specialreg.h	9 Nov 2022 09:46:45 -0000
@@ -553,6 +553,16 @@
 #define IPM_SMI_CMP_HLT	0x08000000
 
 /*
+ * AMD 17h (Zen) MSRs
+ */
+
+#define MSR_RAPL_PWR_UNIT		0xc0010299
+#define MSR_RAPL_PWR_UNIT_ESU_SHIFT	8
+#define MSR_RAPL_PWR_UNIT_ESU_MASK	0x1f
+#define MSR_CORE_ENERGY_STATE		0xc001029a
+#define MSR_PKG_ENERGY_STATE		0xc001029b
+
+/*
  * These require a 'passcode' for access.  See cpufunc.h.
  */
 #define	MSR_HWCR	0xc0010015
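
For anyone wanting to poke at the new API, this is the usage pattern
msrs_read_thread() in the diff relies on: pair a task with a cond,
cpu_xcall() the task at the target cpu, then sleep on the cond until
the task has run there. Below is a minimal sketch of that pattern,
not part of the patch; struct example and the example_* names are
made up for illustration, and it assumes the same task/cond/cpufunc
headers msrs.c pulls in above.

	struct example {
		uint64_t	e_tsc;
		struct cond	e_c;
	};

	static void
	example_read(void *arg)
	{
		struct example *e = arg;

		/* this runs on the target cpu at IPL_SOFTCLOCK */
		e->e_tsc = rdtsc_lfence();
		cond_signal(&e->e_c);
	}

	uint64_t
	example_remote_tsc(struct cpu_info *ci)
	{
		struct example e = { 0, COND_INITIALIZER() };
		struct task t = TASK_INITIALIZER(example_read, &e);

		cpu_xcall(ci, &t);		/* runs in place if ci is us */
		cond_wait(&e.e_c, "extsc");	/* else wait for the softintr */

		return (e.e_tsc);
	}

Because cpu_xcall() executes the task directly when the target is the
local cpu, callers don't need to special-case that; they just can't
hold anything the task itself needs.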