Index: sys/arch/amd64/amd64/conf.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/conf.c,v retrieving revision 1.70 diff -u -p -r1.70 conf.c --- sys/arch/amd64/amd64/conf.c 25 May 2020 06:37:52 -0000 1.70 +++ sys/arch/amd64/amd64/conf.c 27 Jun 2020 06:34:43 -0000 @@ -142,6 +142,7 @@ cdev_decl(cy); #include "pctr.h" #include "bktr.h" #include "ksyms.h" +#include "kstat.h" #include "usb.h" #include "uhid.h" #include "fido.h" @@ -238,7 +239,7 @@ struct cdevsw cdevsw[] = cdev_notdef(), /* 48 */ cdev_bktr_init(NBKTR,bktr), /* 49: Bt848 video capture device */ cdev_ksyms_init(NKSYMS,ksyms), /* 50: Kernel symbols device */ - cdev_notdef(), /* 51 */ + cdev_kstat_init(NKSTAT,kstat), /* 51: Kernel statistics */ cdev_midi_init(NMIDI,midi), /* 52: MIDI I/O */ cdev_notdef(), /* 53 was: sequencer I/O */ cdev_notdef(), /* 54 was: RAIDframe disk driver */ Index: sys/arch/sparc64/sparc64/conf.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/sparc64/conf.c,v retrieving revision 1.83 diff -u -p -r1.83 conf.c --- sys/arch/sparc64/sparc64/conf.c 23 Jan 2020 02:40:22 -0000 1.83 +++ sys/arch/sparc64/sparc64/conf.c 27 Jun 2020 06:34:46 -0000 @@ -112,6 +112,7 @@ cdev_decl(pci); #include "pf.h" #include "ksyms.h" +#include "kstat.h" #include "hotplug.h" #include "vscsi.h" @@ -203,7 +204,7 @@ struct cdevsw cdevsw[] = cdev_notdef(), /* 48 */ cdev_notdef(), /* 49 */ cdev_notdef(), /* 50 */ - cdev_notdef(), /* 51 */ + cdev_kstat_init(NKSTAT,kstat), /* 51: kernel statistics */ #ifdef USER_PCICONF cdev_pci_init(NPCI,pci), /* 52: PCI user */ #else Index: sys/conf/GENERIC =================================================================== RCS file: /cvs/src/sys/conf/GENERIC,v retrieving revision 1.271 diff -u -p -r1.271 GENERIC --- sys/conf/GENERIC 23 Jun 2020 23:35:39 -0000 1.271 +++ sys/conf/GENERIC 27 Jun 2020 06:34:46 -0000 @@ -82,6 +82,7 @@ pseudo-device msts 1 # MSTS line discipl pseudo-device endrun 1 # EndRun line discipline pseudo-device vnd 4 # vnode disk devices pseudo-device ksyms 1 # kernel symbols device +pseudo-device kstat 1 # kernel statistics #pseudo-device dt # Dynamic Tracer # clonable devices Index: sys/conf/files =================================================================== RCS file: /cvs/src/sys/conf/files,v retrieving revision 1.689 diff -u -p -r1.689 files --- sys/conf/files 21 Jun 2020 12:14:48 -0000 1.689 +++ sys/conf/files 27 Jun 2020 06:34:46 -0000 @@ -573,6 +573,9 @@ pseudo-device wg: ifnet pseudo-device ksyms file dev/ksyms.c ksyms needs-flag +pseudo-device kstat +file dev/kstat.c kstat needs-flag + pseudo-device fuse file miscfs/fuse/fuse_device.c fuse needs-flag file miscfs/fuse/fuse_file.c fuse Index: sys/dev/kstat.c =================================================================== RCS file: sys/dev/kstat.c diff -N sys/dev/kstat.c --- /dev/null 1 Jan 1970 00:00:00 -0000 +++ sys/dev/kstat.c 27 Jun 2020 06:34:46 -0000 @@ -0,0 +1,693 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2020 David Gwynne + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include + +/* for kstat_set_cpu */ +#include +#include + +#include + +#define Static + +RBT_HEAD(kstat_id_tree, kstat); + +static inline int +kstat_id_cmp(const struct kstat *a, const struct kstat *b) +{ + if (a->ks_id > b->ks_id) + return (1); + if (a->ks_id < b->ks_id) + return (-1); + + return (0); +} + +RBT_PROTOTYPE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp); + +RBT_HEAD(kstat_pv_tree, kstat); + +static inline int +kstat_pv_cmp(const struct kstat *a, const struct kstat *b) +{ + int rv; + + rv = strcmp(a->ks_provider, b->ks_provider); + if (rv != 0) + return (rv); + + if (a->ks_instance > b->ks_instance) + return (1); + if (a->ks_instance < b->ks_instance) + return (-1); + + rv = strcmp(a->ks_name, b->ks_name); + if (rv != 0) + return (rv); + + if (a->ks_unit > b->ks_unit) + return (1); + if (a->ks_unit < b->ks_unit) + return (-1); + + return (0); +} + +RBT_PROTOTYPE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp); + +RBT_HEAD(kstat_nm_tree, kstat); + +static inline int +kstat_nm_cmp(const struct kstat *a, const struct kstat *b) +{ + int rv; + + rv = strcmp(a->ks_name, b->ks_name); + if (rv != 0) + return (rv); + + if (a->ks_unit > b->ks_unit) + return (1); + if (a->ks_unit < b->ks_unit) + return (-1); + + rv = strcmp(a->ks_provider, b->ks_provider); + if (rv != 0) + return (rv); + + if (a->ks_instance > b->ks_instance) + return (1); + if (a->ks_instance < b->ks_instance) + return (-1); + + return (0); +} + +RBT_PROTOTYPE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp); + +struct kstat_lock_ops { + void (*enter)(void *); + void (*leave)(void *); +}; + +#define kstat_enter(_ks) (_ks)->ks_lock_ops->enter((_ks)->ks_lock) +#define kstat_leave(_ks) (_ks)->ks_lock_ops->leave((_ks)->ks_lock) + +Static const struct kstat_lock_ops kstat_rlock_ops = { + (void (*)(void *))rw_enter_read, + (void (*)(void *))rw_exit_read, +}; + +Static const struct kstat_lock_ops kstat_wlock_ops = { + (void (*)(void *))rw_enter_write, + (void (*)(void *))rw_exit_write, +}; + +Static const struct kstat_lock_ops kstat_mutex_ops = { + (void (*)(void *))mtx_enter, + (void (*)(void *))mtx_leave, +}; + +Static void kstat_cpu_enter(void *); +Static void kstat_cpu_leave(void *); + +Static const struct kstat_lock_ops kstat_cpu_ops = { + kstat_cpu_enter, + kstat_cpu_leave, +}; + +Static struct rwlock kstat_lock = RWLOCK_INITIALIZER("kstat"); + +/* + * The global state is versioned so changes to the set of kstats + * can be detected. This is an int so it can be read atomically on + * any arch, which is ridiculous optimisation, really. + */ +Static unsigned int kstat_version = 0; + +/* + * kstat structures have a unique identifier so they can be found + * quickly. Identifiers are 64bit in the hope that it won't wrap + * during the runtime of a system. The identifiers start at 1 so that + * 0 can be used as the first value for userland to iterate with. 
+ */ +Static uint64_t kstat_next_id = 1; + +Static struct kstat_id_tree kstat_id_tree = RBT_INITIALIZER(); +Static struct kstat_pv_tree kstat_pv_tree = RBT_INITIALIZER(); +Static struct kstat_nm_tree kstat_nm_tree = RBT_INITIALIZER(); +Static struct pool kstat_pool; + +Static struct rwlock kstat_default_lock = + RWLOCK_INITIALIZER("kstatlk"); + +Static int kstat_read(struct kstat *); +Static int kstat_copy(struct kstat *, void *); + +int +kstatattach(int num) +{ + /* XXX install system stats here */ + return (0); +} + +int +kstatopen(dev_t dev, int flag, int mode, struct proc *p) +{ + return (0); +} + +int +kstatclose(dev_t dev, int flag, int mode, struct proc *p) +{ + return (0); +} + +Static int +kstatioc_enter(struct kstat_req *ksreq) +{ + int error; + + error = rw_enter(&kstat_lock, RW_READ | RW_INTR); + if (error != 0) + return (error); + + if (!ISSET(ksreq->ks_rflags, KSTATIOC_F_IGNVER) && + ksreq->ks_version != kstat_version) { + error = EBUSY; + goto error; + } + + return (0); + +error: + rw_exit(&kstat_lock); + return (error); +} + +#define sstrlcpy(_dst, _src) \ + (strlcpy((_dst), (_src), sizeof((_dst))) >= sizeof((_dst))) + +Static int +kstatioc_leave(struct kstat_req *ksreq, struct kstat *ks) +{ + void *buf = NULL; + size_t klen = 0, ulen = 0; + struct timespec updated; + int error = 0; + + if (ks == NULL) { + error = ENOENT; + goto error; + } + + switch (ks->ks_state) { + case KSTAT_S_CREATED: + ksreq->ks_updated = ks->ks_created; + ksreq->ks_interval.tv_sec = 0; + ksreq->ks_interval.tv_nsec = 0; + ksreq->ks_datalen = 0; + ksreq->ks_dataver = 0; + break; + + case KSTAT_S_INSTALLED: + ksreq->ks_dataver = ks->ks_dataver; + ksreq->ks_interval = ks->ks_interval; + + if (ksreq->ks_data == NULL) { + /* userland doesn't want actual data, so shortcut */ + kstat_enter(ks); + ksreq->ks_datalen = ks->ks_datalen; + ksreq->ks_updated = ks->ks_updated; + kstat_leave(ks); + break; + } + + klen = ks->ks_datalen; /* KSTAT_F_REALLOC */ + buf = malloc(klen, M_TEMP, M_WAITOK|M_CANFAIL); + if (buf == NULL) { + error = ENOMEM; + goto error; + } + + kstat_enter(ks); + error = (*ks->ks_read)(ks); + if (error == 0) { + updated = ks->ks_updated; + + /* KSTAT_F_REALLOC */ + KASSERTMSG(ks->ks_datalen == klen, + "kstat doesnt support resized data yet"); + + error = (*ks->ks_copy)(ks, buf); + } + kstat_leave(ks); + + if (error != 0) + goto error; + + ulen = ksreq->ks_datalen; + ksreq->ks_datalen = klen; /* KSTAT_F_REALLOC */ + ksreq->ks_updated = updated; + break; + default: + panic("ks %p unexpected state %u", ks, ks->ks_state); + } + + ksreq->ks_version = kstat_version; + ksreq->ks_id = ks->ks_id; + + if (sstrlcpy(ksreq->ks_provider, ks->ks_provider) != 0) + panic("kstat provider string has grown"); + ksreq->ks_instance = ks->ks_instance; + if (sstrlcpy(ksreq->ks_name, ks->ks_name) != 0) + panic("kstat name string has grown"); + ksreq->ks_unit = ks->ks_unit; + + ksreq->ks_created = ks->ks_created; + ksreq->ks_type = ks->ks_type; + ksreq->ks_state = ks->ks_state; + +error: + rw_exit(&kstat_lock); + + if (buf != NULL) { + if (error == 0) + error = copyout(buf, ksreq->ks_data, min(klen, ulen)); + + free(buf, M_TEMP, klen); + } + + return (error); +} + +Static int +kstatioc_find_id(struct kstat_req *ksreq) +{ + struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_id = ksreq->ks_id; + + ks = RBT_FIND(kstat_id_tree, &kstat_id_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +Static int +kstatioc_nfind_id(struct kstat_req *ksreq) +{ + 
struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_id = ksreq->ks_id; + + ks = RBT_NFIND(kstat_id_tree, &kstat_id_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +Static int +kstatioc_find_pv(struct kstat_req *ksreq) +{ + struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_provider = ksreq->ks_provider; + key.ks_instance = ksreq->ks_instance; + key.ks_name = ksreq->ks_name; + key.ks_unit = ksreq->ks_unit; + + ks = RBT_FIND(kstat_pv_tree, &kstat_pv_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +Static int +kstatioc_nfind_pv(struct kstat_req *ksreq) +{ + struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_provider = ksreq->ks_provider; + key.ks_instance = ksreq->ks_instance; + key.ks_name = ksreq->ks_name; + key.ks_unit = ksreq->ks_unit; + + ks = RBT_NFIND(kstat_pv_tree, &kstat_pv_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +Static int +kstatioc_find_nm(struct kstat_req *ksreq) +{ + struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_name = ksreq->ks_name; + key.ks_unit = ksreq->ks_unit; + key.ks_provider = ksreq->ks_provider; + key.ks_instance = ksreq->ks_instance; + + ks = RBT_FIND(kstat_nm_tree, &kstat_nm_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +Static int +kstatioc_nfind_nm(struct kstat_req *ksreq) +{ + struct kstat *ks, key; + int error; + + error = kstatioc_enter(ksreq); + if (error != 0) + return (error); + + key.ks_name = ksreq->ks_name; + key.ks_unit = ksreq->ks_unit; + key.ks_provider = ksreq->ks_provider; + key.ks_instance = ksreq->ks_instance; + + ks = RBT_NFIND(kstat_nm_tree, &kstat_nm_tree, &key); + + return (kstatioc_leave(ksreq, ks)); +} + +int +kstatioctl(dev_t dev, u_long cmd, caddr_t data, int flag, struct proc *p) +{ + struct kstat_req *ksreq = (struct kstat_req *)data; + int error = 0; + + KERNEL_UNLOCK(); + + switch (cmd) { + case KSTATIOC_VERSION: + *(unsigned int *)data = kstat_version; + break; + + case KSTATIOC_FIND_ID: + error = kstatioc_find_id(ksreq); + break; + case KSTATIOC_NFIND_ID: + error = kstatioc_nfind_id(ksreq); + break; + case KSTATIOC_FIND_PROVIDER: + error = kstatioc_find_pv(ksreq); + break; + case KSTATIOC_NFIND_PROVIDER: + error = kstatioc_nfind_pv(ksreq); + break; + case KSTATIOC_FIND_NAME: + error = kstatioc_find_nm(ksreq); + break; + case KSTATIOC_NFIND_NAME: + error = kstatioc_nfind_nm(ksreq); + break; + + default: + error = ENOTTY; + break; + } + + KERNEL_LOCK(); + + return (error); +} + +Static void +kstat_init(void) +{ + static int initialized = 0; + + if (initialized) + return; + + pool_init(&kstat_pool, sizeof(struct kstat), 0, IPL_NONE, + PR_WAITOK | PR_RWLOCK, "kstatmem", NULL); + + initialized = 1; +} + +Static int +kstat_strcheck(const char *str) +{ + size_t i, l; + + l = strlen(str); + if (l == 0 || l >= KSTAT_STRLEN) + return (-1); + for (i = 0; i < l; i++) { + int ch = str[i]; + if (ch >= 'a' && ch <= 'z') + continue; + if (ch >= 'A' && ch <= 'Z') + continue; + if (ch >= '0' && ch <= '9') + continue; + switch (ch) { + case '-': + case '_': + case '.': + break; + default: + return (-1); + } + } + + return (0); +} + +struct kstat * +kstat_create(const char *provider, unsigned int instance, + const char *name, unsigned int unit, + unsigned int type, unsigned int flags) +{ + struct kstat *ks, *oks; + + if 
(kstat_strcheck(provider) == -1) + panic("invalid provider string"); + if (kstat_strcheck(name) == -1) + panic("invalid name string"); + + kstat_init(); + + ks = pool_get(&kstat_pool, PR_WAITOK|PR_ZERO); + + ks->ks_provider = provider; + ks->ks_instance = instance; + ks->ks_name = name; + ks->ks_unit = unit; + ks->ks_flags = flags; + ks->ks_type = type; + ks->ks_state = KSTAT_S_CREATED; + + getnanouptime(&ks->ks_created); + ks->ks_updated = ks->ks_created; + + ks->ks_lock = &kstat_default_lock; + ks->ks_lock_ops = &kstat_wlock_ops; + ks->ks_read = kstat_read; + ks->ks_copy = kstat_copy; + + rw_enter_write(&kstat_lock); + ks->ks_id = kstat_next_id; + + oks = RBT_INSERT(kstat_pv_tree, &kstat_pv_tree, ks); + if (oks == NULL) { + /* commit */ + kstat_next_id++; + kstat_version++; + + oks = RBT_INSERT(kstat_nm_tree, &kstat_nm_tree, ks); + if (oks != NULL) + panic("kstat name collision! (%llu)", ks->ks_id); + + oks = RBT_INSERT(kstat_id_tree, &kstat_id_tree, ks); + if (oks != NULL) + panic("kstat id collision! (%llu)", ks->ks_id); + } + rw_exit_write(&kstat_lock); + + if (oks != NULL) { + pool_put(&kstat_pool, ks); + return (NULL); + } + + return (ks); +} + +void +kstat_set_rlock(struct kstat *ks, struct rwlock *rwl) +{ + KASSERT(ks->ks_state == KSTAT_S_CREATED); + + ks->ks_lock = rwl; + ks->ks_lock_ops = &kstat_rlock_ops; +} + +void +kstat_set_wlock(struct kstat *ks, struct rwlock *rwl) +{ + KASSERT(ks->ks_state == KSTAT_S_CREATED); + + ks->ks_lock = rwl; + ks->ks_lock_ops = &kstat_wlock_ops; +} + +void +kstat_set_mutex(struct kstat *ks, struct mutex *mtx) +{ + KASSERT(ks->ks_state == KSTAT_S_CREATED); + + ks->ks_lock = mtx; + ks->ks_lock_ops = &kstat_mutex_ops; +} + +Static void +kstat_cpu_enter(void *p) +{ + struct cpu_info *ci = p; + sched_peg_curproc(ci); +} + +Static void +kstat_cpu_leave(void *p) +{ + atomic_clearbits_int(&curproc->p_flag, P_CPUPEG); +} + +void +kstat_set_cpu(struct kstat *ks, struct cpu_info *ci) +{ + KASSERT(ks->ks_state == KSTAT_S_CREATED); + + ks->ks_lock = ci; + ks->ks_lock_ops = &kstat_cpu_ops; +} + +int +kstat_read_nop(struct kstat *ks) +{ + return (0); +} + +void +kstat_install(struct kstat *ks) +{ + if (!ISSET(ks->ks_flags, KSTAT_F_REALLOC)) { + KASSERTMSG(ks->ks_copy != NULL || ks->ks_data != NULL, + "kstat %s:%u:%s:%u must provide ks_copy or ks_data", + ks->ks_provider, ks->ks_instance, ks->ks_name, ks->ks_unit); + KASSERT(ks->ks_datalen > 0); + } + + rw_enter_write(&kstat_lock); + ks->ks_state = KSTAT_S_INSTALLED; + rw_exit_write(&kstat_lock); +} + +void +kstat_destroy(struct kstat *ks) +{ + rw_enter_write(&kstat_lock); + RBT_REMOVE(kstat_id_tree, &kstat_id_tree, ks); + RBT_REMOVE(kstat_pv_tree, &kstat_pv_tree, ks); + RBT_REMOVE(kstat_nm_tree, &kstat_nm_tree, ks); + kstat_version++; + rw_exit_write(&kstat_lock); + + pool_put(&kstat_pool, ks); +} + +Static int +kstat_read(struct kstat *ks) +{ + getnanouptime(&ks->ks_updated); + return (0); +} + +Static int +kstat_copy(struct kstat *ks, void *buf) +{ + memcpy(buf, ks->ks_data, ks->ks_datalen); + return (0); +} + +RBT_GENERATE(kstat_id_tree, kstat, ks_id_entry, kstat_id_cmp); +RBT_GENERATE(kstat_pv_tree, kstat, ks_pv_entry, kstat_pv_cmp); +RBT_GENERATE(kstat_nm_tree, kstat, ks_nm_entry, kstat_nm_cmp); + +void +kstat_kv_init(struct kstat_kv *kv, const char *name, enum kstat_kv_type type) +{ + memset(kv, 0, sizeof(*kv)); + strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? 
*/ + kv->kv_type = type; + kv->kv_unit = KSTAT_KV_U_NONE; +} + +void +kstat_kv_unit_init(struct kstat_kv *kv, const char *name, + enum kstat_kv_type type, enum kstat_kv_unit unit) +{ + switch (type) { + case KSTAT_KV_T_COUNTER64: + case KSTAT_KV_T_COUNTER32: + case KSTAT_KV_T_UINT64: + case KSTAT_KV_T_INT64: + case KSTAT_KV_T_UINT32: + case KSTAT_KV_T_INT32: + break; + default: + panic("kv unit init %s: unit for non-integer type", name); + } + + memset(kv, 0, sizeof(*kv)); + strlcpy(kv->kv_key, name, sizeof(kv->kv_key)); /* XXX truncated? */ + kv->kv_type = type; + kv->kv_unit = unit; +} Index: sys/dev/pci/if_mcx.c =================================================================== RCS file: /cvs/src/sys/dev/pci/if_mcx.c,v retrieving revision 1.58 diff -u -p -r1.58 if_mcx.c --- sys/dev/pci/if_mcx.c 26 Jun 2020 05:05:42 -0000 1.58 +++ sys/dev/pci/if_mcx.c 27 Jun 2020 06:34:47 -0000 @@ -1,4 +1,4 @@ -/* $OpenBSD: if_mcx.c,v 1.58 2020/06/26 05:05:42 dlg Exp $ */ +/* $OpenBSD: if_mcx.c,v 1.57 2020/06/26 03:07:10 dlg Exp $ */ /* * Copyright (c) 2017 David Gwynne @@ -19,6 +19,7 @@ #include "bpfilter.h" #include "vlan.h" +#include "kstat.h" #include #include @@ -44,6 +45,10 @@ #include #endif +#if NKSTAT > 0 +#include +#endif + #include #include @@ -142,6 +147,8 @@ CTASSERT(ETHER_HDR_LEN + ETHER_VLAN_ENCA #define MCX_REG_PAOS 0x5006 #define MCX_REG_PFCC 0x5007 #define MCX_REG_PPCNT 0x5008 +#define MCX_REG_MTCAP 0x9009 /* mgmt temp capabilities */ +#define MCX_REG_MTMP 0x900a /* mgmt temp */ #define MCX_REG_MCIA 0x9014 #define MCX_ETHER_CAP_SGMII 0 @@ -430,6 +437,9 @@ struct mcx_reg_pmlp { } __packed __aligned(4); struct mcx_reg_ppcnt { + uint8_t ppcnt_swid; + uint8_t ppcnt_local_port; + uint8_t ppcnt_pnat; uint8_t ppcnt_grp; #define MCX_REG_PPCNT_GRP_IEEE8023 0x00 #define MCX_REG_PPCNT_GRP_RFC2863 0x01 @@ -438,13 +448,10 @@ struct mcx_reg_ppcnt { #define MCX_REG_PPCNT_GRP_PER_PRIO 0x10 #define MCX_REG_PPCNT_GRP_PER_TC 0x11 #define MCX_REG_PPCNT_GRP_PER_RX_BUFFER 0x11 - uint8_t ppcnt_pnat; - uint8_t ppcnt_local_port; - uint8_t ppcnt_swid; - uint8_t ppcnt_prio_tc; - uint8_t ppcnt_reserved1[2]; uint8_t ppcnt_clr; + uint8_t ppcnt_reserved1[2]; + uint8_t ppcnt_prio_tc; #define MCX_REG_PPCNT_CLR (1 << 7) uint8_t ppcnt_counter_set[248]; @@ -453,90 +460,134 @@ CTASSERT(sizeof(struct mcx_reg_ppcnt) == CTASSERT((offsetof(struct mcx_reg_ppcnt, ppcnt_counter_set) % sizeof(uint64_t)) == 0); -struct mcx_ppcnt_ieee8023 { - uint64_t frames_transmitted_ok; - uint64_t frames_received_ok; - uint64_t frame_check_sequence_errors; - uint64_t alignment_errors; - uint64_t octets_transmitted_ok; - uint64_t octets_received_ok; - uint64_t multicast_frames_xmitted_ok; - uint64_t broadcast_frames_xmitted_ok; - uint64_t multicast_frames_received_ok; - uint64_t broadcast_frames_received_ok; - uint64_t in_range_length_errors; - uint64_t out_of_range_length_field; - uint64_t frame_too_long_errors; - uint64_t symbol_error_during_carrier; - uint64_t mac_control_frames_transmitted; - uint64_t mac_control_frames_received; - uint64_t unsupported_opcodes_received; - uint64_t pause_mac_ctrl_frames_received; - uint64_t pause_mac_ctrl_frames_transmitted; +enum mcx_ppcnt_ieee8023 { + frames_transmitted_ok, + frames_received_ok, + frame_check_sequence_errors, + alignment_errors, + octets_transmitted_ok, + octets_received_ok, + multicast_frames_xmitted_ok, + broadcast_frames_xmitted_ok, + multicast_frames_received_ok, + broadcast_frames_received_ok, + in_range_length_errors, + out_of_range_length_field, + frame_too_long_errors, + 
symbol_error_during_carrier, + mac_control_frames_transmitted, + mac_control_frames_received, + unsupported_opcodes_received, + pause_mac_ctrl_frames_received, + pause_mac_ctrl_frames_transmitted, + + mcx_ppcnt_ieee8023_count }; -CTASSERT(sizeof(struct mcx_ppcnt_ieee8023) == 0x98); +CTASSERT(mcx_ppcnt_ieee8023_count * sizeof(uint64_t) == 0x98); -struct mcx_ppcnt_rfc2863 { - uint64_t in_octets; - uint64_t in_ucast_pkts; - uint64_t in_discards; - uint64_t in_errors; - uint64_t in_unknown_protos; - uint64_t out_octets; - uint64_t out_ucast_pkts; - uint64_t out_discards; - uint64_t out_errors; - uint64_t in_multicast_pkts; - uint64_t in_broadcast_pkts; - uint64_t out_multicast_pkts; - uint64_t out_broadcast_pkts; +enum mcx_ppcnt_rfc2863 { + in_octets, + in_ucast_pkts, + in_discards, + in_errors, + in_unknown_protos, + out_octets, + out_ucast_pkts, + out_discards, + out_errors, + in_multicast_pkts, + in_broadcast_pkts, + out_multicast_pkts, + out_broadcast_pkts, + + mcx_ppcnt_rfc2863_count }; -CTASSERT(sizeof(struct mcx_ppcnt_rfc2863) == 0x68); +CTASSERT(mcx_ppcnt_rfc2863_count * sizeof(uint64_t) == 0x68); -struct mcx_ppcnt_rfc2819 { - uint64_t drop_events; - uint64_t octets; - uint64_t pkts; - uint64_t broadcast_pkts; - uint64_t multicast_pkts; - uint64_t crc_align_errors; - uint64_t undersize_pkts; - uint64_t oversize_pkts; - uint64_t fragments; - uint64_t jabbers; - uint64_t collisions; - uint64_t pkts64octets; - uint64_t pkts65to127octets; - uint64_t pkts128to255octets; - uint64_t pkts256to511octets; - uint64_t pkts512to1023octets; - uint64_t pkts1024to1518octets; - uint64_t pkts1519to2047octets; - uint64_t pkts2048to4095octets; - uint64_t pkts4096to8191octets; - uint64_t pkts8192to10239octets; +enum mcx_ppcnt_rfc2819 { + drop_events, + octets, + pkts, + broadcast_pkts, + multicast_pkts, + crc_align_errors, + undersize_pkts, + oversize_pkts, + fragments, + jabbers, + collisions, + pkts64octets, + pkts65to127octets, + pkts128to255octets, + pkts256to511octets, + pkts512to1023octets, + pkts1024to1518octets, + pkts1519to2047octets, + pkts2048to4095octets, + pkts4096to8191octets, + pkts8192to10239octets, + + mcx_ppcnt_rfc2819_count +}; +CTASSERT((mcx_ppcnt_rfc2819_count * sizeof(uint64_t)) == 0xa8); + +enum mcx_ppcnt_rfc3635 { + dot3stats_alignment_errors, + dot3stats_fcs_errors, + dot3stats_single_collision_frames, + dot3stats_multiple_collision_frames, + dot3stats_sqe_test_errors, + dot3stats_deferred_transmissions, + dot3stats_late_collisions, + dot3stats_excessive_collisions, + dot3stats_internal_mac_transmit_errors, + dot3stats_carrier_sense_errors, + dot3stats_frame_too_longs, + dot3stats_internal_mac_receive_errors, + dot3stats_symbol_errors, + dot3control_in_unknown_opcodes, + dot3in_pause_frames, + dot3out_pause_frames, + + mcx_ppcnt_rfc3635_count }; -CTASSERT(sizeof(struct mcx_ppcnt_rfc2819) == 0xa8); +CTASSERT((mcx_ppcnt_rfc3635_count * sizeof(uint64_t)) == 0x80); + +struct mcx_reg_mtcap { + uint8_t _reserved1[3]; + uint8_t mtcap_sensor_count; + uint8_t _reserved2[4]; -struct mcx_ppcnt_rfc3635 { - uint64_t alignment_errors; - uint64_t fcs_errors; - uint64_t single_collision_frames; - uint64_t multiple_collision_frames; - uint64_t sqe_test_errors; - uint64_t deferred_transmissions; - uint64_t late_collisions; - uint64_t excessive_collisions; - uint64_t internal_mac_transmit_errors; - uint64_t carrier_sense_errors; - uint64_t frame_too_longs; - uint64_t internal_mac_receive_errors; - uint64_t symbol_errors; - uint64_t control_in_unknown_opcodes; - uint64_t control_in_pause_frames; - 
uint64_t control_out_pause_frames; + uint64_t mtcap_sensor_map; }; -CTASSERT(sizeof(struct mcx_ppcnt_rfc3635) == 0x80); + +struct mcx_reg_mtmp { + uint8_t _reserved1[2]; + uint16_t mtmp_sensor_index; + + uint8_t _reserved2[2]; + uint16_t mtmp_temperature; + + uint16_t mtmp_mte_mtr; +#define MCX_REG_MTMP_MTE (1 << 15) +#define MCX_REG_MTMP_MTR (1 << 14) + uint16_t mtmp_max_temperature; + + uint16_t mtmp_tee; +#define MCX_REG_MTMP_TEE_NOPE (0 << 14) +#define MCX_REG_MTMP_TEE_GENERATE (1 << 14) +#define MCX_REG_MTMP_TEE_GENERATE_ONE (2 << 14) + uint16_t mtmp_temperature_threshold_hi; + + uint8_t _reserved3[2]; + uint16_t mtmp_temperature_threshold_lo; + + uint8_t _reserved4[4]; + + uint8_t mtmp_sensor_name[8]; +}; +CTASSERT(sizeof(struct mcx_reg_mtmp) == 0x20); +CTASSERT(offsetof(struct mcx_reg_mtmp, mtmp_sensor_name) == 0x18); #define MCX_MCIA_EEPROM_BYTES 32 struct mcx_reg_mcia { @@ -2124,12 +2175,25 @@ struct mcx_softc { unsigned int sc_nqueues; int sc_num_cq; + +#if NKSTAT > 0 + struct kstat *sc_kstat_ieee8023; + struct kstat *sc_kstat_rfc2863; + struct kstat *sc_kstat_rfc2819; + struct kstat *sc_kstat_rfc3635; + unsigned int sc_kstat_mtmp_count; + struct kstat **sc_kstat_mtmp; +#endif }; #define DEVNAME(_sc) ((_sc)->sc_dev.dv_xname) static int mcx_match(struct device *, void *, void *); static void mcx_attach(struct device *, struct device *, void *); +#if NKSTAT > 0 +static void mcx_kstat_attach(struct mcx_softc *); +#endif + static int mcx_version(struct mcx_softc *); static int mcx_init_wait(struct mcx_softc *); static int mcx_enable_hca(struct mcx_softc *); @@ -2568,6 +2632,10 @@ mcx_attach(struct device *parent, struct } sc->sc_extra_mcast = 0; memset(sc->sc_mcast_flows, 0, sizeof(sc->sc_mcast_flows)); + +#if NKSTAT > 0 + mcx_kstat_attach(sc); +#endif return; teardown: @@ -7130,3 +7198,441 @@ mcx_hwmem_free(struct mcx_softc *sc, str mhm->mhm_npages = 0; } + +#if NKSTAT > 0 +static const struct kstat_kv mcx_kstat_ieee8023_tpl[] = { + KSTAT_KV_UNIT_INITIALIZER("Good Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Good Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("FCS Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Alignment Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Good Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("Good Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("Multicast Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Broadcast Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Multicast Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Broadcast Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("In Range Len", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Out Of Range Len", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Frame Too Long", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Symbol Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("MAC Ctrl Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("MAC Ctrl Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("MAC Ctrl Unsup", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Pause Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + 
KSTAT_KV_UNIT_INITIALIZER("Pause Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), +}; +CTASSERT(nitems(mcx_kstat_ieee8023_tpl) == mcx_ppcnt_ieee8023_count); + +static const struct kstat_kv mcx_kstat_rfc2863_tpl[] = { + KSTAT_KV_UNIT_INITIALIZER("Rx Bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("Rx Unicast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Rx Discards", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Rx Errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Rx Unknown Proto", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Tx Bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("Tx Unicast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Tx Discards", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Tx Errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Rx Multicast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Rx Broadcast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Tx Multicast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Tx Broadcast", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), +}; +CTASSERT(nitems(mcx_kstat_rfc2863_tpl) == mcx_ppcnt_rfc2863_count); + +static const struct kstat_kv mcx_kstat_rfc2819_tpl[] = { + KSTAT_KV_UNIT_INITIALIZER("Drop Events", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Octets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("Packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Broadcasts", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Multicasts", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("CRC Align Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Undersize", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Oversize", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Fragments", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Jabbers", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Collisions", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("64B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("65-127B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("128-255B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("256-511B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("512-1023B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("1024-1518B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("1519-2047B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("2048-4095B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("4096-8191B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("8192-10239B", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), +}; +CTASSERT(nitems(mcx_kstat_rfc2819_tpl) == mcx_ppcnt_rfc2819_count); + +static const struct kstat_kv mcx_kstat_rfc3635_tpl[] = { + KSTAT_KV_UNIT_INITIALIZER("Alignment Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("FCS Errs", + 
KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Single Colls", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Multiple Colls", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("SQE Test Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("Deferred Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Late Colls", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("Exess Colls", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("Int MAC Tx Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("CSM Sense Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("Too Long", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Int MAC Rx Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Symbol Errs", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_NONE), + KSTAT_KV_UNIT_INITIALIZER("Unknown Control", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Pause Rx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("Pause Tx", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), +}; +CTASSERT(nitems(mcx_kstat_rfc3635_tpl) == mcx_ppcnt_rfc3635_count); + +struct mcx_kstat_ppcnt { + const struct kstat_kv *ksp_tpl; + unsigned int ksp_n; + uint8_t ksp_grp; +}; + +static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_ieee8023 = { + .ksp_tpl = mcx_kstat_ieee8023_tpl, + .ksp_n = nitems(mcx_kstat_ieee8023_tpl), + .ksp_grp = MCX_REG_PPCNT_GRP_IEEE8023, +}; + +static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2863 = { + .ksp_tpl = mcx_kstat_rfc2863_tpl, + .ksp_n = nitems(mcx_kstat_rfc2863_tpl), + .ksp_grp = MCX_REG_PPCNT_GRP_RFC2863, +}; + +static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc2819 = { + .ksp_tpl = mcx_kstat_rfc2819_tpl, + .ksp_n = nitems(mcx_kstat_rfc2819_tpl), + .ksp_grp = MCX_REG_PPCNT_GRP_RFC2819, +}; + +static const struct mcx_kstat_ppcnt mcx_kstat_ppcnt_rfc3635 = { + .ksp_tpl = mcx_kstat_rfc3635_tpl, + .ksp_n = nitems(mcx_kstat_rfc3635_tpl), + .ksp_grp = MCX_REG_PPCNT_GRP_RFC3635, +}; + +static int mcx_kstat_ppcnt_read(struct kstat *); +static int mcx_kstat_ppcnt_copy(struct kstat *, void *); + +static void mcx_kstat_attach_tmps(struct mcx_softc *sc); + +static void +mcx_kstat_attach(struct mcx_softc *sc) +{ + struct kstat *ks; + + ks = kstat_create(DEVNAME(sc), 0, "ieee802.3", 0, KSTAT_T_KV, 0); + if (ks != NULL) { + /* keep counters in between read and copy */ + ks->ks_data = mallocarray(mcx_kstat_ppcnt_ieee8023.ksp_n, + sizeof(uint64_t), M_DEVBUF, M_WAITOK); + + /* but the template models what we want to copy out */ + ks->ks_datalen = sizeof(mcx_kstat_ieee8023_tpl); + + + ks->ks_softc = sc; + ks->ks_ptr = (void *)&mcx_kstat_ppcnt_ieee8023; + ks->ks_read = mcx_kstat_ppcnt_read; + ks->ks_copy = mcx_kstat_ppcnt_copy; + + kstat_install(ks); + sc->sc_kstat_ieee8023 = ks; + } + + ks = kstat_create(DEVNAME(sc), 0, "rfc2863", 0, KSTAT_T_KV, 0); + if (ks != NULL) { + /* keep counters in between read and copy */ + ks->ks_data = mallocarray(mcx_kstat_ppcnt_rfc2863.ksp_n, + sizeof(uint64_t), M_DEVBUF, M_WAITOK); + + /* but the template models what we want to copy out */ + ks->ks_datalen = sizeof(mcx_kstat_rfc2863_tpl); + + + ks->ks_softc = sc; + ks->ks_ptr = (void *)&mcx_kstat_ppcnt_rfc2863; + ks->ks_read = mcx_kstat_ppcnt_read; + ks->ks_copy = mcx_kstat_ppcnt_copy; + + kstat_install(ks); + sc->sc_kstat_rfc2863 
= ks; + } + + ks = kstat_create(DEVNAME(sc), 0, "rfc2819", 0, KSTAT_T_KV, 0); + if (ks != NULL) { + /* keep counters in between read and copy */ + ks->ks_data = mallocarray(mcx_kstat_ppcnt_rfc2819.ksp_n, + sizeof(uint64_t), M_DEVBUF, M_WAITOK); + + /* but the template models what we want to copy out */ + ks->ks_datalen = sizeof(mcx_kstat_rfc2819_tpl); + + + ks->ks_softc = sc; + ks->ks_ptr = (void *)&mcx_kstat_ppcnt_rfc2819; + ks->ks_read = mcx_kstat_ppcnt_read; + ks->ks_copy = mcx_kstat_ppcnt_copy; + + kstat_install(ks); + sc->sc_kstat_rfc2819 = ks; + } + + ks = kstat_create(DEVNAME(sc), 0, "rfc3635", 0, KSTAT_T_KV, 0); + if (ks != NULL) { + /* keep counters in between read and copy */ + ks->ks_data = mallocarray(mcx_kstat_ppcnt_rfc3635.ksp_n, + sizeof(uint64_t), M_DEVBUF, M_WAITOK); + + /* but the template models what we want to copy out */ + ks->ks_datalen = sizeof(mcx_kstat_rfc3635_tpl); + + + ks->ks_softc = sc; + ks->ks_ptr = (void *)&mcx_kstat_ppcnt_rfc3635; + ks->ks_read = mcx_kstat_ppcnt_read; + ks->ks_copy = mcx_kstat_ppcnt_copy; + + kstat_install(ks); + sc->sc_kstat_rfc3635 = ks; + } + + mcx_kstat_attach_tmps(sc); +} + +static int +mcx_kstat_ppcnt_read(struct kstat *ks) +{ + struct mcx_softc *sc = ks->ks_softc; + struct mcx_kstat_ppcnt *ksp = ks->ks_ptr; + struct mcx_reg_ppcnt ppcnt = { + .ppcnt_grp = ksp->ksp_grp, + .ppcnt_local_port = 1, + }; + int rv; + + KERNEL_LOCK(); /* XXX */ + rv = mcx_access_hca_reg(sc, MCX_REG_PPCNT, MCX_REG_OP_READ, + &ppcnt, sizeof(ppcnt)); + KERNEL_UNLOCK(); + if (rv != 0) + return (EIO); + + memcpy(ks->ks_data, ppcnt.ppcnt_counter_set, + ksp->ksp_n * sizeof(uint64_t)); + + nanouptime(&ks->ks_updated); + + return (0); +} + +static int +mcx_kstat_ppcnt_copy(struct kstat *ks, void *dst) +{ + struct mcx_kstat_ppcnt *ksp = ks->ks_ptr; + struct kstat_kv *kvs = dst; + struct kstat_kv *kv; + uint64_t *vs = ks->ks_data; + unsigned int i; + + for (i = 0; i < ksp->ksp_n; i++) { + kv = &kvs[i]; + *kv = ksp->ksp_tpl[i]; + kstat_kv_u64(kv) = bemtoh64(&vs[i]); + } + + return (0); +} + +struct mcx_kstat_mtmp { + struct kstat_kv ktmp_name; + struct kstat_kv ktmp_temperature; + struct kstat_kv ktmp_threshold_lo; + struct kstat_kv ktmp_threshold_hi; +}; + +static const struct timeval mcx_kstat_mtmp_rate = { 1, 0 }; + +static int mcx_kstat_mtmp_read(struct kstat *); + +static void +mcx_kstat_attach_tmps(struct mcx_softc *sc) +{ + struct kstat *ks; + struct mcx_reg_mtcap mtcap; + struct mcx_kstat_mtmp *ktmp; + uint64_t map; + unsigned int i, n; + + memset(&mtcap, 0, sizeof(mtcap)); + + KERNEL_LOCK(); /* XXX */ + if (mcx_access_hca_reg(sc, MCX_REG_MTCAP, MCX_REG_OP_READ, + &mtcap, sizeof(mtcap)) != 0) { + /* unable to find temperature sensors */ + return; + } + + sc->sc_kstat_mtmp_count = mtcap.mtcap_sensor_count; + sc->sc_kstat_mtmp = mallocarray(sc->sc_kstat_mtmp_count, + sizeof(*sc->sc_kstat_mtmp), M_DEVBUF, M_WAITOK); + + n = 0; + map = bemtoh64(&mtcap.mtcap_sensor_map); + for (i = 0; i < sizeof(map) * NBBY; i++) { + if (!ISSET(map, (1ULL << i))) + continue; + + ks = kstat_create(DEVNAME(sc), 0, "temperature", i, + KSTAT_T_KV, 0); + if (ks == NULL) { + /* unable to attach temperature sensor %u, i */ + continue; + } + + ktmp = malloc(sizeof(*ktmp), M_DEVBUF, M_WAITOK|M_ZERO); + kstat_kv_init(&ktmp->ktmp_name, "name", KSTAT_KV_T_ISTR); + kstat_kv_init(&ktmp->ktmp_temperature, "temperature", + KSTAT_KV_T_TEMP); + kstat_kv_init(&ktmp->ktmp_threshold_lo, "lo threshold", + KSTAT_KV_T_TEMP); + kstat_kv_init(&ktmp->ktmp_threshold_hi, "hi threshold", + KSTAT_KV_T_TEMP); + + 
ks->ks_data = ktmp; + ks->ks_datalen = sizeof(*ktmp); + TIMEVAL_TO_TIMESPEC(&mcx_kstat_mtmp_rate, &ks->ks_interval); + ks->ks_read = mcx_kstat_mtmp_read; + + ks->ks_softc = sc; + kstat_install(ks); + + sc->sc_kstat_mtmp[n++] = ks; + if (n >= sc->sc_kstat_mtmp_count) + break; + } +} + +static uint64_t +mcx_tmp_to_uK(uint16_t *t) +{ + int64_t mt = (int16_t)bemtoh16(t); /* 0.125 C units */ + mt *= 1000000 / 8; /* convert to uC */ + mt += 273150000; /* convert to uK */ + + return (mt); +} + +static int +mcx_kstat_mtmp_read(struct kstat *ks) +{ + struct mcx_softc *sc = ks->ks_softc; + struct mcx_kstat_mtmp *ktmp = ks->ks_data; + struct mcx_reg_mtmp mtmp; + int rv; + struct timeval updated; + + TIMESPEC_TO_TIMEVAL(&updated, &ks->ks_updated); + + if (!ratecheck(&updated, &mcx_kstat_mtmp_rate)) + return (0); + + memset(&mtmp, 0, sizeof(&mtmp)); + htobem16(&mtmp.mtmp_sensor_index, ks->ks_unit); + + KERNEL_LOCK(); /* XXX */ + rv = mcx_access_hca_reg(sc, MCX_REG_MTMP, MCX_REG_OP_READ, + &mtmp, sizeof(mtmp)); + KERNEL_UNLOCK(); + if (rv != 0) + return (EIO); + + memset(kstat_kv_istr(&ktmp->ktmp_name), 0, + sizeof(kstat_kv_istr(&ktmp->ktmp_name))); + memcpy(kstat_kv_istr(&ktmp->ktmp_name), + mtmp.mtmp_sensor_name, sizeof(mtmp.mtmp_sensor_name)); + kstat_kv_temp(&ktmp->ktmp_temperature) = + mcx_tmp_to_uK(&mtmp.mtmp_temperature); + kstat_kv_temp(&ktmp->ktmp_threshold_lo) = + mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_lo); + kstat_kv_temp(&ktmp->ktmp_threshold_hi) = + mcx_tmp_to_uK(&mtmp.mtmp_temperature_threshold_hi); + + TIMEVAL_TO_TIMESPEC(&updated, &ks->ks_updated); + + return (0); +} +#endif /* NKSTAT > 0 */ Index: sys/dev/pci/if_vmx.c =================================================================== RCS file: /cvs/src/sys/dev/pci/if_vmx.c,v retrieving revision 1.59 diff -u -p -r1.59 if_vmx.c --- sys/dev/pci/if_vmx.c 17 Jun 2020 07:08:39 -0000 1.59 +++ sys/dev/pci/if_vmx.c 27 Jun 2020 06:34:47 -0000 @@ -17,6 +17,7 @@ */ #include "bpfilter.h" +#include "kstat.h" #include #include @@ -26,6 +27,7 @@ #include #include #include +#include #include #include @@ -92,20 +94,48 @@ struct vmxnet3_comp_ring { u_int32_t gen; }; +struct vmx_txstats_kv { + struct kstat_kv tso_packets; + struct kstat_kv tso_bytes; + struct kstat_kv ucast_packets; + struct kstat_kv ucast_bytes; + struct kstat_kv mcast_packets; + struct kstat_kv mcast_bytes; + struct kstat_kv bcast_packets; + struct kstat_kv bcast_bytes; + struct kstat_kv errors; + struct kstat_kv discards; +}; + struct vmxnet3_txqueue { struct vmxnet3_softc *sc; /* sigh */ struct vmxnet3_txring cmd_ring; struct vmxnet3_comp_ring comp_ring; struct vmxnet3_txq_shared *ts; struct ifqueue *ifq; + struct kstat *txkstat; } __aligned(64); +struct vmx_rxstats_kv { + struct kstat_kv lro_packets; + struct kstat_kv lro_bytes; + struct kstat_kv ucast_packets; + struct kstat_kv ucast_bytes; + struct kstat_kv mcast_packets; + struct kstat_kv mcast_bytes; + struct kstat_kv bcast_packets; + struct kstat_kv bcast_bytes; + struct kstat_kv nobuffers; + struct kstat_kv errors; + }; + struct vmxnet3_rxqueue { struct vmxnet3_softc *sc; /* sigh */ struct vmxnet3_rxring cmd_ring[2]; struct vmxnet3_comp_ring comp_ring; struct vmxnet3_rxq_shared *rs; struct ifiqueue *ifiq; + struct kstat *rxkstat; } __aligned(64); struct vmxnet3_queue { @@ -117,6 +147,14 @@ struct vmxnet3_queue { int intr; }; +struct vmx_kstats { + struct rwlock lock; + struct timeval updated; + + struct vmx_txstats_kv txstats; + struct vmx_rxstats_kv rxstats; +}; + struct vmxnet3_softc { struct device sc_dev; struct 
arpcom sc_arpcom; @@ -136,24 +174,11 @@ struct vmxnet3_softc { struct vmxnet3_driver_shared *sc_ds; u_int8_t *sc_mcast; struct vmxnet3_upt1_rss_conf *sc_rss; -}; - -#define VMXNET3_STAT -#ifdef VMXNET3_STAT -struct { - u_int ntxdesc; - u_int nrxdesc; - u_int txhead; - u_int txdone; - u_int maxtxlen; - u_int rxdone; - u_int rxfill; - u_int intr; -} vmxstat = { - NTXDESC, NRXDESC -}; +#if NKSTAT > 0 + struct vmx_kstats sc_kstats; #endif +}; #define JUMBO_LEN (1024 * 9) #define DMAADDR(map) ((map)->dm_segs[0].ds_addr) @@ -202,6 +227,14 @@ void vmxnet3_media_status(struct ifnet * int vmxnet3_media_change(struct ifnet *); void *vmxnet3_dma_allocmem(struct vmxnet3_softc *, u_int, u_int, bus_addr_t *); +#if NKSTAT > 0 +static void vmx_kstat_init(struct vmxnet3_softc *); +static void vmx_kstat_txstats(struct vmxnet3_softc *, + struct vmxnet3_txqueue *, int); +static void vmx_kstat_rxstats(struct vmxnet3_softc *, + struct vmxnet3_rxqueue *, int); +#endif /* NKSTAT > 0 */ + const struct pci_matchid vmx_devices[] = { { PCI_VENDOR_VMWARE, PCI_PRODUCT_VMWARE_NET_3 } }; @@ -323,9 +356,9 @@ vmxnet3_attach(struct device *parent, st snprintf(q->intrname, sizeof(q->intrname), "%s:%d", self->dv_xname, i); /* this should be pci_intr_establish_cpu */ - q->ih = pci_intr_establish(pa->pa_pc, ih, + q->ih = pci_intr_establish_cpu(pa->pa_pc, ih, IPL_NET | IPL_MPSAFE, - /* intrmap_cpu(sc->sc_intrmap, i), */ + intrmap_cpu(sc->sc_intrmap, i), vmxnet3_intr_queue, q, q->intrname); q->intr = vec; @@ -389,10 +422,20 @@ vmxnet3_attach(struct device *parent, st if_attach_queues(ifp, sc->sc_nqueues); if_attach_iqueues(ifp, sc->sc_nqueues); + +#if NKSTAT > 0 + vmx_kstat_init(sc); +#endif + for (i = 0; i < sc->sc_nqueues; i++) { ifp->if_ifqs[i]->ifq_softc = &sc->sc_q[i].tx; sc->sc_q[i].tx.ifq = ifp->if_ifqs[i]; sc->sc_q[i].rx.ifiq = ifp->if_iqs[i]; + +#if NKSTAT > 0 + vmx_kstat_txstats(sc, &sc->sc_q[i].tx, i); + vmx_kstat_rxstats(sc, &sc->sc_q[i].rx, i); +#endif } } @@ -1024,9 +1067,6 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc, ml_enqueue(&ml, m); skip_buffer: -#ifdef VMXNET3_STAT - vmxstat.rxdone = idx; -#endif if (rq->rs->update_rxhead) { u_int qid = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M); @@ -1424,3 +1464,154 @@ vmxnet3_dma_allocmem(struct vmxnet3_soft bus_dmamap_destroy(t, map); return va; } + +#if NKSTAT > 0 +static const struct timeval vmx_kstat_rate = { 1, 0 }; + +static void +vmx_kstat_init(struct vmxnet3_softc *sc) +{ + struct vmx_txstats_kv *txkvs = &sc->sc_kstats.txstats; + struct vmx_rxstats_kv *rxkvs = &sc->sc_kstats.rxstats; + + rw_init(&sc->sc_kstats.lock, "vmxstats"); + + kstat_kv_unit_init(&txkvs->tso_packets, "TSO packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&txkvs->tso_bytes, "TSO bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&txkvs->ucast_packets, "ucast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&txkvs->ucast_bytes, "ucast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&txkvs->mcast_packets, "mcast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&txkvs->mcast_bytes, "mcast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&txkvs->bcast_packets, "bcast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&txkvs->bcast_bytes, "bcast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&txkvs->errors, "errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + 
kstat_kv_unit_init(&txkvs->discards, "discards", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + + kstat_kv_unit_init(&rxkvs->lro_packets, "LRO packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&rxkvs->lro_bytes, "LRO bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&rxkvs->ucast_packets, "ucast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&rxkvs->ucast_bytes, "ucast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&rxkvs->mcast_packets, "mcast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&rxkvs->mcast_bytes, "mcast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&rxkvs->bcast_packets, "bcast packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&rxkvs->bcast_bytes, "bcast bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES); + kstat_kv_unit_init(&rxkvs->nobuffers, "no buffers", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); + kstat_kv_unit_init(&rxkvs->errors, "errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS); +} + +static int +vmx_txstats_read(struct kstat *ks) +{ + struct vmxnet3_txqueue *tq = ks->ks_softc; + struct vmxnet3_softc *sc = tq->sc; + struct vmx_txstats_kv *txkvs = ks->ks_data; + struct UPT1_TxStats *txstats = &tq->ts->stats; + + if (ratecheck(&sc->sc_kstats.updated, &vmx_kstat_rate)) + WRITE_CMD(sc, VMXNET3_CMD_GET_STATS); + + txkvs->tso_packets.kv_v.v_u64 = txstats->TSO_packets; + txkvs->tso_bytes.kv_v.v_u64 = txstats->TSO_bytes; + txkvs->ucast_packets.kv_v.v_u64 = txstats->ucast_packets; + txkvs->ucast_bytes.kv_v.v_u64 = txstats->ucast_bytes; + txkvs->mcast_packets.kv_v.v_u64 = txstats->mcast_packets; + txkvs->mcast_bytes.kv_v.v_u64 = txstats->mcast_bytes; + txkvs->bcast_packets.kv_v.v_u64 = txstats->bcast_packets; + txkvs->bcast_bytes.kv_v.v_u64 = txstats->bcast_bytes; + txkvs->errors.kv_v.v_u64 = txstats->error; + txkvs->discards.kv_v.v_u64 = txstats->discard; + + TIMEVAL_TO_TIMESPEC(&sc->sc_kstats.updated, &ks->ks_updated); + + return (0); +} + +static void +vmx_kstat_txstats(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq, int i) +{ + tq->sc = sc; + + tq->txkstat = kstat_create(sc->sc_dev.dv_xname, 0, "vmx-txstats", i, + KSTAT_T_KV, 0); + if (tq->txkstat == NULL) + return; + + kstat_set_wlock(tq->txkstat, &sc->sc_kstats.lock); + + tq->txkstat->ks_softc = tq; + tq->txkstat->ks_data = &sc->sc_kstats.txstats; + tq->txkstat->ks_datalen = sizeof(sc->sc_kstats.txstats); + tq->txkstat->ks_read = vmx_txstats_read; + TIMEVAL_TO_TIMESPEC(&vmx_kstat_rate, &tq->txkstat->ks_interval); + + kstat_install(tq->txkstat); +} + +static int +vmx_rxstats_read(struct kstat *ks) +{ + struct vmxnet3_rxqueue *rq = ks->ks_softc; + struct vmxnet3_softc *sc = rq->sc; + struct vmx_rxstats_kv *rxkvs = ks->ks_data; + struct UPT1_RxStats *rxstats = &rq->rs->stats; + + if (ratecheck(&sc->sc_kstats.updated, &vmx_kstat_rate)) + WRITE_CMD(sc, VMXNET3_CMD_GET_STATS); + + rxkvs->lro_packets.kv_v.v_u64 = rxstats->LRO_packets; + rxkvs->lro_bytes.kv_v.v_u64 = rxstats->LRO_bytes; + rxkvs->ucast_packets.kv_v.v_u64 = rxstats->ucast_packets; + rxkvs->ucast_bytes.kv_v.v_u64 = rxstats->ucast_bytes; + rxkvs->mcast_packets.kv_v.v_u64 = rxstats->mcast_packets; + rxkvs->mcast_bytes.kv_v.v_u64 = rxstats->mcast_bytes; + rxkvs->bcast_packets.kv_v.v_u64 = rxstats->bcast_packets; + rxkvs->bcast_bytes.kv_v.v_u64 = rxstats->bcast_bytes; + rxkvs->nobuffers.kv_v.v_u64 = rxstats->nobuffer; + rxkvs->errors.kv_v.v_u64 = rxstats->error; + + 
TIMEVAL_TO_TIMESPEC(&sc->sc_kstats.updated, &ks->ks_updated); + + return (0); +} + +static void +vmx_kstat_rxstats(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq, int i) +{ + rq->sc = sc; + + rq->rxkstat = kstat_create(sc->sc_dev.dv_xname, 0, "vmx-rxstats", i, + KSTAT_T_KV, 0); + if (rq->rxkstat == NULL) + return; + + kstat_set_wlock(rq->rxkstat, &rq->sc->sc_kstats.lock); + + rq->rxkstat->ks_softc = rq; + rq->rxkstat->ks_data = &sc->sc_kstats.rxstats; + rq->rxkstat->ks_datalen = sizeof(sc->sc_kstats.rxstats); + rq->rxkstat->ks_read = vmx_rxstats_read; + TIMEVAL_TO_TIMESPEC(&vmx_kstat_rate, &rq->rxkstat->ks_interval); + + kstat_install(rq->rxkstat); +} +#endif /* NKSTAT > 0 */ Index: sys/net/ifq.c =================================================================== RCS file: /cvs/src/sys/net/ifq.c,v retrieving revision 1.40 diff -u -p -r1.40 ifq.c --- sys/net/ifq.c 17 Jun 2020 06:45:22 -0000 1.40 +++ sys/net/ifq.c 27 Jun 2020 06:34:50 -0000 @@ -17,6 +17,7 @@ */ #include "bpfilter.h" +#include "kstat.h" #include #include @@ -32,6 +33,10 @@ #include #endif +#if NKSTAT > 0 +#include +#endif + /* * priq glue */ @@ -122,7 +127,10 @@ ifq_is_serialized(struct ifqueue *ifq) void ifq_start(struct ifqueue *ifq) { - if (ifq_len(ifq) >= min(ifq->ifq_if->if_txmit, ifq->ifq_maxlen)) { + struct ifnet *ifp = ifq->ifq_if; + + if (ISSET(ifp->if_xflags, IFXF_MPSAFE) && + ifq_len(ifq) >= min(ifp->if_txmit, ifq->ifq_maxlen)) { task_del(ifq->ifq_softnet, &ifq->ifq_bundle); ifq_run_start(ifq); } else @@ -188,11 +196,58 @@ ifq_barrier_task(void *p) * ifqueue mbuf queue API */ +#if NKSTAT > 0 +struct ifq_kstat_data { + struct kstat_kv kd_packets; + struct kstat_kv kd_bytes; + struct kstat_kv kd_qdrops; + struct kstat_kv kd_errors; + struct kstat_kv kd_qlen; + struct kstat_kv kd_maxqlen; + struct kstat_kv kd_oactive; +}; + +static const struct ifq_kstat_data ifq_kstat_tpl = { + KSTAT_KV_UNIT_INITIALIZER("packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("qdrops", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("qlen", + KSTAT_KV_T_UINT32, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("maxqlen", + KSTAT_KV_T_UINT32, KSTAT_KV_U_PACKETS), + KSTAT_KV_INITIALIZER("oactive", KSTAT_KV_T_BOOL), +}; + +int +ifq_kstat_copy(struct kstat *ks, void *dst) +{ + struct ifqueue *ifq = ks->ks_softc; + struct ifq_kstat_data *kd = dst; + + *kd = ifq_kstat_tpl; + kstat_kv_u64(&kd->kd_packets) = ifq->ifq_packets; + kstat_kv_u64(&kd->kd_bytes) = ifq->ifq_bytes; + kstat_kv_u64(&kd->kd_qdrops) = ifq->ifq_qdrops; + kstat_kv_u64(&kd->kd_errors) = ifq->ifq_errors; + kstat_kv_u32(&kd->kd_qlen) = ifq->ifq_len; + kstat_kv_u32(&kd->kd_maxqlen) = ifq->ifq_maxlen; + kstat_kv_bool(&kd->kd_oactive) = ifq->ifq_oactive; + + return (0); +} +#endif + void ifq_init(struct ifqueue *ifq, struct ifnet *ifp, unsigned int idx) { ifq->ifq_if = ifp; - ifq->ifq_softnet = net_tq(ifp->if_index); /* + idx */ + ifq->ifq_softnet = ISSET(ifp->if_xflags, IFXF_MPSAFE) ? 
+ net_tq(ifp->if_index /* + idx */) : systq; ifq->ifq_softc = NULL; mtx_init(&ifq->ifq_mtx, IPL_NET); @@ -222,6 +277,18 @@ ifq_init(struct ifqueue *ifq, struct ifn ifq_set_maxlen(ifq, IFQ_MAXLEN); ifq->ifq_idx = idx; + +#if NKSTAT > 0 + /* XXX xname vs driver name and unit */ + ifq->ifq_kstat = kstat_create(ifp->if_xname, 0, + "txq", ifq->ifq_idx, KSTAT_T_KV, 0); + KASSERT(ifq->ifq_kstat != NULL); + kstat_set_mutex(ifq->ifq_kstat, &ifq->ifq_mtx); + ifq->ifq_kstat->ks_softc = ifq; + ifq->ifq_kstat->ks_datalen = sizeof(ifq_kstat_tpl); + ifq->ifq_kstat->ks_copy = ifq_kstat_copy; + kstat_install(ifq->ifq_kstat); +#endif } void @@ -265,6 +332,10 @@ ifq_destroy(struct ifqueue *ifq) { struct mbuf_list ml = MBUF_LIST_INITIALIZER(); +#if NKSTAT > 0 + kstat_destroy(ifq->ifq_kstat); +#endif + NET_ASSERT_UNLOCKED(); if (!task_del(ifq->ifq_softnet, &ifq->ifq_bundle)) taskq_barrier(ifq->ifq_softnet); @@ -505,6 +576,45 @@ ifq_mfreeml(struct ifqueue *ifq, struct * ifiq */ +#if NKSTAT > 0 +struct ifiq_kstat_data { + struct kstat_kv kd_packets; + struct kstat_kv kd_bytes; + struct kstat_kv kd_qdrops; + struct kstat_kv kd_errors; + struct kstat_kv kd_qlen; +}; + +static const struct ifiq_kstat_data ifiq_kstat_tpl = { + KSTAT_KV_UNIT_INITIALIZER("packets", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("bytes", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_BYTES), + KSTAT_KV_UNIT_INITIALIZER("qdrops", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("errors", + KSTAT_KV_T_COUNTER64, KSTAT_KV_U_PACKETS), + KSTAT_KV_UNIT_INITIALIZER("qlen", + KSTAT_KV_T_UINT32, KSTAT_KV_U_PACKETS), +}; + +int +ifiq_kstat_copy(struct kstat *ks, void *dst) +{ + struct ifiqueue *ifiq = ks->ks_softc; + struct ifiq_kstat_data *kd = dst; + + *kd = ifiq_kstat_tpl; + kstat_kv_u64(&kd->kd_packets) = ifiq->ifiq_packets; + kstat_kv_u64(&kd->kd_bytes) = ifiq->ifiq_bytes; + kstat_kv_u64(&kd->kd_qdrops) = ifiq->ifiq_qdrops; + kstat_kv_u64(&kd->kd_errors) = ifiq->ifiq_errors; + kstat_kv_u32(&kd->kd_qlen) = ml_len(&ifiq->ifiq_ml); + + return (0); +} +#endif + static void ifiq_process(void *); void @@ -525,11 +635,27 @@ ifiq_init(struct ifiqueue *ifiq, struct ifiq->ifiq_errors = 0; ifiq->ifiq_idx = idx; + +#if NKSTAT > 0 + /* XXX xname vs driver name and unit */ + ifiq->ifiq_kstat = kstat_create(ifp->if_xname, 0, + "rxq", ifiq->ifiq_idx, KSTAT_T_KV, 0); + KASSERT(ifiq->ifiq_kstat != NULL); + kstat_set_mutex(ifiq->ifiq_kstat, &ifiq->ifiq_mtx); + ifiq->ifiq_kstat->ks_softc = ifiq; + ifiq->ifiq_kstat->ks_datalen = sizeof(ifiq_kstat_tpl); + ifiq->ifiq_kstat->ks_copy = ifiq_kstat_copy; + kstat_install(ifiq->ifiq_kstat); +#endif } void ifiq_destroy(struct ifiqueue *ifiq) { +#if NKSTAT > 0 + kstat_destroy(ifiq->ifiq_kstat); +#endif + NET_ASSERT_UNLOCKED(); if (!task_del(ifiq->ifiq_softnet, &ifiq->ifiq_task)) taskq_barrier(ifiq->ifiq_softnet); Index: sys/net/ifq.h =================================================================== RCS file: /cvs/src/sys/net/ifq.h,v retrieving revision 1.31 diff -u -p -r1.31 ifq.h --- sys/net/ifq.h 22 May 2020 07:02:24 -0000 1.31 +++ sys/net/ifq.h 27 Jun 2020 06:34:50 -0000 @@ -20,6 +20,7 @@ #define _NET_IFQ_H_ struct ifnet; +struct kstat; struct ifq_ops; @@ -54,6 +55,8 @@ struct ifqueue { uint64_t ifq_errors; uint64_t ifq_mcasts; + struct kstat *ifq_kstat; + /* work serialisation */ struct mutex ifq_task_mtx; struct task_list ifq_task_list; @@ -91,6 +94,8 @@ struct ifiqueue { uint64_t ifiq_errors; uint64_t ifiq_mcasts; uint64_t ifiq_noproto; + + struct kstat *ifiq_kstat; /* 
properties */ unsigned int ifiq_idx; Index: sys/sys/conf.h =================================================================== RCS file: /cvs/src/sys/sys/conf.h,v retrieving revision 1.152 diff -u -p -r1.152 conf.h --- sys/sys/conf.h 26 May 2020 07:53:00 -0000 1.152 +++ sys/sys/conf.h 27 Jun 2020 06:34:50 -0000 @@ -328,6 +328,13 @@ extern struct cdevsw cdevsw[]; (dev_type_stop((*))) enodev, 0, seltrue, \ (dev_type_mmap((*))) enodev, 0, 0, seltrue_kqfilter } +/* open, close, ioctl */ +#define cdev_kstat_init(c,n) { \ + dev_init(c,n,open), dev_init(c,n,close), (dev_type_read((*))) enodev, \ + (dev_type_write((*))) enodev, dev_init(c,n,ioctl), \ + (dev_type_stop((*))) enodev, 0, selfalse, \ + (dev_type_mmap((*))) enodev } + /* open, close, read, write, ioctl, stop, tty, poll, mmap, kqfilter */ #define cdev_wsdisplay_init(c,n) { \ dev_init(c,n,open), dev_init(c,n,close), dev_init(c,n,read), \ @@ -605,6 +612,7 @@ cdev_decl(wsmouse); cdev_decl(wsmux); cdev_decl(ksyms); +cdev_decl(kstat); cdev_decl(bio); cdev_decl(vscsi); Index: sys/sys/kstat.h =================================================================== RCS file: sys/sys/kstat.h diff -N sys/sys/kstat.h --- /dev/null 1 Jan 1970 00:00:00 -0000 +++ sys/sys/kstat.h 27 Jun 2020 06:34:51 -0000 @@ -0,0 +1,193 @@ +/* $OpenBSD$ */ + +/* + * Copyright (c) 2020 David Gwynne + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */
+
+#ifndef _SYS_KSTAT_H_
+#define _SYS_KSTAT_H_
+
+#include <sys/time.h>
+
+#define KSTAT_STRLEN		32
+
+#define KSTAT_T_RAW		0
+#define KSTAT_T_KV		1
+#define KSTAT_T_COUNTERS	2
+
+struct kstat_req {
+	unsigned int		 ks_rflags;
+#define KSTATIOC_F_IGNVER		(1 << 0)
+	/* the current version of the kstat subsystem */
+	unsigned int		 ks_version;
+
+	uint64_t		 ks_id;
+
+	char			 ks_provider[KSTAT_STRLEN];
+	unsigned int		 ks_instance;
+	char			 ks_name[KSTAT_STRLEN];
+	unsigned int		 ks_unit;
+
+	struct timespec		 ks_created;
+	struct timespec		 ks_updated;
+	struct timespec		 ks_interval;
+	unsigned int		 ks_type;
+	unsigned int		 ks_state;
+
+	void			*ks_data;
+	size_t			 ks_datalen;
+	unsigned int		 ks_dataver;
+};
+
+/* ioctls */
+
+#define KSTATIOC_VERSION	_IOR('k', 1, unsigned int)
+#define KSTATIOC_FIND_ID	_IOWR('k', 2, struct kstat_req)
+#define KSTATIOC_NFIND_ID	_IOWR('k', 3, struct kstat_req)
+#define KSTATIOC_FIND_PROVIDER	_IOWR('k', 4, struct kstat_req)
+#define KSTATIOC_NFIND_PROVIDER	_IOWR('k', 5, struct kstat_req)
+#define KSTATIOC_FIND_NAME	_IOWR('k', 6, struct kstat_req)
+#define KSTATIOC_NFIND_NAME	_IOWR('k', 7, struct kstat_req)
+
+/* named data */
+
+#define KSTAT_KV_NAMELEN	16
+#define KSTAT_KV_ALIGN		sizeof(uint64_t)
+
+enum kstat_kv_type {
+	KSTAT_KV_T_NULL,
+	KSTAT_KV_T_BOOL,
+	KSTAT_KV_T_COUNTER64,
+	KSTAT_KV_T_COUNTER32,
+	KSTAT_KV_T_UINT64,
+	KSTAT_KV_T_INT64,
+	KSTAT_KV_T_UINT32,
+	KSTAT_KV_T_INT32,
+	KSTAT_KV_T_ISTR,	/* inline string */
+	KSTAT_KV_T_STR,		/* trailing string */
+	KSTAT_KV_T_BYTES,	/* trailing bytes */
+	KSTAT_KV_T_TEMP,	/* temperature (uK) */
+};
+
+/* units only apply to integer types */
+enum kstat_kv_unit {
+	KSTAT_KV_U_NONE = 0,
+	KSTAT_KV_U_PACKETS,	/* packets */
+	KSTAT_KV_U_BYTES,	/* bytes */
+	KSTAT_KV_U_CYCLES,	/* cycles */
+};
+
+struct kstat_kv {
+	char			 kv_key[KSTAT_KV_NAMELEN];
+	union {
+		char		 v_istr[16];
+		unsigned int	 v_bool;
+		uint64_t	 v_u64;
+		int64_t		 v_s64;
+		uint32_t	 v_u32;
+		int32_t		 v_s32;
+		size_t		 v_len;
+	}			 kv_v;
+	enum kstat_kv_type	 kv_type;
+	enum kstat_kv_unit	 kv_unit;
+} __aligned(KSTAT_KV_ALIGN);
+
+#define kstat_kv_istr(_kv)	(_kv)->kv_v.v_istr
+#define kstat_kv_bool(_kv)	(_kv)->kv_v.v_bool
+#define kstat_kv_u64(_kv)	(_kv)->kv_v.v_u64
+#define kstat_kv_s64(_kv)	(_kv)->kv_v.v_s64
+#define kstat_kv_u32(_kv)	(_kv)->kv_v.v_u32
+#define kstat_kv_s32(_kv)	(_kv)->kv_v.v_s32
+#define kstat_kv_len(_kv)	(_kv)->kv_v.v_len
+#define kstat_kv_temp(_kv)	(_kv)->kv_v.v_u64
+
+#ifdef _KERNEL
+
+#include <sys/tree.h>
+
+struct kstat_lock_ops;
+
+struct kstat {
+	uint64_t		 ks_id;
+
+	const char		*ks_provider;
+	unsigned int		 ks_instance;
+	const char		*ks_name;
+	unsigned int		 ks_unit;
+
+	unsigned int		 ks_type;
+	unsigned int		 ks_flags;
+#define KSTAT_F_REALLOC		(1 << 0)
+	unsigned int		 ks_state;
+#define KSTAT_S_CREATED		0
+#define KSTAT_S_INSTALLED	1
+
+	struct timespec		 ks_created;
+	RBT_ENTRY(kstat)	 ks_id_entry;
+	RBT_ENTRY(kstat)	 ks_pv_entry;
+	RBT_ENTRY(kstat)	 ks_nm_entry;
+
+	/* the driver can update these between kstat creation and install */
+	unsigned int		 ks_dataver;
+	void			*ks_softc;
+	void			*ks_ptr;
+	int			(*ks_read)(struct kstat *);
+	int			(*ks_copy)(struct kstat *, void *);
+
+	const struct kstat_lock_ops *
+				 ks_lock_ops;
+	void			*ks_lock;
+
+	/* the data that is updated by ks_read */
+	void			*ks_data;
+	size_t			 ks_datalen;
+	struct timespec		 ks_updated;
+	struct timespec		 ks_interval;
+};
+
+struct kstat	*kstat_create(const char *, unsigned int,
+		     const char *, unsigned int,
+		     unsigned int, unsigned int);
+
+void	kstat_set_rlock(struct kstat *, struct rwlock *);
+void	kstat_set_wlock(struct kstat *, struct rwlock *);
+void	kstat_set_mutex(struct kstat *, struct mutex *);
+void	kstat_set_cpu(struct kstat *, struct cpu_info *);
+
+int	kstat_read_nop(struct kstat *);
+
+void	kstat_install(struct kstat *);
+void	kstat_destroy(struct kstat *);
+
+/*
+ * kstat_kv api
+ */
+
+#define KSTAT_KV_UNIT_INITIALIZER(_key, _type, _unit) {	\
+	.kv_key = (_key),				\
+	.kv_type = (_type),				\
+	.kv_unit = (_unit),				\
+}
+
+#define KSTAT_KV_INITIALIZER(_key, _type)		\
+	KSTAT_KV_UNIT_INITIALIZER((_key), (_type), KSTAT_KV_U_NONE)
+
+void	kstat_kv_init(struct kstat_kv *, const char *, enum kstat_kv_type);
+void	kstat_kv_unit_init(struct kstat_kv *, const char *,
+	    enum kstat_kv_type, enum kstat_kv_unit);
+
+#endif /* _KERNEL */
+
+#endif /* _SYS_KSTAT_H_ */
Index: usr.bin/kstat/Makefile
===================================================================
RCS file: usr.bin/kstat/Makefile
diff -N usr.bin/kstat/Makefile
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ usr.bin/kstat/Makefile	27 Jun 2020 06:34:52 -0000
@@ -0,0 +1,9 @@
+# $OpenBSD$
+
+PROG=	kstat
+SRCS=	kstat.c
+MAN=
+WARNINGS=Yes
+DEBUG=-g
+
+.include <bsd.prog.mk>
Index: usr.bin/kstat/kstat.c
===================================================================
RCS file: usr.bin/kstat/kstat.c
diff -N usr.bin/kstat/kstat.c
--- /dev/null	1 Jan 1970 00:00:00 -0000
+++ usr.bin/kstat/kstat.c	27 Jun 2020 06:34:52 -0000
@@ -0,0 +1,340 @@
+/*	$OpenBSD$ */
+
+/*
+ * Copyright (c) 2020 David Gwynne
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/tree.h>
+
+#include <ctype.h>
+#include <err.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <vis.h>
+
+#include "/usr/src/sys/sys/kstat.h"
+
+#ifndef roundup
+#define roundup(x, y)	((((x)+((y)-1))/(y))*(y))
+#endif
+
+#define DEV_KSTAT "/dev/kstat"
+
+static void kstat_list(int, unsigned int);
+
+#if 0
+__dead static void
+usage(void)
+{
+	extern char *__progname;
+	fprintf(stderr, "usage: %s\n", __progname);
+	exit(1);
+}
+#endif
+
+int
+main(int argc, char *argv[])
+{
+	unsigned int version;
+	int fd;
+
+	fd = open(DEV_KSTAT, O_RDONLY);
+	if (fd == -1)
+		err(1, "%s", DEV_KSTAT);
+
+	if (ioctl(fd, KSTATIOC_VERSION, &version) == -1)
+		err(1, "kstat version");
+
+	kstat_list(fd, version);
+
+	return (0);
+}
+
+struct kstat_entry {
+	struct kstat_req	kstat;
+	RBT_ENTRY(kstat_entry)	entry;
+	int			serrno;
+};
+
+RBT_HEAD(kstat_tree, kstat_entry);
+
+static inline int
+kstat_cmp(const struct kstat_entry *ea, const struct kstat_entry *eb)
+{
+	const struct kstat_req *a = &ea->kstat;
+	const struct kstat_req *b = &eb->kstat;
+	int rv;
+
+	rv = strncmp(a->ks_provider, b->ks_provider, sizeof(a->ks_provider));
+	if (rv != 0)
+		return (rv);
+	if (a->ks_instance > b->ks_instance)
+		return (1);
+	if (a->ks_instance < b->ks_instance)
+		return (-1);
+
+	rv = strncmp(a->ks_name, b->ks_name, sizeof(a->ks_name));
+	if (rv != 0)
+		return (rv);
+	if (a->ks_unit > b->ks_unit)
+		return (1);
+	if (a->ks_unit < b->ks_unit)
+		return (-1);
+
+	return (0);
+}
+
+RBT_PROTOTYPE(kstat_tree, kstat_entry, entry, kstat_cmp);
+RBT_GENERATE(kstat_tree, kstat_entry, entry, kstat_cmp);
+
+static int
+printable(int ch)
+{
+	if (ch == '\0')
+		return ('_');
+	if (!isprint(ch))
+		return ('~');
+	return (ch);
+}
+
+static void
+hexdump(const void *d, size_t datalen)
+{
+	const uint8_t *data = d;
+	size_t i, j = 0;
+
+	for (i = 0; i < datalen; i += j) {
+		printf("%4zu: ", i);
+
+		for (j = 0; j < 16 && i+j < datalen; j++)
+			printf("%02x ", data[i + j]);
+		while (j++ < 16)
+			printf("   ");
+		printf("|");
+
+		for (j = 0; j < 16 && i+j < datalen; j++)
+			putchar(printable(data[i + j]));
+		printf("|\n");
+	}
+}
+
+static void
+strdump(const void *s, size_t len)
+{
+	const char *str = s;
+	char dst[8];
+	size_t i;
+
+	for (i = 0; i < len; i++) {
+		char ch = str[i];
+		if (ch == '\0')
+			break;
+
+		vis(dst, ch, VIS_TAB | VIS_NL, 0);
+		printf("%s", dst);
+	}
+}
+
+static void
+strdumpnl(const void *s, size_t len)
+{
+	strdump(s, len);
+	printf("\n");
+}
+
+static void
+kstat_kv(const void *d, ssize_t len)
+{
+	const uint8_t *buf;
+	const struct kstat_kv *kv;
+	ssize_t blen;
+	void (*trailer)(const void *, size_t);
+	double f;
+
+	if (len < (ssize_t)sizeof(*kv)) {
+		warn("short kv (len %zu < size %zu)", len, sizeof(*kv));
+		return;
+	}
+
+	buf = d;
+	do {
+		kv = (const struct kstat_kv *)buf;
+
+		buf += sizeof(*kv);
+		len -= sizeof(*kv);
+
+		blen = 0;
+		trailer = hexdump;
+
+		printf("%16.16s: ", kv->kv_key);
+
+		switch (kv->kv_type) {
+		case KSTAT_KV_T_NULL:
+			printf("null");
+			break;
+		case KSTAT_KV_T_BOOL:
+			printf("%s", kstat_kv_bool(kv) ?
"true" : "false"); + break; + case KSTAT_KV_T_COUNTER64: + case KSTAT_KV_T_UINT64: + printf("%" PRIu64, kstat_kv_u64(kv)); + break; + case KSTAT_KV_T_INT64: + printf("%" PRId64, kstat_kv_s64(kv)); + break; + case KSTAT_KV_T_COUNTER32: + case KSTAT_KV_T_UINT32: + printf("%" PRIu32, kstat_kv_u32(kv)); + break; + case KSTAT_KV_T_INT32: + printf("%" PRId32, kstat_kv_s32(kv)); + break; + case KSTAT_KV_T_STR: + blen = kstat_kv_len(kv); + trailer = strdumpnl; + break; + case KSTAT_KV_T_BYTES: + blen = kstat_kv_len(kv); + trailer = hexdump; + + printf("\n"); + break; + + case KSTAT_KV_T_ISTR: + strdump(kstat_kv_istr(kv), sizeof(kstat_kv_istr(kv))); + break; + + case KSTAT_KV_T_TEMP: + f = kstat_kv_temp(kv); + printf("%.2f degC", (f - 273150000.0) / 1000000.0); + break; + + default: + printf("unknown type %u, stopping\n", kv->kv_type); + return; + } + + switch (kv->kv_unit) { + case KSTAT_KV_U_NONE: + break; + case KSTAT_KV_U_PACKETS: + printf(" packets"); + break; + case KSTAT_KV_U_BYTES: + printf(" bytes"); + break; + case KSTAT_KV_U_CYCLES: + printf(" cycles"); + break; + + default: + printf(" unit-type-%u", kv->kv_unit); + break; + } + + if (blen > 0) { + if (blen > len) { + blen = len; + } + + (*trailer)(buf, blen); + } else + printf("\n"); + + blen = roundup(blen, KSTAT_KV_ALIGN); + buf += blen; + len -= blen; + } while (len >= (ssize_t)sizeof(*kv)); +} + +static void +kstat_list(int fd, unsigned int version) +{ + struct kstat_entry *kse; + struct kstat_req *ksreq; + size_t len; + uint64_t id = 0; + struct kstat_tree kstat_tree = RBT_INITIALIZER(); + + for (;;) { + kse = malloc(sizeof(*kse)); + if (kse == NULL) + err(1, NULL); + + memset(kse, 0, sizeof(*kse)); + ksreq = &kse->kstat; + ksreq->ks_version = version; + ksreq->ks_id = ++id; + + ksreq->ks_datalen = len = 64; /* magic */ + ksreq->ks_data = malloc(len); + if (ksreq->ks_data == NULL) + err(1, "data alloc"); + + if (ioctl(fd, KSTATIOC_NFIND_ID, ksreq) == -1) { + if (errno == ENOENT) { + free(ksreq->ks_data); + free(kse); + break; + } + + kse->serrno = errno; + goto next; + } + + while (ksreq->ks_datalen > len) { + len = ksreq->ks_datalen; + ksreq->ks_data = realloc(ksreq->ks_data, len); + if (ksreq->ks_data == NULL) + err(1, "data resize (%zu)", len); + + if (ioctl(fd, KSTATIOC_FIND_ID, ksreq) == -1) + err(1, "find id %llu", id); + } + +next: + if (RBT_INSERT(kstat_tree, &kstat_tree, kse) != NULL) + errx(1, "duplicate kstat entry"); + + id = ksreq->ks_id; + } + + RBT_FOREACH(kse, kstat_tree, &kstat_tree) { + ksreq = &kse->kstat; + printf("%s:%u:%s:%u\n", + ksreq->ks_provider, ksreq->ks_instance, + ksreq->ks_name, ksreq->ks_unit); + if (kse->serrno != 0) { + printf("\t%s\n", strerror(kse->serrno)); + continue; + } + switch (ksreq->ks_type) { + case KSTAT_T_RAW: + hexdump(ksreq->ks_data, ksreq->ks_datalen); + break; + case KSTAT_T_KV: + kstat_kv(ksreq->ks_data, ksreq->ks_datalen); + break; + default: + hexdump(ksreq->ks_data, ksreq->ks_datalen); + break; + } + } +}