dlg@bgp0 net$ kstat pf::: pf:0:pf-counters:0 match: 1411 bad-offset: 0 fragment: 0 short: 16 normalize: 0 memory: 0 bad-timestamp: 0 congestion: 0 ip-option: 3 proto-cksum: 0 state-mismatch: 0 state-insert: 0 state-limit: 0 src-limit: 0 synproxy: 0 translate: 0 no-route: 0 pf:0:pf-limits:0 rule-state: 0 src-node-state: 0 src-node: 0 src-conn: 0 src-conn-rate: 0 overload-table: 0 overload-flush: 0 synflood: 0 syncookie-sent: 0 syncookie-valid: 0 pf:0:pf-src-nodes:0 count: 0 search: 0 insert: 0 removal: 0 pf:0:pf-states:0 count: 22 search: 24601 insert: 2745 removal: 2723 Index: pf_ioctl.c =================================================================== RCS file: /cvs/src/sys/net/pf_ioctl.c,v retrieving revision 1.415 diff -u -p -r1.415 pf_ioctl.c --- pf_ioctl.c 6 Jul 2023 04:55:05 -0000 1.415 +++ pf_ioctl.c 23 Feb 2024 01:48:17 -0000 @@ -37,6 +37,7 @@ #include "pfsync.h" #include "pflog.h" +#include "kstat.h" + #include <sys/param.h> #include <sys/systm.h> @@ -128,6 +129,10 @@ void pf_cleanup_tgetrule(struct pf_tr struct pf_rule pf_default_rule, pf_default_rule_new; +#if NKSTAT > 0 +static void pf_kstat_attach(void); +#endif /* NKSTAT > 0 */ + struct { char statusif[IFNAMSIZ]; u_int32_t debug; @@ -291,6 +296,10 @@ pfattach(int num) M_WAITOK|M_ZERO); CPUMEM_FOREACH(sf, &cmi, pf_anchor_stack) sf[PF_ANCHOR_STACK_MAX].sf_stack_top = &sf[0]; + +#if NKSTAT > 0 + pf_kstat_attach(); +#endif } int @@ -3365,3 +3374,273 @@ pf_rollback_trans(struct pf_trans *t) pf_free_trans(t); } } + +#if NKSTAT > 0 +#include <sys/kstat.h> + +struct pf_kstat_counters { + struct kstat_kv counters[PFRES_MAX]; +}; +static const char *pf_kstat_counters_names[] = PFRES_NAMES; + +struct pf_kstat_lcounters { + struct kstat_kv counters[LCNT_MAX]; +}; + +static const char *pf_kstat_lcounters_names[LCNT_MAX] = { + [LCNT_STATES] = "rule-state", + [LCNT_SRCSTATES] = "src-node-state", + [LCNT_SRCNODES] = "src-node", + [LCNT_SRCCONN] = "src-conn", + [LCNT_SRCCONNRATE] = "src-conn-rate", + [LCNT_OVERLOAD_TABLE] = "overload-table", 
[LCNT_OVERLOAD_FLUSH] = "overload-flush", + [LCNT_SYNFLOODS] = "synflood", + [LCNT_SYNCOOKIES_SENT] = "syncookie-sent", + [LCNT_SYNCOOKIES_VALID] = "syncookie-valid", +}; + +struct pf_kstat_fcounters { + struct kstat_kv count; + struct kstat_kv counters[FCNT_MAX]; +}; + +static const char *pf_kstat_fcounters_names[FCNT_MAX] = { + [FCNT_STATE_SEARCH] = "search", + [FCNT_STATE_INSERT] = "insert", + [FCNT_STATE_REMOVALS] = "removal", +}; + +struct pf_kstat_scounters { + struct kstat_kv count; + struct kstat_kv counters[SCNT_MAX]; +}; + +static const char *pf_kstat_scounters_names[SCNT_MAX] = { + [SCNT_SRC_NODE_SEARCH] = "search", + [SCNT_SRC_NODE_INSERT] = "insert", + [SCNT_SRC_NODE_REMOVALS] = "removal", +}; + +static void +pf_kstat_u64s_read(struct kstat_kv *kvs, uint64_t *c, size_t n) +{ + size_t i; + + NET_LOCK_SHARED(); + PF_LOCK(); + + for (i = 0; i < n; i++) + kstat_kv_u64(&kvs[i]) = c[i]; + + PF_UNLOCK(); + NET_UNLOCK_SHARED(); +} + +static int +pf_kstat_counters_read(struct kstat *ks) +{ + struct pf_kstat_counters *pkc = ks->ks_data; + + pf_kstat_u64s_read(pkc->counters, + pf_status.counters, nitems(pkc->counters)); + + nanouptime(&ks->ks_updated); + + return (0); +} + +static void +pf_kstat_counters_attach(void) +{ + struct kstat *ks; + struct pf_kstat_counters *pkc; + size_t i; + + pkc = malloc(sizeof(*pkc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); + if (pkc == NULL) { + printf("pf: unable to allocate pf-counters kstat\n"); + return; + } + + ks = kstat_create("pf", 0, "pf-counters", 0, KSTAT_T_KV, 0); + if (ks == NULL) { + printf("pf: unable to create pf-counters kstat\n"); + free(pkc, M_DEVBUF, sizeof(*pkc)); + return; + } + + for (i = 0; i < nitems(pkc->counters); i++) { + struct kstat_kv *kv = &pkc->counters[i]; + + kstat_kv_init(kv, pf_kstat_counters_names[i], + KSTAT_KV_T_COUNTER64); + } + + ks->ks_data = pkc; + ks->ks_datalen = sizeof(*pkc); + ks->ks_read = pf_kstat_counters_read; + + kstat_install(ks); +} + +static int +pf_kstat_lcounters_read(struct 
kstat *ks) +{ + struct pf_kstat_lcounters *lkc = ks->ks_data; + + pf_kstat_u64s_read(lkc->counters, + pf_status.lcounters, nitems(lkc->counters)); + + nanouptime(&ks->ks_updated); + + return (0); +} + +static void +pf_kstat_lcounters_attach(void) +{ + struct kstat *ks; + struct pf_kstat_lcounters *lkc; + size_t i; + + lkc = malloc(sizeof(*lkc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); + if (lkc == NULL) { + printf("pf: unable to allocate pf-limits kstat\n"); + return; + } + + ks = kstat_create("pf", 0, "pf-limits", 0, KSTAT_T_KV, 0); + if (ks == NULL) { + printf("pf: unable to create pf-limits kstat\n"); + free(lkc, M_DEVBUF, sizeof(*lkc)); + return; + } + + for (i = 0; i < nitems(lkc->counters); i++) { + struct kstat_kv *kv = &lkc->counters[i]; + + kstat_kv_init(kv, pf_kstat_lcounters_names[i], + KSTAT_KV_T_COUNTER64); + } + + ks->ks_data = lkc; + ks->ks_datalen = sizeof(*lkc); + ks->ks_read = pf_kstat_lcounters_read; + + kstat_install(ks); +} + +static int +pf_kstat_fcounters_read(struct kstat *ks) +{ + struct pf_kstat_fcounters *fkc = ks->ks_data; + + pf_kstat_u64s_read(fkc->counters, + pf_status.fcounters, nitems(fkc->counters)); + kstat_kv_u64(&fkc->count) = + kstat_kv_u64(&fkc->counters[FCNT_STATE_INSERT]) - + kstat_kv_u64(&fkc->counters[FCNT_STATE_REMOVALS]); + + nanouptime(&ks->ks_updated); + + return (0); +} + +static void +pf_kstat_fcounters_attach(void) +{ + struct kstat *ks; + struct pf_kstat_fcounters *fkc; + size_t i; + + fkc = malloc(sizeof(*fkc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); + if (fkc == NULL) { + printf("pf: unable to allocate pf-states kstat\n"); + return; + } + + ks = kstat_create("pf", 0, "pf-states", 0, KSTAT_T_KV, 0); + if (ks == NULL) { + printf("pf: unable to create pf-states kstat\n"); + free(fkc, M_DEVBUF, sizeof(*fkc)); + return; + } + + kstat_kv_init(&fkc->count, "count", KSTAT_KV_T_UINT64); + for (i = 0; i < nitems(fkc->counters); i++) { + struct kstat_kv *kv = &fkc->counters[i]; + + kstat_kv_init(kv, pf_kstat_fcounters_names[i], 
+ KSTAT_KV_T_COUNTER64); + } + + ks->ks_data = fkc; + ks->ks_datalen = sizeof(*fkc); + ks->ks_read = pf_kstat_fcounters_read; + + kstat_install(ks); +} + +static int +pf_kstat_scounters_read(struct kstat *ks) +{ + struct pf_kstat_scounters *skc = ks->ks_data; + + pf_kstat_u64s_read(skc->counters, + pf_status.scounters, nitems(skc->counters)); + kstat_kv_u64(&skc->count) = + kstat_kv_u64(&skc->counters[SCNT_SRC_NODE_INSERT]) - + kstat_kv_u64(&skc->counters[SCNT_SRC_NODE_REMOVALS]); + + nanouptime(&ks->ks_updated); + + return (0); +} + +static void +pf_kstat_scounters_attach(void) +{ + struct kstat *ks; + struct pf_kstat_scounters *skc; + size_t i; + + skc = malloc(sizeof(*skc), M_DEVBUF, M_WAITOK|M_CANFAIL|M_ZERO); + if (skc == NULL) { + printf("pf: unable to allocate pf-src-nodes kstat\n"); + return; + } + + ks = kstat_create("pf", 0, "pf-src-nodes", 0, KSTAT_T_KV, 0); + if (ks == NULL) { + printf("pf: unable to create pf-src-nodes kstat\n"); + free(skc, M_DEVBUF, sizeof(*skc)); + return; + } + + kstat_kv_init(&skc->count, "count", KSTAT_KV_T_UINT64); + for (i = 0; i < nitems(skc->counters); i++) { + struct kstat_kv *kv = &skc->counters[i]; + + kstat_kv_init(kv, pf_kstat_scounters_names[i], + KSTAT_KV_T_COUNTER64); + } + + ks->ks_data = skc; + ks->ks_datalen = sizeof(*skc); + ks->ks_read = pf_kstat_scounters_read; + + kstat_install(ks); +} + +static void +pf_kstat_attach(void) +{ + /* these are just hanging out in the breeze */ + pf_kstat_counters_attach(); + pf_kstat_lcounters_attach(); + pf_kstat_fcounters_attach(); + pf_kstat_scounters_attach(); +} + +#endif /* NKSTAT > 0 */