Index: if_vmx.c
===================================================================
RCS file: /cvs/src/sys/dev/pci/if_vmx.c,v
retrieving revision 1.55
diff -u -p -r1.55 if_vmx.c
--- if_vmx.c	27 Oct 2019 22:24:40 -0000	1.55
+++ if_vmx.c	28 May 2020 02:27:02 -0000
@@ -17,6 +17,7 @@
  */
 
 #include "bpfilter.h"
+#include "kstat.h"
 
 #include 
 #include 
@@ -25,8 +26,10 @@
 #include 
 #include 
 #include 
+#include 
 
 #include 
+#include 
 #include 
 #include 
@@ -42,8 +45,7 @@
 #include 
 #include 
 
-#define NRXQUEUE 1
-#define NTXQUEUE 1
+#define VMX_MAX_QUEUES 4
 
 #define NTXDESC 512 /* tx ring size */
 #define NTXSEGS 8 /* tx descriptors per packet */
@@ -95,12 +97,39 @@ struct vmxnet3_txqueue {
 	struct vmxnet3_txring cmd_ring;
 	struct vmxnet3_comp_ring comp_ring;
 	struct vmxnet3_txq_shared *ts;
+	struct ifqueue *ifq;
+};
+
+struct vmx_rxqueue_kstat {
+	struct kstat_kv shared;
+	struct vmxnet3_rxq_shared shared_bytes;
+
+	struct kstat_kv str;
+	char str_buf[3];
+
+	struct kstat_kv ring0_fill;
+	struct kstat_kv ring0_gen;
+	struct kstat_kv ring1_fill;
+	struct kstat_kv ring1_gen;
+	struct kstat_kv comp_next;
+	struct kstat_kv comp_gen;
 };
 
 struct vmxnet3_rxqueue {
 	struct vmxnet3_rxring cmd_ring[2];
 	struct vmxnet3_comp_ring comp_ring;
 	struct vmxnet3_rxq_shared *rs;
+	struct ifiqueue *ifiq;
+
+	struct kstat *kstat;
+};
+
+struct vmxnet3_queue {
+	struct vmxnet3_txqueue tx;
+	struct vmxnet3_rxqueue rx;
+	struct vmxnet3_softc *sc;
+	char intrname[8];
+	int intr;
 };
 
 struct vmxnet3_softc {
@@ -114,11 +143,14 @@ struct vmxnet3_softc {
 	bus_space_handle_t sc_ioh1;
 	bus_dma_tag_t sc_dmat;
 	void *sc_ih;
+	void *sc_qih[VMX_MAX_QUEUES];
+	int sc_nintr;
+	int sc_nqueues;
 
-	struct vmxnet3_txqueue sc_txq[NTXQUEUE];
-	struct vmxnet3_rxqueue sc_rxq[NRXQUEUE];
+	struct vmxnet3_queue sc_q[VMX_MAX_QUEUES];
 	struct vmxnet3_driver_shared *sc_ds;
 	u_int8_t *sc_mcast;
+	struct vmxnet3_upt1_rss_conf *sc_rss;
 };
 
 #define VMXNET3_STAT
@@ -153,10 +185,10 @@ struct {
 int vmxnet3_match(struct device *, void *, void *);
 void vmxnet3_attach(struct device *, struct device *, void *);
 int vmxnet3_dma_init(struct vmxnet3_softc *);
-int vmxnet3_alloc_txring(struct vmxnet3_softc *, int);
-int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int);
+int vmxnet3_alloc_txring(struct vmxnet3_softc *, int, int);
+int vmxnet3_alloc_rxring(struct vmxnet3_softc *, int, int);
 void vmxnet3_txinit(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
-void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
+void vmxnet3_rxinit(struct vmxnet3_softc *, struct vmxnet3_rxqueue *, int);
 void vmxnet3_txstop(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
 void vmxnet3_rxstop(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
 void vmxnet3_link_state(struct vmxnet3_softc *);
@@ -164,6 +196,8 @@ void vmxnet3_enable_all_intrs(struct vmx
 void vmxnet3_disable_all_intrs(struct vmxnet3_softc *);
 int vmxnet3_intr(void *);
 int vmxnet3_intr_intx(void *);
+int vmxnet3_intr_event(void *);
+int vmxnet3_intr_queue(void *);
 void vmxnet3_evintr(struct vmxnet3_softc *);
 void vmxnet3_txintr(struct vmxnet3_softc *, struct vmxnet3_txqueue *);
 void vmxnet3_rxintr(struct vmxnet3_softc *, struct vmxnet3_rxqueue *);
@@ -212,6 +246,7 @@ vmxnet3_attach(struct device *parent, st
 	u_int memtype, ver, macl, mach, intrcfg;
 	u_char enaddr[ETHER_ADDR_LEN];
 	int (*isr)(void *);
+	int i;
 
 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 0x10);
 	if (pci_mapreg_map(pa, 0x10, memtype, 0, &sc->sc_iot0, &sc->sc_ioh0,
@@ -235,24 +270,29 @@ vmxnet3_attach(struct device *parent, st
 	ver = READ_BAR1(sc, VMXNET3_BAR1_UVRS);
 	if ((ver & 0x1) == 0) {
-		printf(": incompatiable UPT version 0x%x\n", ver);
+		printf(": incompatible UPT version 0x%x\n", ver);
 		return;
 	}
 	WRITE_BAR1(sc, VMXNET3_BAR1_UVRS, 1);
 
 	sc->sc_dmat = pa->pa_dmat;
-	if (vmxnet3_dma_init(sc)) {
-		printf(": failed to setup DMA\n");
-		return;
-	}
 
 	WRITE_CMD(sc, VMXNET3_CMD_GET_INTRCFG);
 	intrcfg = READ_BAR1(sc, VMXNET3_BAR1_CMD);
 	isr = vmxnet3_intr;
+	sc->sc_nintr = 0;
+	sc->sc_nqueues = 1;
 
 	switch (intrcfg & VMXNET3_INTRCFG_TYPE_MASK) {
 	case VMXNET3_INTRCFG_TYPE_AUTO:
 	case VMXNET3_INTRCFG_TYPE_MSIX:
+		if (pci_intr_map_msix(pa, 0, &ih) == 0) {
+			isr = vmxnet3_intr_event;
+			sc->sc_nqueues = VMX_MAX_QUEUES;
+			sc->sc_nintr = sc->sc_nqueues + 1;
+			break;
+		}
+
+		/* FALLTHROUGH */
 	case VMXNET3_INTRCFG_TYPE_MSI:
 		if (pci_intr_map_msi(pa, &ih) == 0)
@@ -273,6 +313,34 @@ vmxnet3_attach(struct device *parent, st
 	if (intrstr)
 		printf(": %s", intrstr);
 
+	if (sc->sc_nintr > 1) {
+		for (i = 0; i < sc->sc_nqueues; i++) {
+			struct vmxnet3_queue *q;
+			int vec;
+
+			q = &sc->sc_q[i];
+			vec = i + 1;
+			if (pci_intr_map_msix(pa, vec, &ih) != 0) {
+				printf(", failed to map interrupt %d\n", vec);
+				return;
+			}
+			snprintf(q->intrname, sizeof(q->intrname), "%s:%d", self->dv_xname, i);
+			sc->sc_qih[i] = pci_intr_establish(pa->pa_pc, ih,
+			    IPL_NET | IPL_MPSAFE, vmxnet3_intr_queue, q,
+			    q->intrname);
+
+			q->intr = vec;
+			q->sc = sc;
+		}
+	}
+
+	if (vmxnet3_dma_init(sc)) {
+		printf(": failed to setup DMA\n");
+		return;
+	}
+
+	printf(", %d queue%s", sc->sc_nqueues, sc->sc_nqueues > 1 ? "s" : "");
+
 	WRITE_CMD(sc, VMXNET3_CMD_GET_MACL);
 	macl = READ_BAR1(sc, VMXNET3_BAR1_CMD);
 	enaddr[0] = macl;
@@ -319,6 +387,14 @@ vmxnet3_attach(struct device *parent, st
 	if_attach(ifp);
 	ether_ifattach(ifp);
 	vmxnet3_link_state(sc);
+
+	if_attach_queues(ifp, sc->sc_nqueues);
+	if_attach_iqueues(ifp, sc->sc_nqueues);
+	for (i = 0; i < sc->sc_nqueues; i++) {
+		ifp->if_ifqs[i]->ifq_softc = &sc->sc_q[i].tx;
+		sc->sc_q[i].tx.ifq = ifp->if_ifqs[i];
+		sc->sc_q[i].rx.ifiq = ifp->if_iqs[i];
+	}
 }
 
 int
@@ -328,25 +404,30 @@ vmxnet3_dma_init(struct vmxnet3_softc *s
 	struct vmxnet3_txq_shared *ts;
 	struct vmxnet3_rxq_shared *rs;
 	bus_addr_t ds_pa, qs_pa, mcast_pa;
-	int i, queue, qs_len;
+	int i, queue, qs_len, intr;
 	u_int major, minor, release_code, rev;
 
-	qs_len = NTXQUEUE * sizeof *ts + NRXQUEUE * sizeof *rs;
+	qs_len = sc->sc_nqueues * (sizeof *ts + sizeof *rs);
 	ts = vmxnet3_dma_allocmem(sc, qs_len, VMXNET3_DMADESC_ALIGN, &qs_pa);
 	if (ts == NULL)
 		return -1;
-	for (queue = 0; queue < NTXQUEUE; queue++)
-		sc->sc_txq[queue].ts = ts++;
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		sc->sc_q[queue].tx.ts = ts++;
 	rs = (void *)ts;
-	for (queue = 0; queue < NRXQUEUE; queue++)
-		sc->sc_rxq[queue].rs = rs++;
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		sc->sc_q[queue].rx.rs = rs++;
 
-	for (queue = 0; queue < NTXQUEUE; queue++)
-		if (vmxnet3_alloc_txring(sc, queue))
+	for (queue = 0; queue < sc->sc_nqueues; queue++) {
+		if (sc->sc_nintr > 0)
+			intr = queue + 1;
+		else
+			intr = 0;
+
+		if (vmxnet3_alloc_txring(sc, queue, intr))
 			return -1;
-	for (queue = 0; queue < NRXQUEUE; queue++)
-		if (vmxnet3_alloc_rxring(sc, queue))
+		if (vmxnet3_alloc_rxring(sc, queue, intr))
 			return -1;
+	}
 
 	sc->sc_mcast = vmxnet3_dma_allocmem(sc, 682 * ETHER_ADDR_LEN, 32, &mcast_pa);
 	if (sc->sc_mcast == NULL)
@@ -381,30 +462,57 @@ vmxnet3_dma_init(struct vmxnet3_softc *s
 #endif
 	ds->vmxnet3_revision = 1;
 	ds->upt_version = 1;
-	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN;
+	ds->upt_features = UPT1_F_CSUM | UPT1_F_VLAN | UPT1_F_RSS;
 	ds->driver_data = vtophys(sc);
 	ds->driver_data_len = sizeof(struct vmxnet3_softc);
 	ds->queue_shared = qs_pa;
 	ds->queue_shared_len = qs_len;
 	ds->mtu = VMXNET3_MAX_MTU;
-	ds->ntxqueue = NTXQUEUE;
-	ds->nrxqueue = NRXQUEUE;
+	ds->ntxqueue = sc->sc_nqueues;
+	ds->nrxqueue = sc->sc_nqueues;
 	ds->mcast_table = mcast_pa;
 	ds->automask = 1;
-	ds->nintr = VMXNET3_NINTR;
+	ds->nintr = sc->sc_nintr;
 	ds->evintr = 0;
 	ds->ictrl = VMXNET3_ICTRL_DISABLE_ALL;
-	for (i = 0; i < VMXNET3_NINTR; i++)
+	for (i = 0; i < sc->sc_nintr; i++)
 		ds->modlevel[i] = UPT1_IMOD_ADAPTIVE;
+
+	if (sc->sc_nqueues > 0) {
+		struct vmxnet3_upt1_rss_conf *rsscfg;
+		bus_addr_t rss_pa;
+
+		rsscfg = vmxnet3_dma_allocmem(sc, sizeof(*rsscfg), 8, &rss_pa);
+
+		rsscfg->hash_type = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
+		    UPT1_RSS_HASH_TYPE_IPV4 |
+		    UPT1_RSS_HASH_TYPE_TCP_IPV6 |
+		    UPT1_RSS_HASH_TYPE_IPV6;
+		rsscfg->hash_func = UPT1_RSS_HASH_FUNC_TOEPLITZ;
+		rsscfg->hash_key_size = sizeof(rsscfg->hash_key);
+		stoeplitz_to_key(rsscfg->hash_key, sizeof(rsscfg->hash_key));
+
+		rsscfg->ind_table_size = sizeof(rsscfg->ind_table);
+		for (i = 0; i < sizeof(rsscfg->ind_table); i++)
+			rsscfg->ind_table[i] = i % sc->sc_nqueues;
+
+		ds->upt_features |= UPT1_F_RSS;
+		ds->rss.version = 1;
+		ds->rss.len = sizeof(*rsscfg);
+		ds->rss.paddr = rss_pa;
+
+		sc->sc_rss = rsscfg;
+	}
+
 	WRITE_BAR1(sc, VMXNET3_BAR1_DSL, ds_pa);
 	WRITE_BAR1(sc, VMXNET3_BAR1_DSH, (u_int64_t)ds_pa >> 32);
 	return 0;
 }
 
 int
-vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue)
+vmxnet3_alloc_txring(struct vmxnet3_softc *sc, int queue, int intr)
 {
-	struct vmxnet3_txqueue *tq = &sc->sc_txq[queue];
+	struct vmxnet3_txqueue *tq = &sc->sc_q[queue].tx;
 	struct vmxnet3_txq_shared *ts;
 	struct vmxnet3_txring *ring = &tq->cmd_ring;
 	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
@@ -435,16 +543,16 @@ vmxnet3_alloc_txring(struct vmxnet3_soft
 	ts->comp_ring_len = NTXCOMPDESC;
 	ts->driver_data = vtophys(tq);
 	ts->driver_data_len = sizeof *tq;
-	ts->intr_idx = 0;
+	ts->intr_idx = intr;
 	ts->stopped = 1;
 	ts->error = 0;
 	return 0;
 }
 
 int
-vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue)
+vmxnet3_alloc_rxring(struct vmxnet3_softc *sc, int queue, int intr)
 {
-	struct vmxnet3_rxqueue *rq = &sc->sc_rxq[queue];
+	struct vmxnet3_rxqueue *rq = &sc->sc_q[queue].rx;
 	struct vmxnet3_rxq_shared *rs;
 	struct vmxnet3_rxring *ring;
 	struct vmxnet3_comp_ring *comp_ring;
@@ -487,7 +595,7 @@ vmxnet3_alloc_rxring(struct vmxnet3_soft
 	rs->comp_ring_len = NRXCOMPDESC;
 	rs->driver_data = vtophys(rq);
 	rs->driver_data_len = sizeof *rq;
-	rs->intr_idx = 0;
+	rs->intr_idx = intr;
 	rs->stopped = 1;
 	rs->error = 0;
 	return 0;
@@ -575,8 +683,37 @@ vmxnet3_rxfill(struct vmxnet3_rxring *ri
 		timeout_add(&ring->refill, 1);
 }
 
+#if NKSTAT > 0
+static int
+vmx_kstat_rq(struct kstat *ks)
+{
+	struct vmxnet3_rxqueue *rq = ks->ks_softc;
+	struct vmx_rxqueue_kstat *kvs = ks->ks_data;
+
+	mtx_enter(&rq->cmd_ring[0].mtx);
+	kvs->ring0_fill.kv_v.v_u32 = rq->cmd_ring[0].fill;
+	kvs->ring0_gen.kv_v.v_u32 = rq->cmd_ring[0].gen;
+	mtx_leave(&rq->cmd_ring[0].mtx);
+
+	mtx_enter(&rq->cmd_ring[1].mtx);
+	kvs->ring1_fill.kv_v.v_u32 = rq->cmd_ring[1].fill;
+	kvs->ring1_gen.kv_v.v_u32 = rq->cmd_ring[1].gen;
+	mtx_leave(&rq->cmd_ring[1].mtx);
+
+	/* lock? */
+	kvs->comp_next.kv_v.v_u32 = rq->comp_ring.next;
+	kvs->comp_gen.kv_v.v_u32 = rq->comp_ring.gen;
+
+	memcpy(&kvs->shared_bytes, rq->rs, sizeof(kvs->shared_bytes));
+
+	getnanouptime(&ks->ks_updated);
+
+	return (0);
+}
+#endif
+
 void
-vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq)
+vmxnet3_rxinit(struct vmxnet3_softc *sc, struct vmxnet3_rxqueue *rq, int idx)
 {
 	struct vmxnet3_rxring *ring;
 	struct vmxnet3_comp_ring *comp_ring;
@@ -600,6 +737,39 @@ vmxnet3_rxinit(struct vmxnet3_softc *sc,
 	comp_ring->next = 0;
 	comp_ring->gen = VMX_RXC_GEN;
 	bzero(comp_ring->rxcd, NRXCOMPDESC * sizeof comp_ring->rxcd[0]);
+
+#if NKSTAT > 0
+	rq->kstat = kstat_create(sc->sc_dev.dv_xname, 0, "rxring", idx,
+	    KSTAT_T_KV, 0);
+	if (rq->kstat != NULL) {
+		struct vmx_rxqueue_kstat *kvs;
+
+		kvs = malloc(sizeof(*kvs), M_DEVBUF, M_WAITOK|M_ZERO);
+
+		kstat_kv_init(&kvs->shared, "shared", KSTAT_KV_T_BYTES);
+		kvs->shared.kv_v.v_len = sizeof(kvs->shared_bytes);
+
+		kstat_kv_init(&kvs->str, "str", KSTAT_KV_T_STR);
+		kvs->str.kv_v.v_len = sizeof(kvs->str_buf);
+
+		kvs->str_buf[0] = 'h';
+		kvs->str_buf[1] = 'i';
+		kvs->str_buf[2] = '\0';
+
+		kstat_kv_init(&kvs->ring0_fill, "fill0", KSTAT_KV_T_UINT32);
+		kstat_kv_init(&kvs->ring0_gen, "gen0", KSTAT_KV_T_UINT32);
+		kstat_kv_init(&kvs->ring1_fill, "fill1", KSTAT_KV_T_UINT32);
+		kstat_kv_init(&kvs->ring1_gen, "gen1", KSTAT_KV_T_UINT32);
+		kstat_kv_init(&kvs->comp_next, "nextC", KSTAT_KV_T_UINT32);
+		kstat_kv_init(&kvs->comp_gen, "genC", KSTAT_KV_T_UINT32);
+
+		rq->kstat->ks_softc = rq;
+		rq->kstat->ks_data = kvs;
+		rq->kstat->ks_datalen = sizeof(*kvs);
+		rq->kstat->ks_read = vmx_kstat_rq;
+		kstat_install(rq->kstat);
+	}
+#endif
 }
 
 void
@@ -677,7 +847,7 @@ vmxnet3_enable_all_intrs(struct vmxnet3_
 	int i;
 
 	sc->sc_ds->ictrl &= ~VMXNET3_ICTRL_DISABLE_ALL;
-	for (i = 0; i < VMXNET3_NINTR; i++)
+	for (i = 0; i < sc->sc_nintr; i++)
 		vmxnet3_enable_intr(sc, i);
 }
 
@@ -687,7 +857,7 @@ vmxnet3_disable_all_intrs(struct vmxnet3
 	int i;
 
 	sc->sc_ds->ictrl |= VMXNET3_ICTRL_DISABLE_ALL;
-	for (i = 0; i < VMXNET3_NINTR; i++)
+	for (i = 0; i < sc->sc_nintr; i++)
 		vmxnet3_disable_intr(sc, i);
 }
 
@@ -715,14 +885,41 @@ vmxnet3_intr(void *arg)
 	}
 
 	if (ifp->if_flags & IFF_RUNNING) {
-		vmxnet3_rxintr(sc, &sc->sc_rxq[0]);
-		vmxnet3_txintr(sc, &sc->sc_txq[0]);
+		vmxnet3_rxintr(sc, &sc->sc_q[0].rx);
+		vmxnet3_txintr(sc, &sc->sc_q[0].tx);
 		vmxnet3_enable_intr(sc, 0);
 	}
 
 	return 1;
 }
 
+int
+vmxnet3_intr_event(void *arg)
+{
+	struct vmxnet3_softc *sc = arg;
+
+	if (sc->sc_ds->event) {
+		KERNEL_LOCK();
+		vmxnet3_evintr(sc);
+		KERNEL_UNLOCK();
+	}
+
+	vmxnet3_enable_intr(sc, 0);
+	return 1;
+}
+
+int
+vmxnet3_intr_queue(void *arg)
+{
+	struct vmxnet3_queue *q = arg;
+
+	vmxnet3_rxintr(q->sc, &q->rx);
+	vmxnet3_txintr(q->sc, &q->tx);
+	vmxnet3_enable_intr(q->sc, q->intr);
+
+	return 1;
+}
+
 void
 vmxnet3_evintr(struct vmxnet3_softc *sc)
 {
@@ -742,10 +939,10 @@ vmxnet3_evintr(struct vmxnet3_softc *sc)
 
 	if (event & (VMXNET3_EVENT_TQERROR | VMXNET3_EVENT_RQERROR)) {
 		WRITE_CMD(sc, VMXNET3_CMD_GET_STATUS);
-		ts = sc->sc_txq[0].ts;
+		ts = sc->sc_q[0].tx.ts;
 		if (ts->stopped)
 			printf("%s: TX error 0x%x\n", ifp->if_xname, ts->error);
-		rs = sc->sc_rxq[0].rs;
+		rs = sc->sc_q[0].rx.rs;
 		if (rs->stopped)
 			printf("%s: RX error 0x%x\n", ifp->if_xname, rs->error);
 		vmxnet3_init(sc);
@@ -761,7 +958,7 @@ vmxnet3_evintr(struct vmxnet3_softc *sc)
 void
 vmxnet3_txintr(struct vmxnet3_softc *sc, struct vmxnet3_txqueue *tq)
 {
-	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
+	struct ifqueue *ifq = tq->ifq;
 	struct vmxnet3_txring *ring = &tq->cmd_ring;
 	struct vmxnet3_comp_ring *comp_ring = &tq->comp_ring;
 	struct vmxnet3_txcompdesc *txcd;
@@ -808,8 +1005,8 @@ vmxnet3_txintr(struct vmxnet3_softc *sc,
 	comp_ring->gen = rgen;
 	ring->cons = cons;
 
-	if (ifq_is_oactive(&ifp->if_snd))
-		ifq_restart(&ifp->if_snd);
+	if (ifq_is_oactive(ifq))
+		ifq_restart(ifq);
 }
 
 void
@@ -842,7 +1039,7 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc,
 		idx = letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_IDX_S) &
 		    VMXNET3_RXC_IDX_M);
 		if (letoh32((rxcd->rxc_word0 >> VMXNET3_RXC_QID_S) &
-		    VMXNET3_RXC_QID_M) < NRXQUEUE)
+		    VMXNET3_RXC_QID_M) < sc->sc_nqueues)
 			ring = &rq->cmd_ring[0];
 		else
 			ring = &rq->cmd_ring[1];
@@ -878,6 +1075,10 @@ vmxnet3_rxintr(struct vmxnet3_softc *sc,
 			m->m_pkthdr.ether_vtag = letoh32((rxcd->rxc_word2 >>
 			    VMXNET3_RXC_VLANTAG_S) & VMXNET3_RXC_VLANTAG_M);
 		}
+		if (((letoh32(rxcd->rxc_word0) >> VMXNET3_RXC_RSSTYPE_S) &
+		    VMXNET3_RXC_RSSTYPE_M) != VMXNET3_RXC_RSSTYPE_NONE) {
+			m->m_pkthdr.ph_flowid = letoh32(rxcd->rxc_word1);
+		}
 
 		ml_enqueue(&ml, m);
 
@@ -890,10 +1091,10 @@ skip_buffer:
 			    VMXNET3_RXC_QID_S) & VMXNET3_RXC_QID_M);
 			idx = (idx + 1) % NRXDESC;
 
-			if (qid < NRXQUEUE) {
+			if (qid < sc->sc_nqueues) {
 				WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(qid), idx);
 			} else {
-				qid -= NRXQUEUE;
+				qid -= sc->sc_nqueues;
 				WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(qid), idx);
 			}
 		}
@@ -907,7 +1108,7 @@ skip_buffer:
 
 	ring = &rq->cmd_ring[0];
 
-	if (ifiq_input(&ifp->if_rcv, &ml))
+	if (ifiq_input(rq->ifiq, &ml))
 		if_rxr_livelocked(&ring->rxr);
 
 	/* XXX Should we (try to) allocate buffers for ring 2 too? */
@@ -1000,12 +1201,17 @@ vmxnet3_stop(struct ifnet *ifp)
 
 	WRITE_CMD(sc, VMXNET3_CMD_DISABLE);
 
-	intr_barrier(sc->sc_ih);
+	if (sc->sc_nintr == 1)
+		intr_barrier(sc->sc_ih);
+	else {
+		for (queue = 0; queue < sc->sc_nqueues; queue++)
+			intr_barrier(sc->sc_qih[queue]);
+	}
 
-	for (queue = 0; queue < NTXQUEUE; queue++)
-		vmxnet3_txstop(sc, &sc->sc_txq[queue]);
-	for (queue = 0; queue < NRXQUEUE; queue++)
-		vmxnet3_rxstop(sc, &sc->sc_rxq[queue]);
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		vmxnet3_txstop(sc, &sc->sc_q[queue].tx);
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		vmxnet3_rxstop(sc, &sc->sc_q[queue].rx);
 }
 
 void
@@ -1030,12 +1236,12 @@ vmxnet3_init(struct vmxnet3_softc *sc)
 	vmxnet3_reset(sc);
 #endif
 
-	for (queue = 0; queue < NTXQUEUE; queue++)
-		vmxnet3_txinit(sc, &sc->sc_txq[queue]);
-	for (queue = 0; queue < NRXQUEUE; queue++)
-		vmxnet3_rxinit(sc, &sc->sc_rxq[queue]);
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		vmxnet3_txinit(sc, &sc->sc_q[queue].tx);
+	for (queue = 0; queue < sc->sc_nqueues; queue++)
+		vmxnet3_rxinit(sc, &sc->sc_q[queue].rx, queue);
 
-	for (queue = 0; queue < NRXQUEUE; queue++) {
+	for (queue = 0; queue < sc->sc_nqueues; queue++) {
 		WRITE_BAR0(sc, VMXNET3_BAR0_RXH1(queue), 0);
 		WRITE_BAR0(sc, VMXNET3_BAR0_RXH2(queue), 0);
 	}
@@ -1092,7 +1298,7 @@ vmxnet3_ioctl(struct ifnet *ifp, u_long 
 		break;
 	case SIOCGIFRXR:
 		error = if_rxr_ioctl((struct if_rxrinfo *)ifr->ifr_data,
-		    NULL, JUMBO_LEN, &sc->sc_rxq[0].cmd_ring[0].rxr);
+		    NULL, JUMBO_LEN, &sc->sc_q[0].rx.cmd_ring[0].rxr);
 		break;
 	default:
 		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
@@ -1131,7 +1337,7 @@ vmxnet3_start(struct ifqueue *ifq)
 {
 	struct ifnet *ifp = ifq->ifq_if;
 	struct vmxnet3_softc *sc = ifp->if_softc;
-	struct vmxnet3_txqueue *tq = sc->sc_txq;
+	struct vmxnet3_txqueue *tq = ifq->ifq_softc;
 	struct vmxnet3_txring *ring = &tq->cmd_ring;
 	struct vmxnet3_txdesc *txd, *sop;
 	bus_dmamap_t map;