Index: if_qe.c =================================================================== RCS file: /cvs/src/sys/arch/vax/if/Attic/if_qe.c,v diff -u -p -r1.35 if_qe.c --- if_qe.c 4 Jul 2015 10:12:52 -0000 1.35 +++ if_qe.c 3 Jun 2024 10:38:49 -0000 @@ -65,6 +65,9 @@ #define RXDESCS 30 /* # of receive descriptors */ #define TXDESCS 60 /* # transmit descs */ +#define TXSEGS 16 /* max # segments in a tx packet */ + +#define QE_SETUPLEN 128 /* * Structure containing the elements that must be in DMA-safe memory. @@ -72,7 +75,6 @@ struct qe_cdata { struct qe_ring qc_recv[RXDESCS+1]; /* Receive descriptors */ struct qe_ring qc_xmit[TXDESCS+1]; /* Transmit descriptors */ - u_int8_t qc_setup[128]; /* Setup packet layout */ }; struct qe_softc { @@ -86,15 +88,14 @@ struct qe_softc { bus_dma_tag_t sc_dmat; struct qe_cdata *sc_qedata; /* Descriptor struct */ struct qe_cdata *sc_pqedata; /* Unibus address of above */ - struct mbuf* sc_txmbuf[TXDESCS]; - struct mbuf* sc_rxmbuf[RXDESCS]; - bus_dmamap_t sc_xmtmap[TXDESCS]; - bus_dmamap_t sc_rcvmap[RXDESCS]; + struct mbuf* sc_tx_mbuf[TXDESCS]; + struct mbuf* sc_rx_mbuf[RXDESCS]; + bus_dmamap_t sc_tx_maps[TXDESCS]; + bus_dmamap_t sc_rx_maps[RXDESCS]; + unsigned int sc_tx_prod; + unsigned int sc_tx_cons; struct ubinfo sc_ui; int sc_intvec; /* Interrupt vector */ - int sc_nexttx; - int sc_inq; - int sc_lastack; int sc_nextrx; int sc_setup; /* Setup packet in queue */ }; @@ -106,7 +107,7 @@ static void qestart(struct ifnet *); static void qeintr(void *); static int qeioctl(struct ifnet *, u_long, caddr_t); static int qe_add_rxbuf(struct qe_softc *, int); -static void qe_setup(struct qe_softc *); +static int qe_setup(struct qe_softc *); static void qetimeout(struct ifnet *); struct cfattach qe_ca = { @@ -239,8 +240,8 @@ qeattach(struct device *parent, struct d */ for (i = 0; i < TXDESCS; i++) { if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, - 1, MCLBYTES, 0, BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, - &sc->sc_xmtmap[i]))) { + TXSEGS, MCLBYTES, 0, 
BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW, + &sc->sc_tx_maps[i]))) { printf(": unable to create tx DMA map %d, error = %d\n", i, error); goto fail_4; @@ -253,7 +254,7 @@ qeattach(struct device *parent, struct d for (i = 0; i < RXDESCS; i++) { if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, MCLBYTES, 0, BUS_DMA_NOWAIT, - &sc->sc_rcvmap[i]))) { + &sc->sc_rx_maps[i]))) { printf(": unable to create rx DMA map %d, error = %d\n", i, error); goto fail_5; @@ -338,20 +339,18 @@ qeattach(struct device *parent, struct d */ fail_6: for (i = 0; i < RXDESCS; i++) { - if (sc->sc_rxmbuf[i] != NULL) { - bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]); - m_freem(sc->sc_rxmbuf[i]); - } + if (sc->sc_rx_mbuf[i] != NULL) + m_freem(sc->sc_rx_mbuf[i]); } fail_5: for (i = 0; i < RXDESCS; i++) { - if (sc->sc_rcvmap[i] != NULL) - bus_dmamap_destroy(sc->sc_dmat, sc->sc_rcvmap[i]); + if (sc->sc_rx_maps[i] != NULL) + bus_dmamap_destroy(sc->sc_dmat, sc->sc_rx_maps[i]); } fail_4: for (i = 0; i < TXDESCS; i++) { - if (sc->sc_xmtmap[i] != NULL) - bus_dmamap_destroy(sc->sc_dmat, sc->sc_xmtmap[i]); + if (sc->sc_tx_maps[i] != NULL) + bus_dmamap_destroy(sc->sc_dmat, sc->sc_tx_maps[i]); } } @@ -375,20 +374,23 @@ qeinit(struct qe_softc *sc) QE_RCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_CSR) & ~QE_RESET); QE_WCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_VECTOR, sc->sc_intvec); - sc->sc_nexttx = sc->sc_inq = sc->sc_lastack = 0; /* * Release and init transmit descriptors. 
*/ for (i = 0; i < TXDESCS; i++) { - if (sc->sc_txmbuf[i]) { - bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[i]); - m_freem(sc->sc_txmbuf[i]); - sc->sc_txmbuf[i] = 0; + struct mbuf *m = sc->sc_tx_mbuf[i]; + if (m != NULL) { + bus_dmamap_t map = sc->sc_tx_maps[i]; + bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_dmat, map); + m_freem(m); + sc->sc_tx_mbuf[i] = NULL; } qc->qc_xmit[i].qe_addr_hi = 0; /* Clear valid bit */ qc->qc_xmit[i].qe_status1 = qc->qc_xmit[i].qe_flag = QE_NOTYET; } - + sc->sc_tx_prod = sc->sc_tx_cons = 0; /* * Init receive descriptors. @@ -419,6 +421,24 @@ qeinit(struct qe_softc *sc) } +#if 0 +static inline int +qe_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map, struct mbuf *m) +{ + int error; + + error = bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT); + if (error != EFBIG) + return (error); + + error = m_defrag(m, M_DONTWAIT); + if (error != 0) + return (error); + + return (bus_dmamap_load_mbuf(dmat, map, m, BUS_DMA_NOWAIT)); +} +#endif + /* * Start output on interface. */ @@ -428,41 +448,83 @@ qestart(struct ifnet *ifp) struct qe_softc *sc = ifp->if_softc; struct qe_cdata *qc = sc->sc_qedata; paddr_t buffer; - struct mbuf *m, *m0; - int idx, len, s, i, totlen, error; + struct mbuf *m; + int len, s, i; short orword, csr; + unsigned int idx, free; + bus_dmamap_t map; - if ((QE_RCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_CSR) & QE_RCV_ENABLE) == 0) + if (!ISSET(ifp->if_flags, IFF_RUNNING)) return; - s = splnet(); - while (sc->sc_inq < (TXDESCS - 1)) { + idx = sc->sc_tx_prod; + free = sc->sc_tx_cons; + if (free <= idx) + free += TXDESCS; + free -= idx; - if (sc->sc_setup) { - qe_setup(sc); - continue; + s = splnet(); + for (;;) { + if (free <= TXSEGS) { + ifp->if_timer = 5; /* If transmit logic dies */ + ifp->if_flags |= IFF_OACTIVE; + break; } - idx = sc->sc_nexttx; - IFQ_POLL(&ifp->if_snd, m); + + IFQ_DEQUEUE(&ifp->if_snd, m); if (m == NULL) - goto out; - /* - * Count number of mbufs in chain. 
- * Always do DMA directly from mbufs, therefore the transmit - ring is really big. - */ - for (m0 = m, i = 0; m0; m0 = m0->m_next) - if (m0->m_len) - i++; - if (i >= TXDESCS) - panic("qestart"); + break; - if ((i + sc->sc_inq) >= (TXDESCS - 1)) { - ifp->if_flags |= IFF_OACTIVE; - goto out; + if (m->m_pkthdr.len < ETHER_MIN_LEN) { + unsigned int pad = ETHER_MIN_LEN - m->m_pkthdr.len; + struct mbuf *n; + + /* + * This is a stupidly heavy hammer + */ + + n = m_gethdr(M_NOWAIT, MT_DATA); + if (n == NULL) { + m_freem(m); + ifp->if_oerrors++; + continue; + } + if (ETHER_MIN_LEN > MHLEN) { + MCLGETI(n, M_WAIT, NULL, ETHER_MIN_LEN); + if (!ISSET(n->m_flags, M_EXT)) { + m_freem(n); + m_freem(m); + ifp->if_oerrors++; + continue; + } + } + + m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t)); + memset(mtod(n, caddr_t) + m->m_pkthdr.len, 0, pad); + n->m_pkthdr.len = n->m_len = ETHER_MIN_LEN; + m_freem(m); + m = n; + } else if (m->m_next != NULL && m_defrag(m, M_DONTWAIT) != 0) { + /* vax doesn't have bus_dmamap_load_mbuf */ + m_freem(m); + ifp->if_oerrors++; + continue; } - IFQ_DEQUEUE(&ifp->if_snd, m); + map = sc->sc_tx_maps[idx]; +#if 0 + if (qe_load_mbuf(sc->sc_dmat, map, m) != 0) { + m_freem(m); + ifp->if_oerrors++; + continue; + } +#else + if (bus_dmamap_load(sc->sc_dmat, map, + m->m_data, m->m_len, NULL, BUS_DMA_NOWAIT)) { + m_freem(m); + ifp->if_oerrors++; + continue; + } +#endif #if NBPFILTER > 0 if (ifp->if_bpf) 
- */ - totlen = 0; - for (m0 = m; m0; m0 = m0->m_next) { - error = bus_dmamap_load(sc->sc_dmat, sc->sc_xmtmap[idx], - mtod(m0, void *), m0->m_len, 0, 0); - buffer = sc->sc_xmtmap[idx]->dm_segs[0].ds_addr; - len = m0->m_len; - if (len == 0) - continue; + bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, + BUS_DMASYNC_PREWRITE); + sc->sc_tx_mbuf[idx] = m; + + for (i = 0; i < map->dm_nsegs; i++) { + buffer = map->dm_segs[i].ds_addr; + len = map->dm_segs[i].ds_len; - totlen += len; /* Word alignment calc */ orword = 0; - if (totlen == m->m_pkthdr.len) { - if (totlen < ETHER_MIN_LEN) - len += (ETHER_MIN_LEN - totlen); + if (i == map->dm_nsegs - 1) orword |= QE_EOMSG; - sc->sc_txmbuf[idx] = m; - } if ((buffer & 1) || (len & 1)) len += 2; if (buffer & 1) orword |= QE_ODDBEGIN; if ((buffer + len) & 1) orword |= QE_ODDEND; + qc->qc_xmit[idx].qe_buf_len = -(len/2); qc->qc_xmit[idx].qe_addr_lo = LOWORD(buffer); qc->qc_xmit[idx].qe_addr_hi = HIWORD(buffer); qc->qc_xmit[idx].qe_flag = qc->qc_xmit[idx].qe_status1 = QE_NOTYET; qc->qc_xmit[idx].qe_addr_hi |= (QE_VALID | orword); + if (++idx == TXDESCS) idx = 0; - sc->sc_inq++; } -#ifdef DIAGNOSTIC - if (totlen != m->m_pkthdr.len) - panic("qestart: len fault"); -#endif + free -= map->dm_nsegs; + } + if (sc->sc_tx_prod != idx) { /* * Kick off the transmit logic, if it is stopped. 
*/ csr = QE_RCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_CSR); if (csr & QE_XL_INVALID) { QE_WCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_XMTL, - LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx])); + LOWORD(&sc->sc_pqedata->qc_xmit[sc->sc_tx_prod])); QE_WCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_XMTH, - HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_nexttx])); + HIWORD(&sc->sc_pqedata->qc_xmit[sc->sc_tx_prod])); } - sc->sc_nexttx = idx; - } - if (sc->sc_inq == (TXDESCS - 1)) - ifp->if_flags |= IFF_OACTIVE; -out: if (sc->sc_inq) - ifp->if_timer = 5; /* If transmit logic dies */ + sc->sc_tx_prod = idx; + } splx(s); } @@ -554,7 +602,7 @@ qeintr(void *arg) status1 = qc->qc_recv[sc->sc_nextrx].qe_status1; status2 = qc->qc_recv[sc->sc_nextrx].qe_status2; - m = sc->sc_rxmbuf[sc->sc_nextrx]; + m = sc->sc_rx_mbuf[sc->sc_nextrx]; len = ((status1 & QE_RBL_HI) | (status2 & QE_RBL_LO)) + 60; qe_add_rxbuf(sc, sc->sc_nextrx); @@ -582,26 +630,39 @@ qeintr(void *arg) } if (csr & (QE_XMIT_INT|QE_XL_INVALID)) { - while (qc->qc_xmit[sc->sc_lastack].qe_status1 != QE_NOTYET) { - int idx = sc->sc_lastack; - - sc->sc_inq--; - if (++sc->sc_lastack == TXDESCS) - sc->sc_lastack = 0; - - /* XXX collect statistics */ - qc->qc_xmit[idx].qe_addr_hi &= ~QE_VALID; - qc->qc_xmit[idx].qe_status1 = - qc->qc_xmit[idx].qe_flag = QE_NOTYET; - - if (qc->qc_xmit[idx].qe_addr_hi & QE_SETUP) - continue; - bus_dmamap_unload(sc->sc_dmat, sc->sc_xmtmap[idx]); - if (sc->sc_txmbuf[idx]) { - m_freem(sc->sc_txmbuf[idx]); - sc->sc_txmbuf[idx] = 0; + unsigned int cons = sc->sc_tx_cons; + unsigned int prod = sc->sc_tx_prod; + unsigned int idx; + + while (cons != prod) { + bus_dmamap_t map; + struct mbuf *m = sc->sc_tx_mbuf[cons]; + if (m == NULL) { + printf("%s: missing mbuf in txeof\n", + ifp->if_xname); + break; } + map = sc->sc_tx_maps[cons]; + + idx = cons + map->dm_nsegs - 1; + if (idx >= TXDESCS) + idx -= TXDESCS; + + if (qc->qc_xmit[idx].qe_status1 == QE_NOTYET) + break; + + bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, + 
BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->sc_dmat, map); + m_freem(m); + sc->sc_tx_mbuf[cons] = NULL; + + cons = idx + 1; + if (cons == TXDESCS) + cons = 0; } + sc->sc_tx_cons = cons; + ifp->if_timer = 0; ifp->if_flags &= ~IFF_OACTIVE; qestart(ifp); /* Put in more in queue */ @@ -704,25 +765,25 @@ qe_add_rxbuf(struct qe_softc *sc, int i) return (ENOBUFS); } - if (sc->sc_rxmbuf[i] != NULL) - bus_dmamap_unload(sc->sc_dmat, sc->sc_rcvmap[i]); + if (sc->sc_rx_mbuf[i] != NULL) + bus_dmamap_unload(sc->sc_dmat, sc->sc_rx_maps[i]); - error = bus_dmamap_load(sc->sc_dmat, sc->sc_rcvmap[i], + error = bus_dmamap_load(sc->sc_dmat, sc->sc_rx_maps[i], m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT); if (error) panic("%s: can't load rx DMA map %d, error = %d", sc->sc_dev.dv_xname, i, error); - sc->sc_rxmbuf[i] = m; + sc->sc_rx_mbuf[i] = m; - bus_dmamap_sync(sc->sc_dmat, sc->sc_rcvmap[i], 0, - sc->sc_rcvmap[i]->dm_mapsize, BUS_DMASYNC_PREREAD); + bus_dmamap_sync(sc->sc_dmat, sc->sc_rx_maps[i], 0, + sc->sc_rx_maps[i]->dm_mapsize, BUS_DMASYNC_PREREAD); /* * We know that the mbuf cluster is page aligned. Also, be sure * that the IP header will be longword aligned. */ m->m_data += 2; - addr = sc->sc_rcvmap[i]->dm_segs[0].ds_addr + 2; + addr = sc->sc_rx_maps[i]->dm_segs[0].ds_addr + 2; rp = &sc->sc_qedata->qc_recv[i]; rp->qe_flag = rp->qe_status1 = QE_NOTYET; rp->qe_addr_lo = LOWORD(addr); @@ -735,7 +796,7 @@ /* * Create a setup packet and put in queue for sending. 
*/ -void +int qe_setup(struct qe_softc *sc) { struct ether_multi *enm; @@ -745,20 +806,31 @@ qe_setup(struct qe_softc *sc) struct arpcom *ac = &sc->sc_ac; u_int8_t *enaddr = ac->ac_enaddr; int i, j, k, idx, s; + bus_dmamap_t map; + caddr_t setup; - s = splnet(); - if (sc->sc_inq == (TXDESCS - 1)) { - sc->sc_setup = 1; - splx(s); - return; + struct mbuf *m; + + m = m_gethdr(M_NOWAIT, MT_DATA); + if (m == NULL) + return (-1); + if (QE_SETUPLEN > MHLEN) { + MCLGETI(m, M_WAIT, NULL, QE_SETUPLEN); + if (!ISSET(m->m_flags, M_EXT)) + goto free; } - sc->sc_setup = 0; + + m->m_pkthdr.len = m->m_len = QE_SETUPLEN; + setup = mtod(m, caddr_t); + + s = splnet(); + /* * Init the setup packet with valid info. */ - memset(qc->qc_setup, 0xff, sizeof(qc->qc_setup)); /* Broadcast */ + memset(setup, 0xff, m->m_len); /* Broadcast */ for (i = 0; i < ETHER_ADDR_LEN; i++) - qc->qc_setup[i * 8 + 1] = enaddr[i]; /* Own address */ + setup[i * 8 + 1] = enaddr[i]; /* Own address */ /* * Multicast handling. The DEQNA can handle up to 12 direct @@ -775,7 +847,7 @@ qe_setup(struct qe_softc *sc) ETHER_FIRST_MULTI(step, ac, enm); while (enm != NULL) { for (i = 0; i < ETHER_ADDR_LEN; i++) - qc->qc_setup[i * 8 + j + k] = enm->enm_addrlo[i]; + setup[i * 8 + j + k] = enm->enm_addrlo[i]; j++; if (j == 8) { j = 1; k += 64; @@ -788,7 +860,23 @@ qe_setup(struct qe_softc *sc) } setit: - idx = sc->sc_nexttx; + /* + * There should always be space on the ring cos qe_setup leaves a gap. 
+ */ + idx = sc->sc_tx_prod; + map = sc->sc_tx_maps[idx]; + +#if 0 + if (bus_dmamap_load_mbuf(sc->sc_dmat, map, m, BUS_DMA_NOWAIT) != 0) + goto fail; +#else + if (bus_dmamap_load(sc->sc_dmat, map, + m->m_data, m->m_len, NULL, BUS_DMA_NOWAIT)) + goto fail; +#endif + bus_dmamap_sync(sc->sc_dmat, map, 0, map->dm_mapsize, + BUS_DMASYNC_PREWRITE); + qc->qc_xmit[idx].qe_buf_len = -64; /* @@ -803,12 +891,14 @@ setit: if (ifp->if_flags & IFF_PROMISC) qc->qc_xmit[idx].qe_buf_len = -65; - qc->qc_xmit[idx].qe_addr_lo = LOWORD(sc->sc_pqedata->qc_setup); + qc->qc_xmit[idx].qe_addr_lo = LOWORD(map->dm_segs[0].ds_addr); qc->qc_xmit[idx].qe_addr_hi = - HIWORD(sc->sc_pqedata->qc_setup) | QE_SETUP | QE_EOMSG; + HIWORD(map->dm_segs[0].ds_addr) | QE_SETUP | QE_EOMSG; qc->qc_xmit[idx].qe_status1 = qc->qc_xmit[idx].qe_flag = QE_NOTYET; qc->qc_xmit[idx].qe_addr_hi |= QE_VALID; + sc->sc_tx_mbuf[idx] = m; + if (QE_RCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_CSR) & QE_XL_INVALID) { QE_WCSR(sc->sc_iot, sc->sc_ioh, QE_CSR_XMTL, LOWORD(&sc->sc_pqedata->qc_xmit[idx])); @@ -816,10 +906,17 @@ setit: HIWORD(&sc->sc_pqedata->qc_xmit[idx])); } - sc->sc_inq++; - if (++sc->sc_nexttx == TXDESCS) - sc->sc_nexttx = 0; + if (++idx == TXDESCS) + idx = 0; + sc->sc_tx_prod = idx; + splx(s); + return (0); + +fail: splx(s); +free: + m_freem(m); + return (-1); } /* @@ -830,7 +927,7 @@ qetimeout(struct ifnet *ifp) { struct qe_softc *sc = ifp->if_softc; - if (sc->sc_inq == 0) + if (!ISSET(ifp->if_flags, IFF_OACTIVE)) return; printf("%s: xmit logic died, resetting...\n", sc->sc_dev.dv_xname);