Index: re.c
===================================================================
RCS file: /cvs/src/sys/dev/ic/re.c,v
retrieving revision 1.193
diff -u -p -r1.193 re.c
--- re.c	10 Aug 2016 14:27:17 -0000	1.193
+++ re.c	4 Nov 2016 06:50:13 -0000
@@ -1415,22 +1415,25 @@ re_txeof(struct rl_softc *sc)
 	struct ifnet	*ifp;
 	struct rl_txq	*txq;
 	uint32_t	txstat;
-	int		idx, descidx, tx_free, freed = 0;
+	int		descidx, tx_free, freed = 0;
+	int		considx, prodidx;
 
 	ifp = &sc->sc_arpcom.ac_if;
 
-	for (idx = sc->rl_ldata.rl_txq_considx;
-	    idx != sc->rl_ldata.rl_txq_prodidx; idx = RL_NEXT_TXQ(sc, idx)) {
-		txq = &sc->rl_ldata.rl_txq[idx];
+	considx = sc->rl_ldata.rl_txq_considx;
+	prodidx = sc->rl_ldata.rl_txq_prodidx;
+
+	bus_dmamap_sync(sc->sc_dmat,
+	    sc->rl_ldata.rl_tx_list_map, 0,
+	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
+	    BUS_DMASYNC_POSTREAD);
+
+	while (considx != prodidx) {
+		txq = &sc->rl_ldata.rl_txq[considx];
 		descidx = txq->txq_descidx;
 
-		RL_TXDESCSYNC(sc, descidx,
-		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-		txstat =
-		    letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
-		RL_TXDESCSYNC(sc, descidx, BUS_DMASYNC_PREREAD);
-		KASSERT((txstat & RL_TDESC_CMD_EOF) != 0);
-		if (txstat & RL_TDESC_CMD_OWN)
+		txstat = letoh32(sc->rl_ldata.rl_tx_list[descidx].rl_cmdstat);
+		if (ISSET(txstat, RL_TDESC_CMD_OWN))
 			break;
 
 		freed += txq->txq_nsegs;
@@ -1446,15 +1449,22 @@ re_txeof(struct rl_softc *sc)
 			ifp->if_oerrors++;
 		else
 			ifp->if_opackets++;
+
+		considx = RL_NEXT_TXQ(sc, considx);
 	}
 
+	bus_dmamap_sync(sc->sc_dmat,
+	    sc->rl_ldata.rl_tx_list_map, 0,
+	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
+	    BUS_DMASYNC_PREREAD);
+
 	if (freed == 0)
 		return (0);
 
 	tx_free = atomic_add_int_nv(&sc->rl_ldata.rl_tx_free, freed);
 	KASSERT(tx_free <= sc->rl_ldata.rl_tx_desc_cnt);
 
-	sc->rl_ldata.rl_txq_considx = idx;
+	sc->rl_ldata.rl_txq_considx = considx;
 
 	/*
 	 * Some chips will ignore a second TX request issued while an
@@ -1578,7 +1588,7 @@ re_encap(struct rl_softc *sc, struct mbu
 {
 	bus_dmamap_t	map;
 	struct mbuf	*mp, mh;
-	int		error, seg, nsegs, uidx, startidx, curidx, lastidx, pad;
+	int		error, seg, nsegs, startidx, curidx, lastidx, pad;
 	int		off;
 	struct ip	*ip;
 	struct rl_desc	*d;
@@ -1709,68 +1719,42 @@ re_encap(struct rl_softc *sc, struct mbu
 	 * set this descriptor later when it start transmission or
 	 * reception.)
 	 */
+
 	curidx = startidx = sc->rl_ldata.rl_tx_nextfree;
-	lastidx = -1;
-	for (seg = 0; seg < map->dm_nsegs;
-	    seg++, curidx = RL_NEXT_TX_DESC(sc, curidx)) {
+	cmdstat = RL_TDESC_CMD_SOF;	/* set the first frame up as SOF */
+
+	for (seg = 0; seg < map->dm_nsegs; seg++) {
 		d = &sc->rl_ldata.rl_tx_list[curidx];
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
-		cmdstat = letoh32(d->rl_cmdstat);
-		RL_TXDESCSYNC(sc, curidx, BUS_DMASYNC_PREREAD);
-		if (cmdstat & RL_TDESC_STAT_OWN) {
-			printf("%s: tried to map busy TX descriptor\n",
-			    sc->sc_dev.dv_xname);
-			for (; seg > 0; seg --) {
-				uidx = (curidx + sc->rl_ldata.rl_tx_desc_cnt -
-				    seg) % sc->rl_ldata.rl_tx_desc_cnt;
-				sc->rl_ldata.rl_tx_list[uidx].rl_cmdstat = 0;
-				RL_TXDESCSYNC(sc, uidx,
-				    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
-			}
-			error = EBUSY;
-			goto fail_unload;
-		}
 
 		d->rl_vlanctl = htole32(vlanctl);
 		re_set_bufaddr(d, map->dm_segs[seg].ds_addr);
-		cmdstat = csum_flags | map->dm_segs[seg].ds_len;
-		if (seg == 0)
-			cmdstat |= RL_TDESC_CMD_SOF;
-		else
-			cmdstat |= RL_TDESC_CMD_OWN;
+		cmdstat |= csum_flags | map->dm_segs[seg].ds_len;
 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
 			cmdstat |= RL_TDESC_CMD_EOR;
-		if (seg == nsegs - 1) {
-			cmdstat |= RL_TDESC_CMD_EOF;
-			lastidx = curidx;
-		}
+
 		d->rl_cmdstat = htole32(cmdstat);
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+		cmdstat = RL_TDESC_CMD_OWN;
+		lastidx = curidx;
+		curidx = RL_NEXT_TX_DESC(sc, curidx);
 	}
 
 	if (pad) {
 		d = &sc->rl_ldata.rl_tx_list[curidx];
+
 		d->rl_vlanctl = htole32(vlanctl);
 		re_set_bufaddr(d, RL_TXPADDADDR(sc));
-		cmdstat = csum_flags |
-		    RL_TDESC_CMD_OWN | RL_TDESC_CMD_EOF |
+		cmdstat = csum_flags | RL_TDESC_CMD_OWN |
 		    (RL_IP4CSUMTX_PADLEN + 1 - m->m_pkthdr.len);
 		if (curidx == sc->rl_ldata.rl_tx_desc_cnt - 1)
 			cmdstat |= RL_TDESC_CMD_EOR;
+
 		d->rl_cmdstat = htole32(cmdstat);
-		RL_TXDESCSYNC(sc, curidx,
-		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
 		lastidx = curidx;
 		curidx = RL_NEXT_TX_DESC(sc, curidx);
 	}
-	KASSERT(lastidx != -1);
-
-	/* Transfer ownership of packet to the chip. */
-	sc->rl_ldata.rl_tx_list[startidx].rl_cmdstat |=
-	    htole32(RL_TDESC_CMD_OWN);
-	RL_TXDESCSYNC(sc, startidx, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
+
+	d->rl_cmdstat |= htole32(RL_TDESC_CMD_EOF);
 
 	/* update info of TX queue and descriptors */
 	txq->txq_mbuf = m;
@@ -1799,6 +1783,7 @@ re_start(struct ifnet *ifp)
 	struct rl_softc	*sc = ifp->if_softc;
 	struct mbuf	*m;
 	int		idx, used = 0, txq_free, error;
+	int		first, start, own = 0;
 
 	if (!ISSET(sc->rl_flags, RL_FLAG_LINK)) {
 		IFQ_PURGE(&ifp->if_snd);
@@ -1811,6 +1796,14 @@ re_start(struct ifnet *ifp)
 	txq_free += RL_TX_QLEN;
 	txq_free -= idx;
 
+	bus_dmamap_sync(sc->sc_dmat,
+	    sc->rl_ldata.rl_tx_list_map, 0,
+	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
+	    BUS_DMASYNC_POSTWRITE);
+
+	first = start = sc->rl_ldata.rl_tx_nextfree;
+	own = 0;	/* dont set the ownership on the first frame */
+
 	for (;;) {
 		if (txq_free <= 1) {
 			ifq_set_oactive(&ifp->if_snd);
@@ -1839,13 +1832,29 @@ re_start(struct ifnet *ifp)
 		if (ifp->if_bpf)
 			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
 #endif
+
+		sc->rl_ldata.rl_tx_list[start].rl_cmdstat |= own;
+
 		idx = RL_NEXT_TXQ(sc, idx);
 		txq_free--;
+
+		start = sc->rl_ldata.rl_tx_nextfree;
+		own = htole32(RL_TDESC_CMD_OWN);	/* set ownership */
+	}
+
+	if (used > 0) {
+		sc->rl_ldata.rl_tx_list[first].rl_cmdstat |=
+		    htole32(RL_TDESC_CMD_OWN);
 	}
 
+	bus_dmamap_sync(sc->sc_dmat,
+	    sc->rl_ldata.rl_tx_list_map, 0,
+	    sc->rl_ldata.rl_tx_list_map->dm_mapsize,
+	    BUS_DMASYNC_PREWRITE);
+
 	if (used == 0)
 		return;
-	
+
 	ifp->if_timer = 5;
 	atomic_sub_int(&sc->rl_ldata.rl_tx_free, used);
 	KASSERT(sc->rl_ldata.rl_tx_free >= 0);
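For anyone who wants the ordering in isolation: below is a minimal, stand-alone sketch of the publish order the diff moves to, i.e. fill every descriptor of a batch first and only hand the batch to the hardware afterwards by setting the ownership bit on the first descriptor. The struct desc, DESC_OWN, barrier() and post_batch() names are simplified stand-ins invented for illustration, not the re(4) structures; the driver itself works on rl_desc entries and uses bus_dmamap_sync(9) over the whole TX list, as in the hunks above.

/*
 * Sketch only: publish a chain of descriptors by writing the
 * ownership bit on the first one last.
 */
#include <stdint.h>
#include <stddef.h>

#define DESC_OWN	0x80000000U	/* device owns the descriptor */

struct desc {
	uint32_t	cmd;		/* command/status word */
	uint32_t	len;		/* buffer length */
	uint64_t	addr;		/* buffer physical address */
};

/* stand-in for bus_dmamap_sync(): order descriptor writes vs. the device */
static void
barrier(void)
{
	__sync_synchronize();
}

/*
 * Queue nseg buffer segments starting at ring[first].  Every descriptor
 * except the first is marked OWN as it is written; the first descriptor
 * is published last so the device never sees a half-built chain.
 */
static void
post_batch(struct desc *ring, size_t nring, size_t first,
    const uint64_t *addrs, const uint32_t *lens, size_t nseg)
{
	size_t i, idx;

	for (i = 0; i < nseg; i++) {
		idx = (first + i) % nring;
		ring[idx].addr = addrs[i];
		ring[idx].len = lens[i];
		ring[idx].cmd = (i == 0) ? 0 : DESC_OWN;
	}

	barrier();			/* descriptors visible before OWN */
	ring[first].cmd |= DESC_OWN;	/* hand the whole batch over */
	barrier();			/* OWN visible before poking the chip */
}

The reason for deferring OWN on the first descriptor is that the device may start DMA as soon as it observes the ownership bit; publishing it last, with a sync between the bulk writes and that final store, guarantees the chip can never read a partially written chain.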