Index: bus_dma.c =================================================================== RCS file: /cvs/src/sys/arch/amd64/amd64/bus_dma.c,v retrieving revision 1.48 diff -u -p -r1.48 bus_dma.c --- bus_dma.c 27 Jan 2015 05:10:30 -0000 1.48 +++ bus_dma.c 27 Jan 2015 05:13:19 -0000 @@ -97,8 +97,8 @@ #include -int _bus_dmamap_load_buffer(bus_dma_tag_t, bus_dmamap_t, void *, bus_size_t, - struct proc *, int, paddr_t *, int *, int); +int _bus_dmamap_load_paddr(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t); +int _bus_dmamap_load_vaddr(bus_dma_tag_t, bus_dmamap_t, void *, size_t, pmap_t); /* * Common function for DMA map creation. May be called by bus-specific @@ -161,8 +161,7 @@ int _bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags) { - bus_addr_t lastaddr = 0; - int seg, error; + int error; /* * Make sure that on error condition we return "no valid mappings". @@ -173,14 +172,17 @@ _bus_dmamap_load(bus_dma_tag_t t, bus_dm if (buflen > map->_dm_size) return (EINVAL); - seg = 0; - error = _bus_dmamap_load_buffer(t, map, buf, buflen, p, flags, - &lastaddr, &seg, 1); - if (error == 0) { - map->dm_mapsize = buflen; - map->dm_nsegs = seg + 1; + error = _bus_dmamap_load_vaddr(t, map, buf, buflen, + p == NULL ? pmap_kernel() : p->p_vmspace->vm_map.pmap); + if (error != 0) { + map->dm_mapsize = 0; + map->dm_nsegs = 0; + return (error); } - return (error); + + map->dm_nsegs++; + + return (0); } /* @@ -190,9 +192,8 @@ int _bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0, int flags) { - paddr_t lastaddr = 0; - int seg, error, first; struct mbuf *m; + int error; /* * Make sure that on error condition we return "no valid mappings". 
@@ -208,21 +209,22 @@ _bus_dmamap_load_mbuf(bus_dma_tag_t t, b if (m0->m_pkthdr.len > map->_dm_size) return (EINVAL); - first = 1; - seg = 0; - error = 0; - for (m = m0; m != NULL && error == 0; m = m->m_next) { + for (m = m0; m != NULL; m = m->m_next) { if (m->m_len == 0) continue; - error = _bus_dmamap_load_buffer(t, map, m->m_data, m->m_len, - NULL, flags, &lastaddr, &seg, first); - first = 0; - } - if (error == 0) { - map->dm_mapsize = m0->m_pkthdr.len; - map->dm_nsegs = seg + 1; + + error = _bus_dmamap_load_vaddr(t, map, m->m_data, m->m_len, + pmap_kernel()); + if (error != 0) { + map->dm_mapsize = 0; + map->dm_nsegs = 0; + return (error); + } } - return (error); + + map->dm_nsegs++; + + return (0); } /* @@ -232,12 +234,11 @@ int _bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags) { - paddr_t lastaddr = 0; - int seg, i, error, first; - bus_size_t minlen, resid; - struct proc *p = NULL; - struct iovec *iov; - caddr_t addr; + int i, error; + pmap_t pmap = pmap_kernel(); + struct iovec * const iov = uio->uio_iov; + bus_size_t resid = uio->uio_resid; + bus_size_t len; /* * Make sure that on error condition we return "no valid mappings". 
@@ -245,38 +246,30 @@ _bus_dmamap_load_uio(bus_dma_tag_t t, bu
 	map->dm_mapsize = 0;
 	map->dm_nsegs = 0;
 
-	resid = uio->uio_resid;
-	iov = uio->uio_iov;
+	if (resid > map->_dm_size)
+		return (EINVAL);
 
-	if (uio->uio_segflg == UIO_USERSPACE) {
-		p = uio->uio_procp;
-#ifdef DIAGNOSTIC
-		if (p == NULL)
-			panic("_bus_dmamap_load_uio: USERSPACE but no proc");
-#endif
-	}
+	if (uio->uio_segflg == UIO_USERSPACE)
+		pmap = uio->uio_procp->p_vmspace->vm_map.pmap;
+
+	for (i = 0; i < uio->uio_iovcnt; i++) {
+		len = MIN(resid, iov[i].iov_len);
+		error = _bus_dmamap_load_vaddr(t, map,
+		    iov[i].iov_base, len, pmap);
+		if (error != 0) {
+			map->dm_mapsize = 0;
+			map->dm_nsegs = 0;
+			return (error);
+		}
 
-	first = 1;
-	seg = 0;
-	error = 0;
-	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
-		/*
-		 * Now at the first iovec to load.  Load each iovec
-		 * until we have exhausted the residual count.
-		 */
-		minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
-		addr = (caddr_t)iov[i].iov_base;
-
-		error = _bus_dmamap_load_buffer(t, map, addr, minlen,
-		    p, flags, &lastaddr, &seg, first);
-		first = 0;
-
-		resid -= minlen;
-	}
-	if (error == 0) {
-		map->dm_mapsize = uio->uio_resid;
-		map->dm_nsegs = seg + 1;
+		resid -= len;
+		if (resid == 0)
+			break;
 	}
-	return (error);
+	/* check resid for leftovers? */
+
+	map->dm_nsegs++;
+
+	return (0);
 }
 
@@ -410,7 +403,7 @@ _bus_dmamem_alloc(bus_dma_tag_t t, bus_s
 	 * memory under the 4gig boundary.
 	 */
 	return (_bus_dmamem_alloc_range(t, size, alignment, boundary,
-	    segs, nsegs, rsegs, flags, (bus_addr_t)0, (bus_addr_t)0xffffffff));
+	    segs, nsegs, rsegs, flags, (paddr_t)0, (paddr_t)0xffffffff));
 }
 
 /*
@@ -550,97 +543,102 @@ _bus_dmamem_mmap(bus_dma_tag_t t, bus_dm
 /**********************************************************************
  * DMA utility functions
  **********************************************************************/
-/*
- * Utility function to load a linear buffer.  lastaddrp holds state
- * between invocations (for multiple-buffer loads).
segp contains - * the starting segment on entrance, and the ending segment on exit. - * first indicates if this is the first invocation of this function. - */ + int -_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf, - bus_size_t buflen, struct proc *p, int flags, paddr_t *lastaddrp, int *segp, - int first) -{ - bus_size_t sgsize; - bus_addr_t curaddr, lastaddr, baddr, bmask; - vaddr_t vaddr = (vaddr_t)buf; - int seg; - pmap_t pmap; - - if (p != NULL) - pmap = p->p_vmspace->vm_map.pmap; - else - pmap = pmap_kernel(); +_bus_dmamap_load_paddr(bus_dma_tag_t t, bus_dmamap_t map, + bus_addr_t dvaddr, bus_size_t size) +{ + bus_dma_segment_t * const segs = map->dm_segs; + bus_addr_t bmask = ~(map->_dm_boundary - 1); - lastaddr = *lastaddrp; - bmask = ~(map->_dm_boundary - 1); + int nseg = map->dm_nsegs; + bus_size_t len = size; - for (seg = *segp; buflen > 0 ; ) { - /* - * Get the physical address for this segment. - */ - pmap_extract(pmap, vaddr, (paddr_t *)&curaddr); - - if (curaddr > dma_constraint.ucr_high) - panic("Non dma-reachable buffer at curaddr %#lx(raw)", - curaddr); - - /* - * Compute the segment size, and adjust counts. - */ - sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET); - if (buflen < sgsize) - sgsize = buflen; - - /* - * Make sure we don't cross any boundaries. - */ + if (dvaddr > dma_constraint.ucr_high) + panic("Non dma-reachable buffer at %#lx(raw)", dvaddr); + + if (map->dm_mapsize == 0) + ; /* First segment */ + else if (segs[nseg].ds_addr + segs[nseg].ds_len == dvaddr) { + /* Coalesce */ + dvaddr = segs[nseg].ds_addr; + len += segs[nseg].ds_len; + } else { + /* Next segment */ + if (++nseg >= map->_dm_segcnt) + return (EFBIG); + } + + for (;;) { + bus_size_t sgsize = len; + + /* Make sure we don't cross any boundaries. 
*/
 		if (map->_dm_boundary > 0) {
-			baddr = (curaddr + map->_dm_boundary) & bmask;
-			if (sgsize > (baddr - curaddr))
-				sgsize = (baddr - curaddr);
-		}
+			bus_addr_t baddr;	/* next boundary address */
 
-		/*
-		 * Insert chunk into a segment, coalescing with
-		 * previous segment if possible.
-		 */
-		if (first) {
-			map->dm_segs[seg].ds_addr = curaddr;
-			map->dm_segs[seg].ds_len = sgsize;
-			first = 0;
-		} else {
-			if (curaddr == lastaddr &&
-			    (map->dm_segs[seg].ds_len + sgsize) <=
-			    map->_dm_maxsegsz &&
-			    (map->_dm_boundary == 0 ||
-			    (map->dm_segs[seg].ds_addr & bmask) ==
-			    (curaddr & bmask)))
-				map->dm_segs[seg].ds_len += sgsize;
-			else {
-				if (++seg >= map->_dm_segcnt)
-					break;
-				map->dm_segs[seg].ds_addr = curaddr;
-				map->dm_segs[seg].ds_len = sgsize;
-			}
+			baddr = (dvaddr + map->_dm_boundary) & bmask;
+			if (sgsize > (baddr - dvaddr))
+				sgsize = (baddr - dvaddr);
 		}
 
-		lastaddr = curaddr + sgsize;
-		vaddr += sgsize;
-		buflen -= sgsize;
+		/* Make sure we don't exceed the max segment size. */
+		if (sgsize > map->_dm_maxsegsz)
+			sgsize = map->_dm_maxsegsz;
+
+		/* Insert chunk into a segment. */
+		segs[nseg].ds_addr = dvaddr;
+		segs[nseg].ds_len = sgsize;
+
+		len -= sgsize;
+		if (len == 0)
+			break;
+
+		if (++nseg >= map->_dm_segcnt)
+			return (EFBIG);
+
+		dvaddr += sgsize;
 	}
 
-	*segp = seg;
-	*lastaddrp = lastaddr;
+	map->dm_mapsize += size;
+	map->dm_nsegs = nseg;
 
-	/*
-	 * Did we fit?
-	 */
-	if (buflen != 0)
-		return (EFBIG);		/* XXX better return value here?
*/ return (0); } +int +_bus_dmamap_load_vaddr(bus_dma_tag_t t, bus_dmamap_t map, + void *buf, size_t size, pmap_t pmap) +{ + vaddr_t vaddr; + paddr_t paddr; + vaddr_t next, end; + int error; + + vaddr = (vaddr_t)buf; + end = vaddr + size; + + if (pmap == pmap_kernel() && + vaddr >= PMAP_DIRECT_BASE && end <= PMAP_DIRECT_END) + paddr = vaddr - PMAP_DIRECT_BASE; + else { + for (next = (vaddr + PAGE_SIZE) & ~PAGE_MASK; + next < end; next += PAGE_SIZE) { + pmap_extract(pmap, vaddr, &paddr); + error = _bus_dmamap_load_paddr(t, map, + paddr, next - vaddr); + if (error != 0) + return (error); + + vaddr = next; + } + + pmap_extract(pmap, vaddr, &paddr); + size = end - vaddr; + } + + return (_bus_dmamap_load_paddr(t, map, paddr, size)); +} + /* * Allocate physical memory from the given physical address range. * Called by DMA-safe memory allocation methods. @@ -648,7 +646,7 @@ _bus_dmamap_load_buffer(bus_dma_tag_t t, int _bus_dmamem_alloc_range(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, - int flags, bus_addr_t low, bus_addr_t high) + int flags, paddr_t low, paddr_t high) { paddr_t curaddr, lastaddr; struct vm_page *m;