Index: dev/iommu.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/iommu.c,v retrieving revision 1.75 diff -u -p -r1.75 iommu.c --- dev/iommu.c 25 May 2017 03:19:39 -0000 1.75 +++ dev/iommu.c 4 Jun 2019 04:40:45 -0000 @@ -87,6 +87,8 @@ void iommu_dvmamap_print_map(bus_dma_tag bus_dmamap_t); int iommu_dvmamap_append_range(bus_dma_tag_t, bus_dmamap_t, paddr_t, bus_size_t, int, bus_size_t); +int iommu_dvmamap_insert(bus_dma_tag_t, bus_dmamap_t, bus_addr_t, + bus_size_t, int, bus_size_t); int64_t iommu_tsb_entry(struct iommu_state *, bus_addr_t); void strbuf_reset(struct strbuf_ctl *); int iommu_iomap_insert_page(struct iommu_map_state *, paddr_t); @@ -100,6 +102,25 @@ void iommu_iomap_clear_pages(struct iomm void _iommu_dvmamap_sync(bus_dma_tag_t, bus_dma_tag_t, bus_dmamap_t, bus_addr_t, bus_size_t, int); +void iommu_hw_enable(struct iommu_state *); + +const struct iommu_hw iommu_hw_default = { + .ihw_enable = iommu_hw_enable, + + .ihw_dvma_pa = IOTTE_PAMASK, + + .ihw_bypass = 0x3fffUL << 50, + .ihw_bypass_nc = 0, + .ihw_bypass_ro = 0, +}; + +void +iommu_hw_enable(struct iommu_state *is) +{ + IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb); + IOMMUREG_WRITE(is, iommu_cr, IOMMUCR_EN | (is->is_tsbsize << 16)); +} + /* * Initiate an STC entry flush. */ @@ -125,7 +146,8 @@ iommu_strbuf_flush(struct strbuf_ctl *sb * - create a private DVMA map. */ void -iommu_init(char *name, struct iommu_state *is, int tsbsize, u_int32_t iovabase) +iommu_init(char *name, const struct iommu_hw *ihw, struct iommu_state *is, + int tsbsize, u_int32_t iovabase) { psize_t size; vaddr_t va; @@ -149,13 +171,9 @@ iommu_init(char *name, struct iommu_stat * be hard-wired, so we read the start and size from the PROM and * just use those values. */ - if (strncmp(name, "pyro", 4) == 0) { - is->is_cr = IOMMUREG_READ(is, iommu_cr); - is->is_cr &= ~IOMMUCR_FIRE_BE; - is->is_cr |= (IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN | - IOMMUCR_FIRE_TE); - } else - is->is_cr = IOMMUCR_EN; + + is->is_hw = ihw; + is->is_tsbsize = tsbsize; if (iovabase == (u_int32_t)-1) { is->is_dvmabase = IOTSB_VSTART(is->is_tsbsize); @@ -237,15 +255,6 @@ iommu_init(char *name, struct iommu_stat mtx_init(&is->is_mtx, IPL_HIGH); /* - * Set the TSB size. The relevant bits were moved to the TSB - * base register in the PCIe host bridges. - */ - if (strncmp(name, "pyro", 4) == 0) - is->is_ptsb |= is->is_tsbsize; - else - is->is_cr |= (is->is_tsbsize << 16); - - /* * Now actually start up the IOMMU. 
*/ iommu_reset(is); @@ -262,10 +271,7 @@ iommu_reset(struct iommu_state *is) { int i; - IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb); - - /* Enable IOMMU */ - IOMMUREG_WRITE(is, iommu_cr, is->is_cr); + (*is->is_hw->ihw_enable)(is); for (i = 0; i < 2; ++i) { struct strbuf_ctl *sb = is->is_sb[i]; @@ -280,7 +286,7 @@ iommu_reset(struct iommu_state *is) printf(", STC%d enabled", i); } - if (is->is_flags & IOMMU_FLUSH_CACHE) + if (ISSET(is->is_hw->ihw_flags, IOMMU_HW_FLUSH_CACHE)) IOMMUREG_WRITE(is, iommu_cache_invalidate, -1ULL); } @@ -433,7 +439,7 @@ iommu_extract(struct iommu_state *is, bu if (dva >= is->is_dvmabase && dva <= is->is_dvmaend) tte = is->is_tsb[IOTSBSLOT(dva, is->is_tsbsize)]; - return (tte & IOTTE_PAMASK); + return (tte & is->is_hw->ihw_dvma_pa); } /* @@ -601,6 +607,7 @@ iommu_dvmamap_create(bus_dma_tag_t t, bu { int ret; bus_dmamap_t map; + struct iommu_state *is = sb->sb_iommu; struct iommu_map_state *ims; BUS_DMA_FIND_PARENT(t, _dmamap_create); @@ -610,6 +617,12 @@ iommu_dvmamap_create(bus_dma_tag_t t, bu if (ret) return (ret); + if (flags & BUS_DMA_64BIT) { + map->_dm_cookie = is; + *dmamap = map; + return (0); + } + ims = iommu_iomap_create(atop(round_page(size))); if (ims == NULL) { @@ -641,8 +654,10 @@ iommu_dvmamap_destroy(bus_dma_tag_t t, b if (map->dm_nsegs) bus_dmamap_unload(t0, map); - if (map->_dm_cookie) - iommu_iomap_destroy(map->_dm_cookie); + if (!ISSET(map->_dm_flags, BUS_DMA_64BIT)) { + if (map->_dm_cookie) + iommu_iomap_destroy(map->_dm_cookie); + } map->_dm_cookie = NULL; BUS_DMA_FIND_PARENT(t, _dmamap_destroy); @@ -667,36 +682,36 @@ iommu_dvmamap_load(bus_dma_tag_t t, bus_ u_long dvmaddr, sgstart, sgend; bus_size_t align, boundary; struct iommu_state *is; - struct iommu_map_state *ims = map->_dm_cookie; + struct iommu_map_state *ims; pmap_t pmap; -#ifdef DIAGNOSTIC - if (ims == NULL) - panic("iommu_dvmamap_load: null map state"); -#endif -#ifdef DEBUG - if (ims->ims_sb == NULL) - panic("iommu_dvmamap_load: null sb"); - if (ims->ims_sb->sb_iommu == NULL) - panic("iommu_dvmamap_load: null iommu"); -#endif /* DEBUG */ - is = ims->ims_sb->sb_iommu; - - if (map->dm_nsegs) { - /* - * Is it still in use? _bus_dmamap_load should have taken care - * of this. - */ -#ifdef DIAGNOSTIC - panic("iommu_dvmamap_load: map still in use"); -#endif - bus_dmamap_unload(t0, map); - } - /* * Make sure that on error condition we return "no valid mappings". */ - map->dm_nsegs = 0; + KASSERTMSG(map->dm_nsegs == 0, "map still in use"); + + if (ISSET(map->_dm_flags, BUS_DMA_64BIT)) { + unsigned long bypass; + int i; + + is = map->_dm_cookie; + bypass = is->is_hw->ihw_bypass; + + /* Bypass translation by the IOMMU. 
*/ + + BUS_DMA_FIND_PARENT(t, _dmamap_load); + err = (*t->_dmamap_load)(t, t0, map, buf, buflen, p, flags); + if (err != 0) + return (err); + + for (i = 0; i < map->dm_nsegs; i++) + map->dm_segs[i].ds_addr |= bypass; + + return (0); + } + + ims = map->_dm_cookie; + is = ims->ims_sb->sb_iommu; if (buflen < 1 || buflen > map->_dm_size) { DPRINTF(IDB_BUSDMA, @@ -876,27 +891,9 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, bus_size_t boundary, align; u_long dvmaddr, sgstart, sgend; struct iommu_state *is; - struct iommu_map_state *ims = map->_dm_cookie; - -#ifdef DIAGNOSTIC - if (ims == NULL) - panic("iommu_dvmamap_load_raw: null map state"); -#endif -#ifdef DEBUG - if (ims->ims_sb == NULL) - panic("iommu_dvmamap_load_raw: null sb"); - if (ims->ims_sb->sb_iommu == NULL) - panic("iommu_dvmamap_load_raw: null iommu"); -#endif /* DEBUG */ - is = ims->ims_sb->sb_iommu; + struct iommu_map_state *ims; - if (map->dm_nsegs) { - /* Already in use?? */ -#ifdef DIAGNOSTIC - panic("iommu_dvmamap_load_raw: map still in use"); -#endif - bus_dmamap_unload(t0, map); - } + KASSERTMSG(map->dm_nsegs == 0, "map still in use"); /* * A boundary presented to bus_dmamem_alloc() takes precedence @@ -905,6 +902,31 @@ iommu_dvmamap_load_raw(bus_dma_tag_t t, if ((boundary = segs[0]._ds_boundary) == 0) boundary = map->_dm_boundary; + if (ISSET(map->_dm_flags, BUS_DMA_64BIT)) { + unsigned long bypass; + + is = map->_dm_cookie; + bypass = is->is_hw->ihw_bypass; + + /* Bypass translation by the IOMMU. */ + for (i = 0; i < nsegs; i++) { + err = iommu_dvmamap_insert(t, map, + bypass | segs[i].ds_addr, segs[i].ds_len, + 0, boundary); + if (err != 0) { + map->dm_nsegs = 0; + return (err); + } + } + + map->dm_mapsize = size; + + return (0); + } + + ims = map->_dm_cookie; + is = ims->ims_sb->sb_iommu; + align = MAX(segs[0]._ds_align, PAGE_SIZE); /* @@ -1084,22 +1106,24 @@ iommu_dvmamap_append_range(bus_dma_tag_t bus_size_t length, int flags, bus_size_t boundary) { struct iommu_map_state *ims = map->_dm_cookie; - bus_addr_t sgstart, sgend, bd_mask; - bus_dma_segment_t *seg = NULL; - int i = map->dm_nsegs; + bus_addr_t sgstart = iommu_iomap_translate(ims, pa); -#ifdef DEBUG - if (ims == NULL) - panic("iommu_dvmamap_append_range: null map state"); -#endif + return (iommu_dvmamap_insert(t, map, sgstart, length, flags, boundary)); +} - sgstart = iommu_iomap_translate(ims, pa); - sgend = sgstart + length - 1; +int +iommu_dvmamap_insert(bus_dma_tag_t t, bus_dmamap_t map, + bus_addr_t sgstart, bus_size_t length, int flags, bus_size_t boundary) +{ + bus_addr_t sgend = sgstart + length - 1; + bus_addr_t bd_mask; + bus_dma_segment_t *seg = NULL; + int i = map->dm_nsegs; #ifdef DIAGNOSTIC if (sgstart == 0 || sgstart > sgend) { - printf("append range invalid mapping for %lx " - "(0x%lx - 0x%lx)\n", pa, sgstart, sgend); + printf("append range invalid mapping for " + "0x%lx - 0x%lx\n", sgstart, sgend); map->dm_nsegs = 0; return (EINVAL); } @@ -1298,20 +1322,17 @@ void iommu_dvmamap_unload(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map) { struct iommu_state *is; - struct iommu_map_state *ims = map->_dm_cookie; + struct iommu_map_state *ims; bus_addr_t dvmaddr = map->_dm_dvmastart; bus_size_t sgsize = map->_dm_dvmasize; int error; -#ifdef DEBUG - if (ims == NULL) - panic("iommu_dvmamap_unload: null map state"); - if (ims->ims_sb == NULL) - panic("iommu_dvmamap_unload: null sb"); - if (ims->ims_sb->sb_iommu == NULL) - panic("iommu_dvmamap_unload: null iommu"); -#endif /* DEBUG */ + if (ISSET(map->_dm_flags, BUS_DMA_64BIT)) { +
bus_dmamap_unload(t->_parent, map); + return; + } + ims = map->_dm_cookie; is = ims->ims_sb->sb_iommu; /* Flush the iommu */ @@ -1488,7 +1509,7 @@ iommu_dvmamap_print_map(bus_dma_tag_t t, break; } - if (map->_dm_cookie) { + if (!ISSET(map->_dm_flags, BUS_DMA_64BIT) && map->_dm_cookie != NULL) { struct iommu_map_state *ims = map->_dm_cookie; struct iommu_page_map *ipm = &ims->ims_map; @@ -1546,19 +1567,19 @@ void iommu_dvmamap_sync(bus_dma_tag_t t, bus_dma_tag_t t0, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops) { - struct iommu_map_state *ims = map->_dm_cookie; + struct iommu_map_state *ims; -#ifdef DIAGNOSTIC - if (ims == NULL) - panic("iommu_dvmamap_sync: null map state"); - if (ims->ims_sb == NULL) - panic("iommu_dvmamap_sync: null sb"); - if (ims->ims_sb->sb_iommu == NULL) - panic("iommu_dvmamap_sync: null iommu"); -#endif if (len == 0) return; + if (map->_dm_flags & BUS_DMA_64BIT) { + if (ops & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) + __membar("#MemIssue"); + return; + } + + ims = map->_dm_cookie; + if (ops & BUS_DMASYNC_PREWRITE) __membar("#MemIssue"); @@ -1622,9 +1643,13 @@ iommu_dvmamem_alloc(bus_dma_tag_t t, bus "bound %llx segp %p flags %d\n", (unsigned long long)size, (unsigned long long)alignment, (unsigned long long)boundary, segs, flags)); + + if ((flags & BUS_DMA_64BIT) == 0) + flags |= BUS_DMA_DVMA; + BUS_DMA_FIND_PARENT(t, _dmamem_alloc); return ((*t->_dmamem_alloc)(t, t0, size, alignment, boundary, - segs, nsegs, rsegs, flags | BUS_DMA_DVMA)); + segs, nsegs, rsegs, flags)); } void @@ -1763,7 +1788,7 @@ iommu_iomap_load_map(struct iommu_state /* Flush cache if necessary. */ slot = IOTSBSLOT(e->ipe_va, is->is_tsbsize); - if (is->is_flags & IOMMU_FLUSH_CACHE && + if (ISSET(is->is_hw->ihw_flags, IOMMU_HW_FLUSH_CACHE) && (i == (ipm->ipm_pagecnt - 1) || (slot % 8) == 7)) IOMMUREG_WRITE(is, iommu_cache_flush, is->is_ptsb + slot * 8); @@ -1788,7 +1813,7 @@ iommu_iomap_unload_map(struct iommu_stat /* Flush cache if necessary. 
*/ slot = IOTSBSLOT(e->ipe_va, is->is_tsbsize); - if (is->is_flags & IOMMU_FLUSH_CACHE && + if (ISSET(is->is_hw->ihw_flags, IOMMU_HW_FLUSH_CACHE) && (i == (ipm->ipm_pagecnt - 1) || (slot % 8) == 7)) IOMMUREG_WRITE(is, iommu_cache_flush, is->is_ptsb + slot * 8); Index: dev/iommureg.h =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/iommureg.h,v retrieving revision 1.17 diff -u -p -r1.17 iommureg.h --- dev/iommureg.h 17 Aug 2012 20:46:50 -0000 1.17 +++ dev/iommureg.h 4 Jun 2019 04:40:45 -0000 @@ -90,10 +90,11 @@ struct iommu_strbuf { #define IOMMUCR_DE 0x000000000000000002LL /* Diag enable */ #define IOMMUCR_EN 0x000000000000000001LL /* Enable IOMMU */ -#define IOMMUCR_FIRE_SE 0x000000000000000400LL /* Snoop enable */ -#define IOMMUCR_FIRE_CM_EN 0x000000000000000300LL /* Cache mode enable */ -#define IOMMUCR_FIRE_BE 0x000000000000000002LL /* Bypass enable */ -#define IOMMUCR_FIRE_TE 0x000000000000000001LL /* Translation enabled */ +#define IOMMUCR_FIRE_PD 0x000000000000001000UL /* Process disable */ +#define IOMMUCR_FIRE_SE 0x000000000000000400UL /* Snoop enable */ +#define IOMMUCR_FIRE_CM_EN 0x000000000000000300UL /* Cache mode enable */ +#define IOMMUCR_FIRE_BE 0x000000000000000002UL /* Bypass enable */ +#define IOMMUCR_FIRE_TE 0x000000000000000001UL /* Translation enabled */ /* * IOMMU stuff Index: dev/iommuvar.h =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/iommuvar.h,v retrieving revision 1.17 diff -u -p -r1.17 iommuvar.h --- dev/iommuvar.h 4 May 2016 18:26:12 -0000 1.17 +++ dev/iommuvar.h 4 Jun 2019 04:40:45 -0000 @@ -100,6 +100,21 @@ struct iommu_map_state { }; #define IOMMU_MAP_STREAM 1 +struct iommu_hw { + void (*ihw_enable)(struct iommu_state *); + + unsigned long ihw_dvma_pa; + + unsigned long ihw_bypass; + unsigned long ihw_bypass_nc; /* non-cached */ + unsigned long ihw_bypass_ro; /* relaxed ordering */ + + unsigned int ihw_flags; +#define IOMMU_HW_FLUSH_CACHE (1 << 0) +}; + +extern const struct iommu_hw iommu_hw_default; + /* * per-IOMMU state */ @@ -112,8 +127,7 @@ struct iommu_state { int64_t is_cr; /* Control register value */ struct mutex is_mtx; struct extent *is_dvmamap; /* DVMA map for this instance */ - int is_flags; -#define IOMMU_FLUSH_CACHE 0x00000001 + const struct iommu_hw *is_hw; struct strbuf_ctl *is_sb[2]; /* Streaming buffers if any */ @@ -126,7 +140,8 @@ struct iommu_state { }; /* interfaces for PCI/SBus code */ -void iommu_init(char *, struct iommu_state *, int, u_int32_t); +void iommu_init(char *, const struct iommu_hw *, struct iommu_state *, + int, u_int32_t); void iommu_reset(struct iommu_state *); paddr_t iommu_extract(struct iommu_state *, bus_addr_t); int64_t iommu_lookup_tte(struct iommu_state *, bus_addr_t); @@ -146,6 +161,7 @@ int iommu_dvmamem_alloc(bus_dma_tag_t, b bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int); void iommu_dvmamem_free(bus_dma_tag_t, bus_dma_tag_t, bus_dma_segment_t *, int); + #define IOMMUREG_READ(is, reg) \ bus_space_read_8((is)->is_bustag, \ Index: dev/pci_machdep.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/pci_machdep.c,v retrieving revision 1.46 diff -u -p -r1.46 pci_machdep.c --- dev/pci_machdep.c 27 Dec 2018 11:04:41 -0000 1.46 +++ dev/pci_machdep.c 4 Jun 2019 04:40:45 -0000 @@ -57,6 +57,7 @@ int sparc_pci_debug = 0x0; #include #include #include +#include #include @@ -87,6 +88,46 @@ pci_attach_hook(parent, self, pba) } int 
+pci_bcm_dmamap_create(bus_dma_tag_t dt, bus_dma_tag_t t0, bus_size_t size, + int nsegments, bus_size_t maxsegsz, bus_size_t boundary, int flags, + bus_dmamap_t *dmamp) +{ + bus_dma_tag_t pdt = dt->_parent; + + CLR(flags, BUS_DMA_64BIT); + + return ((*pdt->_dmamap_create)(pdt, t0, size, nsegments, maxsegsz, + boundary, flags, dmamp)); +} + +int +pci_probe_device_hook(pci_chipset_tag_t pc, struct pci_attach_args *pa) +{ + bus_dma_tag_t dt, pdt; + + if (pa->pa_id == + PCI_ID_CODE(PCI_VENDOR_RCC, PCI_PRODUCT_RCC_PCIE_PCIX)) { + /* + * These PCI bridges only support 40bit DVA, so intercept + * bus_dmamap_create so we can clear BUS_DMA_64BIT. + */ + + dt = malloc(sizeof(*dt), M_DEVBUF, M_NOWAIT | M_ZERO); + if (dt == NULL) + panic("%s: could not alloc dma tag", __func__); + + pdt = pa->pa_dmat; + + dt->_parent = pdt; + dt->_dmamap_create = pci_bcm_dmamap_create; + + pa->pa_dmat = dt; + } + + return (0); +} + +int pci_bus_maxdevs(pc, busno) pci_chipset_tag_t pc; int busno; @@ -395,10 +436,15 @@ pci_intr_map(pa, ihp) /* XXXX -- we use the ino. What if there is a valid IGN? */ *ihp = interrupts[0]; - if (pa->pa_pc->intr_map) - return ((*pa->pa_pc->intr_map)(pa, ihp)); - else - return (0); + if (pa->pa_pc->intr_map) { + int rv = (*pa->pa_pc->intr_map)(pa, ihp); + if (rv != 0) + return (rv); + } + + KASSERT(PCI_INTR_TYPE(*ihp) == PCI_INTR_INTX); + + return (0); } int @@ -416,6 +462,30 @@ pci_intr_map_msi(struct pci_attach_args } int +pci_intr_map_msix(struct pci_attach_args *pa, int vec, pci_intr_handle_t *ihp) +{ + pci_chipset_tag_t pc = pa->pa_pc; + pcitag_t tag = pa->pa_tag; + pcireg_t reg; + + if (vec & ~PCI_INTR_VEC_MASK) + return (-1); + + if ((pa->pa_flags & PCI_FLAGS_MSI_ENABLED) == 0 || + pci_get_capability(pc, tag, PCI_CAP_MSIX, NULL, ®) == 0) + return (-1); + + if (vec > PCI_MSIX_MC_TBLSZ(reg)) + return (-1); + + KASSERT(!ISSET(pa->pa_tag, PCI_INTR_TYPE_MASK)); + KASSERT(!ISSET(pa->pa_tag, PCI_INTR_VEC_MASK)); + + *ihp = PCI_INTR_MSIX | PCITAG_OFFSET(pa->pa_tag) | vec; + return (0); +} + +int pci_intr_line(pci_chipset_tag_t pc, pci_intr_handle_t ih) { return (ih); @@ -427,15 +497,23 @@ pci_intr_string(pc, ih) pci_intr_handle_t ih; { static char str[16]; + const char *rv = str; DPRINTF(SPDB_INTR, ("pci_intr_string: ih %u", ih)); - if (ih & PCI_INTR_MSI) - snprintf(str, sizeof str, "msi"); - else + switch (PCI_INTR_TYPE(ih)) { + case PCI_INTR_MSIX: + rv = "msix"; + break; + case PCI_INTR_MSI: + rv = "msi"; + break; + case PCI_INTR_INTX: snprintf(str, sizeof str, "ivec 0x%llx", INTVEC(ih)); - DPRINTF(SPDB_INTR, ("; returning %s\n", str)); + break; + } + DPRINTF(SPDB_INTR, ("; returning %s\n", rv)); - return (str); + return (rv); } void * @@ -495,3 +573,43 @@ pci_msi_enable(pci_chipset_tag_t pc, pci } pci_conf_write(pc, tag, off, reg | PCI_MSI_MC_MSIE); } + +void +pci_msix_enable(pci_chipset_tag_t pc, pcitag_t tag, bus_space_tag_t memt, + int vec, bus_addr_t addr, uint32_t data) +{ + bus_space_handle_t memh; + bus_addr_t base; + pcireg_t reg, table, type; + uint32_t ctrl; + int bir, offset; + int off, tblsz; + + if (pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, ®) == 0) + panic("%s: no msix capability", __func__); + + table = pci_conf_read(pc, tag, off + PCI_MSIX_TABLE); + bir = (table & PCI_MSIX_TABLE_BIR); + offset = (table & PCI_MSIX_TABLE_OFF); + tblsz = PCI_MSIX_MC_TBLSZ(reg) + 1; + + bir = PCI_MAPREG_START + bir * 4; + type = pci_mapreg_type(pc, tag, bir); + if (pci_mapreg_info(pc, tag, bir, type, &base, NULL, NULL) || + bus_space_map(memt, base + offset, tblsz * 16, 0, &memh)) + 
panic("%s: cannot map registers", __func__); + + bus_space_write_4(memt, memh, PCI_MSIX_MA(vec), addr); + bus_space_write_4(memt, memh, PCI_MSIX_MAU32(vec), addr >> 32); + bus_space_write_4(memt, memh, PCI_MSIX_MD(vec), data); + bus_space_barrier(memt, memh, PCI_MSIX_MA(vec), 16, + BUS_SPACE_BARRIER_WRITE); + ctrl = bus_space_read_4(memt, memh, PCI_MSIX_VC(vec)); + bus_space_write_4(memt, memh, PCI_MSIX_VC(vec), + ctrl & ~PCI_MSIX_VC_MASK); + + bus_space_unmap(memt, memh, tblsz * 16); + + pci_conf_write(pc, tag, off, reg | PCI_MSIX_MC_MSIXE); +} + Index: dev/psycho.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/psycho.c,v retrieving revision 1.75 diff -u -p -r1.75 psycho.c --- dev/psycho.c 25 May 2017 03:19:39 -0000 1.75 +++ dev/psycho.c 4 Jun 2019 04:40:45 -0000 @@ -902,7 +902,7 @@ psycho_iommu_init(struct psycho_softc *s panic("couldn't malloc iommu name"); snprintf(name, 32, "%s dvma", sc->sc_dev.dv_xname); - iommu_init(name, is, tsbsize, iobase); + iommu_init(name, &iommu_hw_default, is, tsbsize, iobase); } /* Index: dev/pyro.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/pyro.c,v retrieving revision 1.31 diff -u -p -r1.31 pyro.c --- dev/pyro.c 25 May 2017 03:19:39 -0000 1.31 +++ dev/pyro.c 4 Jun 2019 04:40:45 -0000 @@ -131,6 +131,30 @@ int pyro_msi_eq_intr(void *); int pyro_dmamap_create(bus_dma_tag_t, bus_dma_tag_t, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *); +void pyro_iommu_enable(struct iommu_state *); + +const struct iommu_hw iommu_hw_fire = { + .ihw_enable = pyro_iommu_enable, + + .ihw_dvma_pa = 0x000007ffffffffffUL, + + .ihw_bypass = 0xfffc000000000000UL, + .ihw_bypass_nc = 0x0000080000000000UL, + .ihw_bypass_ro = 0, +}; + +const struct iommu_hw iommu_hw_oberon = { + .ihw_enable = pyro_iommu_enable, + + .ihw_dvma_pa = 0x00007fffffffffffUL, + + .ihw_bypass = 0x7ffc000000000000UL, + .ihw_bypass_nc = 0x0000800000000000UL, + .ihw_bypass_ro = 0x8000000000000000UL, + + .ihw_flags = IOMMU_HW_FLUSH_CACHE, +}; + #ifdef DDB void pyro_xir(void *, int); #endif @@ -266,6 +290,7 @@ pyro_init_iommu(struct pyro_softc *sc, s int tsbsize = 7; u_int32_t iobase = -1; char *name; + const struct iommu_hw *ihw = &iommu_hw_fire; is->is_bustag = sc->sc_bust; @@ -282,11 +307,23 @@ pyro_init_iommu(struct pyro_softc *sc, s panic("couldn't malloc iommu name"); snprintf(name, 32, "%s dvma", sc->sc_dv.dv_xname); - /* On Oberon, we need to flush the cache. 
*/ if (sc->sc_oberon) - is->is_flags |= IOMMU_FLUSH_CACHE; + ihw = &iommu_hw_oberon; + + iommu_init(name, ihw, is, tsbsize, iobase); +} + +void +pyro_iommu_enable(struct iommu_state *is) +{ + unsigned long cr; + + cr = IOMMUREG_READ(is, iommu_cr); + cr |= IOMMUCR_FIRE_BE | IOMMUCR_FIRE_SE | IOMMUCR_FIRE_CM_EN | + IOMMUCR_FIRE_TE; - iommu_init(name, is, tsbsize, iobase); + IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb | is->is_tsbsize); + IOMMUREG_WRITE(is, iommu_cr, cr); } void @@ -611,9 +648,9 @@ pyro_intr_establish(bus_space_tag_t t, b volatile u_int64_t *intrmapptr = NULL, *intrclrptr = NULL; int ino; - if (ihandle & PCI_INTR_MSI) { + if (PCI_INTR_TYPE(ihandle) != PCI_INTR_INTX) { pci_chipset_tag_t pc = pbm->pp_pc; - pcitag_t tag = ihandle & ~PCI_INTR_MSI; + pcitag_t tag = PCI_INTR_TAG(ihandle); int msinum = pbm->pp_msinum++; u_int64_t reg; @@ -632,7 +669,15 @@ pyro_intr_establish(bus_space_tag_t t, b if (flags & BUS_INTR_ESTABLISH_MPSAFE) ih->ih_mpsafe = 1; - pci_msi_enable(pc, tag, pbm->pp_msiaddr, msinum); + switch (PCI_INTR_TYPE(ihandle)) { + case PCI_INTR_MSI: + pci_msi_enable(pc, tag, pbm->pp_msiaddr, msinum); + break; + case PCI_INTR_MSIX: + pci_msix_enable(pc, tag, pbm->pp_memt, + PCI_INTR_VEC(ihandle), pbm->pp_msiaddr, msinum); + break; + } /* Map MSI to the right EQ and mark it as valid. */ reg = bus_space_read_8(sc->sc_bust, sc->sc_csrh, Index: dev/sbus.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/sbus.c,v retrieving revision 1.44 diff -u -p -r1.44 sbus.c --- dev/sbus.c 19 Sep 2015 21:07:04 -0000 1.44 +++ dev/sbus.c 4 Jun 2019 04:40:45 -0000 @@ -349,7 +349,7 @@ sbus_mb_attach(struct device *parent, st snprintf(name, 32, "%s dvma", sc->sc_dev.dv_xname); printf("%s: ", sc->sc_dev.dv_xname); - iommu_init(name, &sc->sc_is, 0, -1); + iommu_init(name, &iommu_hw_default, &sc->sc_is, 0, -1); /* Initialize Starfire PC interrupt translation. 
*/ if (OF_getprop(findroot(), "name", buf, sizeof(buf)) > 0 && Index: dev/schizo.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/schizo.c,v retrieving revision 1.68 diff -u -p -r1.68 schizo.c --- dev/schizo.c 25 May 2017 03:19:39 -0000 1.68 +++ dev/schizo.c 4 Jun 2019 04:40:45 -0000 @@ -451,7 +451,7 @@ schizo_init_iommu(struct schizo_softc *s "using iobase=0x%x, tsbsize=%d\n", iobase, tsbsize)); } - iommu_init(name, is, tsbsize, iobase); + iommu_init(name, &iommu_hw_default, is, tsbsize, iobase); } int Index: dev/vpci.c =================================================================== RCS file: /cvs/src/sys/arch/sparc64/dev/vpci.c,v retrieving revision 1.24 diff -u -p -r1.24 vpci.c --- dev/vpci.c 22 Dec 2017 15:52:36 -0000 1.24 +++ dev/vpci.c 4 Jun 2019 04:40:45 -0000 @@ -525,9 +525,9 @@ vpci_intr_establish(bus_space_tag_t t, b if (flags & BUS_INTR_ESTABLISH_MPSAFE) ih->ih_mpsafe = 1; - if (ihandle & PCI_INTR_MSI) { + if (PCI_INTR_TYPE(ihandle) != PCI_INTR_INTX) { pci_chipset_tag_t pc = pbm->vp_pc; - pcitag_t tag = ihandle & ~PCI_INTR_MSI; + pcitag_t tag = PCI_INTR_TAG(ihandle); int msinum = pbm->vp_msinum++; evcount_attach(&ih->ih_count, ih->ih_name, NULL); @@ -537,7 +537,15 @@ vpci_intr_establish(bus_space_tag_t t, b pbm->vp_msi[msinum] = ih; ih->ih_number = msinum; - pci_msi_enable(pc, tag, pbm->vp_msiaddr, msinum); + switch (PCI_INTR_TYPE(ihandle)) { + case PCI_INTR_MSI: + pci_msi_enable(pc, tag, pbm->vp_msiaddr, msinum); + break; + case PCI_INTR_MSIX: + pci_msix_enable(pc, tag, pbm->vp_memt, + PCI_INTR_VEC(ihandle), pbm->vp_msiaddr, msinum); + break; + } err = hv_pci_msi_setmsiq(pbm->vp_devhandle, msinum, 0, 0); if (err != H_EOK) { Index: include/pci_machdep.h =================================================================== RCS file: /cvs/src/sys/arch/sparc64/include/pci_machdep.h,v retrieving revision 1.33 diff -u -p -r1.33 pci_machdep.h --- include/pci_machdep.h 4 May 2016 14:30:01 -0000 1.33 +++ include/pci_machdep.h 4 Jun 2019 04:40:45 -0000 @@ -43,7 +43,17 @@ struct pci_attach_args; typedef struct sparc_pci_chipset *pci_chipset_tag_t; +#define PCI_INTR_INTX 0x00000000 #define PCI_INTR_MSI 0x80000000 +#define PCI_INTR_MSIX 0x40000000 +#define PCI_INTR_TYPE_MASK 0xc0000000 +#define PCI_INTR_TYPE(_ih) ((_ih) & PCI_INTR_TYPE_MASK) + +#define PCI_INTR_TAG_MASK 0x00ffff00 +#define PCI_INTR_TAG(_ih) ((_ih) & PCI_INTR_TAG_MASK) + +#define PCI_INTR_VEC_MASK 0x000000ff +#define PCI_INTR_VEC(_ih) ((_ih) & PCI_INTR_VEC_MASK) typedef u_int pci_intr_handle_t; /* @@ -74,10 +84,13 @@ struct sparc_pci_chipset { pcireg_t (*conf_read)(pci_chipset_tag_t, pcitag_t, int); void (*conf_write)(pci_chipset_tag_t, pcitag_t, int, pcireg_t); int (*intr_map)(struct pci_attach_args *, pci_intr_handle_t *); + int (*probe_device_hook)(void *, struct pci_attach_args *); }; void pci_attach_hook(struct device *, struct device *, struct pcibus_attach_args *); +int pci_probe_device_hook(pci_chipset_tag_t, + struct pci_attach_args *); int pci_bus_maxdevs(pci_chipset_tag_t, int); pcitag_t pci_make_tag(pci_chipset_tag_t, int, int, int); void pci_decompose_tag(pci_chipset_tag_t, pcitag_t, int *, int *, @@ -88,7 +101,8 @@ void pci_conf_write(pci_chipset_tag_t, pcireg_t); int pci_intr_map(struct pci_attach_args *, pci_intr_handle_t *); int pci_intr_map_msi(struct pci_attach_args *, pci_intr_handle_t *); -#define pci_intr_map_msix(pa, vec, ihp) (-1) +int pci_intr_map_msix(struct pci_attach_args *, int, + pci_intr_handle_t *); int 
pci_intr_line(pci_chipset_tag_t, pci_intr_handle_t); const char *pci_intr_string(pci_chipset_tag_t, pci_intr_handle_t); void *pci_intr_establish(pci_chipset_tag_t, pci_intr_handle_t, @@ -96,14 +110,14 @@ void *pci_intr_establish(pci_chipset_ta void pci_intr_disestablish(pci_chipset_tag_t, void *); void pci_msi_enable(pci_chipset_tag_t, pcitag_t, bus_addr_t, int); +void pci_msix_enable(pci_chipset_tag_t, pcitag_t, bus_space_tag_t, + int, bus_addr_t, uint32_t); int sparc64_pci_enumerate_bus(struct pci_softc *, int (*match)(struct pci_attach_args *), struct pci_attach_args *); #define PCI_MACHDEP_ENUMERATE_BUS sparc64_pci_enumerate_bus - -#define pci_probe_device_hook(c, a) (0) #define pci_min_powerstate(c, t) (PCI_PMCSR_STATE_D3) #define pci_set_powerstate_md(c, t, s, p)
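
Note on the iommu_hw hooks above: the per-chip quirks that were previously keyed off strncmp(name, "pyro", 4) and the IOMMU_FLUSH_CACHE flag now live in a const struct iommu_hw that the attaching bus driver hands to iommu_init(). A minimal sketch of how a hypothetical host bridge would describe its IOMMU follows; the "foo" names and register values are illustrative only, while the structure fields and the iommu_init() signature are the ones added by this diff.

void	foo_iommu_enable(struct iommu_state *);

const struct iommu_hw iommu_hw_foo = {
	.ihw_enable	= foo_iommu_enable,	/* called from iommu_reset() */
	.ihw_dvma_pa	= IOTTE_PAMASK,		/* PA bits kept by iommu_extract() */
	.ihw_bypass	= 0x3fffUL << 50,	/* OR'd into BUS_DMA_64BIT segments */
	.ihw_bypass_nc	= 0,			/* non-cached bypass window, if any */
	.ihw_bypass_ro	= 0,			/* relaxed-ordering bypass, if any */
	.ihw_flags	= IOMMU_HW_FLUSH_CACHE,	/* flush the TSB cache on updates */
};

void
foo_iommu_enable(struct iommu_state *is)
{
	/* Program the TSB base and enable translation; the TSB-size
	 * encoding is whatever this particular chip expects. */
	IOMMUREG_WRITE(is, iommu_tsb, is->is_ptsb);
	IOMMUREG_WRITE(is, iommu_cr, IOMMUCR_EN | (is->is_tsbsize << 16));
}

	/* ... and in the bus attach path ... */
	iommu_init(name, &iommu_hw_foo, is, tsbsize, iobase);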
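From the driver side, the BUS_DMA_64BIT handling means a map created with that flag is never backed by IOTSB entries: bus_dmamap_load*() simply ORs the host bridge's bypass bits into each segment address, and bus_dmamap_sync() reduces to a #MemIssue barrier. Below is a sketch of the usual bus_dma(9) sequence, assuming a driver softc with an sc_dmat tag; only the BUS_DMA_64BIT flag is specific to this diff.

	bus_dmamap_t map;
	bus_dma_segment_t seg;
	int nsegs, error;

	/* Ask for an untranslated (bypass) mapping on a 64bit-capable device. */
	error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_64BIT, &map);
	if (error)
		return (error);

	error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &seg, 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_64BIT);
	if (error == 0)
		error = bus_dmamap_load_raw(sc->sc_dmat, map, &seg, nsegs,
		    size, BUS_DMA_NOWAIT);

	/*
	 * With iommu_hw_fire, map->dm_segs[0].ds_addr is now
	 * 0xfffc000000000000 | <physical address>: the Fire bypass
	 * window rather than a DVMA address out of the IOTSB.
	 */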
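The pci_intr_map_msix() addition replaces the old stub define in pci_machdep.h, so MSI-X capable drivers can use the normal MI interrupt setup on sparc64. A sketch of the driver side, assuming an attach function with struct pci_attach_args *pa and a hypothetical foo_intr handler; the PCI_INTR_MSIX/TAG/VEC handle encoding is internal to pci_machdep and never examined by drivers.

	pci_intr_handle_t ih;
	void *cookie;

	/* Prefer MSI-X vector 0, then fall back to MSI and INTx. */
	if (pci_intr_map_msix(pa, 0, &ih) != 0 &&
	    pci_intr_map_msi(pa, &ih) != 0 &&
	    pci_intr_map(pa, &ih) != 0) {
		printf(": can't map interrupt\n");
		return;
	}

	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
	cookie = pci_intr_establish(pa->pa_pc, ih, IPL_NET,
	    foo_intr, sc, sc->sc_dev.dv_xname);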