Port-sgimips archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
Moving sgimips to common bus_dma.c
Hello,
the attached patch rips out the private bus_dma implementation in
sgimips/bus.c and replaces it with mips/bus_dma.c. This works on my O2
with sgimips64 (well, at least no worse than the old implementation).
The goal is to eventually move to the common bus_space as well and at
some point be able to run LP64 kernels on IP2x and IP3x.
Remaining issues:
- this is untested on 32bit hardware; I don't have any MIPS-I hardware, SGI or otherwise
- _BUS_DMAMAP_COHERENT in _bus_dmamap_load() seems to cause data corruption,
so it's #ifndef sgimips for now
Other changes to bus_dma.c:
- make _bus_dmamem_mmap() return uncached pages if possible, as previously
on sgimips. This is for hardware which uses regular RAM as video memory,
like the O2. CI20 will need this as well.
have fun
Michael
Index: sgimips/conf/files.sgimips
===================================================================
RCS file: /cvsroot/src/sys/arch/sgimips/conf/files.sgimips,v
retrieving revision 1.52
diff -u -w -r1.52 files.sgimips
--- sgimips/conf/files.sgimips 20 Jul 2014 10:22:55 -0000 1.52
+++ sgimips/conf/files.sgimips 28 Jan 2015 04:42:39 -0000
@@ -26,8 +26,7 @@
file arch/sgimips/sgimips/disksubr.c
file arch/sgimips/sgimips/machdep.c
-file dev/bus_dma/bus_dmamem_common.c
-
+file arch/mips/mips/bus_dma.c
file arch/mips/mips/mips3_clock.c mips3
file arch/mips/mips/mips3_clockintr.c mips3
Index: sgimips/sgimips/bus.c
===================================================================
RCS file: /cvsroot/src/sys/arch/sgimips/sgimips/bus.c,v
retrieving revision 1.65
diff -u -w -r1.65 bus.c
--- sgimips/sgimips/bus.c 2 Oct 2012 23:54:54 -0000 1.65
+++ sgimips/sgimips/bus.c 28 Jan 2015 04:42:39 -0000
@@ -43,13 +43,12 @@
#include <sys/proc.h>
#include <sys/mbuf.h>
-#define _SGIMIPS_BUS_DMA_PRIVATE
+#define _MIPS_BUS_DMA_PRIVATE
+
#include <sys/bus.h>
#include <machine/cpu.h>
#include <machine/machtype.h>
-#include <dev/bus_dma/bus_dmamem_common.h>
-
#include <uvm/uvm_extern.h>
#include <mips/cpuregs.h>
@@ -60,49 +59,36 @@
#include "opt_sgimace.h"
-static int _bus_dmamap_load_buffer(bus_dmamap_t, void *, bus_size_t,
- struct vmspace *, int, vaddr_t *, int *, int);
-
-struct sgimips_bus_dma_tag sgimips_default_bus_dma_tag = {
- _bus_dmamap_create,
- _bus_dmamap_destroy,
- _bus_dmamap_load,
- _bus_dmamap_load_mbuf,
- _bus_dmamap_load_uio,
- _bus_dmamap_load_raw,
- _bus_dmamap_unload,
- NULL,
- _bus_dmamem_alloc,
- _bus_dmamem_free,
- _bus_dmamem_map,
- _bus_dmamem_unmap,
- _bus_dmamem_mmap,
+struct mips_bus_dma_tag sgimips_default_bus_dma_tag = {
+ ._dmamap_ops = _BUS_DMAMAP_OPS_INITIALIZER,
+ ._dmamem_ops = _BUS_DMAMEM_OPS_INITIALIZER,
+ ._dmatag_ops = _BUS_DMATAG_OPS_INITIALIZER,
};
+/*
+ * XXX
+ * I'm not sure how well the common MIPS bus_dma.c handles MIPS-I and I don't
+ * have any IP1x hardware, so I'll leave this in just in case it needs to be
+ * put back
+ */
+
void
sgimips_bus_dma_init(void)
{
+#if 0
switch (mach_type) {
/* R2000/R3000 */
case MACH_SGI_IP6 | MACH_SGI_IP10:
case MACH_SGI_IP12:
- sgimips_default_bus_dma_tag._dmamap_sync =
+ sgimips_default_bus_dma_tag._dmamap_ops.dmamap_sync =
_bus_dmamap_sync_mips1;
break;
- /* >=R4000*/
- case MACH_SGI_IP20:
- case MACH_SGI_IP22:
- case MACH_SGI_IP30:
- case MACH_SGI_IP32:
- sgimips_default_bus_dma_tag._dmamap_sync =
- _bus_dmamap_sync_mips3;
- break;
-
default:
panic("sgimips_bus_dma_init: unsupported mach type IP%d\n",
mach_type);
}
+#endif
}
u_int8_t
@@ -500,323 +486,6 @@
}
}
-/*
- * Common function for DMA map creation. May be called by bus-specific
- * DMA map creation functions.
- */
-int
-_bus_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegments,
- bus_size_t maxsegsz, bus_size_t boundary, int flags,
- bus_dmamap_t *dmamp)
-{
- struct sgimips_bus_dmamap *map;
- void *mapstore;
- size_t mapsize;
-
- /*
- * Allocate and initialize the DMA map. The end of the map
- * is a variable-sized array of segments, so we allocate enough
- * room for them in one shot.
- *
- * Note we don't preserve the WAITOK or NOWAIT flags. Preservation
- * of ALLOCNOW notifies others that we've reserved these resources,
- * and they are not to be freed.
- *
- * The bus_dmamap_t includes one bus_dma_segment_t, hence
- * the (nsegments - 1).
- */
- mapsize = sizeof(struct sgimips_bus_dmamap) +
- (sizeof(bus_dma_segment_t) * (nsegments - 1));
- if ((mapstore = malloc(mapsize, M_DMAMAP,
- (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
- return ENOMEM;
-
- memset(mapstore, 0, mapsize);
- map = (struct sgimips_bus_dmamap *)mapstore;
- map->_dm_size = size;
- map->_dm_segcnt = nsegments;
- map->_dm_maxmaxsegsz = maxsegsz;
- map->_dm_boundary = boundary;
- map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
- map->_dm_vmspace = NULL;
- map->dm_maxsegsz = maxsegsz;
- map->dm_mapsize = 0; /* no valid mappings */
- map->dm_nsegs = 0;
-
- *dmamp = map;
- return 0;
-}
-
-/*
- * Common function for DMA map destruction. May be called by bus-specific
- * DMA map destruction functions.
- */
-void
-_bus_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
-{
-
- free(map, M_DMAMAP);
-}
-extern paddr_t kvtophys(vaddr_t); /* XXX */
-
-/*
- * Utility function to load a linear buffer. lastaddrp holds state
- * between invocations (for multiple-buffer loads). segp contains
- * the starting segment on entrance, and the ending segment on exit.
- * first indicates if this is the first invocation of this function.
- */
-int
-_bus_dmamap_load_buffer(bus_dmamap_t map, void *buf, bus_size_t buflen,
- struct vmspace *vm, int flags, vaddr_t *lastaddrp,
- int *segp, int first)
-{
- bus_size_t sgsize;
- bus_addr_t lastaddr, baddr, bmask;
- paddr_t curaddr;
- vaddr_t vaddr = (vaddr_t)buf;
- int seg;
-
- lastaddr = *lastaddrp;
- bmask = ~(map->_dm_boundary - 1);
-
- for (seg = *segp; buflen > 0 ; ) {
- /*
- * Get the physical address for this segment.
- */
- if (!VMSPACE_IS_KERNEL_P(vm))
- (void) pmap_extract(vm_map_pmap(&vm->vm_map),
- vaddr, &curaddr);
- else
- curaddr = kvtophys(vaddr);
-
- /*
- * Compute the segment size, and adjust counts.
- */
- sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
- if (buflen < sgsize)
- sgsize = buflen;
-
- /*
- * Make sure we don't cross any boundaries.
- */
- if (map->_dm_boundary > 0) {
- baddr = (curaddr + map->_dm_boundary) & bmask;
- if (sgsize > (baddr - curaddr))
- sgsize = (baddr - curaddr);
- }
-
- /*
- * Insert chunk into a segment, coalescing with
- * the previous segment if possible.
- */
- if (first) {
- map->dm_segs[seg].ds_addr = curaddr;
- map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = vaddr;
- first = 0;
- } else {
- if (curaddr == lastaddr &&
- (map->dm_segs[seg].ds_len + sgsize) <=
- map->dm_maxsegsz &&
- (map->_dm_boundary == 0 ||
- (map->dm_segs[seg].ds_addr & bmask) ==
- (curaddr & bmask)))
- map->dm_segs[seg].ds_len += sgsize;
- else {
- if (++seg >= map->_dm_segcnt)
- break;
- map->dm_segs[seg].ds_addr = curaddr;
- map->dm_segs[seg].ds_len = sgsize;
- map->dm_segs[seg]._ds_vaddr = vaddr;
- }
- }
-
- lastaddr = curaddr + sgsize;
- vaddr += sgsize;
- buflen -= sgsize;
- }
-
- *segp = seg;
- *lastaddrp = lastaddr;
-
- /*
- * Did we fit?
- */
- if (buflen != 0)
- return EFBIG; /* XXX Better return value here? */
-
- return 0;
-}
-
-/*
- * Common function for loading a direct-mapped DMA map with a linear
- * buffer.
- */
-int
-_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
- bus_size_t buflen, struct proc *p, int flags)
-{
- vaddr_t lastaddr;
- int seg, error;
- struct vmspace *vm;
-
- /*
- * Make sure that on error condition we return "no valid mappings".
- */
- map->dm_mapsize = 0;
- map->dm_nsegs = 0;
- KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
-
- if (buflen > map->_dm_size)
- return EINVAL;
-
- if (p != NULL) {
- vm = p->p_vmspace;
- } else {
- vm = vmspace_kernel();
- }
-
- seg = 0;
- error = _bus_dmamap_load_buffer(map, buf, buflen,
- vm, flags, &lastaddr, &seg, 1);
- if (error == 0) {
- map->dm_mapsize = buflen;
- map->dm_nsegs = seg + 1;
- map->_dm_vmspace = vm;
-
- /*
- * For linear buffers, we support marking the mapping
- * as COHERENT.
- *
- * XXX Check TLB entries for cache-inhibit bits?
- */
- if (buf >= (void *)MIPS_KSEG1_START &&
- buf < (void *)MIPS_KSEG2_START)
- map->_dm_flags |= SGIMIPS_DMAMAP_COHERENT;
- }
- return error;
-}
-
-/*
- * Like _bus_dmamap_load(), but for mbufs.
- */
-int
-_bus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *m0,
- int flags)
-{
- vaddr_t lastaddr;
- int seg, error, first;
- struct mbuf *m;
-
- /*
- * Make sure that on error condition we return "no valid mappings."
- */
- map->dm_mapsize = 0;
- map->dm_nsegs = 0;
- KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
-
-#ifdef DIAGNOSTIC
- if ((m0->m_flags & M_PKTHDR) == 0)
- panic("_bus_dmamap_load_mbuf: no packet header");
-#endif
-
- if (m0->m_pkthdr.len > map->_dm_size)
- return EINVAL;
-
- first = 1;
- seg = 0;
- error = 0;
- for (m = m0; m != NULL && error == 0; m = m->m_next) {
- if (m->m_len == 0)
- continue;
- error = _bus_dmamap_load_buffer(map, m->m_data, m->m_len,
- vmspace_kernel(), flags, &lastaddr, &seg, first);
- first = 0;
- }
- if (error == 0) {
- map->dm_mapsize = m0->m_pkthdr.len;
- map->dm_nsegs = seg + 1;
- map->_dm_vmspace = vmspace_kernel(); /* always kernel */
- }
- return error;
-}
-
-/*
- * Like _bus_dmamap_load(), but for uios.
- */
-int
-_bus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio,
- int flags)
-{
- vaddr_t lastaddr;
- int seg, i, error, first;
- bus_size_t minlen, resid;
- struct iovec *iov;
- void *addr;
-
- /*
- * Make sure that on error condition we return "no valid mappings."
- */
- map->dm_mapsize = 0;
- map->dm_nsegs = 0;
- KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);
-
- resid = uio->uio_resid;
- iov = uio->uio_iov;
-
- first = 1;
- seg = 0;
- error = 0;
- for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
- /*
- * Now at the first iovec to load. Load each iovec
- * until we have exhausted the residual count.
- */
- minlen = resid < iov[i].iov_len ? resid : iov[i].iov_len;
- addr = (void *)iov[i].iov_base;
-
- error = _bus_dmamap_load_buffer(map, addr, minlen,
- uio->uio_vmspace, flags, &lastaddr, &seg, first);
- first = 0;
-
- resid -= minlen;
- }
- if (error == 0) {
- map->dm_mapsize = uio->uio_resid;
- map->dm_nsegs = seg + 1;
- map->_dm_vmspace = uio->uio_vmspace;
- }
- return error;
-}
-
-/*
- * Like _bus_dmamap_load(), but for raw memory.
- */
-int
-_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs,
- int nsegs, bus_size_t size, int flags)
-{
-
- panic("_bus_dmamap_load_raw: not implemented");
-}
-
-/*
- * Common function for unloading a DMA map. May be called by
- * chipset-specific DMA map unload functions.
- */
-void
-_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
-{
-
- /*
- * No resources to free; just mark the mappings as
- * invalid.
- */
- map->dm_maxsegsz = map->_dm_maxmaxsegsz;
- map->dm_mapsize = 0;
- map->dm_nsegs = 0;
- map->_dm_flags &= ~SGIMIPS_DMAMAP_COHERENT;
-}
/* Common function from DMA map synchronization. May be called
* by chipset-specific DMA map synchronization functions.
@@ -920,285 +589,6 @@
}
}
-/*
- * Common function for DMA map synchronization. May be called
- * by chipset-specific DMA map synchronization functions.
- *
- * This is the R4x00/R5k version.
- */
-void
-_bus_dmamap_sync_mips3(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset,
- bus_size_t len, int ops)
-{
- bus_size_t minlen;
- vaddr_t vaddr, start, end, preboundary, firstboundary, lastboundary;
- int i, useindex;
-
- /*
- * Mixing PRE and POST operations is not allowed.
- */
- if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
- (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
- panic("_bus_dmamap_sync_mips3: mix PRE and POST");
-
-#ifdef DIAGNOSTIC
- if (offset >= map->dm_mapsize)
- panic("_bus_dmamap_sync_mips3: bad offset %"PRIxPSIZE
- "(map size is %"PRIxPSIZE")", offset, map->dm_mapsize);
- if (len == 0 || (offset + len) > map->dm_mapsize)
- panic("_bus_dmamap_sync_mips3: bad length");
-#endif
-
- /*
- * Since we're dealing with a virtually-indexed, write-back
- * cache, we need to do the following things:
- *
- * PREREAD -- Invalidate D-cache. Note we might have
- * to also write-back here if we have to use an Index
- * op, or if the buffer start/end is not cache-line aligned.
- *
- * PREWRITE -- Write-back the D-cache. If we have to use
- * an Index op, we also have to invalidate. Note that if
- * we are doing PREREAD|PREWRITE, we can collapse everything
- * into a single op.
- *
- * POSTREAD -- Nothing.
- *
- * POSTWRITE -- Nothing.
- */
-
- /*
- * Flush the write buffer.
- * XXX Is this always necessary?
- */
- wbflush();
-
- /*
- * No cache flushes are necessary if we're only doing
- * POSTREAD or POSTWRITE (i.e. not doing PREREAD or PREWRITE).
- */
- ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
- if (ops == 0)
- return;
-
- /*
- * If the mapping is of COHERENT DMA-safe memory, no cache
- * flush is necessary.
- */
- if (map->_dm_flags & SGIMIPS_DMAMAP_COHERENT)
- return;
-
- /*
- * If the mapping belongs to the kernel, or it belongs
- * to the currently-running process (XXX actually, vmspace),
- * then we can use Hit ops. Otherwise, Index ops.
- *
- * This should be true the vast majority of the time.
- */
- if (__predict_true(VMSPACE_IS_KERNEL_P(map->_dm_vmspace) ||
- map->_dm_vmspace == curproc->p_vmspace))
- useindex = 0;
- else
- useindex = 1;
-
- for (i = 0; i < map->dm_nsegs && len != 0; i++) {
- /* Find the beginning segment. */
- if (offset >= map->dm_segs[i].ds_len) {
- offset -= map->dm_segs[i].ds_len;
- continue;
- }
-
- /*
- * Now at the first segment to sync; nail
- * each segment until we have exhausted the
- * length.
- */
- minlen = len < map->dm_segs[i].ds_len - offset ?
- len : map->dm_segs[i].ds_len - offset;
-
- vaddr = map->dm_segs[i]._ds_vaddr;
-
-#ifdef BUS_DMA_DEBUG
- printf("bus_dmamap_sync_mips3: flushing segment %d "
- "(0x%lx+%lx, 0x%lx+0x%lx) (olen = %ld)...", i,
- vaddr, offset, vaddr, offset + minlen - 1, len);
-#endif
-
- /*
- * If we are forced to use Index ops, it's always a
- * Write-back,Invalidate, so just do one test.
- */
- if (__predict_false(useindex)) {
- mips_dcache_wbinv_range_index(vaddr + offset, minlen);
-#ifdef BUS_DMA_DEBUG
- printf("\n");
-#endif
- offset = 0;
- len -= minlen;
- continue;
- }
-
- /* The code that follows is more correct than that in
- mips/bus_dma.c. */
- start = vaddr + offset;
- switch (ops) {
- case BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE:
- mips_dcache_wbinv_range(start, minlen);
- break;
-
- case BUS_DMASYNC_PREREAD: {
- struct mips_cache_info * const mci = &mips_cache_info;
- end = start + minlen;
- preboundary = start & ~mci->mci_dcache_align_mask;
- firstboundary = (start + mci->mci_dcache_align_mask)
- & ~mci->mci_dcache_align_mask;
- lastboundary = end & ~mci->mci_dcache_align_mask;
- if (preboundary < start && preboundary < lastboundary)
- mips_dcache_wbinv_range(preboundary,
- mci->mci_dcache_align);
- if (firstboundary < lastboundary)
- mips_dcache_inv_range(firstboundary,
- lastboundary - firstboundary);
- if (lastboundary < end)
- mips_dcache_wbinv_range(lastboundary,
- mci->mci_dcache_align);
- break;
- }
-
- case BUS_DMASYNC_PREWRITE:
- mips_dcache_wb_range(start, minlen);
- break;
- }
-#ifdef BUS_DMA_DEBUG
- printf("\n");
-#endif
- offset = 0;
- len -= minlen;
- }
-}
-
-/*
- * Common function for DMA-safe memory allocation. May be called
- * by bus-specific DMA memory allocation functions.
- */
-int
-_bus_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t alignment,
- bus_size_t boundary, bus_dma_segment_t *segs,
- int nsegs, int *rsegs, int flags)
-{
- return (_bus_dmamem_alloc_range_common(t, size, alignment, boundary,
- segs, nsegs, rsegs, flags,
- mips_avail_start /*low*/, mips_avail_end - PAGE_SIZE /*high*/));
-}
-
-/*
- * Common function for freeing DMA-safe memory. May be called by
- * bus-specific DMA memory free functions.
- */
-void
-_bus_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
-{
-
- _bus_dmamem_free_common(t, segs, nsegs);
-}
-
-/*
- * Common function for mapping DMA-safe memory. May be called by
- * bus-specific DMA memory map functions.
- */
-int
-_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
- size_t size, void **kvap, int flags)
-{
- vaddr_t va;
- bus_addr_t addr;
- int curseg;
- const uvm_flag_t kmflags =
- (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;
- u_int pmapflags;
-
- /*
- * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
- * TLB thrashing.
- */
-
- if (nsegs == 1) {
- if (flags & BUS_DMA_COHERENT)
- *kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
- else
- *kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
- return 0;
- }
-
- size = round_page(size);
-
- va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
-
- if (va == 0)
- return (ENOMEM);
-
- *kvap = (void *)va;
-
- pmapflags = VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED;
- if (flags & BUS_DMA_COHERENT)
- pmapflags |= PMAP_NOCACHE;
-
- for (curseg = 0; curseg < nsegs; curseg++) {
- for (addr = segs[curseg].ds_addr;
- addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
- addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
- if (size == 0)
- panic("_bus_dmamem_map: size botch");
- pmap_enter(pmap_kernel(), va, addr,
- VM_PROT_READ | VM_PROT_WRITE,
- pmapflags);
- }
- }
- pmap_update(pmap_kernel());
-
- return 0;
-}
-
-/*
- * Common function for unmapping DMA-safe memory. May be called by
- * bus-specific DMA memory unmapping functions.
- */
-void
-_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
-{
-
- /*
- * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
- * not in KSEG2).
- */
- if (kva >= (void *)MIPS_KSEG0_START &&
- kva < (void *)MIPS_KSEG2_START)
- return;
-
- _bus_dmamem_unmap_common(t, kva, size);
-}
-
-/*
- * Common functin for mmap(2)'ing DMA-safe memory. May be called by
- * bus-specific DMA mmap(2)'ing functions.
- */
-paddr_t
-_bus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
- off_t off, int prot, int flags)
-{
- bus_addr_t rv;
-
- rv = _bus_dmamem_mmap_common(t, segs, nsegs, off, prot, flags);
- if (rv == (bus_addr_t)-1)
- return (-1);
-
-#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
- return (mips_btop(rv | PGC_NOCACHE));
-#else
- return (mips_btop(rv));
-#endif
-}
-
paddr_t
bus_space_mmap(bus_space_tag_t space, bus_addr_t addr, off_t off,
int prot, int flags)
Index: mips/mips/bus_dma.c
===================================================================
RCS file: /cvsroot/src/sys/arch/mips/mips/bus_dma.c,v
retrieving revision 1.31
diff -u -w -r1.31 bus_dma.c
--- mips/mips/bus_dma.c 27 May 2014 15:56:18 -0000 1.31
+++ mips/mips/bus_dma.c 28 Jan 2015 04:42:39 -0000
@@ -450,9 +450,16 @@
*
* XXX Check TLB entries for cache-inhibit bits?
*/
+ /*
+ * XXX
+ * this seems to cause occasional data corruption on SGI O2
+ */
+#ifndef sgimips
if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
map->_dm_flags |= _BUS_DMAMAP_COHERENT;
- else if (MIPS_KSEG1_P(buf))
+ else
+#endif
+ if (MIPS_KSEG1_P(buf))
map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
else if (MIPS_XKPHYS_P((vaddr_t)buf)
@@ -1162,7 +1169,16 @@
pa = (paddr_t)segs[i].ds_addr + off;
+/*
+ * This is for machines which use normal RAM as video memory, so userland can
+ * mmap() it and treat it like device memory, which is normally uncached.
+ * Needed for X11 on SGI O2, will likely be needed on things like CI20.
+ */
+#if defined(_MIPS_PADDR_T_64BIT) || defined(_LP64)
+ return (mips_btop(pa | PGC_NOCACHE));
+#else
return mips_btop(pa);
+#endif
}
/* Page not found. */
Home |
Main Index |
Thread Index |
Old Index