Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/sparc64 Handle DMA boundaries a bit better.
details: https://anonhg.NetBSD.org/src/rev/0fc9e99a0c4f
branches: trunk
changeset: 515266:0fc9e99a0c4f
user: eeh <eeh%NetBSD.org@localhost>
date: Fri Sep 21 03:04:09 2001 +0000
description:
Handle DMA boundaries a bit better.
diffstat:
sys/arch/sparc64/dev/iommu.c | 320 +++++++++++++++++++++++++++-------------
sys/arch/sparc64/include/bus.h | 5 +-
2 files changed, 220 insertions(+), 105 deletions(-)
diffs (truncated from 475 to 300 lines):
diff -r 831c740248e3 -r 0fc9e99a0c4f sys/arch/sparc64/dev/iommu.c
--- a/sys/arch/sparc64/dev/iommu.c Fri Sep 21 03:02:32 2001 +0000
+++ b/sys/arch/sparc64/dev/iommu.c Fri Sep 21 03:04:09 2001 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: iommu.c,v 1.39 2001/09/15 06:55:50 eeh Exp $ */
+/* $NetBSD: iommu.c,v 1.40 2001/09/21 03:04:09 eeh Exp $ */
/*
* Copyright (c) 1999, 2000 Matthew R. Green
@@ -483,9 +483,10 @@
int err;
bus_size_t sgsize;
paddr_t curaddr;
- u_long dvmaddr;
+ u_long dvmaddr, sgstart, sgend;
bus_size_t align, boundary;
vaddr_t vaddr = (vaddr_t)buf;
+ int seg;
pmap_t pmap;
if (map->dm_nsegs) {
@@ -517,8 +518,13 @@
boundary = map->_dm_boundary;
align = max(map->dm_segs[0]._ds_align, NBPG);
s = splhigh();
- err = extent_alloc(is->is_dvmamap, sgsize, align,
- boundary, EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dvmaddr);
+ /*
+ * If our segment size is larger than the boundary we need to
+ * split the transfer up into little pieces ourselves.
+ */
+ err = extent_alloc(is->is_dvmamap, sgsize, align,
+ (sgsize > boundary) ? 0 : boundary,
+ EX_NOWAIT|EX_BOUNDZERO, (u_long *)&dvmaddr);
splx(s);
#ifdef DEBUG
@@ -526,7 +532,9 @@
{
printf("iommu_dvmamap_load(): extent_alloc(%d, %x) failed!\n",
(int)sgsize, flags);
+#ifdef DDB
Debugger();
+#endif
}
#endif
if (err != 0)
@@ -535,20 +543,54 @@
if (dvmaddr == (bus_addr_t)-1)
return (ENOMEM);
+ /* Set the active DVMA map */
+ map->_dm_dvmastart = dvmaddr;
+ map->_dm_dvmasize = sgsize;
+
/*
- * We always use just one segment.
+ * Now split the DVMA range into segments, not crossing
+ * the boundary.
*/
+ seg = 0;
+ sgstart = dvmaddr + (vaddr & PGOFSET);
+ sgend = sgstart + buflen - 1;
+ map->dm_segs[seg].ds_addr = sgstart;
+ DPRINTF(IDB_INFO, ("iommu_dvmamap_load: boundary %lx boundary-1 %lx "
+ "~(boundary-1) %lx\n", boundary, (boundary-1), ~(boundary-1)));
+ while ((sgstart & ~(boundary - 1)) != (sgend & ~(boundary - 1))) {
+ /* Oops. We crossed a boundary. Split the xfer. */
+ DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
+ "seg %d start %lx size %lx\n", seg,
+ map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
+ map->dm_segs[seg].ds_len = sgstart & (boundary - 1);
+ if (++seg > map->_dm_segcnt) {
+ /* Too many segments. Fail the operation. */
+ DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
+ "too many segments %d\n", seg));
+ s = splhigh();
+ /* How can this fail? And if it does what can we do? */
+ err = extent_free(is->is_dvmamap,
+ dvmaddr, sgsize, EX_NOWAIT);
+ splx(s);
+ map->_dm_dvmastart = 0;
+ map->_dm_dvmasize = 0;
+ return (E2BIG);
+ }
+ sgstart = roundup(sgstart, boundary);
+ map->dm_segs[seg].ds_addr = sgstart;
+ }
+ map->dm_segs[seg].ds_len = sgend - sgstart + 1;
+ DPRINTF(IDB_INFO, ("iommu_dvmamap_load: "
+ "seg %d start %lx size %lx\n", seg,
+ map->dm_segs[seg].ds_addr, map->dm_segs[seg].ds_len));
+ map->dm_nsegs = seg+1;
map->dm_mapsize = buflen;
- map->dm_nsegs = 1;
- map->dm_segs[0].ds_addr = dvmaddr + (vaddr & PGOFSET);
- map->dm_segs[0].ds_len = buflen;
if (p != NULL)
pmap = p->p_vmspace->vm_map.pmap;
else
pmap = pmap_kernel();
- dvmaddr = trunc_page(map->dm_segs[0].ds_addr);
for (; buflen > 0; ) {
/*
* Get the physical address for this page.
@@ -587,37 +629,20 @@
struct iommu_state *is;
bus_dmamap_t map;
{
- vaddr_t addr, offset;
- size_t len;
- int error, s, i;
- bus_addr_t dvmaddr;
+ int error, s;
bus_size_t sgsize;
- paddr_t pa;
-
- dvmaddr = (map->dm_segs[0].ds_addr & ~PGOFSET);
- pa = 0;
- sgsize = 0;
- for (i = 0; i<map->dm_nsegs; i++) {
- addr = trunc_page(map->dm_segs[i].ds_addr);
- offset = map->dm_segs[i].ds_addr & PGOFSET;
- len = map->dm_segs[i].ds_len;
- if (len == 0 || addr == 0)
- printf("iommu_dvmamap_unload: map = %p, i = %d, len = %d, addr = %lx\n",
- map, (int)i, (int)len, (unsigned long)addr);
+ /* Flush the iommu */
+#ifdef DEBUG
+ if (!map->_dm_dvmastart) {
+ printf("iommu_dvmamap_unload: dvmastart is zero\n");
+#ifdef DDB
+ Debugger();
+#endif
+ }
+#endif
+ iommu_remove(is, map->_dm_dvmastart, map->_dm_dvmasize);
- DPRINTF(IDB_BUSDMA,
- ("iommu_dvmamap_unload: map %p removing va %lx size %lx\n",
- map, (long)addr, (long)len));
- iommu_remove(is, addr, len);
-
- if (trunc_page(pa) == addr)
- sgsize += trunc_page(len + offset);
- else
- sgsize += round_page(len + offset);
- pa = addr + offset + len;
-
- }
/* Flush the caches */
bus_dmamap_unload(t->_parent, map);
@@ -626,10 +651,15 @@
map->dm_nsegs = 0;
s = splhigh();
- error = extent_free(is->is_dvmamap, dvmaddr, sgsize, EX_NOWAIT);
+ error = extent_free(is->is_dvmamap, map->_dm_dvmastart,
+ map->_dm_dvmasize, EX_NOWAIT);
splx(s);
if (error != 0)
printf("warning: %qd of DVMA space lost\n", (long long)sgsize);
+
+ /* Clear the map */
+ map->_dm_dvmastart = 0;
+ map->_dm_dvmasize = 0;
}
@@ -644,13 +674,13 @@
bus_size_t size;
{
struct vm_page *m;
- int i, s;
+ int i, j, s;
int left;
int err;
bus_size_t sgsize;
paddr_t pa;
bus_size_t boundary, align;
- u_long dvmaddr;
+ u_long dvmaddr, sgstart, sgend;
struct pglist *mlist;
int pagesz = PAGE_SIZE;
@@ -661,20 +691,6 @@
#endif
bus_dmamap_unload(t, map);
}
- /*
- * Make sure that on error condition we return "no valid mappings".
- */
- map->dm_nsegs = 0;
- /* Count up the total number of pages we need */
- pa = segs[0].ds_addr;
- sgsize = 0;
- for (i=0; i<nsegs; i++) {
- if (round_page(pa) != round_page(segs[i].ds_addr))
- sgsize = round_page(sgsize);
- sgsize += segs[i].ds_len;
- pa = segs[i].ds_addr + segs[i].ds_len;
- }
- sgsize = round_page(sgsize);
/*
* A boundary presented to bus_dmamem_alloc() takes precedence
@@ -684,10 +700,33 @@
boundary = map->_dm_boundary;
align = max(segs[0]._ds_align, NBPG);
+
+ /*
+ * Make sure that on error condition we return "no valid mappings".
+ */
+ map->dm_nsegs = 0;
+ /* Count up the total number of pages we need */
+ pa = segs[0].ds_addr;
+ sgsize = 0;
+ left = size;
+ for (i=0; left && i<nsegs; i++) {
+ if (round_page(pa) != round_page(segs[i].ds_addr))
+ sgsize = round_page(sgsize);
+ sgsize += min(left, segs[i].ds_len);
+ left -= segs[i].ds_len;
+ pa = segs[i].ds_addr + segs[i].ds_len;
+ }
+ sgsize = round_page(sgsize);
+
s = splhigh();
- err = extent_alloc(is->is_dvmamap, sgsize, align, boundary,
- ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT)|EX_BOUNDZERO,
- (u_long *)&dvmaddr);
+ /*
+ * If our segment size is larger than the boundary we need to
+ * split the transfer up into little pieces ourselves.
+ */
+ err = extent_alloc(is->is_dvmamap, sgsize, align,
+ (sgsize > boundary) ? 0 : boundary,
+ ((flags & BUS_DMA_NOWAIT) == 0 ? EX_WAITOK : EX_NOWAIT) |
+ EX_BOUNDZERO, (u_long *)&dvmaddr);
splx(s);
if (err != 0)
@@ -704,6 +743,10 @@
if (dvmaddr == (bus_addr_t)-1)
return (ENOMEM);
+ /* Set the active DVMA map */
+ map->_dm_dvmastart = dvmaddr;
+ map->_dm_dvmasize = sgsize;
+
if ((mlist = segs[0]._ds_mlist) == NULL) {
u_long prev_va = NULL;
/*
@@ -712,73 +755,141 @@
* _bus_dmamap_load_mbuf(). Ignore the mlist and
* load each segment individually.
*/
+ map->dm_mapsize = size;
- /* We'll never end up with less segments than we got as input.
- this gives us a chance to fail quickly */
- if (nsegs > map->_dm_segcnt)
- return (E2BIG);
+ i = j = 0;
+ pa = segs[i].ds_addr;
+ dvmaddr += (pa & PGOFSET);
+ left = min(size, segs[i].ds_len);
+
+ sgstart = dvmaddr;
+ sgend = sgstart + left - 1;
+
+ map->dm_segs[j].ds_addr = dvmaddr;
+ map->dm_segs[j].ds_len = left;
+
+ /* Set the size (which we will be destroying) */
+ map->dm_mapsize = size;
+
+ while (size > 0) {
+ int incr;
+
+ if (left <= 0) {
+ u_long offset;
+
+ /*
+ * If the two segs are on different physical
+ * pages move to a new virtual page.
+ */
+ if (trunc_page(pa) !=
+ trunc_page(segs[++i].ds_addr))
+ dvmaddr += NBPG;
+
+ pa = segs[i].ds_addr;
+ left = min(size, segs[i].ds_len);
- i = 0;
Home |
Main Index |
Thread Index |
Old Index