tech-kern archive
uvm vm_physseg trimming
Hi Everyone,
Please find below a patch to remove .avail_(start|end) from
struct vm_physseg.
They appear redundant with .start/.end, and I couldn't find a reason to
keep them, but I may be wrong. Are there port-specific uses for these?
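To make the interface change easier to see at a glance, here is the net
effect on uvm_page_physload() (prototype and example call site lifted from
the uvm_page.c and x86_machdep.c hunks below; this is just a sketch, not
an additional change beyond the patch):

/* before: segment bounds plus the available sub-range */
void uvm_page_physload(paddr_t start, paddr_t end,
        paddr_t avail_start, paddr_t avail_end, int free_list);

/* after: segment bounds only */
void uvm_page_physload(paddr_t start, paddr_t end, int free_list);

/* a typical MD caller then reads: */
uvm_page_physload(atop(seg_start), atop(seg_end), VM_FREELIST_DEFAULT);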
--
Cherry
diff -r 0b3902dbe274 sys/arch/acorn26/acorn26/pmap.c
--- a/sys/arch/acorn26/acorn26/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/acorn26/acorn26/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -301,11 +301,11 @@
addr = 0;
size = round_page(size);
for (i = 0; i < vm_nphysseg; i++) {
- if (VM_PHYSMEM_PTR(i)->avail_start < VM_PHYSMEM_PTR(i)->avail_end) {
+ if (VM_PHYSMEM_PTR(i)->start < VM_PHYSMEM_PTR(i)->end) {
addr = (vaddr_t)
((char*)MEMC_PHYS_BASE +
- ptoa(VM_PHYSMEM_PTR(i)->avail_start));
- VM_PHYSMEM_PTR(i)->avail_start++;
+ ptoa(VM_PHYSMEM_PTR(i)->start));
+ VM_PHYSMEM_PTR(i)->start++;
break;
}
}
diff -r 0b3902dbe274 sys/arch/alpha/alpha/machdep.c
--- a/sys/arch/alpha/alpha/machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/alpha/alpha/machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -617,11 +617,10 @@
vps = VM_PHYSMEM_PTR(vm_nphysseg - 1);
/* shrink so that it'll fit in the last segment */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ptoa(vps->avail_end - vps->avail_start);
+ if ((vps->end - vps->start) < atop(sz))
+ sz = ptoa(vps->end - vps->start);
vps->end -= atop(sz);
- vps->avail_end -= atop(sz);
msgbufaddr = (void *) ALPHA_PHYS_TO_K0SEG(ptoa(vps->end));
initmsgbuf(msgbufaddr, sz);
diff -r 0b3902dbe274 sys/arch/alpha/alpha/pmap.c
--- a/sys/arch/alpha/alpha/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/alpha/alpha/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -1027,35 +1027,34 @@
panic("pmap_steal_memory: called _after_ bootstrap");
#if 0
- printf(" bank %d: avail_start 0x%lx, start 0x%lx, "
- "avail_end 0x%lx\n", bank, VM_PHYSMEM_PTR(bank)->avail_start,
- VM_PHYSMEM_PTR(bank)->start, VM_PHYSMEM_PTR(bank)->avail_end);
+ printf(" bank %d: start 0x%lx, start 0x%lx, "
+ "end 0x%lx\n", bank, VM_PHYSMEM_PTR(bank)->start,
+ VM_PHYSMEM_PTR(bank)->start, VM_PHYSMEM_PTR(bank)->end);
#endif
- if (VM_PHYSMEM_PTR(bank)->avail_start != VM_PHYSMEM_PTR(bank)->start ||
- VM_PHYSMEM_PTR(bank)->avail_start >= VM_PHYSMEM_PTR(bank)->avail_end)
+ if (VM_PHYSMEM_PTR(bank)->start >= VM_PHYSMEM_PTR(bank)->end)
continue;
#if 0
- printf(" avail_end - avail_start = 0x%lx\n",
- VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start);
+ printf(" end - start = 0x%lx\n",
+ VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start);
#endif
- if ((VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start)
+ if ((VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start)
< npgs)
continue;
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
- VM_PHYSMEM_PTR(bank)->avail_start += npgs;
+ pa = ptoa(VM_PHYSMEM_PTR(bank)->start);
VM_PHYSMEM_PTR(bank)->start += npgs;
/*
* Have we used up this segment?
*/
- if (VM_PHYSMEM_PTR(bank)->avail_start == VM_PHYSMEM_PTR(bank)->end) {
+ if (VM_PHYSMEM_PTR(bank)->start == VM_PHYSMEM_PTR(bank)->end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
@@ -1116,10 +1115,10 @@
printf("bank %d\n", bank);
printf("\tstart = 0x%x\n", ptoa(VM_PHYSMEM_PTR(bank)->start));
printf("\tend = 0x%x\n", ptoa(VM_PHYSMEM_PTR(bank)->end));
- printf("\tavail_start = 0x%x\n",
- ptoa(VM_PHYSMEM_PTR(bank)->avail_start));
- printf("\tavail_end = 0x%x\n",
- ptoa(VM_PHYSMEM_PTR(bank)->avail_end));
+ printf("\tstart = 0x%x\n",
+ ptoa(VM_PHYSMEM_PTR(bank)->start));
+ printf("\tend = 0x%x\n",
+ ptoa(VM_PHYSMEM_PTR(bank)->end));
}
#endif
}
diff -r 0b3902dbe274 sys/arch/amd64/amd64/machdep.c
--- a/sys/arch/amd64/amd64/machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/amd64/amd64/machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -1450,20 +1450,20 @@
for (x = 0; x < vm_nphysseg; x++) {
vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end)
+ if (ctob(vps->end) == avail_end)
break;
}
if (x == vm_nphysseg)
panic("init_x86_64: can't find end of memory");
/* Shrink so it'll fit in the last segment. */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
+ if ((vps->end - vps->start) < atop(sz))
+ sz = ctob(vps->end - vps->start);
- vps->avail_end -= atop(sz);
vps->end -= atop(sz);
msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
+ msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->end);
/* Remove the last segment if it now has no pages. */
if (vps->start == vps->end) {
@@ -1471,10 +1471,10 @@
VM_PHYSMEM_PTR_SWAP(x, x + 1);
}
/* Now find where the new avail_end is. */
for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
+ if (VM_PHYSMEM_PTR(x)->end > avail_end)
+ avail_end = VM_PHYSMEM_PTR(x)->end;
avail_end = ctob(avail_end);
if (sz == reqsz)
diff -r 0b3902dbe274 sys/arch/arm/arm32/pmap.c
--- a/sys/arch/arm/arm32/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/arm/arm32/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -7750,23 +7750,23 @@
KASSERT(mask == 0);
for (i = 0; i < vm_nphysseg; i++) {
ps = VM_PHYSMEM_PTR(i);
- if (ps->avail_start == atop(pv->pv_pa + pv->pv_size)
- && pv->pv_va + pv->pv_size <= ptoa(ps->avail_end)) {
+ if (ps->start == atop(pv->pv_pa + pv->pv_size)
+ && pv->pv_va + pv->pv_size <= ptoa(ps->end)) {
rpv->pv_va = pv->pv_va;
rpv->pv_pa = pv->pv_pa;
rpv->pv_size = amount;
*pvp = NULL;
pmap_map_chunk(kernel_l1pt.pv_va,
- ptoa(ps->avail_start) + (pv->pv_va - pv->pv_pa),
- ptoa(ps->avail_start),
+ ptoa(ps->start) + (pv->pv_va - pv->pv_pa),
+ ptoa(ps->start),
amount - pv->pv_size,
VM_PROT_READ|VM_PROT_WRITE,
PTE_CACHE);
- ps->avail_start += atop(amount - pv->pv_size);
+ ps->start += atop(amount - pv->pv_size);
/*
* If we consumed the entire physseg, remove it.
*/
- if (ps->avail_start == ps->avail_end) {
+ if (ps->start == ps->end) {
for (--vm_nphysseg; i < vm_nphysseg; i++)
VM_PHYSMEM_PTR_SWAP(i, i + 1);
}
diff -r 0b3902dbe274 sys/arch/i386/i386/machdep.c
--- a/sys/arch/i386/i386/machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/i386/i386/machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -1036,7 +1036,7 @@
vps = NULL;
for (x = 0; x < vm_nphysseg; ++x) {
vps = VM_PHYSMEM_PTR(x);
- if (ctob(vps->avail_end) == avail_end) {
+ if (ctob(vps->end) == avail_end) {
break;
}
}
@@ -1044,13 +1044,12 @@
panic("init386: can't find end of memory");
/* Shrink so it'll fit in the last segment. */
- if (vps->avail_end - vps->avail_start < atop(sz))
- sz = ctob(vps->avail_end - vps->avail_start);
+ if (vps->end - vps->start < atop(sz))
+ sz = ctob(vps->end - vps->start);
- vps->avail_end -= atop(sz);
vps->end -= atop(sz);
msgbuf_p_seg[msgbuf_p_cnt].sz = sz;
- msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->avail_end);
+ msgbuf_p_seg[msgbuf_p_cnt++].paddr = ctob(vps->end);
/* Remove the last segment if it now has no pages. */
if (vps->start == vps->end) {
@@ -1060,8 +1059,8 @@
/* Now find where the new avail_end is. */
for (avail_end = 0, x = 0; x < vm_nphysseg; x++)
- if (VM_PHYSMEM_PTR(x)->avail_end > avail_end)
- avail_end = VM_PHYSMEM_PTR(x)->avail_end;
+ if (VM_PHYSMEM_PTR(x)->end > avail_end)
+ avail_end = VM_PHYSMEM_PTR(x)->end;
avail_end = ctob(avail_end);
if (sz == reqsz)
diff -r 0b3902dbe274 sys/arch/ia64/ia64/machdep.c
--- a/sys/arch/ia64/ia64/machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/ia64/ia64/machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -179,14 +179,14 @@
printf("Physical memory chunk(s):\n");
for (lcv = 0;
- lcv < vm_nphysseg || VM_PHYSMEM_PTR(lcv)->avail_end != 0;
+ lcv < vm_nphysseg || VM_PHYSMEM_PTR(lcv)->end != 0;
lcv++) {
- sizetmp = VM_PHYSMEM_PTR(lcv)->avail_end -
- VM_PHYSMEM_PTR(lcv)->avail_start;
+ sizetmp = VM_PHYSMEM_PTR(lcv)->end -
+ VM_PHYSMEM_PTR(lcv)->start;
printf("0x%016lx - 0x%016lx, %ld bytes (%d pages)\n",
- ptoa(VM_PHYSMEM_PTR(lcv)->avail_start),
- ptoa(VM_PHYSMEM_PTR(lcv)->avail_end) - 1,
+ ptoa(VM_PHYSMEM_PTR(lcv)->start),
+ ptoa(VM_PHYSMEM_PTR(lcv)->end) - 1,
ptoa(sizetmp), sizetmp);
}
printf("Total number of segments: vm_nphysseg = %d \n",
diff -r 0b3902dbe274 sys/arch/ia64/ia64/pmap.c
--- a/sys/arch/ia64/ia64/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/ia64/ia64/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -327,25 +327,23 @@
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
- if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start ||
- VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+ if (VM_PHYSMEM_PTR(lcv)->start >= VM_PHYSMEM_PTR(lcv)->end)
continue;
- if ((VM_PHYSMEM_PTR(lcv)->avail_end - VM_PHYSMEM_PTR(lcv)->avail_start)
+ if ((VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start)
< npgs)
continue;
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(VM_PHYSMEM_PTR(lcv)->avail_start);
- VM_PHYSMEM_PTR(lcv)->avail_start += npgs;
+ pa = ptoa(VM_PHYSMEM_PTR(lcv)->start);
VM_PHYSMEM_PTR(lcv)->start += npgs;
/*
* Have we used up this segment?
*/
- if (VM_PHYSMEM_PTR(lcv)->avail_start ==
+ if (VM_PHYSMEM_PTR(lcv)->start ==
VM_PHYSMEM_PTR(lcv)->end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
@@ -392,19 +390,18 @@
if (uvm.page_init_done == true)
panic("pmap_vhpt_steal_memory: called _after_ bootstrap");
- if (VM_PHYSMEM_PTR(lcv)->avail_start != VM_PHYSMEM_PTR(lcv)->start || /* XXX: ??? */
- VM_PHYSMEM_PTR(lcv)->avail_start >= VM_PHYSMEM_PTR(lcv)->avail_end)
+ if (VM_PHYSMEM_PTR(lcv)->start >= VM_PHYSMEM_PTR(lcv)->end)
continue;
/* Break off a VHPT sized, aligned chunk off this segment. */
- start1 = VM_PHYSMEM_PTR(lcv)->avail_start;
+ start1 = VM_PHYSMEM_PTR(lcv)->start;
/* Align requested start address on requested size boundary */
end1 = vhpt_start = roundup(start1, npgs);
start2 = vhpt_start + npgs;
- end2 = VM_PHYSMEM_PTR(lcv)->avail_end;
+ end2 = VM_PHYSMEM_PTR(lcv)->end;
/* Case 1: Doesn't fit. skip this segment */
diff -r 0b3902dbe274 sys/arch/mips/mips/mips_machdep.c
--- a/sys/arch/mips/mips/mips_machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/mips/mips/mips_machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -2004,17 +2004,16 @@
* Fist the physical segment that can be mapped to KSEG0
*/
for (; vps >= vm_physmem; vps--, bank--) {
- if (vps->avail_start + atop(sz) <= atop(MIPS_PHYS_MASK))
+ if (vps->start + atop(sz) <= atop(MIPS_PHYS_MASK))
break;
}
#endif
/* shrink so that it'll fit in the last segment */
- if ((vps->avail_end - vps->avail_start) < atop(sz))
- sz = ptoa(vps->avail_end - vps->avail_start);
+ if ((vps->end - vps->start) < atop(sz))
+ sz = ptoa(vps->end - vps->start);
vps->end -= atop(sz);
- vps->avail_end -= atop(sz);
#ifdef _LP64
msgbufaddr = (void *) MIPS_PHYS_TO_XKPHYS_CACHED(ptoa(vps->end));
#else
diff -r 0b3902dbe274 sys/arch/mips/mips/pmap.c
--- a/sys/arch/mips/mips/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/mips/mips/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -633,18 +633,16 @@
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
- printf("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR" %#"PRIxPADDR"\n",
+ printf("%s: seg %u: %#"PRIxPADDR" %#"PRIxPADDR"\n",
__func__, bank,
- seg->avail_start, seg->start,
- seg->avail_end, seg->end);
+ seg->start, seg->end);
- if (seg->avail_start != seg->start
- || seg->avail_start >= seg->avail_end) {
+ if (seg->start >= seg->end) {
printf("%s: seg %u: bad start\n", __func__, bank);
continue;
}
- if (seg->avail_end - seg->avail_start < npgs) {
+ if (seg->end - seg->start < npgs) {
printf("%s: seg %u: too small for %u pages\n",
__func__, bank, npgs);
continue;
@@ -653,14 +651,13 @@
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(seg->avail_start);
- seg->avail_start += npgs;
+ pa = ptoa(seg->start);
seg->start += npgs;
/*
* Have we used up this segment?
*/
- if (seg->avail_start == seg->end) {
+ if (seg->start == seg->end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
diff -r 0b3902dbe274 sys/arch/powerpc/isa/isadma_machdep.c
--- a/sys/arch/powerpc/isa/isadma_machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/powerpc/isa/isadma_machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -169,8 +169,8 @@
paddr_t avail_end = 0;
for (bank = 0; bank < vm_nphysseg; bank++) {
- if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
- avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+ if (avail_end < VM_PHYSMEM_PTR(bank)->end << PGSHIFT)
+ avail_end = VM_PHYSMEM_PTR(bank)->end << PGSHIFT;
}
/* Call common function to create the basic map. */
@@ -601,8 +601,8 @@
int bank;
for (bank = 0; bank < vm_nphysseg; bank++) {
- if (avail_end < VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT)
- avail_end = VM_PHYSMEM_PTR(bank)->avail_end << PGSHIFT;
+ if (avail_end < VM_PHYSMEM_PTR(bank)->end << PGSHIFT)
+ avail_end = VM_PHYSMEM_PTR(bank)->end << PGSHIFT;
}
if (avail_end > ISA_DMA_BOUNCE_THRESHOLD)
diff -r 0b3902dbe274 sys/arch/powerpc/oea/pmap.c
--- a/sys/arch/powerpc/oea/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/powerpc/oea/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -2929,8 +2929,8 @@
for (bank = 0; bank < vm_nphysseg; bank++) {
ps = VM_PHYSMEM_PTR(bank);
if (ps->free_list == VM_FREELIST_FIRST256 &&
- ps->avail_end - ps->avail_start >= npgs) {
- pa = ptoa(ps->avail_start);
+ ps->end - ps->start >= npgs) {
+ pa = ptoa(ps->start);
break;
}
}
@@ -2938,14 +2938,13 @@
if (pa == 0)
panic("pmap_steal_memory: no approriate memory to steal!");
- ps->avail_start += npgs;
ps->start += npgs;
/*
* If we've used up all the pages in the segment, remove it and
* compact the list.
*/
- if (ps->avail_start == ps->end) {
+ if (ps->start == ps->end) {
/*
* If this was the last one, then a very bad thing has occurred
*/
@@ -2966,7 +2965,7 @@
u_int cnt = 0;
for (bank = 0; bank < vm_nphysseg; bank++) {
ps = VM_PHYSMEM_PTR(bank);
- cnt += ps->avail_end - ps->avail_start;
+ cnt += ps->end - ps->start;
}
printf("pmap_steal_memory: stole %u (total %u) pages (%u left)\n",
npgs, pmap_pages_stolen, cnt);
@@ -3448,12 +3447,12 @@
int bank;
char pbuf[9];
for (cnt = 0, bank = 0; bank < vm_nphysseg; bank++) {
- cnt += VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start;
+ cnt += VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start;
printf("pmap_bootstrap: vm_physmem[%d]=%#" _PRIxpa "-%#" _PRIxpa "/%#" _PRIxpa "\n",
bank,
- ptoa(VM_PHYSMEM_PTR(bank)->avail_start),
- ptoa(VM_PHYSMEM_PTR(bank)->avail_end),
- ptoa(VM_PHYSMEM_PTR(bank)->avail_end - VM_PHYSMEM_PTR(bank)->avail_start));
+ ptoa(VM_PHYSMEM_PTR(bank)->start),
+ ptoa(VM_PHYSMEM_PTR(bank)->end),
+ ptoa(VM_PHYSMEM_PTR(bank)->end - VM_PHYSMEM_PTR(bank)->start));
}
format_bytes(pbuf, sizeof(pbuf), ptoa((u_int64_t) cnt));
printf("pmap_bootstrap: UVM memory = %s (%u pages)\n",
@@ -3488,8 +3487,8 @@
pm->pm_sr[0] = sr;
for (bank = 0; bank < vm_nphysseg; bank++) {
- pa_end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
- pa = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
+ pa_end = ptoa(VM_PHYSMEM_PTR(bank)->end);
+ pa = ptoa(VM_PHYSMEM_PTR(bank)->start);
for (; pa < pa_end; pa += PAGE_SIZE) {
ptegidx = va_to_pteg(pm, pa);
pmap_pte_create(&pt, pm, pa, pa | PTE_M|PTE_BW);
diff -r 0b3902dbe274 sys/arch/powerpc/powerpc/bus_dma.c
--- a/sys/arch/powerpc/powerpc/bus_dma.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/powerpc/powerpc/bus_dma.c Wed Dec 30 15:27:27 2015 +0530
@@ -547,10 +547,10 @@
int bank;
for (bank = 0; bank < vm_nphysseg; bank++) {
- if (start > ptoa(VM_PHYSMEM_PTR(bank)->avail_start))
- start = ptoa(VM_PHYSMEM_PTR(bank)->avail_start);
- if (end < ptoa(VM_PHYSMEM_PTR(bank)->avail_end))
- end = ptoa(VM_PHYSMEM_PTR(bank)->avail_end);
+ if (start > ptoa(VM_PHYSMEM_PTR(bank)->start))
+ start = ptoa(VM_PHYSMEM_PTR(bank)->start);
+ if (end < ptoa(VM_PHYSMEM_PTR(bank)->end))
+ end = ptoa(VM_PHYSMEM_PTR(bank)->end);
}
return _bus_dmamem_alloc_range(t, size, alignment, boundary, segs,
diff -r 0b3902dbe274 sys/arch/sh3/sh3/pmap.c
--- a/sys/arch/sh3/sh3/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/sh3/sh3/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -139,19 +139,18 @@
bank = NULL;
for (i = 0; i < vm_nphysseg; i++) {
bank = VM_PHYSMEM_PTR(i);
- if (npage <= bank->avail_end - bank->avail_start)
+ if (npage <= bank->end - bank->start)
break;
}
KDASSERT(i != vm_nphysseg);
KDASSERT(bank != NULL);
/* Steal pages */
- pa = ptoa(bank->avail_start);
- bank->avail_start += npage;
+ pa = ptoa(bank->start);
bank->start += npage;
/* GC memory bank */
- if (bank->avail_start == bank->end) {
+ if (bank->start == bank->end) {
/* Remove this segment from the list. */
vm_nphysseg--;
KDASSERT(vm_nphysseg > 0);
diff -r 0b3902dbe274 sys/arch/vax/vax/pmap.c
--- a/sys/arch/vax/vax/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/vax/vax/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -491,8 +491,7 @@
* A vax only have one segment of memory.
*/
- v = (VM_PHYSMEM_PTR(0)->avail_start << PGSHIFT) | KERNBASE;
- VM_PHYSMEM_PTR(0)->avail_start += npgs;
+ v = (VM_PHYSMEM_PTR(0)->start << PGSHIFT) | KERNBASE;
VM_PHYSMEM_PTR(0)->start += npgs;
memset((void *)v, 0, size);
return v;
diff -r 0b3902dbe274 sys/arch/x86/x86/x86_machdep.c
--- a/sys/arch/x86/x86/x86_machdep.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/arch/x86/x86/x86_machdep.c Wed Dec 30 15:27:27 2015 +0530
@@ -830,7 +830,6 @@
(uint64_t)atop(tmp));
#endif
uvm_page_physload(atop(seg_start), atop(tmp),
- atop(seg_start), atop(tmp),
x86_freelists[i].freelist);
seg_start = tmp;
}
@@ -843,9 +842,8 @@
(uint64_t)atop(seg_start),
(uint64_t)atop(seg_end));
#endif
- uvm_page_physload(atop(seg_start),
- atop(seg_end), atop(seg_start),
- atop(seg_end), VM_FREELIST_DEFAULT);
+ uvm_page_physload(atop(seg_start), atop(seg_end),
+ VM_FREELIST_DEFAULT);
}
}
@@ -872,7 +870,6 @@
(uint64_t)atop(tmp));
#endif
uvm_page_physload(atop(seg_start1), atop(tmp),
- atop(seg_start1), atop(tmp),
x86_freelists[i].freelist);
seg_start1 = tmp;
}
@@ -885,9 +882,8 @@
(uint64_t)atop(seg_start1),
(uint64_t)atop(seg_end1));
#endif
- uvm_page_physload(atop(seg_start1),
- atop(seg_end1), atop(seg_start1),
- atop(seg_end1), VM_FREELIST_DEFAULT);
+ uvm_page_physload(atop(seg_start1), atop(seg_end1),
+ VM_FREELIST_DEFAULT);
}
}
}
diff -r 0b3902dbe274 sys/uvm/pmap/pmap.c
--- a/sys/uvm/pmap/pmap.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/uvm/pmap/pmap.c Wed Dec 30 15:27:27 2015 +0530
@@ -417,24 +417,20 @@
if (uvm.page_init_done == true)
panic("pmap_steal_memory: called _after_ bootstrap");
- if (seg->avail_start != seg->start ||
- seg->avail_start >= seg->avail_end)
- continue;
-
- if ((seg->avail_end - seg->avail_start) < npgs)
+ if ((seg->end - seg->start) < npgs)
continue;
/*
* There are enough pages here; steal them!
*/
- pa = ptoa(seg->avail_start);
- seg->avail_start += npgs;
+ pa = ptoa(seg->start);
seg->start += npgs;
/*
* Have we used up this segment?
*/
- if (seg->avail_start == seg->end) {
+ if (seg->start == seg->end) {
if (vm_nphysseg == 1)
panic("pmap_steal_memory: out of memory!");
diff -r 0b3902dbe274 sys/uvm/uvm_extern.h
--- a/sys/uvm/uvm_extern.h Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/uvm/uvm_extern.h Wed Dec 30 15:27:27 2015 +0530
@@ -702,8 +702,7 @@
void uvm_pagerealloc(struct vm_page *,
struct uvm_object *, voff_t);
/* Actually, uvm_page_physload takes PF#s which need their own type */
-void uvm_page_physload(paddr_t, paddr_t, paddr_t,
- paddr_t, int);
+void uvm_page_physload(paddr_t, paddr_t, int);
void uvm_setpagesize(void);
/* uvm_pager.c */
diff -r 0b3902dbe274 sys/uvm/uvm_page.c
--- a/sys/uvm/uvm_page.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/uvm/uvm_page.c Wed Dec 30 15:27:27 2015 +0530
@@ -446,8 +446,8 @@
#ifdef __HAVE_VM_PAGE_MD
VM_MDPAGE_INIT(&seg->pgs[i]);
#endif
- if (atop(paddr) >= seg->avail_start &&
- atop(paddr) < seg->avail_end) {
+ if (atop(paddr) >= seg->start &&
+ atop(paddr) < seg->end) {
uvmexp.npages++;
/* add page to free pool */
uvm_pagefree(&seg->pgs[i]);
@@ -644,13 +644,11 @@
continue;
/* try from front */
- if (seg->avail_start == seg->start &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
+ if (seg->start < seg->end) {
+ *paddrp = ctob(seg->start);
seg->start++;
/* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
+ if (seg->start == seg->end) {
if (vm_nphysmem == 1)
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
@@ -662,13 +660,11 @@
}
/* try from rear */
- if (seg->avail_end == seg->end &&
- seg->avail_start < seg->avail_end) {
- *paddrp = ctob(seg->avail_end - 1);
- seg->avail_end--;
+ if (seg->start < seg->end) {
+ *paddrp = ctob(seg->end - 1);
seg->end--;
/* nothing left? nuke it */
- if (seg->avail_end == seg->start) {
+ if (seg->end == seg->start) {
if (vm_nphysmem == 1)
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
@@ -690,16 +686,14 @@
seg = VM_PHYSMEM_PTR(lcv);
/* any room in this bank? */
- if (seg->avail_start >= seg->avail_end)
+ if (seg->start >= seg->end)
continue; /* nope */
- *paddrp = ctob(seg->avail_start);
- seg->avail_start++;
- /* truncate! */
- seg->start = seg->avail_start;
+ *paddrp = ctob(seg->start);
+ seg->start++;
/* nothing left? nuke it */
- if (seg->avail_start == seg->end) {
+ if (seg->start == seg->end) {
if (vm_nphysmem == 1)
panic("uvm_page_physget: out of memory!");
vm_nphysmem--;
@@ -736,8 +730,7 @@
*/
void
-uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
- paddr_t avail_end, int free_list)
+uvm_page_physload(paddr_t start, paddr_t end, int free_list)
{
int preload, lcv;
psize_t npages;
@@ -826,8 +819,6 @@
ps->start = start;
ps->end = end;
- ps->avail_start = avail_start;
- ps->avail_end = avail_end;
if (preload) {
ps->pgs = NULL;
} else {
diff -r 0b3902dbe274 sys/uvm/uvm_page.h
--- a/sys/uvm/uvm_page.h Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/uvm/uvm_page.h Wed Dec 30 15:27:27 2015 +0530
@@ -300,8 +300,6 @@
struct vm_physseg {
paddr_t start; /* PF# of first page in segment */
paddr_t end; /* (PF# of last page in segment) + 1 */
- paddr_t avail_start; /* PF# of first free page in segment */
- paddr_t avail_end; /* (PF# of last free page in segment) +1 */
struct vm_page *pgs; /* vm_page structures (from start) */
struct vm_page *lastpg; /* vm_page structure for end */
int free_list; /* which free list they belong on */
diff -r 0b3902dbe274 sys/uvm/uvm_pglist.c
--- a/sys/uvm/uvm_pglist.c Sun Sep 13 09:15:02 2015 +0530
+++ b/sys/uvm/uvm_pglist.c Wed Dec 30 15:27:27 2015 +0530
@@ -140,15 +140,15 @@
/*
* Make sure that physseg falls within with range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= ps->start || low >= ps->end)
return 0;
/*
* We start our search at the just after where the last allocation
* succeeded.
*/
- candidate = roundup2(max(low, ps->avail_start + ps->start_hint), alignment);
- limit = min(high, ps->avail_end);
+ candidate = roundup2(max(low, ps->start + ps->start_hint), alignment);
+ limit = min(high, ps->end);
pagemask = ~((boundary >> PAGE_SHIFT) - 1);
skip = 0;
second_pass = false;
@@ -171,8 +171,8 @@
* is were we started.
*/
second_pass = true;
- candidate = roundup2(max(low, ps->avail_start), alignment);
- limit = min(limit, ps->avail_start + ps->start_hint);
+ candidate = roundup2(max(low, ps->start), alignment);
+ limit = min(limit, ps->start + ps->start_hint);
skip = 0;
continue;
}
@@ -287,12 +287,12 @@
* the next time we need to search this segment, start after this
* chunk of pages we just allocated.
*/
- ps->start_hint = candidate + num - ps->avail_start;
- KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+ ps->start_hint = candidate + num - ps->start;
+ KASSERTMSG(ps->start_hint <= ps->end - ps->start,
"%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
candidate + num,
- ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
- ps->avail_end - ps->avail_start);
+ ps->start_hint, ps->start_hint, ps->end, ps->start,
+ ps->end - ps->start);
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num);
@@ -369,23 +369,19 @@
#endif
KASSERT(mutex_owned(&uvm_fpageqlock));
- KASSERT(ps->start <= ps->avail_start);
- KASSERT(ps->start <= ps->avail_end);
- KASSERT(ps->avail_start <= ps->end);
- KASSERT(ps->avail_end <= ps->end);
low = atop(low);
high = atop(high);
todo = num;
- candidate = max(low, ps->avail_start + ps->start_hint);
- limit = min(high, ps->avail_end);
+ candidate = max(low, ps->start + ps->start_hint);
+ limit = min(high, ps->end);
pg = &ps->pgs[candidate - ps->start];
second_pass = false;
/*
* Make sure that physseg falls within with range to be allocated from.
*/
- if (high <= ps->avail_start || low >= ps->avail_end)
+ if (high <= ps->start || low >= ps->end)
return 0;
again:
@@ -396,8 +392,8 @@
break;
}
second_pass = true;
- candidate = max(low, ps->avail_start);
- limit = min(limit, ps->avail_start + ps->start_hint);
+ candidate = max(low, ps->start);
+ limit = min(limit, ps->start + ps->start_hint);
pg = &ps->pgs[candidate - ps->start];
goto again;
}
@@ -426,12 +422,12 @@
* The next time we need to search this segment,
* start just after the pages we just allocated.
*/
- ps->start_hint = candidate + 1 - ps->avail_start;
- KASSERTMSG(ps->start_hint <= ps->avail_end - ps->avail_start,
+ ps->start_hint = candidate + 1 - ps->start;
+ KASSERTMSG(ps->start_hint <= ps->end - ps->start,
"%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
candidate + 1,
- ps->start_hint, ps->start_hint, ps->avail_end, ps->avail_start,
- ps->avail_end - ps->avail_start);
+ ps->start_hint, ps->start_hint, ps->end, ps->start,
+ ps->end - ps->start);
#ifdef PGALLOC_VERBOSE
printf("got %d pgs\n", num - todo);