Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/sys/arch/arm/arm32 Whitespace
details: https://anonhg.NetBSD.org/src/rev/29c5e3723732
branches: trunk
changeset: 1011536:29c5e3723732
user: skrll <skrll%NetBSD.org@localhost>
date: Fri Jul 03 06:22:48 2020 +0000
description:
Whitespace
diffstat:
sys/arch/arm/arm32/arm32_kvminit.c | 41 +++++++++++++++++++------------------
1 files changed, 21 insertions(+), 20 deletions(-)
diffs (154 lines):
diff -r 9deb0a8c6a8c -r 29c5e3723732 sys/arch/arm/arm32/arm32_kvminit.c
--- a/sys/arch/arm/arm32/arm32_kvminit.c Fri Jul 03 06:15:27 2020 +0000
+++ b/sys/arch/arm/arm32/arm32_kvminit.c Fri Jul 03 06:22:48 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: arm32_kvminit.c,v 1.60 2020/06/26 08:42:27 skrll Exp $ */
+/* $NetBSD: arm32_kvminit.c,v 1.61 2020/07/03 06:22:48 skrll Exp $ */
/*
* Copyright (c) 2002, 2003, 2005 Genetec Corporation. All rights reserved.
@@ -127,7 +127,7 @@
#include "opt_multiprocessor.h"
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.60 2020/06/26 08:42:27 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: arm32_kvminit.c,v 1.61 2020/07/03 06:22:48 skrll Exp $");
#include <sys/param.h>
@@ -348,7 +348,7 @@
static void
valloc_pages(struct bootmem_info *bmi, pv_addr_t *pv, size_t npages,
- int prot, int cache, bool zero_p)
+ int prot, int cache, bool zero_p)
{
size_t nbytes = npages * PAGE_SIZE;
pv_addr_t *free_pv = bmi->bmi_freeblocks;
@@ -368,7 +368,7 @@
VPRINTF(" l1pt");
valloc_pages(bmi, &kernel_l1pt, L1_TABLE_SIZE / PAGE_SIZE,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
add_pages(bmi, &kernel_l1pt);
}
@@ -424,7 +424,7 @@
void
arm32_kernel_vm_init(vaddr_t kernel_vm_base, vaddr_t vectors, vaddr_t iovbase,
- const struct pmap_devmap *devmap, bool mapallmem_p)
+ const struct pmap_devmap *devmap, bool mapallmem_p)
{
struct bootmem_info * const bmi = &bootmem_info;
#ifdef MULTIPROCESSOR
@@ -536,7 +536,7 @@
*/
VPRINTF(" vector");
valloc_pages(bmi, &bmi->bmi_vector_l2pt, 1,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
add_pages(bmi, &bmi->bmi_vector_l2pt);
}
@@ -546,7 +546,7 @@
VPRINTF(" kernel");
for (size_t idx = 0; idx < KERNEL_L2PT_KERNEL_NUM; ++idx) {
valloc_pages(bmi, &kernel_l2pt[idx], 1,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
add_pages(bmi, &kernel_l2pt[idx]);
}
@@ -556,7 +556,7 @@
VPRINTF(" vm");
for (size_t idx = 0; idx < KERNEL_L2PT_VMDATA_NUM; ++idx) {
valloc_pages(bmi, &vmdata_l2pt[idx], 1,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
add_pages(bmi, &vmdata_l2pt[idx]);
}
@@ -566,7 +566,7 @@
if (iovbase) {
VPRINTF(" io");
valloc_pages(bmi, &bmi->bmi_io_l2pt, 1,
- VM_PROT_READ|VM_PROT_WRITE, PTE_PAGETABLE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_PAGETABLE, true);
add_pages(bmi, &bmi->bmi_io_l2pt);
}
@@ -574,28 +574,28 @@
/* Allocate stacks for all modes and CPUs */
valloc_pages(bmi, &abtstack, ABT_STACK_SIZE * cpu_num,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &abtstack);
valloc_pages(bmi, &fiqstack, FIQ_STACK_SIZE * cpu_num,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &fiqstack);
valloc_pages(bmi, &irqstack, IRQ_STACK_SIZE * cpu_num,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &irqstack);
valloc_pages(bmi, &undstack, UND_STACK_SIZE * cpu_num,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &undstack);
valloc_pages(bmi, &idlestack, UPAGES * cpu_num, /* SVC32 */
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &idlestack);
valloc_pages(bmi, &kernelstack, UPAGES, /* SVC32 */
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, true);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, true);
add_pages(bmi, &kernelstack);
/* Allocate the message buffer from the end of memory. */
const size_t msgbuf_pgs = round_page(MSGBUFSIZE) / PAGE_SIZE;
valloc_pages(bmi, &msgbuf, msgbuf_pgs,
- VM_PROT_READ|VM_PROT_WRITE, PTE_CACHE, false);
+ VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE, false);
add_pages(bmi, &msgbuf);
msgbufphys = msgbuf.pv_pa;
msgbufaddr = (void *)msgbuf.pv_va;
@@ -608,7 +608,8 @@
*/
VPRINTF(" vector");
- valloc_pages(bmi, &systempage, 1, VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE,
+ valloc_pages(bmi, &systempage, 1,
+ VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE,
PTE_CACHE, true);
}
systempage.pv_va = vectors;
@@ -622,7 +623,7 @@
if (xscale_use_minidata)
#endif
valloc_pages(bmi, &minidataclean, 1,
- VM_PROT_READ|VM_PROT_WRITE, 0, true);
+ VM_PROT_READ | VM_PROT_WRITE, 0, true);
#endif
/*
@@ -764,7 +765,7 @@
* See if we can extend the current pv to encompass the
* hole, and if so do it and retry the concatenation.
*/
- if (cur_pv.pv_prot == (VM_PROT_READ|VM_PROT_WRITE)
+ if (cur_pv.pv_prot == (VM_PROT_READ | VM_PROT_WRITE)
&& cur_pv.pv_cache == PTE_CACHE) {
cur_pv.pv_size = pv->pv_pa - cur_pv.pv_va;
continue;
@@ -856,7 +857,7 @@
if (map_vectors_p) {
/* Map the vector page. */
pmap_map_entry(l1pt_va, systempage.pv_va, systempage.pv_pa,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE, PTE_CACHE);
+ VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, PTE_CACHE);
}
/* Map the Mini-Data cache clean area. */
Home |
Main Index |
Thread Index |
Old Index