Source-Changes-HG archive
[src/trunk]: src/sys/uvm Consistently use UVMHIST_FUNC(__func__)
details: https://anonhg.NetBSD.org/src/rev/2694b2a13d39
branches: trunk
changeset: 1011751:2694b2a13d39
user: skrll <skrll%NetBSD.org@localhost>
date: Thu Jul 09 05:57:15 2020 +0000
description:
Consistently use UVMHIST_FUNC(__func__)
Convert UVMHIST_{CALLED,LOG} into UVMHIST_CALLARGS
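The change is mechanical and follows the same pattern throughout: the
hard-coded function-name string passed to UVMHIST_FUNC() becomes __func__,
and where a function logged its arguments immediately after
UVMHIST_CALLED(), the UVMHIST_CALLED()/UVMHIST_LOG() pair is folded into a
single UVMHIST_CALLARGS() carrying the same history, format string and
arguments. A minimal before/after sketch, mirroring the amap_wipeout()
hunk below:
	/* Before: separate "called" record plus argument log. */
	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist, "(amap=%#jx)", (uintptr_t)amap, 0, 0, 0);
	/* After: __func__ supplies the name; one CALLARGS records both. */
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(maphist, "(amap=%#jx)", (uintptr_t)amap, 0, 0, 0);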
diffstat:
sys/uvm/uvm_amap.c | 35 +++++++--------
sys/uvm/uvm_anon.c | 10 ++--
sys/uvm/uvm_aobj.c | 16 +++---
sys/uvm/uvm_bio.c | 17 +++----
sys/uvm/uvm_device.c | 15 +++---
sys/uvm/uvm_fault.c | 45 ++++++++++----------
sys/uvm/uvm_km.c | 8 +-
sys/uvm/uvm_map.c | 109 ++++++++++++++++++++++---------------------------
sys/uvm/uvm_page.c | 8 +-
sys/uvm/uvm_pager.c | 20 ++++-----
sys/uvm/uvm_pdaemon.c | 16 +++---
sys/uvm/uvm_swap.c | 49 ++++++++++-----------
sys/uvm/uvm_vnode.c | 15 +++---
13 files changed, 170 insertions(+), 193 deletions(-)
diffs (truncated from 1243 to 300 lines):
diff -r 6964adfef330 -r 2694b2a13d39 sys/uvm/uvm_amap.c
--- a/sys/uvm/uvm_amap.c Thu Jul 09 05:12:09 2020 +0000
+++ b/sys/uvm/uvm_amap.c Thu Jul 09 05:57:15 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_amap.c,v 1.121 2020/07/08 13:26:22 skrll Exp $ */
+/* $NetBSD: uvm_amap.c,v 1.122 2020/07/09 05:57:15 skrll Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.121 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.122 2020/07/09 05:57:15 skrll Exp $");
#include "opt_uvmhist.h"
@@ -235,7 +235,7 @@
{
struct vm_amap *amap;
int slots, padslots;
- UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
AMAP_B2SLOT(slots, sz);
AMAP_B2SLOT(padslots, padsz);
@@ -321,7 +321,7 @@
{
int slots;
- UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
@@ -363,9 +363,8 @@
const km_flag_t kmflags =
(flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
- UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
-
- UVMHIST_LOG(maphist, " (entry=%#jx, addsize=%#jx, flags=%#jx)",
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist, " (entry=%#jx, addsize=%#jx, flags=%#jx)",
(uintptr_t)entry, addsize, flags, 0);
/*
@@ -725,8 +724,8 @@
{
u_int lcv;
- UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist,"(amap=%#jx)", (uintptr_t)amap, 0,0,0);
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist,"(amap=%#jx)", (uintptr_t)amap, 0,0,0);
KASSERT(rw_write_held(amap->am_lock));
KASSERT(amap->am_ref == 0);
@@ -797,9 +796,9 @@
krwlock_t *oldlock;
vsize_t len;
- UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist, " (map=%#j, entry=%#j, flags=%jd)",
- (uintptr_t)map, (uintptr_t)entry, flags, 0);
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist, " (map=%#j, entry=%#j, flags=%jd)",
(uintptr_t)map, (uintptr_t)entry, flags, 0);
KASSERT(map != kernel_map); /* we use nointr pool */
@@ -1416,7 +1415,7 @@
struct vm_anon *an;
u_int slot;
- UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(rw_lock_held(amap->am_lock));
AMAP_B2SLOT(slot, offset);
@@ -1445,7 +1444,7 @@
struct vm_amap *amap = aref->ar_amap;
u_int slot;
- UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(rw_lock_held(amap->am_lock));
AMAP_B2SLOT(slot, offset);
@@ -1483,7 +1482,7 @@
struct vm_amap *amap = aref->ar_amap;
u_int slot;
- UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(rw_write_held(amap->am_lock));
KASSERT(anon->an_lock == amap->am_lock);
@@ -1525,7 +1524,7 @@
struct vm_amap *amap = aref->ar_amap;
u_int slot, ptr, last;
- UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(rw_write_held(amap->am_lock));
AMAP_B2SLOT(slot, offset);
@@ -1593,7 +1592,7 @@
void
amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
{
- UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
amap_lock(amap, RW_WRITER);
if (flags & AMAP_SHARED) {
@@ -1614,7 +1613,7 @@
void
amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, bool all)
{
- UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
amap_lock(amap, RW_WRITER);
diff -r 6964adfef330 -r 2694b2a13d39 sys/uvm/uvm_anon.c
--- a/sys/uvm/uvm_anon.c Thu Jul 09 05:12:09 2020 +0000
+++ b/sys/uvm/uvm_anon.c Thu Jul 09 05:57:15 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_anon.c,v 1.78 2020/07/08 13:26:22 skrll Exp $ */
+/* $NetBSD: uvm_anon.c,v 1.79 2020/07/09 05:57:15 skrll Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -30,7 +30,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.78 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.79 2020/07/09 05:57:15 skrll Exp $");
#include "opt_uvmhist.h"
@@ -106,8 +106,8 @@
{
struct vm_page *pg = anon->an_page, *pg2 __diagused;
- UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
- UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
KASSERT(anon->an_ref == 0);
@@ -336,7 +336,7 @@
void
uvm_anon_dropswap(struct vm_anon *anon)
{
- UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
if (anon->an_swslot == 0)
return;
diff -r 6964adfef330 -r 2694b2a13d39 sys/uvm/uvm_aobj.c
--- a/sys/uvm/uvm_aobj.c Thu Jul 09 05:12:09 2020 +0000
+++ b/sys/uvm/uvm_aobj.c Thu Jul 09 05:57:15 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_aobj.c,v 1.148 2020/07/08 13:26:22 skrll Exp $ */
+/* $NetBSD: uvm_aobj.c,v 1.149 2020/07/09 05:57:15 skrll Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.148 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.149 2020/07/09 05:57:15 skrll Exp $");
#ifdef _KERNEL_OPT
#include "opt_uvmhist.h"
@@ -290,8 +290,8 @@
struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
struct uao_swhash_elt *elt;
int oldslot;
- UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
- UVMHIST_LOG(pdhist, "aobj %#jx pageidx %jd slot %jd",
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
(uintptr_t)aobj, pageidx, slot, 0);
KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
@@ -583,7 +583,7 @@
struct uvm_page_array a;
struct vm_page *pg;
- UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
/*
* Detaching from kernel object is a NOP.
@@ -666,7 +666,7 @@
struct uvm_page_array a;
struct vm_page *pg;
voff_t curoff;
- UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
KASSERT(UVM_OBJ_IS_AOBJ(uobj));
KASSERT(rw_write_held(uobj->vmobjlock));
@@ -801,11 +801,11 @@
voff_t current_offset;
struct vm_page *ptmp;
int lcv, gotpages, maxpages, swslot, pageidx;
- UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
bool overwrite = ((flags & PGO_OVERWRITE) != 0);
struct uvm_page_array a;
- UVMHIST_LOG(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
(uintptr_t)uobj, offset, flags,0);
/*
diff -r 6964adfef330 -r 2694b2a13d39 sys/uvm/uvm_bio.c
--- a/sys/uvm/uvm_bio.c Thu Jul 09 05:12:09 2020 +0000
+++ b/sys/uvm/uvm_bio.c Thu Jul 09 05:57:15 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_bio.c,v 1.119 2020/07/08 13:26:22 skrll Exp $ */
+/* $NetBSD: uvm_bio.c,v 1.120 2020/07/09 05:57:15 skrll Exp $ */
/*
* Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.119 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.120 2020/07/09 05:57:15 skrll Exp $");
#include "opt_uvmhist.h"
#include "opt_ubc.h"
@@ -311,7 +311,7 @@
int i, error, npages;
vm_prot_t prot;
- UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
/*
* no need to try with PGO_LOCKED...
@@ -482,9 +482,8 @@
struct ubc_map *umap;
voff_t umap_offset;
int error;
- UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);
-
- UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
(uintptr_t)uobj, offset, *lenp, 0);
KASSERT(*lenp > 0);
@@ -640,9 +639,9 @@
struct uvm_object *uobj;
vaddr_t umapva;
bool unmapped;
- UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);
+ UVMHIST_FUNC(__func__);
+ UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
- UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
umapva = UBC_UMAP_ADDR(umap);
uobj = umap->uobj;
@@ -844,7 +843,7 @@
int error;
int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
int access_type = VM_PROT_READ;
- UVMHIST_FUNC("ubc_alloc_direct"); UVMHIST_CALLED(ubchist);
+ UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
if (flags & UBC_WRITE) {
if (flags & UBC_FAULTBUSY)
diff -r 6964adfef330 -r 2694b2a13d39 sys/uvm/uvm_device.c
--- a/sys/uvm/uvm_device.c Thu Jul 09 05:12:09 2020 +0000
+++ b/sys/uvm/uvm_device.c Thu Jul 09 05:57:15 2020 +0000
@@ -1,4 +1,4 @@
-/* $NetBSD: uvm_device.c,v 1.70 2020/02/24 12:38:57 rin Exp $ */
+/* $NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $ */
/*
* Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
*/
#include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.70 2020/02/24 12:38:57 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $");