Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/external/bsd/jemalloc Remove __clang__ hacks for noreturn, ...
details: https://anonhg.NetBSD.org/src/rev/9855f237c643
branches: trunk
changeset: 997586:9855f237c643
user: christos <christos%NetBSD.org@localhost>
date: Thu Mar 14 18:56:12 2019 +0000
description:
Remove __clang__ hacks for noreturn, and reduce the code footprint when we
don't compile with JEMALLOC_PROF. More to do here.
diffstat:
external/bsd/jemalloc/dist/src/prof.c | 125 ++++++---
external/bsd/jemalloc/include/jemalloc/internal/jemalloc_preamble.h | 2 +
2 files changed, 87 insertions(+), 40 deletions(-)
diffs (truncated from 514 to 300 lines):
diff -r f0bb9d265540 -r 9855f237c643 external/bsd/jemalloc/dist/src/prof.c
--- a/external/bsd/jemalloc/dist/src/prof.c Thu Mar 14 16:59:09 2019 +0000
+++ b/external/bsd/jemalloc/dist/src/prof.c Thu Mar 14 18:56:12 2019 +0000
@@ -78,7 +78,9 @@
* creating/destroying mutexes.
*/
static malloc_mutex_t *gctx_locks;
+#ifdef JEMALLOC_PROF
static atomic_u_t cum_gctxs; /* Atomic counter. */
+#endif
/*
* Table of mutexes that are shared among tdata's. No operations require
@@ -103,14 +105,18 @@
static prof_tdata_tree_t tdatas;
static malloc_mutex_t tdatas_mtx;
+#ifdef JEMALLOC_PROF
static uint64_t next_thr_uid;
+#endif
static malloc_mutex_t next_thr_uid_mtx;
static malloc_mutex_t prof_dump_seq_mtx;
+#ifdef JEMALLOC_PROF
static uint64_t prof_dump_seq;
static uint64_t prof_dump_iseq;
static uint64_t prof_dump_mseq;
static uint64_t prof_dump_useq;
+#endif
/*
* This buffer is rather large for stack allocation, so use a single buffer for
@@ -128,8 +134,10 @@
static size_t prof_dump_buf_end;
static int prof_dump_fd;
+#ifdef JEMALLOC_PROF
/* Do not dump any profiles until bootstrapping is complete. */
static bool prof_booted = false;
+#endif
/******************************************************************************/
/*
@@ -143,7 +151,9 @@
bool even_if_attached);
static void prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
bool even_if_attached);
+#ifdef JEMALLOC_PROF
static char *prof_thread_name_alloc(tsdn_t *tsdn, const char *thread_name);
+#endif
/******************************************************************************/
/* Red-black trees. */
@@ -207,7 +217,7 @@
/******************************************************************************/
-JEMALLOC_NORETURN void
+JEMALLOC_PROF_NORETURN void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated) {
prof_tdata_t *tdata;
@@ -237,7 +247,7 @@
}
}
-JEMALLOC_NORETURN void
+JEMALLOC_PROF_NORETURN void
prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx) {
prof_tctx_set(tsdn, ptr, usize, NULL, tctx);
@@ -268,7 +278,7 @@
}
}
-JEMALLOC_NORETURN void
+JEMALLOC_PROF_NORETURN void
bt_init(prof_bt_t *bt, void **vec) {
cassert(config_prof);
@@ -276,7 +286,7 @@
bt->len = 0;
}
-static JEMALLOC_NORETURN void
+static JEMALLOC_PROF_NORETURN void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -289,7 +299,7 @@
malloc_mutex_lock(tsd_tsdn(tsd), &bt2gctx_mtx);
}
-static JEMALLOC_NORETURN void
+static JEMALLOC_PROF_NORETURN void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata) {
cassert(config_prof);
assert(tdata == prof_tdata_get(tsd, false));
@@ -537,6 +547,7 @@
}
#endif
+#ifdef JEMALLOC_PROF
static malloc_mutex_t *
prof_gctx_mutex_choose(void) {
unsigned ngctxs = atomic_fetch_add_u(&cum_gctxs, 1, ATOMIC_RELAXED);
@@ -574,8 +585,9 @@
gctx->bt.len = bt->len;
return gctx;
}
+#endif
-static JEMALLOC_NORETURN void
+static JEMALLOC_PROF_NORETURN void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
prof_tdata_t *tdata) {
cassert(config_prof);
@@ -716,6 +728,7 @@
}
}
+#ifdef JEMALLOC_PROF
static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx) {
@@ -780,9 +793,12 @@
*p_new_gctx = new_gctx;
return false;
}
+#endif
-prof_tctx_t *
+JEMALLOC_PROF_NORETURN prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt) {
+ cassert(config_prof);
+#ifdef JEMALLOC_PROF
union {
prof_tctx_t *p;
void *v;
@@ -790,8 +806,6 @@
prof_tdata_t *tdata;
bool not_found;
- cassert(config_prof);
-
tdata = prof_tdata_get(tsd, false);
if (tdata == NULL) {
return NULL;
@@ -853,6 +867,7 @@
}
return ret.p;
+#endif
}
/*
@@ -994,6 +1009,7 @@
return ret;
}
+#ifdef JEMALLOC_PROF
static bool
prof_dump_close(bool propagate_err) {
bool ret;
@@ -1005,6 +1021,7 @@
return ret;
}
+#endif
static bool
prof_dump_write(bool propagate_err, const char *s) {
@@ -1052,6 +1069,7 @@
return ret;
}
+#ifdef JEMALLOC_PROF
static void
prof_tctx_merge_tdata(tsdn_t *tsdn, prof_tctx_t *tctx, prof_tdata_t *tdata) {
malloc_mutex_assert_owner(tsdn, tctx->tdata->lock);
@@ -1175,9 +1193,8 @@
return ret;
}
-static JEMALLOC_NORETURN void
+static void
prof_dump_gctx_prep(tsdn_t *tsdn, prof_gctx_t *gctx, prof_gctx_tree_t *gctxs) {
- cassert(config_prof);
malloc_mutex_lock(tsdn, gctx->lock);
@@ -1300,6 +1317,7 @@
return NULL;
}
+#endif
static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatasunused, prof_tdata_t *tdata,
@@ -1343,6 +1361,7 @@
}
prof_dump_header_t *JET_MUTABLE prof_dump_header = prof_dump_header_impl;
+#ifdef JEMALLOC_PROF
static bool
prof_dump_gctx(tsdn_t *tsdn, bool propagate_err, prof_gctx_t *gctx,
const prof_bt_t *bt, prof_gctx_tree_t *gctxs) {
@@ -1495,7 +1514,6 @@
static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
const char *filename) {
-#ifdef JEMALLOC_PROF
/*
* Scaling is equivalent AdjustSamples() in jeprof, but the result may
* differ slightly from what jeprof reports, because here we scale the
@@ -1520,7 +1538,6 @@
"<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
filename);
}
-#endif
}
struct prof_gctx_dump_iter_arg_s {
@@ -1548,7 +1565,7 @@
return ret;
}
-static JEMALLOC_NORETURN void
+static void
prof_dump_prep(tsd_t *tsd, prof_tdata_t *tdata,
struct prof_tdata_merge_iter_arg_s *prof_tdata_merge_iter_arg,
struct prof_gctx_merge_iter_arg_s *prof_gctx_merge_iter_arg,
@@ -1669,6 +1686,7 @@
}
return false;
}
+#endif
#ifdef JEMALLOC_JET
void
@@ -1717,9 +1735,10 @@
}
#endif
+#ifdef JEMALLOC_PROF
#define DUMP_FILENAME_BUFSIZE (PATH_MAX + 1)
#define VSEQ_INVALID UINT64_C(0xffffffffffffffff)
-static JEMALLOC_NORETURN void
+static void
prof_dump_filename(char *filename, char v, uint64_t vseq) {
cassert(config_prof);
@@ -1739,11 +1758,10 @@
static void
prof_fdump(void) {
+ cassert(config_prof);
tsd_t *tsd;
char filename[DUMP_FILENAME_BUFSIZE];
-#ifndef __clang__
- cassert(config_prof);
-#endif
+
assert(opt_prof_final);
assert(opt_prof_prefix[0] != '\0');
@@ -1758,11 +1776,12 @@
malloc_mutex_unlock(tsd_tsdn(tsd), &prof_dump_seq_mtx);
prof_dump(tsd, false, filename, opt_prof_leak);
}
+#endif
-bool
+JEMALLOC_PROF_NORETURN bool
prof_accum_init(tsdn_t *tsdn, prof_accum_t *prof_accum) {
cassert(config_prof);
-
+#ifdef JEMALLOC_PROF
#ifndef JEMALLOC_ATOMIC_U64
if (malloc_mutex_init(&prof_accum->mtx, "prof_accum",
WITNESS_RANK_PROF_ACCUM, malloc_mutex_rank_exclusive)) {
@@ -1773,15 +1792,15 @@
atomic_store_u64(&prof_accum->accumbytes, 0, ATOMIC_RELAXED);
#endif
return false;
+#endif
}
-void
+JEMALLOC_PROF_NORETURN void
prof_idump(tsdn_t *tsdn) {
+ cassert(config_prof);
+#ifdef JEMALLOC_PROF
tsd_t *tsd;
prof_tdata_t *tdata;
-#ifndef __clang__
- cassert(config_prof);
-#endif
if (!prof_booted || tsdn_null(tsdn) || !prof_active_get_unlocked()) {
Home |
Main Index |
Thread Index |
Old Index