Source-Changes-HG archive
[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index][Old Index]
[src/trunk]: src/external/cddl/osnet/dist/tools/ctf/cvt ctfmerge: error check...
details: https://anonhg.NetBSD.org/src/rev/cdfa97884d31
branches: trunk
changeset: 375970:cdfa97884d31
user: lukem <lukem%NetBSD.org@localhost>
date: Tue May 23 18:54:58 2023 +0000
description:
ctfmerge: error check sem_*() and pthread_*() APIs
Call terminate() if sem_*() returns -1 or if pthread_*() returns a non-zero value.
(Set errno from the pthread_*() return value so that terminate() prints the strerror() message).
Note: Failing on errors instead of ignoring them helps identify
reasons for intermittent failures, such as those on macOS host builds:
ERROR: nbctfmerge: barrier_init: sem_init(bar_sem): Function not implemented
diffstat:
external/cddl/osnet/dist/tools/ctf/cvt/barrier.c | 34 ++-
external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c | 189 +++++++++++++++------
external/cddl/osnet/dist/tools/ctf/cvt/tdata.c | 7 +-
3 files changed, 164 insertions(+), 66 deletions(-)
diffs (truncated from 458 to 300 lines):
diff -r 90292ddf1a03 -r cdfa97884d31 external/cddl/osnet/dist/tools/ctf/cvt/barrier.c
--- a/external/cddl/osnet/dist/tools/ctf/cvt/barrier.c Tue May 23 18:16:28 2023 +0000
+++ b/external/cddl/osnet/dist/tools/ctf/cvt/barrier.c Tue May 23 18:54:58 2023 +0000
@@ -42,6 +42,7 @@
* get a return code of 0.
*/
+#include <errno.h>
#include <pthread.h>
#ifdef illumos
#include <synch.h>
@@ -49,15 +50,19 @@
#include <stdio.h>
#include "barrier.h"
+#include "ctftools.h"
void
barrier_init(barrier_t *bar, int nthreads)
{
- pthread_mutex_init(&bar->bar_lock, NULL);
+ if ((errno = pthread_mutex_init(&bar->bar_lock, NULL)) != 0)
+ terminate("%s: pthread_mutex_init(bar_lock)", __func__);
#ifdef illumos
- sema_init(&bar->bar_sem, 0, USYNC_THREAD, NULL);
+ if ((errno = sema_init(&bar->bar_sem, 0, USYNC_THREAD, NULL)) != 0)
+ terminate("%s: sema_init(bar_sem)", __func__);
#else
- sem_init(&bar->bar_sem, 0, 0);
+ if (sem_init(&bar->bar_sem, 0, 0) == -1)
+ terminate("%s: sem_init(bar_sem)", __func__);
#endif
bar->bar_numin = 0;
@@ -67,14 +72,19 @@ barrier_init(barrier_t *bar, int nthread
int
barrier_wait(barrier_t *bar)
{
- pthread_mutex_lock(&bar->bar_lock);
+ if ((errno = pthread_mutex_lock(&bar->bar_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(bar_lock)", __func__);
if (++bar->bar_numin < bar->bar_nthr) {
- pthread_mutex_unlock(&bar->bar_lock);
+ if ((errno = pthread_mutex_unlock(&bar->bar_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(bar_lock)",
+ __func__);
#ifdef illumos
- sema_wait(&bar->bar_sem);
+ if ((errno = sema_wait(&bar->bar_sem)) != 0)
+ terminate("%s: sema_wait(bar_sem)", __func__);
#else
- sem_wait(&bar->bar_sem);
+ if (sem_wait(&bar->bar_sem) == -1)
+ terminate("%s: sem_wait(bar_sem)", __func__);
#endif
return (0);
@@ -86,11 +96,15 @@ barrier_wait(barrier_t *bar)
bar->bar_numin = 0;
for (i = 1; i < bar->bar_nthr; i++)
#ifdef illumos
- sema_post(&bar->bar_sem);
+ if ((errno = sema_post(&bar->bar_sem)) != 0)
+ terminate("%s: sema_post(bar_sem)", __func__);
#else
- sem_post(&bar->bar_sem);
+ if (sem_post(&bar->bar_sem) == -1)
+ terminate("%s: sem_post(bar_sem)", __func__);
#endif
- pthread_mutex_unlock(&bar->bar_lock);
+ if ((errno = pthread_mutex_unlock(&bar->bar_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(bar_lock)",
+ __func__);
return (1);
}
diff -r 90292ddf1a03 -r cdfa97884d31 external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c
--- a/external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c Tue May 23 18:16:28 2023 +0000
+++ b/external/cddl/osnet/dist/tools/ctf/cvt/ctfmerge.c Tue May 23 18:54:58 2023 +0000
@@ -373,22 +373,31 @@ init_phase_two(workqueue_t *wq)
static void
wip_save_work(workqueue_t *wq, wip_t *slot, int slotnum)
{
- pthread_mutex_lock(&wq->wq_donequeue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_donequeue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_donequeue_lock)",
+ __func__);
- while (wq->wq_lastdonebatch + 1 < slot->wip_batchid)
- pthread_cond_wait(&slot->wip_cv, &wq->wq_donequeue_lock);
+ while (wq->wq_lastdonebatch + 1 < slot->wip_batchid) {
+ if ((errno = pthread_cond_wait(&slot->wip_cv, &wq->wq_donequeue_lock)) != 0)
+ terminate("%s: pthread_cond_wait(wip_cv,wq_donequeue_lock)",
+ __func__);
+ }
assert(wq->wq_lastdonebatch + 1 == slot->wip_batchid);
fifo_add(wq->wq_donequeue, slot->wip_td);
wq->wq_lastdonebatch++;
- pthread_cond_signal(&wq->wq_wip[(slotnum + 1) %
- wq->wq_nwipslots].wip_cv);
+ const int nextslot = (slotnum + 1) % wq->wq_nwipslots;
+ if ((errno = pthread_cond_signal(&wq->wq_wip[nextslot].wip_cv)) != 0)
+ terminate("%s: pthread_cond_signal(wq_wip[%d].wip_cv)",
+ __func__, nextslot);
/* reset the slot for next use */
slot->wip_td = NULL;
slot->wip_batchid = wq->wq_next_batchid++;
- pthread_mutex_unlock(&wq->wq_donequeue_lock);
+ if ((errno = pthread_mutex_unlock(&wq->wq_donequeue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_donequeue_lock)",
+ __func__);
}
static void
@@ -417,25 +426,35 @@ worker_runphase1(workqueue_t *wq)
int wipslotnum, pownum;
for (;;) {
- pthread_mutex_lock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+ __func__);
while (fifo_empty(wq->wq_queue)) {
if (wq->wq_nomorefiles == 1) {
- pthread_cond_broadcast(&wq->wq_work_avail);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_cond_broadcast(&wq->wq_work_avail)) != 0)
+ terminate("%s: pthread_cond_broadcast(wq_work_avail)",
+ __func__);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
/* on to phase 2 ... */
return;
}
- pthread_cond_wait(&wq->wq_work_avail,
- &wq->wq_queue_lock);
+ if ((errno = pthread_cond_wait(&wq->wq_work_avail,
+ &wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_cond_wait(wq_work_avail,wq_queue_lock)",
+ __func__);
}
/* there's work to be done! */
pow = fifo_remove(wq->wq_queue);
pownum = wq->wq_nextpownum++;
- pthread_cond_broadcast(&wq->wq_work_removed);
+ if ((errno = pthread_cond_broadcast(&wq->wq_work_removed)) != 0)
+ terminate("%s: pthread_cond_broadcast(wq_work_removed)",
+ __func__);
assert(pow != NULL);
@@ -443,16 +462,21 @@ worker_runphase1(workqueue_t *wq)
wipslotnum = pownum % wq->wq_nwipslots;
wipslot = &wq->wq_wip[wipslotnum];
- pthread_mutex_lock(&wipslot->wip_lock);
+ if ((errno = pthread_mutex_lock(&wipslot->wip_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wip_lock)", __func__);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
wip_add_work(wipslot, pow);
if (wipslot->wip_nmerged == wq->wq_maxbatchsz)
wip_save_work(wq, wipslot, wipslotnum);
- pthread_mutex_unlock(&wipslot->wip_lock);
+ if ((errno = pthread_mutex_unlock(&wipslot->wip_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wip_lock)",
+ __func__);
}
}
@@ -463,28 +487,44 @@ worker_runphase2(workqueue_t *wq)
int batchid;
for (;;) {
- pthread_mutex_lock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+ __func__);
if (wq->wq_ninqueue == 1) {
- pthread_cond_broadcast(&wq->wq_work_avail);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_cond_broadcast(&wq->wq_work_avail)) != 0)
+ terminate("%s: pthread_cond_broadcast(wq_work_avail)",
+ __func__);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
debug(2, "0x%jx: entering p2 completion barrier\n",
(uintmax_t)(uintptr_t)pthread_self());
if (barrier_wait(&wq->wq_bar1)) {
- pthread_mutex_lock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+ __func__);
wq->wq_alldone = 1;
- pthread_cond_signal(&wq->wq_alldone_cv);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_cond_signal(&wq->wq_alldone_cv)) != 0)
+ terminate("%s: pthread_cond_signal(wq_alldone_cv)",
+ __func__);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
}
return;
}
if (fifo_len(wq->wq_queue) < 2) {
- pthread_cond_wait(&wq->wq_work_avail,
- &wq->wq_queue_lock);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_cond_wait(&wq->wq_work_avail,
+ &wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_cond_wait(wq_work_avail,wq_queue_lock)",
+ __func__);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
continue;
}
@@ -495,7 +535,9 @@ worker_runphase2(workqueue_t *wq)
batchid = wq->wq_next_batchid++;
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
debug(2, "0x%jx: merging %p into %p\n",
(uintmax_t)(uintptr_t)pthread_self(),
@@ -507,10 +549,14 @@ worker_runphase2(workqueue_t *wq)
* merging is complete. place at the tail of the queue in
* proper order.
*/
- pthread_mutex_lock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_queue_lock)",
+ __func__);
while (wq->wq_lastdonebatch + 1 != batchid) {
- pthread_cond_wait(&wq->wq_done_cv,
- &wq->wq_queue_lock);
+ if ((errno = pthread_cond_wait(&wq->wq_done_cv,
+ &wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_cond_wait(wq_done_cv,wq_queue_lock)",
+ __func__);
}
wq->wq_lastdonebatch = batchid;
@@ -519,9 +565,15 @@ worker_runphase2(workqueue_t *wq)
debug(2, "0x%jx: added %p to queue, len now %d, ninqueue %d\n",
(uintmax_t)(uintptr_t)pthread_self(), (void *)pow2,
fifo_len(wq->wq_queue), wq->wq_ninqueue);
- pthread_cond_broadcast(&wq->wq_done_cv);
- pthread_cond_signal(&wq->wq_work_avail);
- pthread_mutex_unlock(&wq->wq_queue_lock);
+ if ((errno = pthread_cond_broadcast(&wq->wq_done_cv)) != 0)
+ terminate("%s: pthread_cond_broadcast(wq_done_cv)",
+ __func__);
+ if ((errno = pthread_cond_signal(&wq->wq_work_avail)) != 0)
+ terminate("%s: pthread_cond_signal(wq_work_avail)",
+ __func__);
+ if ((errno = pthread_mutex_unlock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_unlock(wq_queue_lock)",
+ __func__);
}
}
@@ -574,18 +626,23 @@ merge_ctf_cb(tdata_t *td, char *name, vo
debug(3, "Adding tdata %p for processing\n", (void *)td);
- pthread_mutex_lock(&wq->wq_queue_lock);
+ if ((errno = pthread_mutex_lock(&wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_mutex_lock(wq_queue_lock)", __func__);
while (fifo_len(wq->wq_queue) > wq->wq_ithrottle) {
debug(2, "Throttling input (len = %d, throttle = %d)\n",
fifo_len(wq->wq_queue), wq->wq_ithrottle);
- pthread_cond_wait(&wq->wq_work_removed, &wq->wq_queue_lock);
+ if ((errno = pthread_cond_wait(&wq->wq_work_removed, &wq->wq_queue_lock)) != 0)
+ terminate("%s: pthread_cond_wait(wq_work_removed,wq_queue_lock)",
Home |
Main Index |
Thread Index |
Old Index