Source-Changes-HG archive
[pkgsrc/trunk]: pkgsrc/devel/cmake Fix a number of long-standing race conditi...
details: https://anonhg.NetBSD.org/pkgsrc/rev/d2692dfd66e2
branches: trunk
changeset: 432152:d2692dfd66e2
user: joerg <joerg@pkgsrc.org>
date: Tue May 19 13:52:10 2020 +0000
description:
Fix a number of long-standing race conditions in cmake's autogen
handling. Bump revision.
diffstat:
devel/cmake/Makefile | 4 +-
devel/cmake/distinfo | 3 +-
devel/cmake/patches/patch-Source_cmWorkerPool.cxx | 138 ++++++++++++++++++++++
3 files changed, 142 insertions(+), 3 deletions(-)
diffs (171 lines):
diff -r 799811dee162 -r d2692dfd66e2 devel/cmake/Makefile
--- a/devel/cmake/Makefile Tue May 19 13:35:06 2020 +0000
+++ b/devel/cmake/Makefile Tue May 19 13:52:10 2020 +0000
@@ -1,6 +1,6 @@
-# $NetBSD: Makefile,v 1.174 2020/05/06 14:04:25 adam Exp $
+# $NetBSD: Makefile,v 1.175 2020/05/19 13:52:10 joerg Exp $
-PKGREVISION= 1
+PKGREVISION= 2
.include "Makefile.common"
COMMENT= Cross platform make
diff -r 799811dee162 -r d2692dfd66e2 devel/cmake/distinfo
--- a/devel/cmake/distinfo Tue May 19 13:35:06 2020 +0000
+++ b/devel/cmake/distinfo Tue May 19 13:52:10 2020 +0000
@@ -1,4 +1,4 @@
-$NetBSD: distinfo,v 1.157 2020/04/29 06:47:39 adam Exp $
+$NetBSD: distinfo,v 1.158 2020/05/19 13:52:10 joerg Exp $
SHA1 (cmake-3.17.2.tar.gz) = 4b3ea5249e0d544c5d09a08b0e81f66a4963e5d7
RMD160 (cmake-3.17.2.tar.gz) = a6d7f219aa85afff7bd6ca0000c0336fdfb17904
@@ -19,6 +19,7 @@
SHA1 (patch-Source_CursesDialog_ccmake.cxx) = 7f6ca6fda5d0db615f04c18efa8ecdd6ef00cb93
SHA1 (patch-Source_QtDialog_CMakeLists.txt) = c4007da363c5b7c925f1ff345901057f3fbdc4e1
SHA1 (patch-Source_cmArchiveWrite.cxx) = 1b6a46252bd10618703116ef69e22f8ec5c5f31f
+SHA1 (patch-Source_cmWorkerPool.cxx) = 8eb3faf10767fe9228ed6822c17c96c04f017777
SHA1 (patch-Utilities_KWIML_CMakeLists.txt) = e4bdf9fc58757e87bf7e3e3e195839eededbc796
SHA1 (patch-Utilities_std_cm_string__view) = 90bbb578c5628b661a25974d7dd9aa6f5063271f
SHA1 (patch-bootstrap) = fc1b689bbe705cd888e2bef4debad1a26e5885bd
diff -r 799811dee162 -r d2692dfd66e2 devel/cmake/patches/patch-Source_cmWorkerPool.cxx
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/devel/cmake/patches/patch-Source_cmWorkerPool.cxx Tue May 19 13:52:10 2020 +0000
@@ -0,0 +1,138 @@
+$NetBSD: patch-Source_cmWorkerPool.cxx,v 1.1 2020/05/19 13:52:10 joerg Exp $
+
+Redo locking and state machine for fence handling and the worker pool.
+
+(1) All condition variable (CV) use must hold the corresponding mutex;
+otherwise race conditions happen. This is mandated by the C++ standard.
+
+(2) Introduce a separate CV for the thread waiting for other jobs to
+finish before running a fence. This avoids waking up all other workers
+blindly. Correctly wake that thread up when the processing of outstanding
+jobs is done.
+
+(3) Split the wait for a fence to become runnable from the wait while a
+fence is pending. This avoids problems when more than one fence ends up
+on the queue. The thread that took a fence off the queue is responsible
+for clearing the fence-processing flag.
+
+--- Source/cmWorkerPool.cxx.orig 2020-04-28 14:23:06.000000000 +0000
++++ Source/cmWorkerPool.cxx
+@@ -469,11 +469,9 @@ void cmWorkerPoolWorker::UVProcessStart(
+
+ void cmWorkerPoolWorker::UVProcessFinished()
+ {
+- {
+- std::lock_guard<std::mutex> lock(Proc_.Mutex);
+- if (Proc_.ROP && (Proc_.ROP->IsFinished() || !Proc_.ROP->IsStarted())) {
+- Proc_.ROP.reset();
+- }
++ std::lock_guard<std::mutex> lock(Proc_.Mutex);
++ if (Proc_.ROP && (Proc_.ROP->IsFinished() || !Proc_.ROP->IsStarted())) {
++ Proc_.ROP.reset();
+ }
+ // Notify idling thread
+ Proc_.Condition.notify_one();
+@@ -532,6 +530,7 @@ public:
+ unsigned int JobsProcessing = 0;
+ std::deque<cmWorkerPool::JobHandleT> Queue;
+ std::condition_variable Condition;
++ std::condition_variable ConditionFence;
+ std::vector<std::unique_ptr<cmWorkerPoolWorker>> Workers;
+
+ // -- References
+@@ -593,19 +592,12 @@ bool cmWorkerPoolInternal::Process()
+
+ void cmWorkerPoolInternal::Abort()
+ {
+- bool notifyThreads = false;
+ // Clear all jobs and set abort flag
+- {
+- std::lock_guard<std::mutex> guard(Mutex);
+- if (Processing && !Aborting) {
+- // Register abort and clear queue
+- Aborting = true;
+- Queue.clear();
+- notifyThreads = true;
+- }
+- }
+- if (notifyThreads) {
+- // Wake threads
++ std::lock_guard<std::mutex> guard(Mutex);
++ if (!Aborting) {
++ // Register abort and clear queue
++ Aborting = true;
++ Queue.clear();
+ Condition.notify_all();
+ }
+ }
+@@ -669,7 +661,7 @@ void cmWorkerPoolInternal::Work(unsigned
+ if (Aborting) {
+ break;
+ }
+- // Wait for new jobs
++ // Wait for new jobs on the main CV
+ if (Queue.empty()) {
+ ++WorkersIdle;
+ Condition.wait(uLock);
+@@ -677,20 +669,33 @@ void cmWorkerPoolInternal::Work(unsigned
+ continue;
+ }
+
+- // Check for fence jobs
+- if (FenceProcessing || Queue.front()->IsFence()) {
+- if (JobsProcessing != 0) {
+- Condition.wait(uLock);
+- continue;
+- }
+- // No jobs get processed. Set the fence job processing flag.
+- FenceProcessing = true;
++ // If there is a fence currently active or waiting,
++ // sleep on the main CV and try again.
++ if (FenceProcessing) {
++ Condition.wait(uLock);
++ continue;
+ }
+
+ // Pop next job from queue
+ jobHandle = std::move(Queue.front());
+ Queue.pop_front();
+
++ // Check for fence jobs
++ bool raisedFence = false;
++ if (jobHandle->IsFence()) {
++ FenceProcessing = true;
++ raisedFence = true;
++ // Wait on the Fence CV until all pending jobs are done.
++ while (JobsProcessing != 0 && !Aborting)
++ ConditionFence.wait(uLock);
++ // When aborting, explicitly kick all threads alive once more.
++ if (Aborting) {
++ FenceProcessing = false;
++ Condition.notify_all();
++ break;
++ }
++ }
++
+ // Unlocked scope for job processing
+ ++JobsProcessing;
+ {
+@@ -701,11 +706,17 @@ void cmWorkerPoolInternal::Work(unsigned
+ }
+ --JobsProcessing;
+
+- // Was this a fence job?
+- if (FenceProcessing) {
++ // If this was the thread that entered fence processing
++ // originally, notify all idling workers that the fence
++ // is done.
++ if (raisedFence) {
+ FenceProcessing = false;
+ Condition.notify_all();
+ }
++ // If fence processing is still not done, notify the
++ // fencing worker when all active jobs are done.
++ if (FenceProcessing && JobsProcessing == 0)
++ ConditionFence.notify_all();
+ }
+
+ // Decrement running workers count
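Two illustrative sketches follow; neither is cmake code. The first is a minimal example of the rule in point (1): a std::condition_variable wait must hold the associated mutex, and the predicate it waits on must be written under that same mutex, or a notification can be lost between the waiter's check and its sleep. The names (waiter, signaler, ready) are hypothetical.

#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

std::mutex m;
std::condition_variable cv;
bool ready = false; // predicate guarded by m

void waiter()
{
  std::unique_lock<std::mutex> lock(m); // wait() requires a held lock
  // wait() releases m while blocked and re-checks the predicate under m
  cv.wait(lock, [] { return ready; });
  std::cout << "woke up with ready == true\n";
}

void signaler()
{
  {
    // The predicate must be flipped under the same mutex; writing it
    // unlocked can race with the waiter's check-then-sleep sequence
    // and lose the wakeup.
    std::lock_guard<std::mutex> lock(m);
    ready = true;
  }
  cv.notify_one(); // notifying after dropping the lock is fine
}

int main()
{
  std::thread t1(waiter);
  std::thread t2(signaler);
  t1.join();
  t2.join();
}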
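The second is a condensed, self-contained sketch of the scheme points (2) and (3) describe: idle workers sleep on a main CV, the thread that dequeues a fence raises a FenceProcessing flag and sleeps on a dedicated fence CV until in-flight jobs drain, and only that thread clears the flag afterwards. Pool, Job, Push, and Shutdown are hypothetical stand-ins, not cmake's actual cmWorkerPool interface.

#include <chrono>
#include <condition_variable>
#include <cstdio>
#include <deque>
#include <functional>
#include <mutex>
#include <thread>
#include <vector>

struct Job {
  std::function<void()> Work;
  bool IsFence = false;
};

struct Pool {
  std::mutex Mutex;
  std::condition_variable Condition;      // all idle workers sleep here
  std::condition_variable ConditionFence; // only the fencing thread sleeps here
  std::deque<Job> Queue;
  unsigned JobsProcessing = 0;
  bool FenceProcessing = false;
  bool Done = false;

  void Push(Job job)
  {
    std::lock_guard<std::mutex> guard(Mutex);
    Queue.push_back(std::move(job));
    Condition.notify_all();
  }

  void Shutdown()
  {
    std::lock_guard<std::mutex> guard(Mutex);
    Done = true;
    Condition.notify_all();
    ConditionFence.notify_all();
  }

  void Work()
  {
    std::unique_lock<std::mutex> uLock(Mutex);
    while (!(Done && Queue.empty())) {
      // Sleep on the main CV while there is no work, or while a fence
      // is active or pending (mirrors the patched Work() loop).
      if (Queue.empty() || FenceProcessing) {
        Condition.wait(uLock);
        continue;
      }
      Job job = std::move(Queue.front());
      Queue.pop_front();

      // Point (3): only the thread that dequeued the fence raises the
      // flag, and only that thread clears it again below.
      bool raisedFence = false;
      if (job.IsFence) {
        FenceProcessing = true;
        raisedFence = true;
        // Point (2): wait on the dedicated fence CV, so a finishing job
        // need not wake every idle worker blindly.
        while (JobsProcessing != 0 && !Done) {
          ConditionFence.wait(uLock);
        }
      }

      ++JobsProcessing;
      uLock.unlock(); // run the job without holding the pool mutex
      job.Work();
      uLock.lock();
      --JobsProcessing;

      if (raisedFence) {
        FenceProcessing = false;
        Condition.notify_all(); // fence done: wake all idle workers
      }
      // Last in-flight job draining while a fence waits: wake the fencer.
      if (FenceProcessing && JobsProcessing == 0) {
        ConditionFence.notify_all();
      }
    }
  }
};

int main()
{
  Pool pool;
  for (int i = 0; i < 3; ++i) {
    pool.Push({ [i] { std::printf("job %d\n", i); }, false });
  }
  pool.Push({ [] { std::printf("fence\n"); }, true });
  pool.Push({ [] { std::printf("after fence\n"); }, false });

  std::vector<std::thread> workers;
  for (int i = 0; i < 2; ++i) {
    workers.emplace_back([&pool] { pool.Work(); });
  }
  // Crude for a demo: let the workers drain the queue before shutdown.
  std::this_thread::sleep_for(std::chrono::milliseconds(200));
  pool.Shutdown();
  for (auto& worker : workers) {
    worker.join();
  }
}

The dedicated fence CV means a finishing job only signals the one sleeping fencer instead of broadcasting to the whole pool, which is exactly the blind wakeup the patch removes.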