Commit b9a38733 authored by andersca@apple.com

Convert GCThreadSharedData over to STL threading primitives

https://bugs.webkit.org/show_bug.cgi?id=127256

Reviewed by Andreas Kling.

* heap/GCThread.cpp:
(JSC::GCThread::waitForNextPhase):
(JSC::GCThread::gcThreadMain):
* heap/GCThreadSharedData.cpp:
(JSC::GCThreadSharedData::GCThreadSharedData):
(JSC::GCThreadSharedData::~GCThreadSharedData):
(JSC::GCThreadSharedData::startNextPhase):
(JSC::GCThreadSharedData::endCurrentPhase):
(JSC::GCThreadSharedData::didStartMarking):
(JSC::GCThreadSharedData::didFinishMarking):
* heap/GCThreadSharedData.h:
* heap/SlotVisitor.cpp:
(JSC::SlotVisitor::donateKnownParallel):
(JSC::SlotVisitor::drainFromShared):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@162352 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent b1560682
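At a glance, the conversion is mechanical: MutexLocker becomes std::lock_guard<std::mutex> for plain scoped locking, or std::unique_lock<std::mutex> where a condition wait follows; MutexTryLocker becomes std::unique_lock with std::try_to_lock; ThreadCondition::signal() and broadcast() become notify_one() and notify_all(); and hand-written "while (!condition) wait()" loops collapse into the predicate overload of std::condition_variable::wait.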
Source/JavaScriptCore/heap/GCThread.cpp
@@ -69,16 +69,14 @@ CopyVisitor* GCThread::copyVisitor()
 GCPhase GCThread::waitForNextPhase()
 {
-    MutexLocker locker(m_shared.m_phaseLock);
-    while (m_shared.m_gcThreadsShouldWait)
-        m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_shared.m_phaseMutex);
+    m_shared.m_phaseConditionVariable.wait(lock, [this] { return !m_shared.m_gcThreadsShouldWait; });
 
     m_shared.m_numberOfActiveGCThreads--;
     if (!m_shared.m_numberOfActiveGCThreads)
-        m_shared.m_activityCondition.signal();
+        m_shared.m_activityConditionVariable.notify_one();
 
-    while (m_shared.m_currentPhase == NoPhase)
-        m_shared.m_phaseCondition.wait(m_shared.m_phaseLock);
+    m_shared.m_phaseConditionVariable.wait(lock, [this] { return m_shared.m_currentPhase != NoPhase; });
     m_shared.m_numberOfActiveGCThreads++;
     return m_shared.m_currentPhase;
 }
 
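The hunk above shows the central pattern of this patch: a hand-written while-loop around ThreadCondition::wait becomes the predicate overload of std::condition_variable::wait, which re-checks the condition after every wakeup (including spurious ones). A minimal standalone sketch of that pattern, using illustrative names rather than anything from the patch:

    #include <condition_variable>
    #include <mutex>

    // Illustrative stand-in for the phase state guarded by m_phaseMutex.
    struct PhaseState {
        std::mutex mutex;
        std::condition_variable condition;
        bool shouldWait = true;
    };

    void waitUntilReleased(PhaseState& state)
    {
        std::unique_lock<std::mutex> lock(state.mutex);
        // Equivalent to: while (state.shouldWait) state.condition.wait(lock);
        // The predicate is re-evaluated after every wakeup, so spurious
        // wakeups need no hand-written loop.
        state.condition.wait(lock, [&state] { return !state.shouldWait; });
    }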
@@ -92,7 +90,7 @@ void GCThread::gcThreadMain()
     // Wait for the main thread to finish creating and initializing us. The main thread grabs this lock before
     // creating this thread. We aren't guaranteed to have a valid threadID until the main thread releases this lock.
     {
-        MutexLocker locker(m_shared.m_phaseLock);
+        std::lock_guard<std::mutex> lock(m_shared.m_phaseMutex);
     }
     {
         ParallelModeEnabler enabler(*m_slotVisitor);
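The otherwise-empty block above is a startup handshake: the main thread holds the phase mutex while it creates the GC thread, so merely acquiring and releasing the lock blocks the new thread until initialization is complete. A self-contained sketch of the idiom, with illustrative names:

    #include <mutex>
    #include <thread>

    std::mutex initMutex;
    int sharedConfig = 0; // written by the creator before releasing initMutex

    void workerMain()
    {
        // Block until the creating thread releases initMutex, then release it
        // immediately; the lock serves purely as a startup barrier.
        {
            std::lock_guard<std::mutex> lock(initMutex);
        }
        // Safe to read sharedConfig from here on.
    }

    void spawnWorker()
    {
        std::unique_lock<std::mutex> lock(initMutex);
        std::thread worker(workerMain);
        sharedConfig = 42; // finish initialization while holding the lock
        lock.unlock();     // let the worker proceed
        worker.join();
    }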
Source/JavaScriptCore/heap/GCThreadSharedData.cpp
@@ -83,7 +83,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     m_copyLock.Init();
 #if ENABLE(PARALLEL_GC)
     // Grab the lock so the new GC threads can be properly initialized before they start running.
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) {
         m_numberOfActiveGCThreads++;
         SlotVisitor* slotVisitor = new SlotVisitor(*this);
@@ -95,8 +95,7 @@ GCThreadSharedData::GCThreadSharedData(VM* vm)
     }
 
     // Wait for all the GCThreads to get to the right place.
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 #endif
 }
@@ -105,13 +104,13 @@ GCThreadSharedData::~GCThreadSharedData()
 #if ENABLE(PARALLEL_GC)
     // Destroy our marking threads.
     {
-        MutexLocker markingLocker(m_markingLock);
-        MutexLocker phaseLocker(m_phaseLock);
+        std::lock_guard<std::mutex> markingLock(m_markingMutex);
+        std::lock_guard<std::mutex> phaseLock(m_phaseMutex);
         ASSERT(m_currentPhase == NoPhase);
         m_parallelMarkersShouldExit = true;
         m_gcThreadsShouldWait = false;
         m_currentPhase = Exit;
-        m_phaseCondition.broadcast();
+        m_phaseConditionVariable.notify_all();
     }
     for (unsigned i = 0; i < m_gcThreads.size(); ++i) {
         waitForThreadCompletion(m_gcThreads[i]->threadID());
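In the destructor, MutexLocker maps to std::lock_guard rather than std::unique_lock because this scope only mutates state and notifies; it never waits on a condition variable while holding these locks. A sketch of the distinction, with illustrative names (wait() requires a unique_lock because it must release and re-acquire the mutex while blocked):

    #include <condition_variable>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    void setFlagAndNotify()
    {
        // lock_guard: a scope-long lock with no early unlock and no waiting;
        // sufficient when we only write state and notify.
        std::lock_guard<std::mutex> lock(m);
        ready = true;
        cv.notify_all(); // waiters wake and re-acquire m once this scope ends
    }

    void waitForFlag()
    {
        // unique_lock: required by condition_variable::wait, which releases
        // the mutex while blocked and re-acquires it before returning.
        std::unique_lock<std::mutex> lock(m);
        cv.wait(lock, [] { return ready; });
    }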
@@ -139,28 +138,27 @@ void GCThreadSharedData::reset()
 void GCThreadSharedData::startNextPhase(GCPhase phase)
 {
-    MutexLocker phaseLocker(m_phaseLock);
+    std::lock_guard<std::mutex> lock(m_phaseMutex);
     ASSERT(!m_gcThreadsShouldWait);
     ASSERT(m_currentPhase == NoPhase);
     m_gcThreadsShouldWait = true;
     m_currentPhase = phase;
-    m_phaseCondition.broadcast();
+    m_phaseConditionVariable.notify_all();
 }
 
 void GCThreadSharedData::endCurrentPhase()
 {
     ASSERT(m_gcThreadsShouldWait);
-    MutexLocker locker(m_phaseLock);
+    std::unique_lock<std::mutex> lock(m_phaseMutex);
     m_currentPhase = NoPhase;
     m_gcThreadsShouldWait = false;
-    m_phaseCondition.broadcast();
-    while (m_numberOfActiveGCThreads)
-        m_activityCondition.wait(m_phaseLock);
+    m_phaseConditionVariable.notify_all();
+    m_activityConditionVariable.wait(lock, [this] { return !m_numberOfActiveGCThreads; });
 }
 
 void GCThreadSharedData::didStartMarking()
 {
-    MutexLocker markingLocker(m_markingLock);
+    std::lock_guard<std::mutex> lock(m_markingMutex);
     m_parallelMarkersShouldExit = false;
     startNextPhase(Mark);
 }
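endCurrentPhase() notifies one condition variable and then waits on another, with both condition variables guarding the same mutex; std::condition_variable supports this as long as every waiter locks that same mutex. A compressed sketch of the shape, with illustrative names and state:

    #include <condition_variable>
    #include <mutex>

    std::mutex phaseMutex;                     // one mutex shared by...
    std::condition_variable phaseCondition;    // ...two condition variables,
    std::condition_variable activityCondition; // mirroring the patch's layout
    unsigned activeWorkers = 0;
    bool workersShouldWait = true;

    void endPhase()
    {
        std::unique_lock<std::mutex> lock(phaseMutex);
        workersShouldWait = false;
        phaseCondition.notify_all(); // release workers parked on phaseCondition
        // wait() drops phaseMutex while blocked, so workers can acquire it,
        // decrement activeWorkers, and notify activityCondition in turn.
        activityCondition.wait(lock, [] { return !activeWorkers; });
    }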
@@ -168,9 +166,9 @@ void GCThreadSharedData::didStartMarking()
 void GCThreadSharedData::didFinishMarking()
 {
     {
-        MutexLocker markingLocker(m_markingLock);
+        std::lock_guard<std::mutex> lock(m_markingMutex);
         m_parallelMarkersShouldExit = true;
-        m_markingCondition.broadcast();
+        m_markingConditionVariable.notify_all();
     }
 
     ASSERT(m_currentPhase == Mark);
Source/JavaScriptCore/heap/GCThreadSharedData.h
@@ -31,9 +31,9 @@
 #include "MarkedBlock.h"
 #include "UnconditionalFinalizer.h"
 #include "WeakReferenceHarvester.h"
+#include <condition_variable>
 #include <wtf/HashSet.h>
 #include <wtf/TCSpinLock.h>
-#include <wtf/Threading.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
@@ -86,8 +86,8 @@ private:
     Vector<GCThread*> m_gcThreads;
 
-    Mutex m_markingLock;
-    ThreadCondition m_markingCondition;
+    std::mutex m_markingMutex;
+    std::condition_variable m_markingConditionVariable;
     MarkStackArray m_sharedMarkStack;
     unsigned m_numberOfActiveParallelMarkers;
     bool m_parallelMarkersShouldExit;
@@ -100,9 +100,9 @@ private:
     size_t m_copyIndex;
     static const size_t s_blockFragmentLength = 32;
 
-    Mutex m_phaseLock;
-    ThreadCondition m_phaseCondition;
-    ThreadCondition m_activityCondition;
+    std::mutex m_phaseMutex;
+    std::condition_variable m_phaseConditionVariable;
+    std::condition_variable m_activityConditionVariable;
     unsigned m_numberOfActiveGCThreads;
     bool m_gcThreadsShouldWait;
     GCPhase m_currentPhase;
Source/JavaScriptCore/heap/SlotVisitor.cpp
@@ -118,15 +118,15 @@ void SlotVisitor::donateKnownParallel()
     // If we're contending on the lock, be conservative and assume that another
     // thread is already donating.
-    MutexTryLocker locker(m_shared.m_markingLock);
-    if (!locker.locked())
+    std::unique_lock<std::mutex> lock(m_shared.m_markingMutex, std::try_to_lock);
+    if (!lock.owns_lock())
         return;
 
     // Otherwise, assume that a thread will go idle soon, and donate.
     m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack);
 
     if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers())
-        m_shared.m_markingCondition.broadcast();
+        m_shared.m_markingConditionVariable.notify_all();
 }
 
 void SlotVisitor::drain()
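MutexTryLocker maps to a std::unique_lock constructed with std::try_to_lock, with owns_lock() standing in for locked(). A self-contained sketch, using illustrative names:

    #include <mutex>

    std::mutex donationMutex; // illustrative stand-in for the marking mutex

    bool tryDonate()
    {
        // std::try_to_lock makes the constructor attempt the lock without
        // blocking; owns_lock() reports whether the attempt succeeded.
        std::unique_lock<std::mutex> lock(donationMutex, std::try_to_lock);
        if (!lock.owns_lock())
            return false; // contended: assume another thread is donating
        // ... donate work while holding the lock ...
        return true;
    }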
@@ -181,12 +181,12 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
 #if ENABLE(PARALLEL_GC)
     {
-        MutexLocker locker(m_shared.m_markingLock);
+        std::lock_guard<std::mutex> lock(m_shared.m_markingMutex);
         m_shared.m_numberOfActiveParallelMarkers++;
     }
     while (true) {
         {
-            MutexLocker locker(m_shared.m_markingLock);
+            std::unique_lock<std::mutex> lock(m_shared.m_markingMutex);
             m_shared.m_numberOfActiveParallelMarkers--;
 
             // How we wait differs depending on drain mode.
@@ -197,7 +197,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                 // Did we reach termination?
                 if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty()) {
                     // Let any sleeping slaves know it's time for them to return;
-                    m_shared.m_markingCondition.broadcast();
+                    m_shared.m_markingConditionVariable.notify_all();
                     return;
                 }
@@ -206,17 +206,16 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode)
                     break;
 
                 // Otherwise wait.
-                m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+                m_shared.m_markingConditionVariable.wait(lock);
             }
         } else {
             ASSERT(sharedDrainMode == SlaveDrain);
 
             // Did we detect termination? If so, let the master know.
             if (!m_shared.m_numberOfActiveParallelMarkers && m_shared.m_sharedMarkStack.isEmpty())
-                m_shared.m_markingCondition.broadcast();
+                m_shared.m_markingConditionVariable.notify_all();
 
-            while (m_shared.m_sharedMarkStack.isEmpty() && !m_shared.m_parallelMarkersShouldExit)
-                m_shared.m_markingCondition.wait(m_shared.m_markingLock);
+            m_shared.m_markingConditionVariable.wait(lock, [this] { return !m_shared.m_sharedMarkStack.isEmpty() || m_shared.m_parallelMarkersShouldExit; });
 
             // Is the current phase done? If so, return from this function.
             if (m_shared.m_parallelMarkersShouldExit)
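Note the asymmetry above: the master drain keeps the plain wait(lock) overload because its enclosing while (true) loop already re-evaluates several exit conditions after each wakeup, while the slave drain's simple two-clause wait loop collapses into the predicate overload. A sketch of that equivalence, with illustrative state:

    #include <condition_variable>
    #include <deque>
    #include <mutex>

    std::mutex m;
    std::condition_variable cv;
    std::deque<int> sharedWork;
    bool shouldExit = false;

    void slaveWait()
    {
        std::unique_lock<std::mutex> lock(m);
        // The predicate overload folds the loop in; it is equivalent to:
        //     while (sharedWork.empty() && !shouldExit)
        //         cv.wait(lock);
        cv.wait(lock, [] { return !sharedWork.empty() || shouldExit; });
    }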