Commit 07148b12 authored by fpizlo@apple.com's avatar fpizlo@apple.com
Browse files

The JSC garbage collector returns memory to the operating system too eagerly.
https://bugs.webkit.org/show_bug.cgi?id=65382

Reviewed by Oliver Hunt.

This introduces a memory reuse model similar to the one in FastMalloc.
A periodic scavenger thread runs in the background and returns half the
free memory to the OS on each timer fire.  New block allocations first
attempt to get the memory from the collector's internal pool, reverting
to OS allocation only when this pool is empty.

* heap/Heap.cpp:
(JSC::Heap::Heap):
(JSC::Heap::~Heap):
(JSC::Heap::destroy):
(JSC::Heap::waitForRelativeTimeWhileHoldingLock):
(JSC::Heap::waitForRelativeTime):
(JSC::Heap::blockFreeingThreadStartFunc):
(JSC::Heap::blockFreeingThreadMain):
(JSC::Heap::allocateBlock):
(JSC::Heap::freeBlocks):
(JSC::Heap::releaseFreeBlocks):
* heap/Heap.h:
* heap/MarkedBlock.cpp:
(JSC::MarkedBlock::destroy):
(JSC::MarkedBlock::MarkedBlock):
(JSC::MarkedBlock::initForCellSize):
(JSC::MarkedBlock::reset):
* heap/MarkedBlock.h:
* wtf/Platform.h:


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@92084 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 8a2d14d7
2011-07-31 Filip Pizlo <fpizlo@apple.com>
The JSC garbage collector returns memory to the operating system too
eagerly.
https://bugs.webkit.org/show_bug.cgi?id=65382
Reviewed by Oliver Hunt.
This introduces a memory reuse model similar to the one in FastMalloc.
A periodic scavenger thread runs in the background and returns half the
free memory to the OS on each timer fire. New block allocations first
attempt to get the memory from the collector's internal pool, reverting
to OS allocation only when this pool is empty.
* heap/Heap.cpp:
(JSC::Heap::Heap):
(JSC::Heap::~Heap):
(JSC::Heap::destroy):
(JSC::Heap::waitForRelativeTimeWhileHoldingLock):
(JSC::Heap::waitForRelativeTime):
(JSC::Heap::blockFreeingThreadStartFunc):
(JSC::Heap::blockFreeingThreadMain):
(JSC::Heap::allocateBlock):
(JSC::Heap::freeBlocks):
(JSC::Heap::releaseFreeBlocks):
* heap/Heap.h:
* heap/MarkedBlock.cpp:
(JSC::MarkedBlock::destroy):
(JSC::MarkedBlock::MarkedBlock):
(JSC::MarkedBlock::initForCellSize):
(JSC::MarkedBlock::reset):
* heap/MarkedBlock.h:
* wtf/Platform.h:
2011-07-30 Filip Pizlo <fpizlo@apple.com>
 
DFG JIT speculation failure pass sometimes forgets to emit code to
......
......@@ -244,10 +244,25 @@ Heap::Heap(JSGlobalData* globalData)
{
m_newSpace.setHighWaterMark(minBytesPerCycle);
(*m_activityCallback)();
#if ENABLE(LAZY_BLOCK_FREEING)
m_numberOfFreeBlocks = 0;
m_blockFreeingThread = createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree");
ASSERT(m_blockFreeingThread);
#endif
}
Heap::~Heap()
{
#if ENABLE(LAZY_BLOCK_FREEING)
    // Shut down the block-freeing (scavenger) thread. The quit flag is set
    // while holding m_freeBlockLock so the scavenger cannot miss it between
    // checking the flag and blocking on the condition; the broadcast then
    // wakes it out of its timed wait.
    {
        MutexLocker locker(m_freeBlockLock);
        m_blockFreeingThreadShouldQuit = true;
        m_freeBlockCondition.broadcast();
    }
    // Join the thread before tearing down the members it touches.
    waitForThreadCompletion(m_blockFreeingThread, 0);
#endif
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}
......@@ -279,10 +294,78 @@ void Heap::destroy()
shrink();
ASSERT(!size());
#if ENABLE(LAZY_BLOCK_FREEING)
releaseFreeBlocks();
#endif
m_globalData = 0;
}
#if ENABLE(LAZY_BLOCK_FREEING)
void Heap::waitForRelativeTimeWhileHoldingLock(double relative)
{
    // Caller must hold m_freeBlockLock. Once shutdown has been requested
    // there is no point in sleeping, so skip the wait entirely; otherwise
    // block until the deadline (or an early wakeup via the condition).
    if (!m_blockFreeingThreadShouldQuit) {
        double absoluteDeadline = currentTime() + relative;
        m_freeBlockCondition.timedWait(m_freeBlockLock, absoluteDeadline);
    }
}
void Heap::waitForRelativeTime(double relative)
{
    // Spurious early returns are acceptable here as long as they are rare;
    // the only hard requirement is that the wait does return when asked to.
    MutexLocker lock(m_freeBlockLock);
    waitForRelativeTimeWhileHoldingLock(relative);
}
void* Heap::blockFreeingThreadStartFunc(void* heap)
{
static_cast<Heap*>(heap)->blockFreeingThreadMain();
return 0;
}
// Scavenger loop: once a second, return roughly half of the pooled free
// blocks to the OS. Runs until m_blockFreeingThreadShouldQuit is observed.
void Heap::blockFreeingThreadMain()
{
    while (!m_blockFreeingThreadShouldQuit) {
        // Generally wait for one second before scavenging free blocks. This
        // may return early, particularly when we're being asked to quit.
        waitForRelativeTime(1.0);
        if (m_blockFreeingThreadShouldQuit)
            break;

        // Now process the list of free blocks. Keep freeing until half of the
        // blocks that are currently on the list are gone. Assume that a size_t
        // field can be accessed atomically.
        // NOTE(review): this unlocked read is a snapshot; allocateBlock and
        // freeBlocks may change the count concurrently, which only shifts the
        // target, not correctness.
        size_t currentNumberOfFreeBlocks = m_numberOfFreeBlocks;
        if (!currentNumberOfFreeBlocks)
            continue;

        size_t desiredNumberOfFreeBlocks = currentNumberOfFreeBlocks / 2;

        while (!m_blockFreeingThreadShouldQuit) {
            MarkedBlock* block;
            {
                // Pop one block under the lock; the expensive destroy happens
                // outside the critical section so allocators aren't stalled.
                MutexLocker locker(m_freeBlockLock);
                if (m_numberOfFreeBlocks <= desiredNumberOfFreeBlocks)
                    block = 0; // reached the target; stop scavenging this round
                else {
                    block = m_freeBlocks.removeHead();
                    ASSERT(block);
                    m_numberOfFreeBlocks--;
                }
            }
            if (!block)
                break;
            MarkedBlock::destroy(block);
        }
    }
}
#endif // ENABLE(LAZY_BLOCK_FREEING)
void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
// Our frequency of garbage collection tries to balance memory use against speed
......@@ -613,7 +696,26 @@ bool Heap::isValidAllocation(size_t bytes)
MarkedBlock* Heap::allocateBlock(size_t cellSize)
{
MarkedBlock* block = MarkedBlock::create(this, cellSize);
MarkedBlock* block;
#if !ENABLE(LAZY_BLOCK_FREEING)
block = MarkedBlock::create(this, cellSize);
#else
{
MutexLocker locker(m_freeBlockLock);
if (m_numberOfFreeBlocks) {
block = m_freeBlocks.removeHead();
ASSERT(block);
m_numberOfFreeBlocks--;
} else
block = 0;
}
if (block)
block->initForCellSize(cellSize);
else
block = MarkedBlock::create(this, cellSize);
#endif
m_blocks.add(block);
return block;
......@@ -626,7 +728,14 @@ void Heap::freeBlocks(MarkedBlock* head)
next = block->next();
m_blocks.remove(block);
block->reset();
#if !ENABLE(LAZY_BLOCK_FREEING)
MarkedBlock::destroy(block);
#else
MutexLocker locker(m_freeBlockLock);
m_freeBlocks.append(block);
m_numberOfFreeBlocks++;
#endif
}
}
......@@ -637,6 +746,30 @@ void Heap::shrink()
freeBlocks(forEachBlock(takeIfEmpty));
}
#if ENABLE(LAZY_BLOCK_FREEING)
// Drain the free-block pool completely, returning every block to the OS.
// Called from Heap::destroy() during teardown.
void Heap::releaseFreeBlocks()
{
    while (true) {
        MarkedBlock* block;
        {
            // Pop one block per lock acquisition; destruction happens outside
            // the lock, mirroring the scavenger loop in blockFreeingThreadMain.
            MutexLocker locker(m_freeBlockLock);
            if (!m_numberOfFreeBlocks)
                block = 0; // pool empty — done
            else {
                block = m_freeBlocks.removeHead();
                ASSERT(block);
                m_numberOfFreeBlocks--;
            }
        }
        if (!block)
            break;
        MarkedBlock::destroy(block);
    }
}
#endif
#if ENABLE(GGC)
void Heap::writeBarrierSlowCase(const JSCell* owner, JSCell* cell)
{
......
......@@ -52,7 +52,7 @@ namespace JSC {
typedef HashCountedSet<const char*> TypeCountSet;
enum OperationInProgress { NoOperation, Allocation, Collection };
class Heap {
WTF_MAKE_NONCOPYABLE(Heap);
public:
......@@ -144,16 +144,34 @@ namespace JSC {
enum SweepToggle { DoNotSweep, DoSweep };
void collect(SweepToggle);
void shrink();
void releaseFreeBlocks();
void sweep();
RegisterFile& registerFile();
static void writeBarrierSlowCase(const JSCell*, JSCell*);
#if ENABLE(LAZY_BLOCK_FREEING)
void waitForRelativeTimeWhileHoldingLock(double relative);
void waitForRelativeTime(double relative);
void blockFreeingThreadMain();
static void* blockFreeingThreadStartFunc(void* heap);
#endif
OperationInProgress m_operationInProgress;
NewSpace m_newSpace;
MarkedBlockSet m_blocks;
#if ENABLE(LAZY_BLOCK_FREEING)
DoublyLinkedList<MarkedBlock> m_freeBlocks;
size_t m_numberOfFreeBlocks;
ThreadIdentifier m_blockFreeingThread;
Mutex m_freeBlockLock;
ThreadCondition m_freeBlockCondition;
bool m_blockFreeingThreadShouldQuit;
#endif
size_t m_extraCost;
ProtectCountSet m_protectedValues;
......
......@@ -42,8 +42,6 @@ MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
void MarkedBlock::destroy(MarkedBlock* block)
{
for (size_t i = block->firstAtom(); i < block->m_endAtom; i += block->m_atomsPerCell)
reinterpret_cast<JSCell*>(&block->atoms()[i])->~JSCell();
block->m_allocation.deallocate();
}
......@@ -51,11 +49,22 @@ MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, si
: m_inNewSpace(false)
, m_allocation(allocation)
, m_heap(heap)
{
initForCellSize(cellSize);
}
void MarkedBlock::initForCellSize(size_t cellSize)
{
    // Round the requested cell size up to a whole number of atoms.
    size_t atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    m_atomsPerCell = atomsPerCell;
    // One past the last atom index at which a full cell can still begin.
    m_endAtom = atomsPerBlock + 1 - atomsPerCell;
}
// Run ~JSCell() on every cell-sized slot in the block. Called (see
// Heap::freeBlocks) before a block is parked on the free-block pool, so a
// recycled block can later be re-initialized for a different cell size via
// initForCellSize.
void MarkedBlock::reset()
{
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
        reinterpret_cast<JSCell*>(&atoms()[i])->~JSCell();
}
void MarkedBlock::sweep()
{
Structure* dummyMarkableCellStructure = m_heap->globalData()->dummyMarkableCellStructure.get();
......
......@@ -94,12 +94,16 @@ namespace JSC {
// them, and returns a linked list of those cells.
FreeCell* lazySweep();
void initForCellSize(size_t cellSize);
// These should be called immediately after a block is created.
// Blessing for fast path creates a linked list, while blessing for
// slow path creates dummy cells.
FreeCell* blessNewBlockForFastPath();
void blessNewBlockForSlowPath();
void reset();
// This unmarks all cells on the free list, and allocates dummy JSCells
// in their place.
void canonicalizeBlock(FreeCell* firstFreeCell);
......
......@@ -1105,6 +1105,13 @@
#endif
#endif
#if ENABLE(SINGLE_THREADED)
#undef ENABLE_LAZY_BLOCK_FREEING
#define ENABLE_LAZY_BLOCK_FREEING 0
#else
#define ENABLE_LAZY_BLOCK_FREEING 1
#endif
#if !defined(ENABLE_PAN_SCROLLING) && OS(WINDOWS)
#define ENABLE_PAN_SCROLLING 1
#endif
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment