Commit c574f3bd authored by mhahnenberg@apple.com

Copying should be generational

https://bugs.webkit.org/show_bug.cgi?id=126555

Reviewed by Geoffrey Garen.

This patch adds support for copying to our generational collector. Eden collections 
always trigger copying. Full collections use our normal fragmentation-based heuristics.
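For reference, this is the shape of that decision, condensed from the CopiedSpace::startedCopying() hunk further down (a sketch pulled out for readability, not new code):

    // Eden collections always copy; full collections copy only when overall heap
    // utilization has dropped below the configured threshold.
    double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes)
        / static_cast<double>(totalUsableBytes + markedSpaceBytes);
    m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection
        || totalFragmentation <= Options::minHeapUtilization();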

CopiedSpace now tracks two sets of CopiedBlocks: an old generation and a new generation. During
each mutator cycle, new CopiedSpace allocations reside in the new generation. When a collection
occurs, those blocks are promoted to the old generation.
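The CopiedSpace.cpp side of that promotion is collapsed in the diff below, so here is a rough, hypothetical sketch of what it amounts to (the helper name and its exact placement are assumptions, not the literal patch):

    // Hypothetical sketch only: once a collection has processed the new generation,
    // its surviving blocks belong to the old generation.
    void CopiedSpace::promoteNewGenerationBlocks() // assumed helper name
    {
        while (CopiedBlock* block = m_newGen.toSpace->removeHead()) {
            block->didPromote();                                   // block is now old
            m_oldGen.toSpace->push(block);
            m_oldGen.blockFilter.add(reinterpret_cast<Bits>(block));
        }
    }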

One key thing to remember is that both new- and old-generation objects in MarkedSpace can refer
to old- or new-generation allocations in CopiedSpace. This is why we must fire a write barrier
when assigning to an old (MarkedSpace) object's Butterfly.
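A minimal sketch of the store that needs the barrier, with stand-in names (this is not the actual JSC call site):

    // 'owner' lives in MarkedSpace and may already be old; 'newStorage' is a fresh
    // new-generation CopiedSpace allocation. The barrier puts 'owner' in the remembered
    // set so the next eden collection revisits it and copyLater() re-reports the
    // backing store's live bytes to the copier.
    static void assignBackingStore(JSC::Heap& heap, JSC::JSObject* owner, JSC::Butterfly* newStorage)
    {
        storeButterfly(owner, newStorage); // hypothetical helper performing the raw store
        heap.writeBarrier(owner);
    }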

* heap/CopiedAllocator.h:
(JSC::CopiedAllocator::tryAllocateDuringCopying):
* heap/CopiedBlock.h:
(JSC::CopiedBlock::CopiedBlock):
(JSC::CopiedBlock::didEvacuateBytes):
(JSC::CopiedBlock::isOld):
(JSC::CopiedBlock::didPromote):
* heap/CopiedBlockInlines.h:
(JSC::CopiedBlock::reportLiveBytes):
(JSC::CopiedBlock::reportLiveBytesDuringCopying):
* heap/CopiedSpace.cpp:
(JSC::CopiedSpace::CopiedSpace):
(JSC::CopiedSpace::~CopiedSpace):
(JSC::CopiedSpace::init):
(JSC::CopiedSpace::tryAllocateOversize):
(JSC::CopiedSpace::tryReallocateOversize):
(JSC::CopiedSpace::doneFillingBlock):
(JSC::CopiedSpace::didStartFullCollection):
(JSC::CopiedSpace::doneCopying):
(JSC::CopiedSpace::size):
(JSC::CopiedSpace::capacity):
(JSC::CopiedSpace::isPagedOut):
* heap/CopiedSpace.h:
(JSC::CopiedSpace::CopiedGeneration::CopiedGeneration):
* heap/CopiedSpaceInlines.h:
(JSC::CopiedSpace::contains):
(JSC::CopiedSpace::recycleEvacuatedBlock):
(JSC::CopiedSpace::allocateBlock):
(JSC::CopiedSpace::startedCopying):
* heap/CopyVisitor.cpp:
(JSC::CopyVisitor::copyFromShared):
* heap/CopyVisitorInlines.h:
(JSC::CopyVisitor::allocateNewSpace):
(JSC::CopyVisitor::allocateNewSpaceSlow):
* heap/GCThreadSharedData.cpp:
(JSC::GCThreadSharedData::didStartCopying):
* heap/Heap.cpp:
(JSC::Heap::copyBackingStores):
* heap/SlotVisitorInlines.h:
(JSC::SlotVisitor::copyLater):
* heap/TinyBloomFilter.h:
(JSC::TinyBloomFilter::add):


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@162017 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 3d8edf77
......@@ -38,6 +38,7 @@ public:
bool fastPathShouldSucceed(size_t bytes) const;
CheckedBoolean tryAllocate(size_t bytes, void** outPtr);
CheckedBoolean tryAllocateDuringCopying(size_t bytes, void** outPtr);
CheckedBoolean tryReallocate(void *oldPtr, size_t oldBytes, size_t newBytes);
void* forceAllocate(size_t bytes);
CopiedBlock* resetCurrentBlock();
......@@ -93,6 +94,14 @@ inline CheckedBoolean CopiedAllocator::tryAllocate(size_t bytes, void** outPtr)
return true;
}
inline CheckedBoolean CopiedAllocator::tryAllocateDuringCopying(size_t bytes, void** outPtr)
{
if (!tryAllocate(bytes, outPtr))
return false;
m_currentBlock->reportLiveBytesDuringCopying(bytes);
return true;
}
inline CheckedBoolean CopiedAllocator::tryReallocate(
void* oldPtr, size_t oldBytes, size_t newBytes)
{
......
......@@ -49,10 +49,14 @@ public:
void pin();
bool isPinned();
bool isOld();
bool isOversize();
void didPromote();
unsigned liveBytes();
void reportLiveBytes(JSCell*, CopyToken, unsigned);
bool shouldReportLiveBytes(SpinLockHolder&, JSCell* owner);
void reportLiveBytes(SpinLockHolder&, JSCell*, CopyToken, unsigned);
void reportLiveBytesDuringCopying(unsigned);
void didSurviveGC();
void didEvacuateBytes(unsigned);
bool shouldEvacuate();
......@@ -81,6 +85,7 @@ public:
bool hasWorkList();
CopyWorkList& workList();
SpinLock& workListLock() { return m_workListLock; }
private:
CopiedBlock(Region*);
......@@ -88,13 +93,12 @@ private:
void checkConsistency();
#if ENABLE(PARALLEL_GC)
SpinLock m_workListLock;
#endif
OwnPtr<CopyWorkList> m_workList;
size_t m_remaining;
uintptr_t m_isPinned;
bool m_isPinned : 1;
bool m_isOld : 1;
unsigned m_liveBytes;
#ifndef NDEBUG
unsigned m_liveObjects;
......@@ -130,14 +134,13 @@ inline CopiedBlock::CopiedBlock(Region* region)
: HeapBlock<CopiedBlock>(region)
, m_remaining(payloadCapacity())
, m_isPinned(false)
, m_isOld(false)
, m_liveBytes(0)
#ifndef NDEBUG
, m_liveObjects(0)
#endif
{
#if ENABLE(PARALLEL_GC)
m_workListLock.Init();
#endif
ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
}
......@@ -156,6 +159,7 @@ inline void CopiedBlock::didSurviveGC()
inline void CopiedBlock::didEvacuateBytes(unsigned bytes)
{
ASSERT(m_liveBytes >= bytes);
ASSERT(m_liveObjects);
checkConsistency();
m_liveBytes -= bytes;
#ifndef NDEBUG
......@@ -188,6 +192,16 @@ inline bool CopiedBlock::isPinned()
return m_isPinned;
}
inline bool CopiedBlock::isOld()
{
return m_isOld;
}
inline void CopiedBlock::didPromote()
{
m_isOld = true;
}
inline bool CopiedBlock::isOversize()
{
return region()->isCustomSize();
......
......@@ -26,21 +26,33 @@
#ifndef CopiedBlockInlines_h
#define CopiedBlockInlines_h
#include "ClassInfo.h"
#include "CopiedBlock.h"
#include "Heap.h"
#include "MarkedBlock.h"
namespace JSC {
inline void CopiedBlock::reportLiveBytes(JSCell* owner, CopyToken token, unsigned bytes)
inline bool CopiedBlock::shouldReportLiveBytes(SpinLockHolder&, JSCell* owner)
{
// We want to add to live bytes if the owner isn't part of the remembered set or
// if this block was allocated during the last cycle.
// If we always added live bytes we would double count for elements in the remembered
// set across collections.
// If we didn't always add live bytes to new blocks, we'd get too few.
bool ownerIsRemembered = MarkedBlock::blockFor(owner)->isRemembered(owner);
return !ownerIsRemembered || !m_isOld;
}
inline void CopiedBlock::reportLiveBytes(SpinLockHolder&, JSCell* owner, CopyToken token, unsigned bytes)
{
#if ENABLE(PARALLEL_GC)
SpinLockHolder locker(&m_workListLock);
#endif
#ifndef NDEBUG
checkConsistency();
#ifndef NDEBUG
m_liveObjects++;
#endif
m_liveBytes += bytes;
checkConsistency();
ASSERT(m_liveBytes <= CopiedBlock::blockSize);
if (isPinned())
return;
......@@ -56,6 +68,19 @@ inline void CopiedBlock::reportLiveBytes(JSCell* owner, CopyToken token, unsigne
m_workList->append(CopyWorklistItem(owner, token));
}
inline void CopiedBlock::reportLiveBytesDuringCopying(unsigned bytes)
{
checkConsistency();
// This doesn't need to be locked because the thread that calls this function owns the current block.
m_isOld = true;
#ifndef NDEBUG
m_liveObjects++;
#endif
m_liveBytes += bytes;
checkConsistency();
ASSERT(m_liveBytes <= CopiedBlock::blockSize);
}
} // namespace JSC
#endif // CopiedBlockInlines_h
This diff is collapsed.
......@@ -28,6 +28,7 @@
#include "CopiedAllocator.h"
#include "HeapBlock.h"
#include "HeapOperation.h"
#include "TinyBloomFilter.h"
#include <wtf/Assertions.h>
#include <wtf/CheckedBoolean.h>
......@@ -62,7 +63,10 @@ public:
void didStartFullCollection();
template <HeapOperation collectionType>
void startedCopying();
void startedEdenCopy();
void startedFullCopy();
void doneCopying();
bool isInCopyPhase() { return m_inCopyingPhase; }
......@@ -95,24 +99,36 @@ private:
CopiedBlock* allocateBlockForCopyingPhase();
void doneFillingBlock(CopiedBlock*, CopiedBlock**);
void recycleEvacuatedBlock(CopiedBlock*);
void recycleEvacuatedBlock(CopiedBlock*, HeapOperation collectionType);
void recycleBorrowedBlock(CopiedBlock*);
Heap* m_heap;
CopiedAllocator m_allocator;
TinyBloomFilter m_blockFilter;
HashSet<CopiedBlock*> m_blockSet;
SpinLock m_toSpaceLock;
DoublyLinkedList<CopiedBlock>* m_toSpace;
DoublyLinkedList<CopiedBlock>* m_fromSpace;
DoublyLinkedList<CopiedBlock> m_blocks1;
DoublyLinkedList<CopiedBlock> m_blocks2;
DoublyLinkedList<CopiedBlock> m_oversizeBlocks;
struct CopiedGeneration {
CopiedGeneration()
: toSpace(0)
, fromSpace(0)
{
}
DoublyLinkedList<CopiedBlock>* toSpace;
DoublyLinkedList<CopiedBlock>* fromSpace;
DoublyLinkedList<CopiedBlock> blocks1;
DoublyLinkedList<CopiedBlock> blocks2;
DoublyLinkedList<CopiedBlock> oversizeBlocks;
TinyBloomFilter blockFilter;
};
CopiedGeneration m_oldGen;
CopiedGeneration m_newGen;
bool m_inCopyingPhase;
bool m_shouldDoCopyPhase;
......
......@@ -37,7 +37,8 @@ namespace JSC {
inline bool CopiedSpace::contains(CopiedBlock* block)
{
return !m_blockFilter.ruleOut(reinterpret_cast<Bits>(block)) && m_blockSet.contains(block);
return (!m_newGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)) || !m_oldGen.blockFilter.ruleOut(reinterpret_cast<Bits>(block)))
&& m_blockSet.contains(block);
}
inline bool CopiedSpace::contains(void* ptr, CopiedBlock*& result)
......@@ -92,7 +93,7 @@ inline void CopiedSpace::pinIfNecessary(void* opaquePointer)
pin(block);
}
inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block)
inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block, HeapOperation collectionType)
{
ASSERT(block);
ASSERT(block->canBeRecycled());
......@@ -100,7 +101,10 @@ inline void CopiedSpace::recycleEvacuatedBlock(CopiedBlock* block)
{
SpinLockHolder locker(&m_toSpaceLock);
m_blockSet.remove(block);
m_fromSpace->remove(block);
if (collectionType == EdenCollection)
m_newGen.fromSpace->remove(block);
else
m_oldGen.fromSpace->remove(block);
}
m_heap->blockAllocator().deallocate(CopiedBlock::destroy(block));
}
......@@ -141,8 +145,8 @@ inline void CopiedSpace::allocateBlock()
CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate<CopiedBlock>());
m_toSpace->push(block);
m_blockFilter.add(reinterpret_cast<Bits>(block));
m_newGen.toSpace->push(block);
m_newGen.blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
m_allocator.setCurrentBlock(block);
}
......@@ -174,6 +178,85 @@ inline CopiedBlock* CopiedSpace::blockFor(void* ptr)
return reinterpret_cast<CopiedBlock*>(reinterpret_cast<size_t>(ptr) & s_blockMask);
}
template <HeapOperation collectionType>
inline void CopiedSpace::startedCopying()
{
DoublyLinkedList<CopiedBlock>* fromSpace;
DoublyLinkedList<CopiedBlock>* oversizeBlocks;
TinyBloomFilter* blockFilter;
if (collectionType == FullCollection) {
ASSERT(m_oldGen.fromSpace->isEmpty());
ASSERT(m_newGen.fromSpace->isEmpty());
m_oldGen.toSpace->append(*m_newGen.toSpace);
m_oldGen.oversizeBlocks.append(m_newGen.oversizeBlocks);
ASSERT(m_newGen.toSpace->isEmpty());
ASSERT(m_newGen.fromSpace->isEmpty());
ASSERT(m_newGen.oversizeBlocks.isEmpty());
std::swap(m_oldGen.fromSpace, m_oldGen.toSpace);
fromSpace = m_oldGen.fromSpace;
oversizeBlocks = &m_oldGen.oversizeBlocks;
blockFilter = &m_oldGen.blockFilter;
} else {
std::swap(m_newGen.fromSpace, m_newGen.toSpace);
fromSpace = m_newGen.fromSpace;
oversizeBlocks = &m_newGen.oversizeBlocks;
blockFilter = &m_newGen.blockFilter;
}
blockFilter->reset();
m_allocator.resetCurrentBlock();
CopiedBlock* next = 0;
size_t totalLiveBytes = 0;
size_t totalUsableBytes = 0;
for (CopiedBlock* block = fromSpace->head(); block; block = next) {
next = block->next();
if (!block->isPinned() && block->canBeRecycled()) {
recycleEvacuatedBlock(block, collectionType);
continue;
}
ASSERT(block->liveBytes() <= CopiedBlock::blockSize);
totalLiveBytes += block->liveBytes();
totalUsableBytes += block->payloadCapacity();
block->didPromote();
}
CopiedBlock* block = oversizeBlocks->head();
while (block) {
CopiedBlock* next = block->next();
if (block->isPinned()) {
blockFilter->add(reinterpret_cast<Bits>(block));
totalLiveBytes += block->payloadCapacity();
totalUsableBytes += block->payloadCapacity();
block->didPromote();
} else {
oversizeBlocks->remove(block);
m_blockSet.remove(block);
m_heap->blockAllocator().deallocateCustomSize(CopiedBlock::destroy(block));
}
block = next;
}
double markedSpaceBytes = m_heap->objectSpace().capacity();
double totalFragmentation = static_cast<double>(totalLiveBytes + markedSpaceBytes) / static_cast<double>(totalUsableBytes + markedSpaceBytes);
m_shouldDoCopyPhase = m_heap->operationInProgress() == EdenCollection || totalFragmentation <= Options::minHeapUtilization();
if (!m_shouldDoCopyPhase) {
if (Options::logGC())
dataLog("Skipped copying, ");
return;
}
if (Options::logGC())
dataLogF("Did copy, ");
ASSERT(m_shouldDoCopyPhase);
ASSERT(!m_numberOfLoanedBlocks);
ASSERT(!m_inCopyingPhase);
m_inCopyingPhase = true;
}
} // namespace JSC
#endif // CopiedSpaceInlines_h
......
......@@ -57,7 +57,7 @@ void CopyVisitor::copyFromShared()
visitItem(*it);
ASSERT(!block->liveBytes());
m_shared.m_copiedSpace->recycleEvacuatedBlock(block);
m_shared.m_copiedSpace->recycleEvacuatedBlock(block, m_shared.m_vm->heap.operationInProgress());
}
m_shared.getNextBlocksToCopy(next, end);
}
......
......@@ -55,7 +55,7 @@ inline bool CopyVisitor::checkIfShouldCopy(void* oldPtr)
inline void* CopyVisitor::allocateNewSpace(size_t bytes)
{
void* result = 0; // Compilers don't realize that this will be assigned.
if (LIKELY(m_copiedAllocator.tryAllocate(bytes, &result)))
if (LIKELY(m_copiedAllocator.tryAllocateDuringCopying(bytes, &result)))
return result;
result = allocateNewSpaceSlow(bytes);
......@@ -70,7 +70,7 @@ inline void* CopyVisitor::allocateNewSpaceSlow(size_t bytes)
m_copiedAllocator.setCurrentBlock(newBlock);
void* result = 0;
CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
CheckedBoolean didSucceed = m_copiedAllocator.tryAllocateDuringCopying(bytes, &result);
ASSERT(didSucceed);
return result;
}
......
......@@ -181,7 +181,15 @@ void GCThreadSharedData::didStartCopying()
{
{
SpinLockHolder locker(&m_copyLock);
WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
if (m_vm->heap.operationInProgress() == EdenCollection) {
// Reset the vector to be empty, but don't throw away the backing store.
m_blocksToCopy.shrink(0);
for (CopiedBlock* block = m_copiedSpace->m_newGen.fromSpace->head(); block; block = block->next())
m_blocksToCopy.append(block);
} else {
ASSERT(m_vm->heap.operationInProgress() == FullCollection);
WTF::copyToVector(m_copiedSpace->m_blockSet, m_blocksToCopy);
}
m_copyIndex = 0;
}
......
......@@ -641,10 +641,7 @@ void Heap::markRoots()
template <HeapOperation collectionType>
void Heap::copyBackingStores()
{
if (collectionType == EdenCollection)
return;
m_storageSpace.startedCopying();
m_storageSpace.startedCopying<collectionType>();
if (m_storageSpace.shouldDoCopyPhase()) {
m_sharedData.didStartCopying();
m_copyVisitor.startCopying();
......
......@@ -225,18 +225,17 @@ inline void SlotVisitor::donateAndDrain()
inline void SlotVisitor::copyLater(JSCell* owner, CopyToken token, void* ptr, size_t bytes)
{
ASSERT(bytes);
// We don't do any copying during EdenCollections.
ASSERT(heap()->operationInProgress() != EdenCollection);
m_bytesCopied += bytes;
CopiedBlock* block = CopiedSpace::blockFor(ptr);
if (block->isOversize()) {
m_shared.m_copiedSpace->pin(block);
return;
}
block->reportLiveBytes(owner, token, bytes);
SpinLockHolder locker(&block->workListLock());
if (heap()->operationInProgress() == FullCollection || block->shouldReportLiveBytes(locker, owner)) {
m_bytesCopied += bytes;
block->reportLiveBytes(locker, owner, token, bytes);
}
}
inline void SlotVisitor::reportExtraMemoryUsage(JSCell* owner, size_t size)
......
......@@ -35,6 +35,7 @@ public:
TinyBloomFilter();
void add(Bits);
void add(TinyBloomFilter&);
bool ruleOut(Bits) const; // True for 0.
void reset();
......@@ -52,6 +53,11 @@ inline void TinyBloomFilter::add(Bits bits)
m_bits |= bits;
}
inline void TinyBloomFilter::add(TinyBloomFilter& other)
{
m_bits |= other.m_bits;
}
inline bool TinyBloomFilter::ruleOut(Bits bits) const
{
if (!bits)
......