Commit a147a4d3 authored by fpizlo@apple.com

Inline caches that refer to otherwise dead objects should be cleared

https://bugs.webkit.org/show_bug.cgi?id=72311

Reviewed by Geoff Garen.

DFG code blocks now participate in the weak reference harvester fixpoint
so that they only consider themselves to be live if either they are
currently executing, or their owner is live and all of their weak references
are live. If not, the relevant code blocks are jettisoned.
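
As a rough, self-contained sketch of that fixpoint (the types below — Marker,
OptimizedBlock — are simplified stand-ins for illustration only, not the real
SlotVisitor / WeakReferenceHarvester / CodeBlock interfaces):

    #include <unordered_set>
    #include <vector>

    struct Cell { bool marked = false; };

    struct Marker {
        std::unordered_set<Cell*> newlyMarked;
        void mark(Cell* cell)
        {
            if (cell->marked)
                return;
            cell->marked = true;
            newlyMarked.insert(cell);
        }
    };

    struct OptimizedBlock {
        bool executing = false;
        bool livenessProved = false;
        std::vector<Cell*> weakReferences;   // e.g. cached structures and callees
        std::vector<Cell*> strongReferences; // cells the block keeps alive if it survives

        // One fixpoint iteration: if every weak reference is already marked, the
        // block proves itself live and marks its strong references, which may in
        // turn let other blocks prove liveness on a later iteration.
        void visitWeakReferences(Marker& marker)
        {
            if (livenessProved)
                return;
            for (Cell* cell : weakReferences) {
                if (!cell->marked)
                    return; // still unproved; try again next iteration
            }
            livenessProved = true;
            for (Cell* cell : strongReferences)
                marker.mark(cell);
        }

        // At the end of GC, anything still unproved would be jettisoned.
        bool shouldJettison() const { return !executing && !livenessProved; }
    };

    // Driver: iterate until no new objects get marked (the fixpoint).
    void runHarvesterFixpoint(Marker& marker, std::vector<OptimizedBlock*>& blocks)
    {
        bool changed = true;
        while (changed) {
            marker.newlyMarked.clear();
            for (OptimizedBlock* block : blocks)
                block->visitWeakReferences(marker);
            changed = !marker.newlyMarked.empty();
        }
    }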

Inline caches in both the old JIT and the DFG are now cleared if any of
their references are not marked at the end of a GC.
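
A minimal sketch of that end-of-GC check, using a hypothetical self-access
cache entry (the real patch walks StructureStubInfo and CallLinkInfo entries
and repatches the generated code via resetPatchGetById / dfgResetGetByID and
their put-by-id counterparts):

    // Illustrative only: a cache entry whose Structure is referenced weakly.
    // If the Structure did not survive the GC, the cache is reset to the
    // unlinked (slow-path) state rather than keeping the Structure alive.
    struct Structure { bool marked = false; };

    struct GetByIdCache {
        Structure* cachedStructure = nullptr;

        bool isLinked() const { return cachedStructure; }

        void finalizeAfterGC()
        {
            if (isLinked() && !cachedStructure->marked) {
                // In the real patch this is where the JIT code is repatched
                // back to the generic stub before the entry is cleared.
                cachedStructure = nullptr;
            }
        }
    };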

This is performance-neutral on SunSpider, V8, and Kraken. With the clear-
all-code-on-GC policy that we currently have, it shows a slight reduction
in memory usage. If we turn that policy off, it's pretty easy to come up
with an example program that causes ToT to exhibit linear heap growth,
while with this patch the heap stays small and constant in size.

* assembler/ARMv7Assembler.h:
(JSC::ARMv7Assembler::readCallTarget):
* assembler/MacroAssemblerARMv7.h:
(JSC::MacroAssemblerARMv7::readCallTarget):
* assembler/MacroAssemblerX86.h:
(JSC::MacroAssemblerX86::readCallTarget):
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::readCallTarget):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::visitAggregate):
(JSC::CodeBlock::performTracingFixpointIteration):
(JSC::CodeBlock::visitWeakReferences):
(JSC::CodeBlock::finalizeUnconditionally):
(JSC::CodeBlock::stronglyVisitStrongReferences):
(JSC::MethodCallLinkInfo::reset):
(JSC::ProgramCodeBlock::jettison):
(JSC::EvalCodeBlock::jettison):
(JSC::FunctionCodeBlock::jettison):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::reoptimize):
(JSC::CodeBlock::shouldImmediatelyAssumeLivenessDuringScan):
* bytecode/Instruction.h:
(JSC::PolymorphicAccessStructureList::visitWeak):
* bytecode/StructureStubInfo.cpp:
(JSC::StructureStubInfo::visitWeakReferences):
* bytecode/StructureStubInfo.h:
(JSC::isGetByIdAccess):
(JSC::isPutByIdAccess):
(JSC::StructureStubInfo::reset):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::link):
* dfg/DFGOperations.cpp:
* dfg/DFGRepatch.cpp:
(JSC::DFG::dfgRepatchByIdSelfAccess):
(JSC::DFG::dfgResetGetByID):
(JSC::DFG::dfgResetPutByID):
* dfg/DFGRepatch.h:
(JSC::DFG::dfgResetGetByID):
(JSC::DFG::dfgResetPutByID):
* jit/JIT.h:
* jit/JITPropertyAccess.cpp:
(JSC::JIT::resetPatchGetById):
(JSC::JIT::resetPatchPutById):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::resetPatchGetById):
(JSC::JIT::resetPatchPutById):
* jit/JITStubs.cpp:
(JSC::DEFINE_STUB_FUNCTION):
* jit/JITWriteBarrier.h:
(JSC::JITWriteBarrierBase::clearToMaxUnsigned):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@100880 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 5da687a7
......@@ -1894,6 +1894,11 @@ public:
setPointer(reinterpret_cast<uint16_t*>(from) - 1, to);
}
static void* readCallTarget(void* from)
{
return readPointer(reinterpret_cast<uint16_t*>(from) - 1);
}
static void repatchInt32(void* where, int32_t value)
{
......
......@@ -1561,6 +1561,11 @@ protected:
{
return static_cast<ARMv7Assembler::Condition>(cond);
}
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return ARMv7Assembler::readCallTarget(call.dataLocation());
}
private:
friend class LinkBuffer;
......
......@@ -205,6 +205,12 @@ public:
static bool supportsFloatingPointTruncate() { return isSSE2Present(); }
static bool supportsFloatingPointSqrt() { return isSSE2Present(); }
static bool supportsFloatingPointAbs() { return isSSE2Present(); }
static FunctionPtr readCallTarget(CodeLocationCall call)
{
intptr_t offset = reinterpret_cast<int32_t*>(call.dataLocation())[-1];
return FunctionPtr(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(call.dataLocation()) + offset));
}
private:
friend class LinkBuffer;
......
......@@ -497,6 +497,11 @@ public:
static bool supportsFloatingPointTruncate() { return true; }
static bool supportsFloatingPointSqrt() { return true; }
static bool supportsFloatingPointAbs() { return true; }
static FunctionPtr readCallTarget(CodeLocationCall call)
{
return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
}
private:
friend class LinkBuffer;
......
......@@ -33,9 +33,11 @@
#include "BytecodeGenerator.h"
#include "DFGCapabilities.h"
#include "DFGNode.h"
#include "DFGRepatch.h"
#include "Debugger.h"
#include "Interpreter.h"
#include "JIT.h"
#include "JITStubs.h"
#include "JSActivation.h"
#include "JSFunction.h"
#include "JSStaticScopeObject.h"
......@@ -1587,6 +1589,210 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
{
if (!!m_alternative)
m_alternative->visitAggregate(visitor);
// There are three things that may use unconditional finalizers: lazy bytecode freeing,
// inline cache clearing, and jettisoning. The probability of us wanting to do at
// least one of those things is probably quite close to 1. So we add one no matter what
// and when it runs, it figures out whether it has any work to do.
visitor.addUnconditionalFinalizer(this);
if (shouldImmediatelyAssumeLivenessDuringScan()) {
// This code block is live, so scan all references strongly and return.
stronglyVisitStrongReferences(visitor);
stronglyVisitWeakReferences(visitor);
return;
}
#if ENABLE(DFG_JIT)
// We get here if we're live in the sense that our owner executable is live,
// but we're not yet live for sure in another sense: we may yet decide that this
// code block should be jettisoned based on its outgoing weak references being
// stale. Set a flag to indicate that we're still assuming that we're dead, and
// perform one round of determining if we're live. The GC may determine, based on
// either us marking additional objects, or by other objects being marked for
// other reasons, that this iteration should run again; it will notify us of this
// decision by calling harvestWeakReferences().
m_dfgData->livenessHasBeenProved = false;
m_dfgData->allTransitionsHaveBeenMarked = false;
performTracingFixpointIteration(visitor);
// GC doesn't have enough information yet for us to decide whether to keep our DFG
// data, so we need to register a handler to run again at the end of GC, when more
// information is available.
if (!(m_dfgData->livenessHasBeenProved && m_dfgData->allTransitionsHaveBeenMarked))
visitor.addWeakReferenceHarvester(this);
#else // ENABLE(DFG_JIT)
ASSERT_NOT_REACHED();
#endif // ENABLE(DFG_JIT)
}
void CodeBlock::performTracingFixpointIteration(SlotVisitor& visitor)
{
UNUSED_PARAM(visitor);
#if ENABLE(DFG_JIT)
// Evaluate our weak reference transitions, if there are still some to evaluate.
if (!m_dfgData->allTransitionsHaveBeenMarked) {
bool allAreMarkedSoFar = true;
for (unsigned i = 0; i < m_dfgData->transitions.size(); ++i) {
if ((!m_dfgData->transitions[i].m_codeOrigin
|| Heap::isMarked(m_dfgData->transitions[i].m_codeOrigin.get()))
&& Heap::isMarked(m_dfgData->transitions[i].m_from.get())) {
// If the following three things are live, then the target of the
// transition is also live:
// - This code block. We know it's live already because otherwise
// we wouldn't be scanning ourselves.
// - The code origin of the transition. Transitions may arise from
// code that was inlined. They are not relevant if the user's
// object that is required for the inlinee to run is no longer
// live.
// - The source of the transition. The transition checks if some
// heap location holds the source, and if so, stores the target.
// Hence the source must be live for the transition to be live.
visitor.append(&m_dfgData->transitions[i].m_to);
} else
allAreMarkedSoFar = false;
}
if (allAreMarkedSoFar)
m_dfgData->allTransitionsHaveBeenMarked = true;
}
// Check if we have any remaining work to do.
if (m_dfgData->livenessHasBeenProved)
return;
// Now check all of our weak references. If all of them are live, then we
// have proved liveness and so we scan our strong references. If at end of
// GC we still have not proved liveness, then this code block is toast.
bool allAreLiveSoFar = true;
for (unsigned i = 0; i < m_dfgData->weakReferences.size(); ++i) {
if (!Heap::isMarked(m_dfgData->weakReferences[i].get())) {
allAreLiveSoFar = false;
break;
}
}
// If some weak references are dead, then this fixpoint iteration was
// unsuccessful.
if (!allAreLiveSoFar)
return;
// All weak references are live. Record this information so we don't
// come back here again, and scan the strong references.
m_dfgData->livenessHasBeenProved = true;
stronglyVisitStrongReferences(visitor);
#endif // ENABLE(DFG_JIT)
}
void CodeBlock::visitWeakReferences(SlotVisitor& visitor)
{
performTracingFixpointIteration(visitor);
}
void CodeBlock::finalizeUnconditionally()
{
#if ENABLE(JIT_VERBOSE_OSR)
static const bool verboseUnlinking = true;
#else
static const bool verboseUnlinking = false;
#endif
#if ENABLE(DFG_JIT)
// Check if we're not live. If so, then jettison.
if (!(shouldImmediatelyAssumeLivenessDuringScan() || m_dfgData->livenessHasBeenProved)) {
if (verboseUnlinking)
printf("Code block %p has dead weak references, jettisoning during GC.\n", this);
// Make sure that the baseline JIT knows that it should re-warm-up before
// optimizing.
alternative()->optimizeAfterWarmUp();
jettison();
return;
}
#endif // ENABLE(DFG_JIT)
#if ENABLE(JIT)
// Handle inline caches.
if (!!getJITCode()) {
RepatchBuffer repatchBuffer(this);
for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
if (callLinkInfo(i).isLinked() && !Heap::isMarked(callLinkInfo(i).callee.get())) {
if (verboseUnlinking)
printf("Clearing call from %p.\n", this);
callLinkInfo(i).unlink(*m_globalData, repatchBuffer);
}
if (!!callLinkInfo(i).lastSeenCallee
&& !Heap::isMarked(callLinkInfo(i).lastSeenCallee.get()))
callLinkInfo(i).lastSeenCallee.clear();
}
for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
if (m_globalResolveInfos[i].structure && !Heap::isMarked(m_globalResolveInfos[i].structure.get())) {
if (verboseUnlinking)
printf("Clearing resolve info in %p.\n", this);
m_globalResolveInfos[i].structure.clear();
}
}
for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i) {
StructureStubInfo& stubInfo = m_structureStubInfos[i];
AccessType accessType = static_cast<AccessType>(stubInfo.accessType);
if (stubInfo.visitWeakReferences())
continue;
if (verboseUnlinking)
printf("Clearing structure cache (kind %d) in %p.\n", stubInfo.accessType, this);
if (isGetByIdAccess(accessType)) {
if (getJITCode().jitType() == JITCode::DFGJIT)
DFG::dfgResetGetByID(repatchBuffer, stubInfo);
else
JIT::resetPatchGetById(repatchBuffer, &stubInfo);
} else {
ASSERT(isPutByIdAccess(accessType));
if (getJITCode().jitType() == JITCode::DFGJIT)
DFG::dfgResetPutByID(repatchBuffer, stubInfo);
else
JIT::resetPatchPutById(repatchBuffer, &stubInfo);
}
stubInfo.reset();
}
for (size_t size = m_methodCallLinkInfos.size(), i = 0; i < size; ++i) {
if (!m_methodCallLinkInfos[i].cachedStructure)
continue;
ASSERT(m_methodCallLinkInfos[i].seenOnce());
ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
if (!Heap::isMarked(m_methodCallLinkInfos[i].cachedStructure.get())
|| !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototypeStructure.get())
|| !Heap::isMarked(m_methodCallLinkInfos[i].cachedFunction.get())
|| !Heap::isMarked(m_methodCallLinkInfos[i].cachedPrototype.get())) {
if (verboseUnlinking)
printf("Clearing method call in %p.\n", this);
m_methodCallLinkInfos[i].reset(repatchBuffer, getJITType());
}
}
}
#endif
// Handle the bytecode discarding chore.
if (m_shouldDiscardBytecode) {
discardBytecode();
m_shouldDiscardBytecode = false;
}
}
void CodeBlock::stronglyVisitStrongReferences(SlotVisitor& visitor)
{
visitor.append(&m_globalObject);
visitor.append(&m_ownerExecutable);
if (m_rareData) {
......@@ -1601,42 +1807,12 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
visitor.append(&m_functionExprs[i]);
for (size_t i = 0; i < m_functionDecls.size(); ++i)
visitor.append(&m_functionDecls[i]);
#if ENABLE(JIT)
for (unsigned i = 0; i < numberOfCallLinkInfos(); ++i) {
if (callLinkInfo(i).isLinked())
visitor.append(&callLinkInfo(i).callee);
if (!!callLinkInfo(i).lastSeenCallee)
visitor.append(&callLinkInfo(i).lastSeenCallee);
}
#endif
#if ENABLE(INTERPRETER)
for (size_t size = m_propertyAccessInstructions.size(), i = 0; i < size; ++i)
visitStructures(visitor, &instructions()[m_propertyAccessInstructions[i]]);
for (size_t size = m_globalResolveInstructions.size(), i = 0; i < size; ++i)
visitStructures(visitor, &instructions()[m_globalResolveInstructions[i]]);
#endif
#if ENABLE(JIT)
for (size_t size = m_globalResolveInfos.size(), i = 0; i < size; ++i) {
if (m_globalResolveInfos[i].structure)
visitor.append(&m_globalResolveInfos[i].structure);
}
for (size_t size = m_structureStubInfos.size(), i = 0; i < size; ++i)
m_structureStubInfos[i].visitAggregate(visitor);
for (size_t size = m_methodCallLinkInfos.size(), i = 0; i < size; ++i) {
if (m_methodCallLinkInfos[i].cachedStructure) {
// These members must be filled at the same time, and only after
// the MethodCallLinkInfo is set as seen.
ASSERT(m_methodCallLinkInfos[i].seenOnce());
visitor.append(&m_methodCallLinkInfos[i].cachedStructure);
ASSERT(!!m_methodCallLinkInfos[i].cachedPrototypeStructure);
visitor.append(&m_methodCallLinkInfos[i].cachedPrototypeStructure);
visitor.append(&m_methodCallLinkInfos[i].cachedFunction);
visitor.append(&m_methodCallLinkInfos[i].cachedPrototype);
}
}
#endif
#if ENABLE(DFG_JIT)
if (hasCodeOrigins()) {
......@@ -1653,15 +1829,6 @@ void CodeBlock::visitAggregate(SlotVisitor& visitor)
for (unsigned profileIndex = 0; profileIndex < numberOfValueProfiles(); ++profileIndex)
valueProfile(profileIndex)->computeUpdatedPrediction();
#endif
#if ENABLE(JIT) && !ENABLE(OPCODE_SAMPLING)
// Kill off some bytecode. We can't do it here because we don't want to accidentally
// call into malloc while in stop-the-world GC mode.
if (hasInstructions() && m_shouldDiscardBytecode)
visitor.addUnconditionalFinalizer(this);
#endif
stronglyVisitWeakReferences(visitor);
}
void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor)
......@@ -1867,6 +2034,25 @@ void CallLinkInfo::unlink(JSGlobalData& globalData, RepatchBuffer& repatchBuffer
remove();
}
void MethodCallLinkInfo::reset(RepatchBuffer& repatchBuffer, JITCode::JITType jitType)
{
cachedStructure.clearToMaxUnsigned();
cachedPrototype.clear();
cachedPrototypeStructure.clearToMaxUnsigned();
cachedFunction.clear();
if (jitType == JITCode::DFGJIT) {
#if ENABLE(DFG_JIT)
repatchBuffer.relink(callReturnLocation, operationGetMethodOptimize);
#else
ASSERT_NOT_REACHED();
#endif
} else {
ASSERT(jitType == JITCode::BaselineJIT);
repatchBuffer.relink(callReturnLocation, cti_op_get_by_id_method_check);
}
}
void CodeBlock::unlinkCalls()
{
if (!!m_alternative)
......@@ -1982,37 +2168,28 @@ bool FunctionCodeBlock::canCompileWithDFG()
return DFG::canCompileFunctionForCall(this);
}
void ProgramCodeBlock::jettison(JSGlobalData& globalData)
void ProgramCodeBlock::jettison()
{
ASSERT(getJITType() != JITCode::BaselineJIT);
ASSERT(this == replacement());
static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(globalData);
static_cast<ProgramExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
}
void EvalCodeBlock::jettison(JSGlobalData& globalData)
void EvalCodeBlock::jettison()
{
ASSERT(getJITType() != JITCode::BaselineJIT);
ASSERT(this == replacement());
static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(globalData);
static_cast<EvalExecutable*>(ownerExecutable())->jettisonOptimizedCode(*globalData());
}
void FunctionCodeBlock::jettison(JSGlobalData& globalData)
void FunctionCodeBlock::jettison()
{
ASSERT(getJITType() != JITCode::BaselineJIT);
ASSERT(this == replacement());
static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(globalData, m_isConstructor ? CodeForConstruct : CodeForCall);
static_cast<FunctionExecutable*>(ownerExecutable())->jettisonOptimizedCodeFor(*globalData(), m_isConstructor ? CodeForConstruct : CodeForCall);
}
#endif
void CodeBlock::finalizeUnconditionally()
{
#if ENABLE(OPCODE_SAMPLING) || !ENABLE(JIT)
ASSERT_NOT_REACHED();
#endif
ASSERT(m_shouldDiscardBytecode);
discardBytecode();
}
#if ENABLE(VALUE_PROFILER)
bool CodeBlock::shouldOptimizeNow()
{
......
......@@ -171,6 +171,8 @@ namespace JSC {
{
seen = true;
}
void reset(RepatchBuffer&, JITCode::JITType);
unsigned bytecodeIndex;
CodeLocationCall callReturnLocation;
......@@ -250,7 +252,7 @@ namespace JSC {
}
#endif
class CodeBlock : public UnconditionalFinalizer {
class CodeBlock : public UnconditionalFinalizer, public WeakReferenceHarvester {
WTF_MAKE_FAST_ALLOCATED;
friend class JIT;
public:
......@@ -294,10 +296,6 @@ namespace JSC {
bool canProduceCopyWithBytecode() { return hasInstructions(); }
void visitAggregate(SlotVisitor&);
// Call this if you are not jettisoning a code block, and thus
// have no evidence to suggest that it will never be called into again.
void stronglyVisitWeakReferences(SlotVisitor&);
static void dumpStatistics();
......@@ -537,7 +535,7 @@ namespace JSC {
JITCode::JITType getJITType() { return m_jitCode.jitType(); }
ExecutableMemoryHandle* executableMemory() { return getJITCode().getExecutableMemory(); }
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*) = 0;
virtual void jettison(JSGlobalData&) = 0;
virtual void jettison() = 0;
virtual CodeBlock* replacement() = 0;
virtual bool canCompileWithDFG() = 0;
bool hasOptimizedReplacement()
......@@ -1060,10 +1058,10 @@ namespace JSC {
#endif
#if ENABLE(JIT)
void reoptimize(JSGlobalData& globalData)
void reoptimize()
{
ASSERT(replacement() != this);
replacement()->jettison(globalData);
replacement()->jettison();
countReoptimization();
optimizeAfterWarmUp();
}
......@@ -1085,6 +1083,7 @@ namespace JSC {
bool m_shouldDiscardBytecode;
protected:
virtual void visitWeakReferences(SlotVisitor&);
virtual void finalizeUnconditionally();
private:
......@@ -1101,6 +1100,33 @@ namespace JSC {
void printPutByIdOp(ExecState*, int location, Vector<Instruction>::const_iterator&, const char* op) const;
#endif
void visitStructures(SlotVisitor&, Instruction* vPC) const;
#if ENABLE(DFG_JIT)
bool shouldImmediatelyAssumeLivenessDuringScan()
{
// Null m_dfgData means that this is a baseline JIT CodeBlock. Baseline JIT
// CodeBlocks don't need to be jettisoned when their weak references go
// stale. So if a baseline JIT CodeBlock gets scanned, we can assume that
// this means that it's live.
if (!m_dfgData)
return true;
// For simplicity, we don't attempt to jettison code blocks during GC if
// they are executing. Instead we strongly mark their weak references to
// allow them to continue to execute soundly.
if (m_dfgData->mayBeExecuting)
return true;
return false;
}
#else
bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif
void performTracingFixpointIteration(SlotVisitor&);
void stronglyVisitStrongReferences(SlotVisitor&);
void stronglyVisitWeakReferences(SlotVisitor&);
void createRareDataIfNecessary()
{
......@@ -1177,6 +1203,8 @@ namespace JSC {
Vector<WriteBarrier<JSCell> > weakReferences;
bool mayBeExecuting;
bool isJettisoned;
bool livenessHasBeenProved; // Initialized and used on every GC.
bool allTransitionsHaveBeenMarked; // Initialized and used on every GC.
};
OwnPtr<DFGData> m_dfgData;
......@@ -1278,7 +1306,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison(JSGlobalData&);
virtual void jettison();
virtual CodeBlock* replacement();
virtual bool canCompileWithDFG();
#endif
......@@ -1312,7 +1340,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison(JSGlobalData&);
virtual void jettison();
virtual CodeBlock* replacement();
virtual bool canCompileWithDFG();
#endif
......@@ -1349,7 +1377,7 @@ namespace JSC {
#if ENABLE(JIT)
protected:
virtual JSObject* compileOptimized(ExecState*, ScopeChainNode*);
virtual void jettison(JSGlobalData&);
virtual void jettison();
virtual CodeBlock* replacement();
virtual bool canCompileWithDFG();
#endif
......
......@@ -114,7 +114,7 @@ namespace JSC {
list[0].set(globalData, owner, stubRoutine, firstBase, firstChain, isDirect);
}
void visitAggregate(SlotVisitor& visitor, int count)
bool visitWeak(int count)
{
for (int i = 0; i < count; ++i) {
PolymorphicStubInfo& info = list[i];
......@@ -124,12 +124,17 @@ namespace JSC {
continue;
}
visitor.append(&info.base);
if (info.u.proto && !info.isChain)