Commit 56f0addc authored by oliver@apple.com

fourthTier: Everyone should know about the FTL

https://bugs.webkit.org/show_bug.cgi?id=113897

Reviewed by Mark Hahnenberg.

In order to get OSR exit to work right, we need the distinction between DFG and
FTL to be clear even after compilation finishes, since they will have subtly
different OSR stories and likely use different data structures.

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::resetStubInternal):
(JSC::ProgramCodeBlock::compileOptimized):
(JSC::EvalCodeBlock::compileOptimized):
(JSC::FunctionCodeBlock::compileOptimized):
(JSC::CodeBlock::adjustedExitCountThreshold):
(JSC::CodeBlock::tallyFrequentExitSites):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::setJITCode):
(JSC::CodeBlock::hasOptimizedReplacement):
(JSC::ExecState::isInlineCallFrame):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLJITCode.cpp:
(JSC::FTL::JITCode::JITCode):
* ftl/FTLState.cpp:
(JSC::FTL::State::dumpState):
* heap/DFGCodeBlocks.cpp:
(JSC::DFGCodeBlocks::jettison):
* interpreter/Interpreter.cpp:
(JSC::getLineNumberForCallFrame):
(JSC::getCallerInfo):
* jit/JITCode.cpp:
(WTF::printInternal):
* jit/JITCode.h:
(JSC::JITCode::topTierJIT):
(JSC::JITCode::nextTierJIT):
(JITCode):
(JSC::JITCode::isJIT):
(JSC::JITCode::isLowerTier):
(JSC::JITCode::isHigherTier):
(JSC::JITCode::isLowerOrSameTier):
(JSC::JITCode::isHigherOrSameTier):
(JSC::JITCode::isOptimizingJIT):
* jit/JITDriver.h:
(JSC::jitCompileIfAppropriate):
(JSC::jitCompileFunctionIfAppropriate):
* jit/JITStubs.cpp:
(JSC::DEFINE_STUB_FUNCTION):
* runtime/Executable.cpp:
(JSC::EvalExecutable::compileOptimized):
(JSC::samplingDescription):
(JSC::ProgramExecutable::compileOptimized):
(JSC::FunctionExecutable::compileOptimizedForCall):
(JSC::FunctionExecutable::compileOptimizedForConstruct):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@153115 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent c02ffd60
2013-07-15 Oliver Hunt <oliver@apple.com>
Merge dfgFourthTier r147587
2013-04-03 Filip Pizlo <fpizlo@apple.com>
fourthTier: Everyone should know about the FTL
https://bugs.webkit.org/show_bug.cgi?id=113897
Reviewed by Mark Hahnenberg.
In order to get OSR exit to work right, we need the distinction between DFG and
FTL to be clear even after compilation finishes, since they will have subtly
different OSR stories and likely use different data structures.
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::resetStubInternal):
(JSC::ProgramCodeBlock::compileOptimized):
(JSC::EvalCodeBlock::compileOptimized):
(JSC::FunctionCodeBlock::compileOptimized):
(JSC::CodeBlock::adjustedExitCountThreshold):
(JSC::CodeBlock::tallyFrequentExitSites):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::setJITCode):
(JSC::CodeBlock::hasOptimizedReplacement):
(JSC::ExecState::isInlineCallFrame):
* ftl/FTLCompile.cpp:
(JSC::FTL::compile):
* ftl/FTLJITCode.cpp:
(JSC::FTL::JITCode::JITCode):
* ftl/FTLState.cpp:
(JSC::FTL::State::dumpState):
* heap/DFGCodeBlocks.cpp:
(JSC::DFGCodeBlocks::jettison):
* interpreter/Interpreter.cpp:
(JSC::getLineNumberForCallFrame):
(JSC::getCallerInfo):
* jit/JITCode.cpp:
(WTF::printInternal):
* jit/JITCode.h:
(JSC::JITCode::topTierJIT):
(JSC::JITCode::nextTierJIT):
(JITCode):
(JSC::JITCode::isJIT):
(JSC::JITCode::isLowerTier):
(JSC::JITCode::isHigherTier):
(JSC::JITCode::isLowerOrSameTier):
(JSC::JITCode::isHigherOrSameTier):
(JSC::JITCode::isOptimizingJIT):
* jit/JITDriver.h:
(JSC::jitCompileIfAppropriate):
(JSC::jitCompileFunctionIfAppropriate):
* jit/JITStubs.cpp:
(JSC::DEFINE_STUB_FUNCTION):
* runtime/Executable.cpp:
(JSC::EvalExecutable::compileOptimized):
(JSC::samplingDescription):
(JSC::ProgramExecutable::compileOptimized):
(JSC::FunctionExecutable::compileOptimizedForCall):
(JSC::FunctionExecutable::compileOptimizedForConstruct):
2013-04-03 Filip Pizlo <fpizlo@apple.com>
fourthTier: DFG should abstract out how it does forward exits, and that code should be simplified
......
......@@ -2386,18 +2386,23 @@ void CodeBlock::resetStubInternal(RepatchBuffer& repatchBuffer, StructureStubInf
if (verboseUnlinking)
dataLog("Clearing structure cache (kind ", static_cast<int>(stubInfo.accessType), ") in ", *this, ".\n");
if (isGetByIdAccess(accessType)) {
if (getJITType() == JITCode::DFGJIT)
switch (getJITType()) {
case JITCode::BaselineJIT:
if (isGetByIdAccess(accessType))
JIT::resetPatchGetById(repatchBuffer, &stubInfo);
else
JIT::resetPatchPutById(repatchBuffer, &stubInfo);
break;
case JITCode::DFGJIT:
if (isGetByIdAccess(accessType))
DFG::dfgResetGetByID(repatchBuffer, stubInfo);
else
JIT::resetPatchGetById(repatchBuffer, &stubInfo);
} else {
ASSERT(isPutByIdAccess(accessType));
if (getJITType() == JITCode::DFGJIT)
DFG::dfgResetPutByID(repatchBuffer, stubInfo);
else
JIT::resetPatchPutById(repatchBuffer, &stubInfo);
break;
default:
RELEASE_ASSERT_NOT_REACHED();
break;
}
stubInfo.reset();
......@@ -2836,7 +2841,7 @@ CodeBlock* FunctionCodeBlock::replacement()
JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
if (JITCode::isHigherTier(replacement()->getJITType(), getJITType()))
return 0;
JSObject* error = static_cast<ProgramExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex);
return error;
......@@ -2844,7 +2849,7 @@ JSObject* ProgramCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, un
JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
if (JITCode::isHigherTier(replacement()->getJITType(), getJITType()))
return 0;
JSObject* error = static_cast<EvalExecutable*>(ownerExecutable())->compileOptimized(exec, scope, bytecodeIndex);
return error;
......@@ -2852,7 +2857,7 @@ JSObject* EvalCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsig
JSObject* FunctionCodeBlock::compileOptimized(ExecState* exec, JSScope* scope, unsigned bytecodeIndex)
{
if (replacement()->getJITType() == JITCode::nextTierJIT(getJITType()))
if (JITCode::isHigherTier(replacement()->getJITType(), getJITType()))
return 0;
JSObject* error = static_cast<FunctionExecutable*>(ownerExecutable())->compileOptimizedFor(exec, scope, bytecodeIndex, m_isConstructor ? CodeForConstruct : CodeForCall);
return error;
......@@ -3094,7 +3099,7 @@ void CodeBlock::optimizeSoon()
#if ENABLE(JIT)
uint32_t CodeBlock::adjustedExitCountThreshold(uint32_t desiredThreshold)
{
ASSERT(getJITType() == JITCode::DFGJIT);
ASSERT(JITCode::isOptimizingJIT(getJITType()));
// Compute this the lame way so we don't saturate. This is called infrequently
// enough that this loop won't hurt us.
unsigned result = desiredThreshold;
......@@ -3231,7 +3236,7 @@ bool CodeBlock::shouldOptimizeNow()
#if ENABLE(DFG_JIT)
void CodeBlock::tallyFrequentExitSites()
{
ASSERT(getJITType() == JITCode::DFGJIT);
ASSERT(JITCode::isOptimizingJIT(getJITType()));
ASSERT(alternative()->getJITType() == JITCode::BaselineJIT);
ASSERT(!!m_dfgData);
......
......@@ -431,7 +431,7 @@ public:
m_jitCode = code;
m_jitCodeWithArityCheck = codeWithArityCheck;
#if ENABLE(DFG_JIT)
if (JITCode::jitTypeFor(m_jitCode) == JITCode::DFGJIT) {
if (JITCode::isOptimizingJIT(JITCode::jitTypeFor(m_jitCode))) {
createDFGDataIfNecessary();
m_vm->heap.m_dfgCodeBlocks.m_set.add(this);
}
......@@ -472,10 +472,10 @@ public:
bool hasOptimizedReplacement()
{
ASSERT(JITCode::isBaselineCode(getJITType()));
bool result = replacement()->getJITType() > getJITType();
bool result = JITCode::isHigherTier(replacement()->getJITType(), getJITType());
#if !ASSERT_DISABLED
if (result)
ASSERT(replacement()->getJITType() == JITCode::DFGJIT);
ASSERT(JITCode::isOptimizingJIT(replacement()->getJITType()));
else {
ASSERT(JITCode::isBaselineCode(replacement()->getJITType()));
ASSERT(replacement() == this);
......@@ -1438,8 +1438,8 @@ inline Register& ExecState::uncheckedR(int index)
#if ENABLE(DFG_JIT)
inline bool ExecState::isInlineCallFrame()
{
if (LIKELY(!codeBlock() || codeBlock()->getJITType() != JITCode::DFGJIT))
return false;
if (LIKELY(!codeBlock() || !JITCode::isOptimizingJIT(codeBlock()->getJITType())))
return false;
return isInlineCallFrameSlow();
}
#endif
......
......@@ -52,7 +52,7 @@ void DFGCodeBlocks::jettison(PassOwnPtr<CodeBlock> codeBlockPtr)
CodeBlock* codeBlock = codeBlockPtr.leakPtr();
ASSERT(codeBlock);
ASSERT(codeBlock->getJITType() == JITCode::DFGJIT);
ASSERT(JITCode::isOptimizingJIT(codeBlock->getJITType()));
// It should not have already been jettisoned.
ASSERT(!codeBlock->m_dfgData->isJettisoned);
......
......@@ -473,7 +473,7 @@ static unsigned getBytecodeOffsetForCallFrame(CallFrame* callFrame)
if (!codeBlock)
return 0;
#if ENABLE(DFG_JIT)
if (codeBlock->getJITType() == JITCode::DFGJIT)
if (JITCode::isOptimizingJIT(codeBlock->getJITType()))
return codeBlock->codeOrigin(callFrame->codeOriginIndexForDFG()).bytecodeIndex;
#endif
return callFrame->bytecodeOffsetForNonDFGCode();
......@@ -500,7 +500,7 @@ static CallFrame* getCallerInfo(VM* vm, CallFrame* callFrame, unsigned& bytecode
if (wasCalledByHost) {
#if ENABLE(DFG_JIT)
if (callerCodeBlock && callerCodeBlock->getJITType() == JITCode::DFGJIT) {
if (callerCodeBlock && JITCode::isOptimizingJIT(callerCodeBlock->getJITType())) {
unsigned codeOriginIndex = callFrame->callerFrame()->removeHostCallFrameFlag()->codeOriginIndexForDFG();
CodeOrigin origin = callerCodeBlock->codeOrigin(codeOriginIndex);
bytecodeOffset = origin.bytecodeIndex;
......@@ -521,7 +521,7 @@ static CallFrame* getCallerInfo(VM* vm, CallFrame* callFrame, unsigned& bytecode
ASSERT(newCodeBlock->instructionCount() > bytecodeOffset);
callerCodeBlock = newCodeBlock;
}
} else if (callerCodeBlock && callerCodeBlock->getJITType() == JITCode::DFGJIT) {
} else if (callerCodeBlock && JITCode::isOptimizingJIT(callerCodeBlock->getJITType())) {
CodeOrigin origin;
if (!callerCodeBlock->codeOriginForReturn(callFrame->returnPC(), origin)) {
// This should not be possible, but we're seeing cases where it does happen
......
......@@ -116,6 +116,9 @@ void printInternal(PrintStream& out, JSC::JITCode::JITType type)
case JSC::JITCode::DFGJIT:
out.print("DFG");
return;
case JSC::JITCode::FTLJIT:
out.print("FTL");
return;
default:
CRASH();
return;
......
......@@ -47,7 +47,7 @@ public:
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT };
enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT };
static JITType bottomTierJIT()
{
......@@ -56,18 +56,58 @@ public:
static JITType topTierJIT()
{
return DFGJIT;
return FTLJIT;
}
static JITType nextTierJIT(JITType jitType)
{
ASSERT_UNUSED(jitType, jitType == BaselineJIT || jitType == DFGJIT);
return DFGJIT;
switch (jitType) {
case BaselineJIT:
return DFGJIT;
case DFGJIT:
return FTLJIT;
default:
RELEASE_ASSERT_NOT_REACHED();
}
}
static bool isJIT(JITType jitType)
{
switch (jitType) {
case BaselineJIT:
case DFGJIT:
case FTLJIT:
return true;
default:
return false;
}
}
static bool isLowerTier(JITType expectedLower, JITType expectedHigher)
{
RELEASE_ASSERT(isJIT(expectedLower));
RELEASE_ASSERT(isJIT(expectedHigher));
return expectedLower < expectedHigher;
}
static bool isHigherTier(JITType expectedHigher, JITType expectedLower)
{
return isLowerTier(expectedLower, expectedHigher);
}
static bool isLowerOrSameTier(JITType expectedLower, JITType expectedHigher)
{
return !isHigherTier(expectedLower, expectedHigher);
}
static bool isHigherOrSameTier(JITType expectedHigher, JITType expectedLower)
{
return isLowerOrSameTier(expectedLower, expectedHigher);
}
static bool isOptimizingJIT(JITType jitType)
{
return jitType == DFGJIT;
return jitType == DFGJIT || jitType == FTLJIT;
}
static bool isBaselineCode(JITType jitType)
......
......@@ -53,7 +53,7 @@ inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& code
RefPtr<JITCode> oldJITCode = jitCode;
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
if (JITCode::isOptimizingJIT(jitType))
dfgCompiled = DFG::tryCompile(exec, codeBlock.get(), jitCode, bytecodeIndex);
if (dfgCompiled) {
if (codeBlock->alternative())
......@@ -91,7 +91,7 @@ inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCode
MacroAssemblerCodePtr oldJITCodeWithArityCheck = jitCodeWithArityCheck;
bool dfgCompiled = false;
if (jitType == JITCode::DFGJIT)
if (JITCode::isOptimizingJIT(jitType))
dfgCompiled = DFG::tryCompileFunction(exec, codeBlock.get(), jitCode, jitCodeWithArityCheck, bytecodeIndex);
if (dfgCompiled) {
if (codeBlock->alternative())
......
......@@ -2041,7 +2041,16 @@ DEFINE_STUB_FUNCTION(void, optimize)
}
CodeBlock* optimizedCodeBlock = codeBlock->replacement();
ASSERT(optimizedCodeBlock->getJITType() == JITCode::DFGJIT);
ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->getJITType()));
if (optimizedCodeBlock->getJITType() == JITCode::FTLJIT) {
// FTL JIT doesn't support OSR entry yet.
// https://bugs.webkit.org/show_bug.cgi?id=113625
// Don't attempt OSR entry again.
codeBlock->dontOptimizeAnytimeSoon();
return;
}
if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
if (Options::showDFGDisassembly()) {
......
......@@ -162,7 +162,7 @@ JSObject* EvalExecutable::compileOptimized(ExecState* exec, JSScope* scope, unsi
ASSERT(exec->vm().dynamicGlobalObject);
ASSERT(!!m_evalCodeBlock);
JSObject* error = 0;
if (m_evalCodeBlock->getJITType() != JITCode::topTierJIT())
if (!JITCode::isOptimizingJIT(m_evalCodeBlock->getJITType()))
error = compileInternal(exec, scope, JITCode::nextTierJIT(m_evalCodeBlock->getJITType()), bytecodeIndex);
ASSERT(!!m_evalCodeBlock);
return error;
......@@ -184,6 +184,8 @@ inline const char* samplingDescription(JITCode::JITType jitType)
return "Baseline Compilation (TOTAL)";
case JITCode::DFGJIT:
return "DFG Compilation (TOTAL)";
case JITCode::FTLJIT:
return "FTL Compilation (TOTAL)";
default:
RELEASE_ASSERT_NOT_REACHED();
return 0;
......@@ -293,7 +295,7 @@ JSObject* ProgramExecutable::compileOptimized(ExecState* exec, JSScope* scope, u
RELEASE_ASSERT(exec->vm().dynamicGlobalObject);
ASSERT(!!m_programCodeBlock);
JSObject* error = 0;
if (m_programCodeBlock->getJITType() != JITCode::topTierJIT())
if (!JITCode::isOptimizingJIT(m_programCodeBlock->getJITType()))
error = compileInternal(exec, scope, JITCode::nextTierJIT(m_programCodeBlock->getJITType()), bytecodeIndex);
ASSERT(!!m_programCodeBlock);
return error;
......@@ -461,7 +463,7 @@ JSObject* FunctionExecutable::compileOptimizedForCall(ExecState* exec, JSScope*
RELEASE_ASSERT(exec->vm().dynamicGlobalObject);
ASSERT(!!m_codeBlockForCall);
JSObject* error = 0;
if (m_codeBlockForCall->getJITType() != JITCode::topTierJIT())
if (!JITCode::isOptimizingJIT(m_codeBlockForCall->getJITType()))
error = compileForCallInternal(exec, scope, JITCode::nextTierJIT(m_codeBlockForCall->getJITType()), bytecodeIndex);
ASSERT(!!m_codeBlockForCall);
return error;
......@@ -472,7 +474,7 @@ JSObject* FunctionExecutable::compileOptimizedForConstruct(ExecState* exec, JSSc
RELEASE_ASSERT(exec->vm().dynamicGlobalObject);
ASSERT(!!m_codeBlockForConstruct);
JSObject* error = 0;
if (m_codeBlockForConstruct->getJITType() != JITCode::topTierJIT())
if (!JITCode::isOptimizingJIT(m_codeBlockForConstruct->getJITType()))
error = compileForConstructInternal(exec, scope, JITCode::nextTierJIT(m_codeBlockForConstruct->getJITType()), bytecodeIndex);
ASSERT(!!m_codeBlockForConstruct);
return error;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment