Commit 3cb7e2c7 authored by fpizlo@apple.com

Forced OSR exits should lead to recompilation based on count, not rate

https://bugs.webkit.org/show_bug.cgi?id=83247
<rdar://problem/10720925>

Reviewed by Geoff Garen.
        
Track which OSR exits happen because of inadequate coverage. Count them
separately. If the count reaches a threshold, immediately trigger
reoptimization.
        
This is in contrast to the recompilation trigger for all other OSR exits.
Normally recomp is triggered when the exit rate exceeds a certain ratio.
        
Looks like a slight V8 speedup (sub 1%).

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::CodeBlock):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::forcedOSRExitCounter):
(JSC::CodeBlock::addressOfForcedOSRExitCounter):
(JSC::CodeBlock::offsetOfForcedOSRExitCounter):
(JSC::CodeBlock::shouldReoptimizeNow):
(JSC::CodeBlock::shouldReoptimizeFromLoopNow):
(CodeBlock):
* bytecode/DFGExitProfile.h:
(JSC::DFG::exitKindToString):
* dfg/DFGOSRExitCompiler.cpp:
(JSC::DFG::OSRExitCompiler::handleExitCounts):
(DFG):
* dfg/DFGOSRExitCompiler.h:
(OSRExitCompiler):
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOperations.cpp:
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* runtime/Options.cpp:
(Options):
(JSC::Options::initializeOptions):
* runtime/Options.h:
(Options):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@113552 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 3263d17c
2012-04-04 Filip Pizlo <fpizlo@apple.com>
Forced OSR exits should lead to recompilation based on count, not rate
https://bugs.webkit.org/show_bug.cgi?id=83247
<rdar://problem/10720925>
Reviewed by Geoff Garen.
Track which OSR exits happen because of inadequate coverage. Count them
separately. If the count reaches a threshold, immediately trigger
reoptimization.
This is in contrast to the recompilation trigger for all other OSR exits.
Normally recomp is triggered when the exit rate exceeds a certain ratio.
Looks like a slight V8 speedup (sub 1%).
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::CodeBlock):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::forcedOSRExitCounter):
(JSC::CodeBlock::addressOfForcedOSRExitCounter):
(JSC::CodeBlock::offsetOfForcedOSRExitCounter):
(JSC::CodeBlock::shouldReoptimizeNow):
(JSC::CodeBlock::shouldReoptimizeFromLoopNow):
(CodeBlock):
* bytecode/DFGExitProfile.h:
(JSC::DFG::exitKindToString):
* dfg/DFGOSRExitCompiler.cpp:
(JSC::DFG::OSRExitCompiler::handleExitCounts):
(DFG):
* dfg/DFGOSRExitCompiler.h:
(OSRExitCompiler):
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOperations.cpp:
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileGetIndexedPropertyStorage):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::compile):
* runtime/Options.cpp:
(Options):
(JSC::Options::initializeOptions):
* runtime/Options.h:
(Options):
2012-04-06 Benjamin Poulain <bpoulain@apple.com>
Do not abuse ArrayStorage's m_length for testing array consistency
......@@ -1432,6 +1432,7 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable* symTab)
, m_symbolTable(symTab)
, m_speculativeSuccessCounter(0)
, m_speculativeFailCounter(0)
, m_forcedOSRExitCounter(0)
, m_optimizationDelayCounter(0)
, m_reoptimizationRetryCounter(0)
#if ENABLE(JIT)
......
......@@ -1006,12 +1006,15 @@ namespace JSC {
// Raw reads of the tiering heuristics counters: successful speculative
// executions, speculation failures (OSR exits counted by rate), and forced
// OSR exits (counted by absolute count, e.g. InadequateCoverage exits).
uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; }
uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; }
uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; }
// Addresses of the counters, for JIT-emitted code that bumps them directly.
uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; }
uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; }
uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; }
// Field offsets, for JIT-emitted code that addresses the counters relative
// to a CodeBlock* held in a register (see OSRExitCompiler::handleExitCounts).
static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); }
static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); }
static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); }
#if ENABLE(JIT)
// The number of failures that triggers the use of the ratio.
......@@ -1020,12 +1023,20 @@ namespace JSC {
// Decide whether this code block should be recompiled right away:
// either the speculation-failure rate is too high once failures exceed
// the large-fail threshold, or forced OSR exits (inadequate profiling
// coverage) have reached their absolute count threshold.
// NOTE(review): the stripped diff had left the superseded single-line
// return ahead of this one, making the new policy unreachable; the dead
// return has been removed.
bool shouldReoptimizeNow()
{
    return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
            && speculativeFailCounter() >= largeFailCountThreshold())
        || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
}
// Same reoptimization policy as shouldReoptimizeNow(), but applied from a
// loop OSR entry point, using the (lower) loop-specific large-fail
// threshold so hot loops retrigger optimization sooner.
// NOTE(review): the stripped diff had left the superseded single-line
// return ahead of this one, making the new policy unreachable; the dead
// return has been removed.
bool shouldReoptimizeFromLoopNow()
{
    return (Options::desiredSpeculativeSuccessFailRatio * speculativeFailCounter() >= speculativeSuccessCounter()
            && speculativeFailCounter() >= largeFailCountThresholdForLoop())
        || forcedOSRExitCounter() >= Options::forcedOSRExitCountForReoptimization;
}
#endif
......@@ -1228,6 +1239,7 @@ namespace JSC {
int32_t m_totalJITExecutions;
uint32_t m_speculativeSuccessCounter;
uint32_t m_speculativeFailCounter;
uint32_t m_forcedOSRExitCounter;
uint16_t m_optimizationDelayCounter;
uint16_t m_reoptimizationRetryCounter;
......
......@@ -38,6 +38,7 @@ enum ExitKind {
BadCache, // We exited because an inline cache was wrong.
Overflow, // We exited because of overflow.
NegativeZero, // We exited because we encountered negative zero.
InadequateCoverage, // We exited because we ended up in code that didn't have profiling coverage.
Uncountable, // We exited for none of the above reasons, and we should not count it. Most uses of this should be viewed as a FIXME.
};
......@@ -54,6 +55,8 @@ inline const char* exitKindToString(ExitKind kind)
return "Overflow";
case NegativeZero:
return "NegativeZero";
case InadequateCoverage:
return "InadequateCoverage";
default:
return "Unknown";
}
......
......@@ -95,6 +95,64 @@ void compileOSRExit(ExecState* exec)
} // extern "C"
// Emits the exit-counting epilogue shared by the 32_64 and 64 OSR exit
// compilers. Bumps the per-exit-site count, updates the CodeBlock's
// speculation counters, and — when the relevant threshold is crossed —
// rigs the baseline execution counters so reoptimization triggers on the
// next entry; otherwise it pushes the optimization target far out.
// Clobbers regT0 (CodeBlock*), regT1 (success counter), regT2 (fail or
// forced-exit counter).
void OSRExitCompiler::handleExitCounts(const OSRExit& exit)
{
// Count this particular exit site, for exit-profile prominence heuristics.
m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
// regT0 <- the DFG code block whose counters we are updating.
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
AssemblyHelpers::JumpList tooFewFails;
if (exit.m_kind == InadequateCoverage) {
// Proceed based on the assumption that we can profitably optimize this code once
// it has executed enough times.
// Forced exits are judged by absolute count, not by fail/success ratio:
// bump the forced-exit counter, and un-count the success this execution
// had optimistically recorded.
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
// Below the forced-exit threshold: take the slow "warm up again" path.
// NOTE(review): unlike the else branch below, this path never retargets
// regT0 to the baseline code block, so the execute-counter stores that
// follow address the DFG code block instead — confirm this is intended
// (a "m_jit.move(...baselineCodeBlock()..., regT0)" line may have been
// dropped by the page scrape).
tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization)));
} else {
// Proceed based on the assumption that we can handle these exits so long as they
// don't get too frequent.
// Ordinary exits are judged by rate: bump the fail counter, un-count the
// optimistically recorded success.
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
// regT0 <- baseline code block: the execution counters adjusted below
// live on the baseline CodeBlock, which drives re-entry into the DFG.
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
// Not enough failures yet, or the fail/success ratio is still acceptable
// (fails * ratio <= successes): take the "warm up again" path.
tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold())));
m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1));
}
// Reoptimize as soon as possible.
// Zeroing counter and threshold makes the next execution trip optimization.
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
tooFewFails.link(&m_jit);
// Adjust the execution counter such that the target is to only optimize after a while.
// targetValue is computed at compile time of the exit stub; it already
// folds in the memory-usage heuristics for the baseline code block.
int32_t targetValue =
ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
m_jit.baselineCodeBlock());
m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
doneAdjusting.link(&m_jit);
}
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
......@@ -70,6 +70,8 @@ private:
return result;
}
void handleExitCounts(const OSRExit&);
AssemblyHelpers& m_jit;
Vector<unsigned> m_poisonScratchIndices;
};
......
......@@ -562,42 +562,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// counter to 0; otherwise we set the counter to
// counterValueForOptimizeAfterWarmUp().
m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold()));
m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
// Reoptimize as soon as possible.
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
fewFails.link(&m_jit);
lowFailRate.link(&m_jit);
// Adjust the execution counter such that the target is to only optimize after a while.
int32_t targetValue =
ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
m_jit.baselineCodeBlock());
m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
doneAdjusting.link(&m_jit);
handleExitCounts(exit);
// 12) Load the result of the last bytecode operation into regT0.
......
......@@ -541,42 +541,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco
// counter to 0; otherwise we set the counter to
// counterValueForOptimizeAfterWarmUp().
m_jit.add32(AssemblyHelpers::TrustedImm32(1), AssemblyHelpers::AbsoluteAddress(&exit.m_count));
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2);
m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1);
m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1);
m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()));
m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()));
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0);
AssemblyHelpers::Jump fewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold()));
m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2);
AssemblyHelpers::Jump lowFailRate = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1);
// Reoptimize as soon as possible.
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(0), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
AssemblyHelpers::Jump doneAdjusting = m_jit.jump();
fewFails.link(&m_jit);
lowFailRate.link(&m_jit);
// Adjust the execution counter such that the target is to only optimize after a while.
int32_t targetValue =
ExecutionCounter::applyMemoryUsageHeuristicsAndConvertToInt(
m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(),
m_jit.baselineCodeBlock());
m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter()));
m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold()));
m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount()));
doneAdjusting.link(&m_jit);
handleExitCounts(exit);
// 14) Load the result of the last bytecode operation into regT0.
......
......@@ -1123,7 +1123,17 @@ void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void*
SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
CodeBlock* codeBlock = debugInfo->codeBlock;
CodeBlock* alternative = codeBlock->alternative();
dataLog("Speculation failure in %p at @%u with executeCounter = %d, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, success/fail %u/%u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? alternative->optimizationDelayCounter() : 0, codeBlock->speculativeSuccessCounter(), codeBlock->speculativeFailCounter());
dataLog("Speculation failure in %p at @%u with executeCounter = %d, "
"reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, "
"success/fail %u/(%u+%u)\n",
codeBlock,
debugInfo->nodeIndex,
alternative ? alternative->jitExecuteCounter() : 0,
alternative ? alternative->reoptimizationRetryCounter() : 0,
alternative ? alternative->optimizationDelayCounter() : 0,
codeBlock->speculativeSuccessCounter(),
codeBlock->speculativeFailCounter(),
codeBlock->forcedOSRExitCounter());
}
#endif
......
......@@ -2973,7 +2973,7 @@ bool SpeculativeJIT::compileStrictEq(Node& node)
void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
{
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
return;
}
......
......@@ -1802,7 +1802,7 @@ void SpeculativeJIT::compile(Node& node)
// If we have no prediction for this local, then don't attempt to compile.
if (prediction == PredictNone || value.isClear()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2234,7 +2234,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByVal: {
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2367,7 +2367,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByVal: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2527,7 +2527,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByValAlias: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3174,7 +3174,7 @@ void SpeculativeJIT::compile(Node& node)
case GetById: {
if (!node.prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3228,7 +3228,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByIdFlush: {
if (!node.prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3727,7 +3727,7 @@ void SpeculativeJIT::compile(Node& node)
break;
case ForceOSRExit: {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......
......@@ -1905,7 +1905,7 @@ void SpeculativeJIT::compile(Node& node)
// If we have no prediction for this local, then don't attempt to compile.
if (prediction == PredictNone || value.isClear()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2297,7 +2297,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByVal: {
if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2422,7 +2422,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByVal: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -2572,7 +2572,7 @@ void SpeculativeJIT::compile(Node& node)
case PutByValAlias: {
if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3177,7 +3177,7 @@ void SpeculativeJIT::compile(Node& node)
}
case GetById: {
if (!node.prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3227,7 +3227,7 @@ void SpeculativeJIT::compile(Node& node)
case GetByIdFlush: {
if (!node.prediction()) {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......@@ -3698,7 +3698,7 @@ void SpeculativeJIT::compile(Node& node)
break;
case ForceOSRExit: {
terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
......
......@@ -75,6 +75,7 @@ double osrExitProminenceForFrequentExitSite;
unsigned largeFailCountThresholdBase;
unsigned largeFailCountThresholdBaseForLoop;
unsigned forcedOSRExitCountForReoptimization;
unsigned reoptimizationRetryCounterMax;
unsigned reoptimizationRetryCounterStep;
......@@ -174,8 +175,9 @@ void initializeOptions()
SET(osrExitProminenceForFrequentExitSite, 0.3);
SET(largeFailCountThresholdBase, 20);
SET(largeFailCountThresholdBaseForLoop, 1);
SET(largeFailCountThresholdBase, 20);
SET(largeFailCountThresholdBaseForLoop, 1);
SET(forcedOSRExitCountForReoptimization, 250);
SET(reoptimizationRetryCounterStep, 1);
......
......@@ -61,6 +61,7 @@ extern double osrExitProminenceForFrequentExitSite;
extern unsigned largeFailCountThresholdBase;
extern unsigned largeFailCountThresholdBaseForLoop;
extern unsigned forcedOSRExitCountForReoptimization;
extern unsigned reoptimizationRetryCounterMax;
extern unsigned reoptimizationRetryCounterStep;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment