Commit 81bb8bb3 authored by fpizlo@apple.com

CodeBlock::m_numCalleeRegisters shouldn't also mean frame size, frame size...

CodeBlock::m_numCalleeRegisters shouldn't also mean frame size, frame size needed for exit, or any other unrelated things
https://bugs.webkit.org/show_bug.cgi?id=124793

Reviewed by Mark Hahnenberg.
        
Now m_numCalleeRegisters always refers to the number of locals that the attached
bytecode uses. It never means anything else.
        
For frame size, we now have it lazily computed from m_numCalleeRegisters for the
baseline engines and we have it stored in DFG::CommonData for the optimizing JITs.
        
For frame-size-needed-at-exit, we store that in DFG::CommonData, too.
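
Roughly, frame size is now answered per tier; a condensed sketch of the new
CodeBlock::frameRegisterCount() from this patch (the ENABLE() guards are dropped here
for brevity):

unsigned CodeBlock::frameRegisterCount()
{
    switch (jitType()) {
    case JITCode::InterpreterThunk:
        return LLInt::frameRegisterCountFor(this); // computed lazily from m_numCalleeRegisters
    case JITCode::BaselineJIT:
        return JIT::frameRegisterCountFor(this); // computed lazily from m_numCalleeRegisters
    case JITCode::DFGJIT:
    case JITCode::FTLJIT:
        return jitCode()->dfgCommon()->frameRegisterCount; // recorded by the compiler at link time
    default:
        RELEASE_ASSERT_NOT_REACHED();
        return 0;
    }
}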
        
The code no longer implies that there is any arithmetic relationship between
m_numCalleeRegisters and frameSize. Previously it implied that the latter is greater
than the former.
        
The code no longer implies that there is any arithmetic relationship between the
frame size and the frame-size-needed-at-exit. Previously it implied that the latter
is greater than the former.
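
Where code needs room both to run the optimized frame and to materialize baseline
frames on OSR exit, it now takes the max of the two numbers explicitly instead of
assuming that one bounds the other. The helper lives in DFGCommonData.h below; the
numbers in the comment are made up for illustration:

unsigned requiredRegisterCountForExecutionAndExit() const
{
    // e.g. a DFG frame that itself needs 40 registers but whose inlined baseline
    // frames need 64 registers at exit must reserve 64 on entry.
    return std::max(frameRegisterCount, requiredRegisterCountForExit);
}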

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::frameRegisterCount):
* bytecode/CodeBlock.h:
* dfg/DFGCommonData.h:
(JSC::DFG::CommonData::CommonData):
(JSC::DFG::CommonData::requiredRegisterCountForExecutionAndExit):
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::frameRegisterCount):
(JSC::DFG::Graph::requiredRegisterCountForExit):
(JSC::DFG::Graph::requiredRegisterCountForExecutionAndExit):
* dfg/DFGGraph.h:
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::link):
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::prepareOSREntry):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::SpeculativeJIT):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
(JSC::DFG::VirtualRegisterAllocationPhase::run):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::LowerDFGToLLVM::compileCallOrConstruct):
* ftl/FTLOSREntry.cpp:
(JSC::FTL::prepareOSREntry):
* interpreter/CallFrame.cpp:
(JSC::CallFrame::frameExtentInternal):
* interpreter/JSStackInlines.h:
(JSC::JSStack::pushFrame):
* jit/JIT.h:
(JSC::JIT::frameRegisterCountFor):
* jit/JITOperations.cpp:
* llint/LLIntEntrypoint.cpp:
(JSC::LLInt::frameRegisterCountFor):
* llint/LLIntEntrypoint.h:



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@159721 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent a847461e
@@ -39,11 +39,13 @@
#include "DFGWorklist.h"
#include "Debugger.h"
#include "Interpreter.h"
#include "JIT.h"
#include "JITStubs.h"
#include "JSActivation.h"
#include "JSCJSValue.h"
#include "JSFunction.h"
#include "JSNameScope.h"
#include "LLIntEntrypoint.h"
#include "LowLevelInterpreter.h"
#include "Operations.h"
#include "PolymorphicPutByIdList.h"
@@ -3299,6 +3301,31 @@ void CodeBlock::dumpValueProfiles()
}
#endif // ENABLE(VERBOSE_VALUE_PROFILE)
unsigned CodeBlock::frameRegisterCount()
{
switch (jitType()) {
#if ENABLE(LLINT)
case JITCode::InterpreterThunk:
return LLInt::frameRegisterCountFor(this);
#endif // ENABLE(LLINT)
#if ENABLE(JIT)
case JITCode::BaselineJIT:
return JIT::frameRegisterCountFor(this);
#endif // ENABLE(JIT)
#if ENABLE(DFG_JIT)
case JITCode::DFGJIT:
case JITCode::FTLJIT:
return jitCode()->dfgCommon()->frameRegisterCount;
#endif // ENABLE(DFG_JIT)
default:
RELEASE_ASSERT_NOT_REACHED();
return 0;
}
}
size_t CodeBlock::predictedMachineCodeSize()
{
// This will be called from CodeBlock::CodeBlock before either m_vm or the
@@ -897,6 +897,8 @@ public:
#if ENABLE(VERBOSE_VALUE_PROFILE)
void dumpValueProfiles();
#endif
unsigned frameRegisterCount();
// FIXME: Make these remaining members private.
@@ -74,6 +74,8 @@ public:
CommonData()
: isStillValid(true)
, machineCaptureStart(std::numeric_limits<int>::max())
, frameRegisterCount(std::numeric_limits<unsigned>::max())
, requiredRegisterCountForExit(std::numeric_limits<unsigned>::max())
{ }
void notifyCompilingStructureTransition(Plan&, CodeBlock*, Node*);
@@ -82,6 +84,11 @@ public:
void shrinkToFit();
bool invalidate(); // Returns true if we did invalidate, or false if the code block was already invalidated.
unsigned requiredRegisterCountForExecutionAndExit() const
{
return std::max(frameRegisterCount, requiredRegisterCountForExit);
}
OwnPtr<InlineCallFrameSet> inlineCallFrames;
Vector<CodeOrigin, 0, UnsafeVectorOverflow> codeOrigins;
@@ -100,6 +107,9 @@ public:
int machineCaptureStart;
std::unique_ptr<SlowArgument[]> slowArguments;
unsigned frameRegisterCount;
unsigned requiredRegisterCountForExit;
};
} } // namespace JSC::DFG
@@ -34,6 +34,7 @@
#include "DFGVariableAccessDataDump.h"
#include "FullBytecodeLiveness.h"
#include "FunctionExecutableDump.h"
#include "JIT.h"
#include "OperandsInlines.h"
#include "Operations.h"
#include <wtf/CommaPrinter.h>
@@ -698,7 +699,29 @@ bool Graph::isLiveInBytecode(VirtualRegister operand, CodeOrigin codeOrigin)
return true;
}
unsigned Graph::frameRegisterCount()
{
return m_nextMachineLocal + m_parameterSlots;
}
unsigned Graph::requiredRegisterCountForExit()
{
unsigned count = JIT::frameRegisterCountFor(m_profiledBlock);
for (InlineCallFrameSet::iterator iter = m_inlineCallFrames->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
unsigned requiredCount = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock);
count = std::max(count, requiredCount);
}
return count;
}
unsigned Graph::requiredRegisterCountForExecutionAndExit()
{
return std::max(frameRegisterCount(), requiredRegisterCountForExit());
}
} } // namespace JSC::DFG
#endif
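
A worked example for Graph::requiredRegisterCountForExit() just above (the numbers are
hypothetical): an inlined frame sitting at local 10 whose baseline CodeBlock needs 30
registers contributes

    VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + JIT::frameRegisterCountFor(codeBlock)
        = 10 + 1 + 30 = 41

and the result is the max over all such contributions and the machine code block's own
baseline frame register count.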
@@ -787,6 +787,10 @@ public:
FullBytecodeLiveness& livenessFor(InlineCallFrame*);
bool isLiveInBytecode(VirtualRegister, CodeOrigin);
unsigned frameRegisterCount();
unsigned requiredRegisterCountForExit();
unsigned requiredRegisterCountForExecutionAndExit();
VM& m_vm;
Plan& m_plan;
CodeBlock* m_codeBlock;
@@ -154,6 +154,9 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif
m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
if (!m_graph.m_inlineCallFrames->isEmpty())
m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
@@ -331,7 +334,7 @@ void JITCompiler::compileFunction()
// so enter after this.
Label fromArityCheck(this);
// Plant a check that sufficient space is available in the JSStack.
addPtr(TrustedImm32(virtualRegisterForLocal(m_codeBlock->m_numCalleeRegisters).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
// Return here after stack check.
Label fromStackCheck = label();
@@ -77,7 +77,8 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
return 0;
}
OSREntryData* entry = codeBlock->jitCode()->dfg()->osrEntryDataForBytecodeIndex(bytecodeIndex);
JITCode* jitCode = codeBlock->jitCode()->dfg();
OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
if (!entry) {
if (Options::verboseOSR())
@@ -180,7 +181,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
// it seems silly: you'd be diverting the program to error handling when it
// would have otherwise just kept running albeit less quickly.
if (!vm->interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(codeBlock->m_numCalleeRegisters).offset()])) {
if (!vm->interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(jitCode->common.requiredRegisterCountForExecutionAndExit()).offset()])) {
if (Options::verboseOSR())
dataLogF(" OSR failed because stack growth failed.\n");
return 0;
@@ -46,7 +46,7 @@ SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
, m_jit(jit)
, m_currentNode(0)
, m_indexInBlock(0)
, m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
, m_generationInfo(m_jit.graph().frameRegisterCount())
, m_state(m_jit.graph())
, m_interpreter(m_jit.graph(), m_state)
, m_stream(&jit.jitCode()->variableEventStream)
@@ -119,28 +119,6 @@ public:
// to figure out where to put the parameters.
m_graph.m_nextMachineLocal = scoreBoard.highWatermark();
// 'm_numCalleeRegisters' is the number of locals and temporaries allocated
// for the function (and checked for on entry). Since we perform a new and
// different allocation of temporaries, more registers may now be required.
// This also accounts for the number of temporaries that may be needed if we
// OSR exit, due to inlining. Hence this computes the number of temporaries
// that could be used by this code block even if it exits; it may be more
// than what this code block needs if it never exits.
unsigned calleeRegisters = scoreBoard.highWatermark() + m_graph.m_parameterSlots;
for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
InlineCallFrame* inlineCallFrame = *iter;
CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
unsigned requiredCalleeRegisters = VirtualRegister(inlineCallFrame->stackOffset).toLocal() + 1 + codeBlock->m_numCalleeRegisters;
if (requiredCalleeRegisters > calleeRegisters)
calleeRegisters = requiredCalleeRegisters;
}
calleeRegisters = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), calleeRegisters);
if ((unsigned)codeBlock()->m_numCalleeRegisters < calleeRegisters)
codeBlock()->m_numCalleeRegisters = calleeRegisters;
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Num callee registers: %u\n", calleeRegisters);
#endif
return true;
}
};
@@ -57,6 +57,9 @@ void link(State& state)
// LLVM will create its own jump tables as needed.
codeBlock->clearSwitchJumpTables();
state.jitCode->common.frameRegisterCount = state.graph.frameRegisterCount();
state.jitCode->common.requiredRegisterCountForExit = state.graph.requiredRegisterCountForExit();
if (!state.graph.m_inlineCallFrames->isEmpty())
state.jitCode->common.inlineCallFrames = std::move(state.graph.m_inlineCallFrames);
@@ -80,7 +83,7 @@ void link(State& state)
// Plant a check that sufficient space is available in the JSStack.
// FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
jit.addPtr(
CCallHelpers::TrustedImm32(virtualRegisterForLocal(codeBlock->m_numCalleeRegisters).offset() * sizeof(Register)),
CCallHelpers::TrustedImm32(virtualRegisterForLocal(state.jitCode->common.requiredRegisterCountForExit).offset() * sizeof(Register)),
GPRInfo::callFrameRegister, GPRInfo::regT1);
CCallHelpers::Jump stackCheck = jit.branchPtr(
CCallHelpers::Above,
@@ -2405,7 +2405,7 @@ private:
LValue calleeFrame = m_out.add(
m_callFrame,
m_out.constIntPtr(sizeof(Register) * virtualRegisterForLocal(codeBlock()->m_numCalleeRegisters).offset()));
m_out.constIntPtr(sizeof(Register) * virtualRegisterForLocal(m_graph.frameRegisterCount()).offset()));
m_out.store32(
m_out.constInt32(numPassedArgs + dummyThisArgument),
@@ -78,10 +78,10 @@ void* prepareOSREntry(
for (int local = values.numberOfLocals(); local--;)
scratch[local] = JSValue::encode(values.local(local));
int stackFrameSize = entryCodeBlock->m_numCalleeRegisters;
int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
if (Options::verboseOSR())
dataLog(" OSR failed bcause stack growth failed.\n");
dataLog(" OSR failed because stack growth failed.\n");
return 0;
}
@@ -120,7 +120,7 @@ Register* CallFrame::frameExtentInternal()
{
CodeBlock* codeBlock = this->codeBlock();
ASSERT(codeBlock);
return registers() + virtualRegisterForLocal(codeBlock->m_numCalleeRegisters).offset();
return registers() + virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset();
}
JSGlobalObject* CallFrame::vmEntryGlobalObject()
@@ -73,7 +73,7 @@ inline CallFrame* JSStack::pushFrame(CallFrame* callerFrame,
Register* newEnd = newCallFrameSlot;
if (!!codeBlock)
newEnd += virtualRegisterForLocal(codeBlock->m_numCalleeRegisters).offset();
newEnd += virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset();
// Ensure that we have the needed stack capacity to push the new frame:
if (!grow(newEnd))
@@ -567,7 +567,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
}
#endif
addPtr(TrustedImm32(virtualRegisterForLocal(m_codeBlock->m_numCalleeRegisters).offset() * sizeof(Register)), callFrameRegister, regT1);
addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
}
@@ -243,6 +243,11 @@ namespace JSC {
static void linkFor(ExecState*, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
static unsigned frameRegisterCountFor(CodeBlock* codeBlock)
{
return codeBlock->m_numCalleeRegisters;
}
private:
JIT(VM*, CodeBlock* = 0);
@@ -80,7 +80,7 @@ void JIT_OPERATION operationStackCheck(ExecState* exec, CodeBlock* codeBlock)
JSStack& stack = vm->interpreter->stack();
if (UNLIKELY(!stack.grow(&exec->registers()[virtualRegisterForLocal(codeBlock->m_numCalleeRegisters).offset()])))
if (UNLIKELY(!stack.grow(&exec->registers()[virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset()])))
vm->throwException(callerFrame, createStackOverflowError(callerFrame));
}
@@ -119,6 +119,11 @@ void setEntrypoint(VM& vm, CodeBlock* codeBlock)
RELEASE_ASSERT_NOT_REACHED();
}
unsigned frameRegisterCountFor(CodeBlock* codeBlock)
{
return codeBlock->m_numCalleeRegisters;
}
} } // namespace JSC::LLInt
#endif // ENABLE(LLINT)
@@ -41,6 +41,8 @@ namespace LLInt {
void setEntrypoint(VM&, CodeBlock*);
unsigned frameRegisterCountFor(CodeBlock*);
} } // namespace JSC::LLInt
#endif // ENABLE(LLINT)