Commit af8809ed authored by fpizlo@apple.com's avatar fpizlo@apple.com

Get rid of CodeBlock::RareData::callReturnIndexVector and most of the evil that it introduced

https://bugs.webkit.org/show_bug.cgi?id=121766

Reviewed by Andreas Kling.

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::shrinkToFit):
* bytecode/CodeBlock.h:
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileExceptionHandlers):
(JSC::DFG::JITCompiler::link):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@156247 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 5f6610a7
2013-09-22 Filip Pizlo <fpizlo@apple.com>
Get rid of CodeBlock::RareData::callReturnIndexVector and most of the evil that it introduced
https://bugs.webkit.org/show_bug.cgi?id=121766
Reviewed by Andreas Kling.
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::shrinkToFit):
* bytecode/CodeBlock.h:
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileExceptionHandlers):
(JSC::DFG::JITCompiler::link):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
2013-09-21 Filip Pizlo <fpizlo@apple.com>
Interpreter::unwind() has no need for the bytecodeOffset
......@@ -2547,9 +2547,6 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
if (m_rareData) {
m_rareData->m_exceptionHandlers.shrinkToFit();
#if ENABLE(JIT)
m_rareData->m_callReturnIndexVector.shrinkToFit();
#endif
#if ENABLE(DFG_JIT)
m_rareData->m_inlineCallFrames.shrinkToFit();
m_rareData->m_codeOrigins.shrinkToFit();
......@@ -2641,102 +2638,6 @@ void CodeBlock::linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo* inco
}
#endif // ENABLE(LLINT)
#if ENABLE(JIT)
// Given a machine return address, finds the closure-call stub routine whose
// generated code contains that address. Returns 0 when the address does not
// fall inside any closure-call stub known to this CodeBlock or to the heap's
// set of (possibly jettisoned) JIT stub routines.
ClosureCallStubRoutine* CodeBlock::findClosureCallForReturnPC(ReturnAddressPtr returnAddress)
{
// First, scan this CodeBlock's own call link infos (newest first) for a live
// closure-call stub whose executable memory spans the return address.
for (unsigned i = m_callLinkInfos.size(); i--;) {
CallLinkInfo& info = m_callLinkInfos[i];
if (!info.stub)
continue;
if (!info.stub->code().executableMemory()->contains(returnAddress.value()))
continue;
// Any stub we hand back must carry a usable bytecode index for unwinding.
RELEASE_ASSERT(info.stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
return info.stub.get();
}
// The stub routine may have been jettisoned. This is rare, but we have to handle it.
const JITStubRoutineSet& set = m_vm->heap.jitStubRoutines();
for (unsigned i = set.size(); i--;) {
GCAwareJITStubRoutine* genericStub = set.at(i);
if (!genericStub->isClosureCall())
continue;
ClosureCallStubRoutine* stub = static_cast<ClosureCallStubRoutine*>(genericStub);
if (!stub->code().executableMemory()->contains(returnAddress.value()))
continue;
RELEASE_ASSERT(stub->codeOrigin().bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
return stub;
}
// No stub contains the return address; callers must handle a null result.
return 0;
}
#endif
// Maps a machine return address (plus the current ExecState) back to the
// bytecode offset of the call that produced it. Returns the fallback offset 1
// when no mapping information is available for this configuration.
unsigned CodeBlock::bytecodeOffset(ExecState* exec, ReturnAddressPtr returnAddress)
{
    UNUSED_PARAM(exec);
    UNUSED_PARAM(returnAddress);
#if ENABLE(LLINT)
#if !ENABLE(LLINT_C_LOOP)
    // When using the JIT, we could have addresses that are not bytecode
    // addresses. We check if the return address is in the LLint glue and
    // opcode handlers range here to ensure that we are looking at bytecode
    // before attempting to convert the return address into a bytecode offset.
    //
    // In the case of the C Loop LLInt, the JIT is disabled, and the only
    // valid return addresses should be bytecode PCs. So, we can and need to
    // forego this check because when we do not ENABLE(COMPUTED_GOTO_OPCODES),
    // then the bytecode "PC"s are actually the opcodeIDs and are not bounded
    // by llint_begin and llint_end.
    if (returnAddress.value() >= LLInt::getCodePtr(llint_begin)
        && returnAddress.value() <= LLInt::getCodePtr(llint_end))
#endif
    {
        // LLInt case: the frame's current vPC is itself a bytecode address.
        RELEASE_ASSERT(exec->codeBlock());
        RELEASE_ASSERT(exec->codeBlock() == this);
        RELEASE_ASSERT(JITCode::isBaselineCode(jitType()));
        Instruction* instruction = exec->currentVPC();
        RELEASE_ASSERT(instruction);
        return bytecodeOffset(instruction);
    }
#endif // !ENABLE(LLINT)
#if ENABLE(JIT)
    if (!m_rareData)
        return 1;
    Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
    if (!callIndices.size())
        return 1;
    if (jitCode()->contains(returnAddress.value())) {
        // Fast path: the address is in this block's own JIT code, so binary
        // search the call-return-offset table.
        unsigned callReturnOffset = jitCode()->offsetOf(returnAddress.value());
        CallReturnOffsetToBytecodeOffset* result =
            binarySearch<CallReturnOffsetToBytecodeOffset, unsigned>(
                callIndices, callIndices.size(), callReturnOffset, getCallReturnOffset);
        RELEASE_ASSERT(result->callReturnOffset == callReturnOffset);
        RELEASE_ASSERT(result->bytecodeOffset < instructionCount());
        return result->bytecodeOffset;
    }
    // Slow path: the return address should live inside a closure-call stub.
    ClosureCallStubRoutine* closureInfo = findClosureCallForReturnPC(returnAddress);
    // findClosureCallForReturnPC() explicitly returns 0 when no stub contains
    // the address; crash deterministically here rather than dereferencing a
    // null pointer below.
    RELEASE_ASSERT(closureInfo);
    CodeOrigin origin = closureInfo->codeOrigin();
    // Walk out of inlined frames until we reach the frame whose baseline code
    // block is this one; its bytecode index is the answer.
    while (InlineCallFrame* inlineCallFrame = origin.inlineCallFrame) {
        if (inlineCallFrame->baselineCodeBlock() == this)
            break;
        origin = inlineCallFrame->caller;
        RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
    }
    RELEASE_ASSERT(origin.bytecodeIndex != CodeOrigin::invalidBytecodeIndex);
    unsigned bytecodeIndex = origin.bytecodeIndex;
    RELEASE_ASSERT(bytecodeIndex < instructionCount());
    return bytecodeIndex;
#endif // ENABLE(JIT)
#if !ENABLE(LLINT) && !ENABLE(JIT)
    return 1;
#endif
}
void CodeBlock::clearEvalCache()
{
if (!!m_alternative)
......
......@@ -199,25 +199,9 @@ public:
}
#endif // ENABLE(JIT)
unsigned bytecodeOffset(ExecState*, ReturnAddressPtr);
void unlinkIncomingCalls();
#if ENABLE(JIT)
unsigned bytecodeOffsetForCallAtIndex(unsigned index)
{
if (!m_rareData)
return 1;
Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callIndices = m_rareData->m_callReturnIndexVector;
if (!callIndices.size())
return 1;
// FIXME: Fix places in DFG that call out to C that don't set the CodeOrigin. https://bugs.webkit.org/show_bug.cgi?id=118315
ASSERT(index < m_rareData->m_callReturnIndexVector.size());
if (index >= m_rareData->m_callReturnIndexVector.size())
return 1;
return m_rareData->m_callReturnIndexVector[index].bytecodeOffset;
}
void unlinkCalls();
void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
......@@ -581,14 +565,6 @@ public:
bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(JIT)
// Accessor for the table mapping machine-code call return offsets to bytecode
// offsets. Lazily creates the RareData holder, so the returned reference is
// always valid (though the vector may be empty).
Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow>& callReturnIndexVector()
{
createRareDataIfNecessary();
return m_rareData->m_callReturnIndexVector;
}
#endif
#if ENABLE(DFG_JIT)
SegmentedVector<InlineCallFrame, 4>& inlineCallFrames()
{
......@@ -1145,9 +1121,6 @@ private:
EvalCodeCache m_evalCodeCache;
#if ENABLE(JIT)
Vector<CallReturnOffsetToBytecodeOffset, 0, UnsafeVectorOverflow> m_callReturnIndexVector;
#endif
#if ENABLE(DFG_JIT)
SegmentedVector<InlineCallFrame, 4> m_inlineCallFrames;
Vector<CodeOrigin, 0, UnsafeVectorOverflow> m_codeOrigins;
......
......@@ -133,9 +133,8 @@ void JITCompiler::compileExceptionHandlers()
// If any exception checks were linked, generate code to lookup a handler.
if (didLinkExceptionCheck) {
// lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
// the index into the CodeBlock's callReturnIndexVector corresponding to the
// call that threw the exception (this was set in nonPreservedNonReturnGPR, when
// the exception check was planted).
// the index of the CodeOrigin. The latter is unused, see
// https://bugs.webkit.org/show_bug.cgi?id=121734.
move(GPRInfo::nonPreservedNonReturnGPR, GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
#if CPU(X86)
......@@ -217,16 +216,6 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
for (unsigned i = 0; i < m_calls.size(); ++i)
linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);
m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
while (codeOrigin.inlineCallFrame)
codeOrigin = codeOrigin.inlineCallFrame->caller;
unsigned exceptionInfo = codeOrigin.bytecodeIndex;
m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
}
Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins = m_codeBlock->codeOrigins();
codeOrigins.resize(m_exceptionChecks.size());
......
......@@ -707,10 +707,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment