Commit b6a00d35 authored by barraclough@apple.com's avatar barraclough@apple.com
Browse files

Implement a JIT-code aware sampling profiler for JSC

https://bugs.webkit.org/show_bug.cgi?id=76855

Rubber stamped by Geoff Garen.

Mechanical change - pass CodeBlock through to the executable allocator,
such that we will be able to map ranges of JIT code back to their owner.

* assembler/ARMAssembler.cpp:
(JSC::ARMAssembler::executableCopy):
* assembler/ARMAssembler.h:
* assembler/AssemblerBuffer.h:
(JSC::AssemblerBuffer::executableCopy):
* assembler/AssemblerBufferWithConstantPool.h:
(JSC::AssemblerBufferWithConstantPool::executableCopy):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::LinkBuffer):
(JSC::LinkBuffer::linkCode):
* assembler/MIPSAssembler.h:
(JSC::MIPSAssembler::executableCopy):
* assembler/SH4Assembler.h:
(JSC::SH4Assembler::executableCopy):
* assembler/X86Assembler.h:
(JSC::X86Assembler::executableCopy):
(JSC::X86Assembler::X86InstructionFormatter::executableCopy):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGRepatch.cpp:
(JSC::DFG::generateProtoChainAccessStub):
(JSC::DFG::tryCacheGetByID):
(JSC::DFG::tryBuildGetByIDList):
(JSC::DFG::tryCachePutByID):
* dfg/DFGThunks.cpp:
(JSC::DFG::osrExitGenerationThunkGenerator):
* jit/ExecutableAllocator.cpp:
(JSC::ExecutableAllocator::allocate):
* jit/ExecutableAllocator.h:
* jit/ExecutableAllocatorFixedVMPool.cpp:
(JSC::ExecutableAllocator::allocate):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
* jit/JITOpcodes.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
(JSC::JIT::privateCompileCTINativeCall):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITStubs.cpp:
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::finalize):
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::compile):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@105636 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent b458c9cb
2012-01-23 Gavin Barraclough <barraclough@apple.com>
Implement a JIT-code aware sampling profiler for JSC
https://bugs.webkit.org/show_bug.cgi?id=76855
Rubber stamped by Geoff Garen.
Mechanical change - pass CodeBlock through to the executable allocator,
such that we will be able to map ranges of JIT code back to their owner.
* assembler/ARMAssembler.cpp:
(JSC::ARMAssembler::executableCopy):
* assembler/ARMAssembler.h:
* assembler/AssemblerBuffer.h:
(JSC::AssemblerBuffer::executableCopy):
* assembler/AssemblerBufferWithConstantPool.h:
(JSC::AssemblerBufferWithConstantPool::executableCopy):
* assembler/LinkBuffer.h:
(JSC::LinkBuffer::LinkBuffer):
(JSC::LinkBuffer::linkCode):
* assembler/MIPSAssembler.h:
(JSC::MIPSAssembler::executableCopy):
* assembler/SH4Assembler.h:
(JSC::SH4Assembler::executableCopy):
* assembler/X86Assembler.h:
(JSC::X86Assembler::executableCopy):
(JSC::X86Assembler::X86InstructionFormatter::executableCopy):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compile):
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGRepatch.cpp:
(JSC::DFG::generateProtoChainAccessStub):
(JSC::DFG::tryCacheGetByID):
(JSC::DFG::tryBuildGetByIDList):
(JSC::DFG::tryCachePutByID):
* dfg/DFGThunks.cpp:
(JSC::DFG::osrExitGenerationThunkGenerator):
* jit/ExecutableAllocator.cpp:
(JSC::ExecutableAllocator::allocate):
* jit/ExecutableAllocator.h:
* jit/ExecutableAllocatorFixedVMPool.cpp:
(JSC::ExecutableAllocator::allocate):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
* jit/JITOpcodes.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::privateCompileCTIMachineTrampolines):
(JSC::JIT::privateCompileCTINativeCall):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::stringGetByValStubGenerator):
(JSC::JIT::privateCompilePutByIdTransition):
(JSC::JIT::privateCompilePatchGetArrayLength):
(JSC::JIT::privateCompileGetByIdProto):
(JSC::JIT::privateCompileGetByIdSelfList):
(JSC::JIT::privateCompileGetByIdProtoList):
(JSC::JIT::privateCompileGetByIdChainList):
(JSC::JIT::privateCompileGetByIdChain):
* jit/JITStubs.cpp:
* jit/SpecializedThunkJIT.h:
(JSC::SpecializedThunkJIT::finalize):
* yarr/YarrJIT.cpp:
(JSC::Yarr::YarrGenerator::compile):
2012-01-23 Xianzhu Wang <wangxianzhu@chromium.org>
 
Basic enhancements to StringBuilder
......@@ -344,14 +344,14 @@ void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID b
fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID)
{
// 64-bit alignment is required for next constant pool and JIT code as well
m_buffer.flushWithoutBarrier(true);
if (!m_buffer.isAligned(8))
bkpt(0);
RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData);
RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID);
char* data = reinterpret_cast<char*>(result->start());
for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
......
......@@ -679,7 +679,7 @@ namespace JSC {
return loadBranchTarget(ARMRegisters::pc, cc, useConstantPool);
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&);
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData&, void* ownerUID);
#ifndef NDEBUG
unsigned debugOffset() { return m_buffer.debugOffset(); }
......
......@@ -129,12 +129,12 @@ namespace JSC {
return AssemblerLabel(m_index);
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
if (!m_index)
return 0;
RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index);
RefPtr<ExecutableMemoryHandle> result = globalData.executableAllocator.allocate(globalData, m_index, ownerUID);
if (!result)
return 0;
......
......@@ -195,10 +195,10 @@ public:
putIntegralUnchecked(value.low);
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
flushConstantPool(false);
return AssemblerBuffer::executableCopy(globalData);
return AssemblerBuffer::executableCopy(globalData, ownerUID);
}
void putShortWithConstantInt(uint16_t insn, uint32_t constant, bool isReusable = false)
......
......@@ -31,6 +31,9 @@
#define DUMP_LINK_STATISTICS 0
#define DUMP_CODE 0
#define GLOBAL_THUNK_ID reinterpret_cast<void*>(static_cast<intptr_t>(-1))
#define REGEXP_CODE_ID reinterpret_cast<void*>(static_cast<intptr_t>(-2))
#include <MacroAssembler.h>
#include <wtf/Noncopyable.h>
......@@ -69,7 +72,7 @@ class LinkBuffer {
#endif
public:
LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm)
LinkBuffer(JSGlobalData& globalData, MacroAssembler* masm, void* ownerUID)
: m_size(0)
, m_code(0)
, m_assembler(masm)
......@@ -78,7 +81,7 @@ public:
, m_completed(false)
#endif
{
linkCode();
linkCode(ownerUID);
}
~LinkBuffer()
......@@ -173,10 +176,7 @@ public:
return applyOffset(label.m_label).m_offset;
}
// Upon completion of all patching either 'finalizeCode()' or 'finalizeCodeAddendum()' should be called
// once to complete generation of the code. 'finalizeCode()' is suited to situations
// where the executable pool must also be retained, the lighter-weight 'finalizeCodeAddendum()' is
// suited to adding to an existing allocation.
// Upon completion of all patching 'finalizeCode()' should be called once to complete generation of the code.
CodeRef finalizeCode()
{
performFinalization();
......@@ -210,18 +210,17 @@ private:
return src;
}
// Keep this private! - the underlying code should only be obtained externally via
// finalizeCode() or finalizeCodeAddendum().
// Keep this private! - the underlying code should only be obtained externally via finalizeCode().
void* code()
{
return m_code;
}
void linkCode()
void linkCode(void* ownerUID)
{
ASSERT(!m_code);
#if !ENABLE(BRANCH_COMPACTION)
m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData);
m_executableMemory = m_assembler->m_assembler.executableCopy(*m_globalData, ownerUID);
if (!m_executableMemory)
return;
m_code = m_executableMemory->start();
......@@ -229,7 +228,7 @@ private:
ASSERT(m_code);
#else
size_t initialSize = m_assembler->m_assembler.codeSize();
m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, initialSize);
m_executableMemory = m_globalData->executableAllocator.allocate(*m_globalData, initialSize, ownerUID);
if (!m_executableMemory)
return;
m_code = (uint8_t*)m_executableMemory->start();
......
......@@ -645,9 +645,9 @@ public:
return m_buffer.codeSize();
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData);
RefPtr<ExecutableMemoryHandle> result = m_buffer.executableCopy(globalData, ownerUID);
if (!result)
return 0;
......
......@@ -1513,9 +1513,9 @@ public:
return reinterpret_cast<void*>(readPCrelativeAddress((*instructionPtr & 0xff), instructionPtr));
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
return m_buffer.executableCopy(globalData);
return m_buffer.executableCopy(globalData, ownerUID);
}
void prefix(uint16_t pre)
......
......@@ -1782,9 +1782,9 @@ public:
return b.m_offset - a.m_offset;
}
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
return m_formatter.executableCopy(globalData);
return m_formatter.executableCopy(globalData, ownerUID);
}
#ifndef NDEBUG
......@@ -2132,9 +2132,9 @@ private:
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
void* data() const { return m_buffer.data(); }
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData)
PassRefPtr<ExecutableMemoryHandle> executableCopy(JSGlobalData& globalData, void* ownerUID)
{
return m_buffer.executableCopy(globalData);
return m_buffer.executableCopy(globalData, ownerUID);
}
#ifndef NDEBUG
......
......@@ -211,7 +211,7 @@ void JITCompiler::compile(JITCode& entry)
SpeculativeJIT speculative(*this);
compileBody(speculative);
LinkBuffer linkBuffer(*m_globalData, this);
LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock);
link(linkBuffer);
speculative.linkOSREntries(linkBuffer);
......@@ -271,7 +271,7 @@ void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWi
// === Link ===
LinkBuffer linkBuffer(*m_globalData, this);
LinkBuffer linkBuffer(*m_globalData, this, m_codeBlock);
link(linkBuffer);
speculative.linkOSREntries(linkBuffer);
......
......@@ -62,7 +62,7 @@ void compileOSRExit(ExecState* exec)
exitCompiler.compileExit(exit, recovery);
LinkBuffer patchBuffer(*globalData, &jit);
LinkBuffer patchBuffer(*globalData, &jit, codeBlock);
exit.m_code = patchBuffer.finalizeCode();
#if DFG_ENABLE(DEBUG_VERBOSE)
......
......@@ -150,7 +150,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
LinkBuffer patchBuffer(*globalData, &stubJit);
LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
......@@ -201,7 +201,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
LinkBuffer patchBuffer(*globalData, &stubJit);
LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
......@@ -380,7 +380,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
isDirect = true;
}
LinkBuffer patchBuffer(*globalData, &stubJit);
LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
CodeLocationLabel lastProtoBegin;
if (listIndex)
......@@ -606,7 +606,7 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
} else
success = stubJit.jump();
LinkBuffer patchBuffer(*globalData, &stubJit);
LinkBuffer patchBuffer(*globalData, &stubJit, codeBlock);
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToDone));
if (needToRestoreScratch)
patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.deltaCallToSlowCase));
......
......@@ -66,7 +66,7 @@ MacroAssemblerCodeRef osrExitGenerationThunkGenerator(JSGlobalData* globalData)
jit.jump(MacroAssembler::AbsoluteAddress(&globalData->osrExitJumpDestination));
LinkBuffer patchBuffer(*globalData, &jit);
LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
patchBuffer.link(functionCall, compileOSRExit);
......
......@@ -110,8 +110,10 @@ bool ExecutableAllocator::underMemoryPressure()
return false;
}
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes)
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID)
{
UNUSED_PARAM(ownerUID);
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
if (!result)
CRASH();
......
......@@ -113,7 +113,7 @@ public:
static void dumpProfile() { }
#endif
PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes);
PassRefPtr<ExecutableMemoryHandle> allocate(JSGlobalData&, size_t sizeInBytes, void* ownerUID);
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
static void makeWritable(void* start, size_t size)
......@@ -130,7 +130,6 @@ public:
static void makeExecutable(void*, size_t) {}
#endif
#if CPU(X86) || CPU(X86_64)
static void cacheFlush(void*, size_t)
{
......
......@@ -114,8 +114,10 @@ bool ExecutableAllocator::underMemoryPressure()
return statistics.bytesAllocated > statistics.bytesReserved / 2;
}
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes)
PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(JSGlobalData& globalData, size_t sizeInBytes, void* ownerUID)
{
UNUSED_PARAM(ownerUID);
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes);
if (!result) {
releaseExecutableMemory(globalData);
......
......@@ -614,7 +614,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
ASSERT(m_jmpTable.isEmpty());
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
......
......@@ -194,7 +194,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
......
......@@ -193,7 +193,7 @@ PassRefPtr<ExecutableMemoryHandle> JIT::privateCompileCTIMachineTrampolines(JSGl
Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
......@@ -492,7 +492,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(JSGlobalData* globalData, NativeFu
ret();
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return patchBuffer.finalizeCode();
......
......@@ -86,7 +86,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
jit.move(TrustedImm32(0), regT0);
jit.ret();
LinkBuffer patchBuffer(*globalData, &jit);
LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
return patchBuffer.finalizeCode();
}
......@@ -570,7 +570,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
......@@ -629,7 +629,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
......@@ -686,7 +686,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
......@@ -741,7 +741,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
}
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
......@@ -809,7 +809,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
......@@ -878,7 +878,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
......@@ -946,7 +946,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
LinkBuffer patchBuffer(*m_globalData, this);
LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment