Commit 842e0c70 authored by barraclough@apple.com's avatar barraclough@apple.com

An attempt to revive Windows bots.

Patch by Ryosuke Niwa <rniwa@webkit.org> on 2011-05-25
* runtime/RegExp.cpp:
* runtime/RegExp.h:



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@87359 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 7d76d9bc
......@@ -5,6 +5,66 @@
* runtime/RegExp.cpp:
* runtime/RegExp.h:
2011-05-25 Gavin Barraclough <barraclough@apple.com>
Reviewed by Sam Weinig.
Bug 61503 - Move population of CodeBlock::m_structureStubInfos into JIT
This data structure, used at runtime by the JIT, is currently unnecessarily populated
with default entries during byte compilation.
Aside from meaning that there is JIT specific code in the bytecompiler, this also ties
us to one entry per corresponding bytecode op, which may be undesirable. Instead,
populate this array from the JIT.
The type StructureStubInfo has two unused states, one for gets & one for puts. Unify
these, so that the class can have a default constructor (and to simplify switch statements
in code walking over the table).
This change has ramifications for the DFG JIT, in that the DFG JIT used this data structure
to check for functions containing property access. Instead, do so in the DFGByteCodeParser.
* bytecode/CodeBlock.cpp:
(JSC::printStructureStubInfo):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::setNumberOfStructureStubInfos):
(JSC::CodeBlock::numberOfStructureStubInfos):
* bytecode/StructureStubInfo.cpp:
(JSC::StructureStubInfo::deref):
(JSC::StructureStubInfo::visitAggregate):
* bytecode/StructureStubInfo.h:
(JSC::StructureStubInfo::StructureStubInfo):
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::emitGetById):
(JSC::BytecodeGenerator::emitPutById):
(JSC::BytecodeGenerator::emitDirectPutById):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::parseBlock):
* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileMainPass):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
* jit/JIT.h:
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_put_by_id):
(JSC::JIT::emit_op_method_check):
(JSC::JIT::compileGetByIdHotPath):
(JSC::JIT::compileGetByIdSlowCase):
(JSC::JIT::emitSlow_op_put_by_id):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emitSlow_op_get_by_id):
(JSC::JIT::emit_op_put_by_id):
(JSC::JIT::emitSlow_op_put_by_id):
(JSC::JIT::emit_op_method_check):
(JSC::JIT::compileGetByIdHotPath):
(JSC::JIT::compileGetByIdSlowCase):
* runtime/Executable.cpp:
(JSC::tryDFGCompile):
2011-05-25 Gavin Barraclough <barraclough@apple.com>
Reviewed by Sam Weinig.
......
......@@ -252,11 +252,8 @@ static void printStructureStubInfo(const StructureStubInfo& stubInfo, unsigned i
case access_put_by_id_replace:
printf(" [%4d] %s: %s\n", instructionOffset, "put_by_id_replace", pointerToSourceString(stubInfo.u.putByIdReplace.baseObjectStructure).utf8().data());
return;
case access_get_by_id:
printf(" [%4d] %s\n", instructionOffset, "get_by_id");
return;
case access_put_by_id:
printf(" [%4d] %s\n", instructionOffset, "put_by_id");
case access_unset:
printf(" [%4d] %s\n", instructionOffset, "unset");
return;
case access_get_by_id_generic:
printf(" [%4d] %s\n", instructionOffset, "op_get_by_id_generic");
......
......@@ -353,12 +353,8 @@ namespace JSC {
bool hasGlobalResolveInstructionAtBytecodeOffset(unsigned bytecodeOffset);
#endif
#if ENABLE(JIT)
void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
void addStructureStubInfo(const StructureStubInfo& stubInfo)
{
if (m_globalData->canUseJIT())
m_structureStubInfos.append(stubInfo);
}
StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
void addGlobalResolveInfo(unsigned globalResolveInstruction)
......
......@@ -50,8 +50,7 @@ void StructureStubInfo::deref()
case access_get_by_id_chain:
case access_put_by_id_transition:
case access_put_by_id_replace:
case access_get_by_id:
case access_put_by_id:
case access_unset:
case access_get_by_id_generic:
case access_put_by_id_generic:
case access_get_array_length:
......@@ -95,8 +94,7 @@ void StructureStubInfo::visitAggregate(SlotVisitor& visitor)
case access_put_by_id_replace:
visitor.append(&u.putByIdReplace.baseObjectStructure);
return;
case access_get_by_id:
case access_put_by_id:
case access_unset:
case access_get_by_id_generic:
case access_put_by_id_generic:
case access_get_array_length:
......
......@@ -43,8 +43,7 @@ namespace JSC {
access_get_by_id_proto_list,
access_put_by_id_transition,
access_put_by_id_replace,
access_get_by_id,
access_put_by_id,
access_unset,
access_get_by_id_generic,
access_put_by_id_generic,
access_get_array_length,
......@@ -52,8 +51,8 @@ namespace JSC {
};
struct StructureStubInfo {
StructureStubInfo(AccessType accessType)
: accessType(accessType)
StructureStubInfo()
: accessType(access_unset)
, seen(false)
{
}
......
......@@ -1387,10 +1387,6 @@ void BytecodeGenerator::emitMethodCheck()
RegisterID* BytecodeGenerator::emitGetById(RegisterID* dst, RegisterID* base, const Identifier& property)
{
#if ENABLE(JIT)
m_codeBlock->addStructureStubInfo(StructureStubInfo(access_get_by_id));
#endif
#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
#endif
......@@ -1418,9 +1414,6 @@ RegisterID* BytecodeGenerator::emitGetArgumentsLength(RegisterID* dst, RegisterI
RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
#if ENABLE(JIT)
m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
#endif
#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
#endif
......@@ -1439,9 +1432,6 @@ RegisterID* BytecodeGenerator::emitPutById(RegisterID* base, const Identifier& p
RegisterID* BytecodeGenerator::emitDirectPutById(RegisterID* base, const Identifier& property, RegisterID* value)
{
#if ENABLE(JIT)
m_codeBlock->addStructureStubInfo(StructureStubInfo(access_put_by_id));
#endif
#if ENABLE(INTERPRETER)
m_codeBlock->addPropertyAccessInstruction(instructions().size());
#endif
......
......@@ -36,9 +36,12 @@ namespace JSC { namespace DFG {
#if ENABLE(DFG_JIT_RESTRICTIONS)
// FIXME: Temporarily disable arithmetic, until we fix associated performance regressions.
// FIXME: temporarily disable property accesses until we fix regressions.
#define ARITHMETIC_OP() m_parseFailed = true
#define PROPERTY_ACCESS_OP() m_parseFailed = true
#else
#define ARITHMETIC_OP() ((void)0)
#define PROPERTY_ACCESS_OP() ((void)0)
#endif
// === ByteCodeParser ===
......@@ -862,6 +865,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_get_by_id: {
PROPERTY_ACCESS_OP();
NodeIndex base = get(currentInstruction[2].u.operand);
unsigned identifier = currentInstruction[3].u.operand;
......@@ -873,6 +877,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
}
case op_put_by_id: {
PROPERTY_ACCESS_OP();
NodeIndex value = get(currentInstruction[3].u.operand);
NodeIndex base = get(currentInstruction[1].u.operand);
unsigned identifier = currentInstruction[2].u.operand;
......
......@@ -74,7 +74,6 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_globalData(globalData)
, m_codeBlock(codeBlock)
, m_labels(codeBlock ? codeBlock->instructions().size() : 0)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
, m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
......@@ -175,7 +174,6 @@ void JIT::privateCompileMainPass()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
m_propertyAccessInstructionIndex = 0;
m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
......@@ -348,7 +346,6 @@ void JIT::privateCompileMainPass()
}
}
ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
......@@ -452,7 +449,7 @@ void JIT::privateCompileSlowCases()
}
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
#endif
ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
......@@ -573,7 +570,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) {
StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
......
......@@ -570,7 +570,7 @@ namespace JSC {
#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
void compileGetByIdHotPath(int baseVReg, Identifier*);
void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
#endif
void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
......
......@@ -278,8 +278,6 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
stubCall.addArgument(regT0);
stubCall.addArgument(TrustedImmPtr(ident));
stubCall.call(resultVReg);
m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
......@@ -301,8 +299,6 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(regT1);
stubCall.call();
m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
......@@ -329,7 +325,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
emitGetVirtualRegister(baseVReg, regT0);
// Do the method check - check the object & its prototype's structure inline (this is the common case).
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessCompilationInfo.size()));
MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
Jump notCell = emitJumpIfNotJSCell(regT0);
......@@ -358,7 +354,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
// Do a regular(ish) get_by_id (the slow case will be linked to
// cti_op_get_by_id_method_check instead of cti_op_get_by_id).
compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
compileGetByIdHotPath(baseVReg, ident);
match.link(this);
emitPutVirtualRegister(resultVReg);
......@@ -395,11 +391,11 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitGetVirtualRegister(baseVReg, regT0);
compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
compileGetByIdHotPath(baseVReg, ident);
emitPutVirtualRegister(resultVReg);
}
void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
{
// As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
// Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
......@@ -411,7 +407,8 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
......@@ -465,8 +462,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
m_propertyAccessInstructionIndex++;
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
......@@ -474,8 +470,6 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
unsigned baseVReg = currentInstruction[1].u.operand;
unsigned valueVReg = currentInstruction[3].u.operand;
unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
// such that the Structure & offset are always at the same distance from this.
......@@ -488,7 +482,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
......@@ -509,8 +504,6 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
unsigned direct = currentInstruction[8].u.operand;
unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
......@@ -521,7 +514,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}
// Compile a store into an object's property storage. May overwrite the
......
......@@ -157,13 +157,10 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
stubCall.addArgument(base);
stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.call(dst);
m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
m_propertyAccessInstructionIndex++;
ASSERT_NOT_REACHED();
}
......@@ -178,13 +175,10 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.addArgument(value);
stubCall.call();
m_propertyAccessInstructionIndex++;
}
void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
m_propertyAccessInstructionIndex++;
ASSERT_NOT_REACHED();
}
......@@ -202,7 +196,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
currentInstruction += OPCODE_LENGTH(op_method_check);
// Do the method check - check the object & its prototype's structure inline (this is the common case).
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessCompilationInfo.size()));
MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
int dst = currentInstruction[1].u.operand;
......@@ -425,8 +419,8 @@ void JIT::compileGetByIdHotPath()
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessInstructionIndex++;
m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
......@@ -480,8 +474,7 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl
ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
m_propertyAccessInstructionIndex++;
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
......@@ -500,8 +493,8 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
m_propertyAccessInstructionIndex++;
m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo());
m_propertyAccessCompilationInfo.last().hotPathBegin = hotPathBegin;
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
......@@ -534,8 +527,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
m_propertyAccessInstructionIndex++;
m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].callReturnLocation = call;
}
// Compile a store into an object's property storage. May overwrite base.
......
......@@ -232,8 +232,7 @@ static bool tryDFGCompile(JSGlobalData* globalData, CodeBlock* codeBlock, JITCod
#if ENABLE(DFG_JIT)
#if ENABLE(DFG_JIT_RESTRICTIONS)
// FIXME: No flow control yet supported, don't bother scanning the bytecode if there are any jump targets.
// FIXME: temporarily disable property accesses until we fix regressions.
if (codeBlock->numberOfJumpTargets() || codeBlock->numberOfStructureStubInfos())
if (codeBlock->numberOfJumpTargets())
return false;
#endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment