Commit b70e41bc authored by msaboff@apple.com

fourthTier: Change JSStack to grow from high to low addresses

https://bugs.webkit.org/show_bug.cgi?id=118758

Reviewed by Oliver Hunt.

Changed the JSC stack to grow down, from high addresses toward low.  Effectively the
JSC stack frame is flipped from what it was; see JSStack.h for the new offsets.  Renamed
JSStack begin() and end() to getBaseOfStack() and getLimitOfStack().  Most of the changes
are address or offset calculation changes.  Decoupled a local register ordinal (a loop
variable or array index) from its offset into the call frame via localToOperand() and its
inverse, operandToLocal().
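
For reference, a minimal sketch of what the decoupling looks like at a use site,
using the helpers defined in bytecode/Operands.h below (process() is a
hypothetical consumer, not part of this patch):

    // Walk every local slot without hard-coding which way the frame grows;
    // only localToOperand() knows the direction.
    for (int local = 0; local < codeBlock->m_numCalleeRegisters; ++local) {
        int operand = localToOperand(local);       // -local with the flipped frame
        ASSERT(operandToLocal(operand) == local);  // the two helpers are inverses
        process(exec->registers()[operand]);
    }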

* assembler/MacroAssembler.h:
(JSC::MacroAssembler::trustedImm32ForShift):
(JSC::MacroAssembler::lshiftPtr): Added to create scaled addresses with a negative index
* assembler/MacroAssemblerX86_64.h:
(JSC::MacroAssemblerX86_64::lshift64): Added to create scaled addresses with a negative index
* assembler/X86Assembler.h:
(JSC::X86Assembler::shlq_i8r): Added to create scaled addresses with a negative index
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpBytecode):
* bytecode/CodeBlock.h:
(JSC::unmodifiedArgumentsRegister):
(JSC::CodeBlock::isCaptured):
* bytecode/CodeOrigin.h:
(JSC::CodeOrigin::stackOffset):
* bytecode/Operands.h:
(JSC::localToOperand):
(JSC::operandIsLocal):
(JSC::operandToLocal):
(JSC::operandIsArgument):
(JSC::operandToArgument):
(JSC::argumentToOperand):
* bytecode/VirtualRegister.h: Made InvalidVirtualRegister a positive value that fits in
31 bits, since it must fit in the 31-bit bitfield "stackOffset" in struct InlineCallFrame.
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::addVar):
(JSC::BytecodeGenerator::BytecodeGenerator):
(JSC::BytecodeGenerator::createLazyRegisterIfNecessary):
(JSC::BytecodeGenerator::newRegister):
(JSC::BytecodeGenerator::emitNewArray):
* bytecompiler/BytecodeGenerator.h:
(JSC::CallArguments::registerOffset):
* bytecompiler/NodesCodegen.cpp:
(JSC::CallArguments::CallArguments):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::findArgumentPositionForLocal):
(JSC::DFG::ByteCodeParser::addCall):
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileFunction):
* dfg/DFGOSREntry.cpp:
(JSC::DFG::prepareOSREntry):
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOperations.cpp:
* dfg/DFGScoreBoard.h:
(JSC::DFG::ScoreBoard::allocate):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::compileGetByValOnArguments):
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::callFrameSlot):
(JSC::DFG::SpeculativeJIT::argumentSlot):
(JSC::DFG::SpeculativeJIT::callFrameTagSlot):
(JSC::DFG::SpeculativeJIT::callFramePayloadSlot):
(JSC::DFG::SpeculativeJIT::argumentTagSlot):
(JSC::DFG::SpeculativeJIT::argumentPayloadSlot):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::emitCall):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGValidate.cpp:
(JSC::DFG::Validate::reportValidationContext):
* ftl/FTLLink.cpp:
(JSC::FTL::link):
* heap/ConservativeRoots.cpp:
(JSC::ConservativeRoots::genericAddSpan):
* interpreter/CallFrame.cpp:
(JSC::CallFrame::frameExtentInternal):
* interpreter/CallFrame.h:
(JSC::ExecState::init):
(JSC::ExecState::argumentOffset):
(JSC::ExecState::argumentOffsetIncludingThis):
(JSC::ExecState::argIndexForRegister):
* interpreter/Interpreter.cpp:
(JSC::loadVarargs):
(JSC::Interpreter::dumpRegisters):
* interpreter/JSStack.cpp:
(JSC::JSStack::JSStack):
(JSC::JSStack::~JSStack):
(JSC::JSStack::growSlowCase):
(JSC::JSStack::gatherConservativeRoots):
(JSC::JSStack::releaseExcessCapacity):
(JSC::JSStack::disableErrorStackReserve):
* interpreter/JSStack.h:
(JSC::JSStack::getBaseOfStack):
(JSC::JSStack::getLimitOfStack):
(JSC::JSStack::size):
(JSC::JSStack::end):
(JSC::JSStack::containsAddress):
(JSC::JSStack::lowAddress):
(JSC::JSStack::highAddress):
(JSC::JSStack::reservationEnd):
(JSC::JSStack::shrink):
(JSC::JSStack::grow):
* interpreter/JSStackInlines.h:
(JSC::JSStack::getTopOfFrame):
(JSC::JSStack::pushFrame):
(JSC::JSStack::popFrame):
(JSC::JSStack::installTrapsAfterFrame):
* interpreter/StackVisitor.cpp:
(JSC::inlinedFrameOffset):
(JSC::StackVisitor::readInlinedFrame):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
* jit/JITCall.cpp:
(JSC::JIT::compileLoadVarargs):
(JSC::JIT::compileOpCall):
* jit/JITCall32_64.cpp:
(JSC::JIT::compileLoadVarargs):
(JSC::JIT::compileOpCall):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_create_activation):
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITStubs.cpp:
(JSC::throwExceptionFromOpCall):
(JSC::DEFINE_STUB_FUNCTION):
* jit/ThunkGenerators.cpp:
(JSC::arityFixup):
* llint/LLIntData.cpp:
(JSC::LLInt::Data::performAssertions):
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
(JSC::LLInt::genericCall):
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:
* runtime/ArgList.cpp:
(JSC::ArgList::getSlice):
(JSC::MarkedArgumentBuffer::slowAppend):
* runtime/ArgList.h:
(JSC::MarkedArgumentBuffer::MarkedArgumentBuffer):
(JSC::MarkedArgumentBuffer::slotFor):
(JSC::MarkedArgumentBuffer::mallocBase):
(JSC::ArgList::at):
* runtime/Arguments.cpp:
(JSC::Arguments::tearOff):
* runtime/ArrayConstructor.cpp:
(JSC::constructArrayWithSizeQuirk):
* runtime/CommonSlowPaths.cpp:
(JSC::SLOW_PATH_DECL):
* runtime/JSActivation.h:
(JSC::JSActivation::registersOffset):
(JSC::JSActivation::tearOff):
(JSC::JSActivation::isValidIndex):
* runtime/JSArray.h:
(JSC::constructArrayNegativeIndexed): New method to create an array from registers that grow down.
* runtime/JSGlobalObject.cpp:
(JSC::JSGlobalObject::globalExec):
* runtime/JSGlobalObject.h:
(JSC::constructArrayNegativeIndexed):
* runtime/JSString.h:
* runtime/Operations.h:
(JSC::jsStringFromRegisterArray):
* runtime/SymbolTable.h:
(JSC::SharedSymbolTable::captureCount):


git-svn-id: http://svn.webkit.org/repository/webkit/trunk@155711 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 4c74e8d8
--- a/assembler/MacroAssembler.h
+++ b/assembler/MacroAssembler.h
@@ -208,6 +208,13 @@ public:
     }
 #endif
 
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+    }
+
     // Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
     void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
     {
@@ -629,6 +636,11 @@ public:
         and64(imm, srcDest);
     }
 
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg64(dest);
@@ -1411,13 +1423,6 @@ public:
         return branchSub32(cond, src, imm.asTrustedImm32(), dest);
     }
 
-    // Immediate shifts only have 5 controllable bits
-    // so we'll consider them safe for now.
-    TrustedImm32 trustedImm32ForShift(Imm32 imm)
-    {
-        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
-    }
-
     void lshift32(Imm32 imm, RegisterID dest)
     {
         lshift32(trustedImm32ForShift(imm), dest);

--- a/assembler/MacroAssemblerX86_64.h
+++ b/assembler/MacroAssemblerX86_64.h
@@ -225,6 +225,11 @@ public:
         m_assembler.andq_ir(imm.m_value, srcDest);
     }
 
+    void lshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shlq_i8r(imm.m_value, dest);
+    }
+
     void neg64(RegisterID dest)
     {
         m_assembler.negq_r(dest);

--- a/assembler/X86Assembler.h
+++ b/assembler/X86Assembler.h
@@ -819,6 +819,16 @@ public:
             m_formatter.immediate8(imm);
         }
     }
 
+    void shlq_i8r(int imm, RegisterID dst)
+    {
+        if (imm == 1)
+            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+        else {
+            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+            m_formatter.immediate8(imm);
+        }
+    }
 #endif
 
     void imull_rr(RegisterID src, RegisterID dst)

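Taken together, the three assembler additions let JIT code form a scaled address
for an index that grows away from its base. A hedged sketch of the intended use
(register choices are illustrative, not taken from this patch):

    // Compute base - index * sizeof(Register) on x86-64:
    negPtr(indexGPR);                 // index = -index
    lshiftPtr(Imm32(3), indexGPR);    // index <<= 3, since sizeof(Register) == 8;
                                      // trustedImm32ForShift() masks the amount to 5 bits
    addPtr(baseGPR, indexGPR);        // indexGPR = base - original index * 8
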
--- a/bytecode/CodeBlock.cpp
+++ b/bytecode/CodeBlock.cpp
@@ -507,7 +507,7 @@ void CodeBlock::dumpBytecode(PrintStream& out)
     if (symbolTable() && symbolTable()->captureCount()) {
         out.printf(
             "; %d captured var(s) (from r%d to r%d, inclusive)",
-            symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() - 1);
+            symbolTable()->captureCount(), symbolTable()->captureStart(), symbolTable()->captureEnd() + 1);
     }
     if (usesArguments()) {
         out.printf(

--- a/bytecode/CodeBlock.h
+++ b/bytecode/CodeBlock.h
@@ -86,7 +86,7 @@ class ExecState;
 class LLIntOffsetsExtractor;
 class RepatchBuffer;
 
-inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister - 1; }
+inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister + 1; }
 
 static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
 
@@ -386,8 +386,8 @@ public:
         if (!symbolTable())
             return false;
-        return operand >= symbolTable()->captureStart()
-            && operand < symbolTable()->captureEnd();
+        return operand <= symbolTable()->captureStart()
+            && operand > symbolTable()->captureEnd();
     }
 
     CodeType codeType() const { return m_unlinkedCode->codeType(); }

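The flipped comparisons make the captured range captureStart-inclusive and
captureEnd-exclusive going downward, which also explains "- 1" becoming "+ 1"
in dumpBytecode() above. A worked example with assumed values:

    int captureStart = localToOperand(3);  // -3: first (highest) captured operand
    int captureEnd = localToOperand(5);    // -5: one past the last captured operand
    // isCaptured() accepts operands -3 and -4, so the last register printed
    // as "inclusive" is captureEnd + 1 == -4.
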
--- a/bytecode/CodeOrigin.h
+++ b/bytecode/CodeOrigin.h
@@ -76,7 +76,7 @@ struct CodeOrigin {
     // would have owned the code if it had not been inlined. Otherwise returns 0.
     ScriptExecutable* codeOriginOwner() const;
 
-    unsigned stackOffset() const;
+    int stackOffset() const;
 
     static unsigned inlineDepthForCallFrame(InlineCallFrame*);
@@ -97,7 +97,7 @@ struct InlineCallFrame {
     WriteBarrier<JSFunction> callee; // This may be null, indicating that this is a closure call and that the JSFunction and JSScope are already on the stack.
     CodeOrigin caller;
     BitVector capturedVars; // Indexed by the machine call frame's variable numbering.
-    unsigned stackOffset : 31;
+    signed stackOffset : 31;
     bool isCall : 1;
 
     CodeSpecializationKind specializationKind() const { return specializationFromIsCall(isCall); }
@@ -119,7 +119,7 @@ struct InlineCallFrame {
     MAKE_PRINT_METHOD(InlineCallFrame, dumpBriefFunctionInformation, briefFunctionInformation);
 };
 
-inline unsigned CodeOrigin::stackOffset() const
+inline int CodeOrigin::stackOffset() const
 {
     if (!inlineCallFrame)
         return 0;

--- a/bytecode/Operands.h
+++ b/bytecode/Operands.h
@@ -28,20 +28,19 @@
 
 #include "CallFrame.h"
 #include "JSObject.h"
-#include "VirtualRegister.h"
 #include <wtf/PrintStream.h>
 #include <wtf/Vector.h>
 
 namespace JSC {
 
-inline VirtualRegister localToOperand(int local) { return (VirtualRegister)local; }
-inline bool operandIsLocal(int operand) { return operand >= 0; }
-inline int operandToLocal(int operand) { return operand; }
+inline int localToOperand(int local) { return -local; }
+inline bool operandIsLocal(int operand) { return operand <= 0; }
+inline int operandToLocal(int operand) { return -operand; }
 
 // argument 0 is 'this'.
-inline bool operandIsArgument(int operand) { return operand < 0; }
-inline int operandToArgument(int operand) { return -operand + CallFrame::thisArgumentOffset(); }
-inline int argumentToOperand(int argument) { return -argument + CallFrame::thisArgumentOffset(); }
+inline bool operandIsArgument(int operand) { return operand > 0; }
+inline int operandToArgument(int operand) { return operand - CallFrame::thisArgumentOffset(); }
+inline int argumentToOperand(int argument) { return argument + CallFrame::thisArgumentOffset(); }
 
 template<typename T> struct OperandValueTraits;

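A few sanity checks that hold under the new definitions (assuming
CallFrame::thisArgumentOffset() is positive now that arguments sit at positive
operands):

    ASSERT(operandToLocal(localToOperand(7)) == 7);        // locals round-trip
    ASSERT(operandIsLocal(localToOperand(7)));             // locals are <= 0
    ASSERT(operandIsArgument(argumentToOperand(0)));       // argument 0 is 'this'
    ASSERT(operandToArgument(argumentToOperand(3)) == 3);  // arguments round-trip
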
--- a/bytecode/VirtualRegister.h
+++ b/bytecode/VirtualRegister.h
@@ -33,7 +33,7 @@ namespace JSC {
 // Type for a virtual register number (spill location).
 // Using an enum to make this type-checked at compile time, to avert programmer errors.
-enum VirtualRegister { InvalidVirtualRegister = 0x7fffffff };
+enum VirtualRegister { InvalidVirtualRegister = 0x3fffffff };
 
 COMPILE_ASSERT(sizeof(VirtualRegister) == sizeof(int), VirtualRegister_is_32bit);
 
 } // namespace JSC

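Why 0x3fffffff: a signed 31-bit bitfield tops out at 2^30 - 1, so the old value
would no longer survive a round-trip through InlineCallFrame::stackOffset. A
minimal check:

    struct Probe { signed stackOffset : 31; };
    Probe p;
    p.stackOffset = 0x3fffffff;          // 2^30 - 1: fits, reads back unchanged
    ASSERT(p.stackOffset == 0x3fffffff);
    // 0x7fffffff would wrap to a negative value in this field, making the
    // invalid marker indistinguishable from a real (negative) offset.
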
--- a/bytecompiler/BytecodeGenerator.cpp
+++ b/bytecompiler/BytecodeGenerator.cpp
@@ -116,7 +116,7 @@ ParserError BytecodeGenerator::generate()
 bool BytecodeGenerator::addVar(const Identifier& ident, bool isConstant, RegisterID*& r0)
 {
     ConcurrentJITLocker locker(symbolTable().m_lock);
-    int index = m_calleeRegisters.size();
+    int index = localToOperand(m_calleeRegisters.size());
     SymbolTableEntry newEntry(index, isConstant ? ReadOnly : 0);
     SymbolTable::Map::AddResult result = symbolTable().add(locker, ident.impl(), newEntry);
@@ -223,7 +223,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, Unl
         m_codeBlock->setActivationRegister(m_activationRegister->index());
     }
 
-    m_symbolTable->setCaptureStart(m_codeBlock->m_numVars);
+    m_symbolTable->setCaptureStart(localToOperand(m_codeBlock->m_numVars));
 
     if (functionBody->usesArguments() || codeBlock->usesEval()) { // May reify arguments object.
         RegisterID* unmodifiedArgumentsRegister = addVar(); // Anonymous, so it can't be modified by user code.
@@ -310,7 +310,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, Unl
         instructions().append(m_activationRegister->index());
     }
 
-    m_symbolTable->setCaptureEnd(codeBlock->m_numVars);
+    m_symbolTable->setCaptureEnd(localToOperand(codeBlock->m_numVars));
 
     m_firstLazyFunction = codeBlock->m_numVars;
     for (size_t i = 0; i < functionStack.size(); ++i) {
@@ -325,7 +325,7 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, Unl
                 emitNewFunction(reg.get(), function);
             else {
                 emitInitLazyRegister(reg.get());
-                m_lazyFunctions.set(reg->index(), function);
+                m_lazyFunctions.set(localToOperand(reg->index()), function);
             }
         }
     }
@@ -337,17 +337,17 @@ BytecodeGenerator::BytecodeGenerator(VM& vm, FunctionBodyNode* functionBody, Unl
     }
 
     if (shouldCaptureAllTheThings)
-        m_symbolTable->setCaptureEnd(codeBlock->m_numVars);
+        m_symbolTable->setCaptureEnd(localToOperand(codeBlock->m_numVars));
 
     FunctionParameters& parameters = *functionBody->parameters();
     m_parameters.grow(parameters.size() + 1); // reserve space for "this"
 
     // Add "this" as a parameter
     int nextParameterIndex = CallFrame::thisArgumentOffset();
-    m_thisRegister.setIndex(nextParameterIndex--);
+    m_thisRegister.setIndex(nextParameterIndex++);
     m_codeBlock->addParameter();
-    for (size_t i = 0; i < parameters.size(); ++i, --nextParameterIndex) {
+    for (size_t i = 0; i < parameters.size(); ++i, ++nextParameterIndex) {
         int index = nextParameterIndex;
         if (capturedArguments.size() && capturedArguments[i]) {
             ASSERT((functionBody->hasCapturedVariables() && functionBody->captures(parameters.at(i))) || shouldCaptureAllTheThings);
@@ -505,15 +505,17 @@ RegisterID* BytecodeGenerator::uncheckedRegisterForArguments()
 RegisterID* BytecodeGenerator::createLazyRegisterIfNecessary(RegisterID* reg)
 {
-    if (m_lastLazyFunction <= reg->index() || reg->index() < m_firstLazyFunction)
+    int localVariableNumber = operandToLocal(reg->index());
+
+    if (m_lastLazyFunction <= localVariableNumber || localVariableNumber < m_firstLazyFunction)
         return reg;
-    emitLazyNewFunction(reg, m_lazyFunctions.get(reg->index()));
+    emitLazyNewFunction(reg, m_lazyFunctions.get(localVariableNumber));
     return reg;
 }
 
 RegisterID* BytecodeGenerator::newRegister()
 {
-    m_calleeRegisters.append(m_calleeRegisters.size());
+    m_calleeRegisters.append(localToOperand(m_calleeRegisters.size()));
     m_codeBlock->m_numCalleeRegisters = max<int>(m_codeBlock->m_numCalleeRegisters, m_calleeRegisters.size());
     return &m_calleeRegisters.last();
 }
@@ -1484,7 +1486,7 @@ RegisterID* BytecodeGenerator::emitNewArray(RegisterID* dst, ElementNode* elemen
             break;
         argv.append(newTemporary());
         // op_new_array requires the initial values to be a sequential range of registers
-        ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() + 1);
+        ASSERT(argv.size() == 1 || argv[argv.size() - 1]->index() == argv[argv.size() - 2]->index() - 1);
         emitNode(argv.last().get(), n->value());
     }
     emitOpcode(op_new_array);

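Consequence of the new newRegister(): the i-th callee register gets operand -i,
so temporaries now form a descending sequence; that is what the reordered
ASSERTs in emitNewArray() and CallArguments::CallArguments() check. Assuming a
fresh BytecodeGenerator named generator with no registers allocated yet:

    RegisterID* r0 = generator.newRegister();  // index 0
    RegisterID* r1 = generator.newRegister();  // index -1
    RegisterID* r2 = generator.newRegister();  // index -2
    // op_new_array still gets a sequential range, just descending now:
    ASSERT(r1->index() == r0->index() - 1 && r2->index() == r1->index() - 1);
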
--- a/bytecompiler/BytecodeGenerator.h
+++ b/bytecompiler/BytecodeGenerator.h
@@ -66,7 +66,7 @@ namespace JSC {
         RegisterID* thisRegister() { return m_argv[0].get(); }
         RegisterID* argumentRegister(unsigned i) { return m_argv[i + 1].get(); }
-        unsigned registerOffset() { return m_argv.last()->index() + CallFrame::offsetFor(argumentCountIncludingThis()); }
+        unsigned registerOffset() { return -m_argv.last()->index() + CallFrame::offsetFor(argumentCountIncludingThis()); }
         unsigned argumentCountIncludingThis() { return m_argv.size(); }
         RegisterID* profileHookRegister() { return m_profileHookRegister.get(); }
         ArgumentsNode* argumentsNode() { return m_argumentsNode; }

--- a/bytecompiler/NodesCodegen.cpp
+++ b/bytecompiler/NodesCodegen.cpp
@@ -393,7 +393,7 @@ inline CallArguments::CallArguments(BytecodeGenerator& generator, ArgumentsNode*
     m_argv.grow(argumentCountIncludingThis);
     for (int i = argumentCountIncludingThis - 1; i >= 0; --i) {
         m_argv[i] = generator.newTemporary();
-        ASSERT(static_cast<size_t>(i) == m_argv.size() - 1 || m_argv[i]->index() == m_argv[i + 1]->index() + 1);
+        ASSERT(static_cast<size_t>(i) == m_argv.size() - 1 || m_argv[i]->index() == m_argv[i + 1]->index() - 1);
     }
 }

--- a/dfg/DFGByteCodeParser.cpp
+++ b/dfg/DFGByteCodeParser.cpp
@@ -389,11 +389,11 @@ private:
             InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
             if (!inlineCallFrame)
                 break;
-            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
+            if (operand <= static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
                 continue;
             if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                 continue;
-            if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
+            if (operand > static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize + inlineCallFrame->arguments.size()))
                 continue;
             int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
             return stack->m_argumentPositions[argument];
@@ -773,7 +773,7 @@ private:
         if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
             m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
 
-        int registerOffset = currentInstruction[4].u.operand;
+        int registerOffset = -currentInstruction[4].u.operand;
         int dummyThisArgument = op == Call ? 0 : 1;
         for (int i = 0 + dummyThisArgument; i < argCount; ++i)
             addVarArgChild(get(registerOffset + argumentToOperand(i)));
@@ -1158,7 +1158,7 @@ void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, Co
     }
 
     int argumentCountIncludingThis = currentInstruction[3].u.operand;
-    int registerOffset = currentInstruction[4].u.operand;
+    int registerOffset = -currentInstruction[4].u.operand;
 
     int resultOperand = currentInstruction[1].u.operand;
     unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
@@ -1283,7 +1283,7 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
 
     // FIXME: Don't flush constants!
 
-    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - JSStack::CallFrameHeaderSize;
+    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) + JSStack::CallFrameHeaderSize;
 
     // Make sure that the area used by the call frame is reserved.
     for (int arg = operandToLocal(inlineCallFrameStart) + JSStack::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > operandToLocal(inlineCallFrameStart);)
@@ -1957,7 +1957,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
         int startOperand = currentInstruction[2].u.operand;
         int numOperands = currentInstruction[3].u.operand;
         ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
-        for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
+        for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
             addVarArgChild(get(operandIdx));
         set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
         NEXT_OPCODE(op_new_array);
@@ -2237,7 +2237,7 @@ bool ByteCodeParser::parseBlock(unsigned limit)
 #endif
         OwnArrayPtr<Node*> toStringNodes = adoptArrayPtr(new Node*[numOperands]);
         for (int i = 0; i < numOperands; i++)
-            toStringNodes[i] = addToGraph(ToString, get(startOperand + i));
+            toStringNodes[i] = addToGraph(ToString, get(startOperand - i));
 
         for (int i = 0; i < numOperands; i++)
             addToGraph(Phantom, toStringNodes[i]);
@@ -3369,7 +3369,7 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
             byteCodeParser->m_codeBlock->inlineCallFrames().size(),
             byteCodeParser->m_codeBlock->ownerExecutable(),
             codeBlock->ownerExecutable());
-        inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
+        inlineCallFrame.stackOffset = inlineCallFrameStart - JSStack::CallFrameHeaderSize;
         if (callee) {
             initializeLazyWriteBarrierForInlineCallFrameCallee(
                 byteCodeParser->m_graph.m_plan.writeBarriers,

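With the frame flipped, an inlined callee's arguments live above its stackOffset
rather than below it. The two continue-guards in findArgumentPositionForLocal()
amount to this predicate, shown with assumed names and numbers:

    // Say stackOffset == 10, JSStack::CallFrameHeaderSize == 6, 2 arguments:
    // argument operands occupy (16, 18], i.e. operands 17 and 18.
    bool isArgumentOfFrame = operand > stackOffset + JSStack::CallFrameHeaderSize
        && operand <= stackOffset + JSStack::CallFrameHeaderSize + (int)numArguments;
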
--- a/dfg/DFGJITCompiler.cpp
+++ b/dfg/DFGJITCompiler.cpp
@@ -371,8 +371,8 @@ void JITCompiler::compileFunction()
     Label fromArityCheck(this);
 
     // Plant a check that sufficient space is available in the JSStack.
     // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
-    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
-    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
+    addPtr(TrustedImm32(-m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
 
     // Return here after stack check.
     Label fromStackCheck = label();

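The stack check inverts because a frame's extent now lies below the frame
pointer. In rough C terms (a sketch of what the emitted code tests, not JSC API):

    char* frameExtent = (char*)callFrame - numCalleeRegisters * sizeof(Register);
    bool needsGrowth = stackEnd > frameExtent;  // the 'Above' branch above;
                                                // take the slow path if true
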
--- a/dfg/DFGOSREntry.cpp
+++ b/dfg/DFGOSREntry.cpp
@@ -167,7 +167,7 @@ void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIn
     // it seems silly: you'd be diverting the program to error handling when it
     // would have otherwise just kept running albeit less quickly.
 
-    if (!vm->interpreter->stack().grow(&exec->registers()[codeBlock->m_numCalleeRegisters])) {
+    if (!vm->interpreter->stack().grow(&exec->registers()[localToOperand(codeBlock->m_numCalleeRegisters)])) {
         if (Options::verboseOSR())
             dataLogF("    OSR failed because stack growth failed.\n");
         return 0;

--- a/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/dfg/DFGOSRExitCompiler32_64.cpp
@@ -545,7 +545,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         case UnboxedInt32InGPR:
         case UnboxedBooleanInGPR: {
             m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
-            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(localToOperand(virtualRegister)));
+            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)localToOperand(virtualRegister)));
             uint32_t tag = JSValue::EmptyValueTag;
             if (recovery.technique() == InGPR)
                 tag = JSValue::CellTag;
@@ -553,7 +553,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 tag = JSValue::Int32Tag;
             else
                 tag = JSValue::BooleanTag;
-            m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor(localToOperand(virtualRegister)));
+            m_jit.store32(AssemblyHelpers::TrustedImm32(tag), AssemblyHelpers::tagFor((VirtualRegister)localToOperand(virtualRegister)));
             break;
         }
@@ -562,8 +562,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         case UInt32InGPR:
             m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0);
             m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + poisonIndex(virtualRegister)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1);
-            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(localToOperand(virtualRegister)));
-            m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor(localToOperand(virtualRegister)));
+            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)localToOperand(virtualRegister)));
+            m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)localToOperand(virtualRegister)));
             break;
 
         default:

--- a/dfg/DFGOSRExitCompiler64.cpp
+++ b/dfg/DFGOSRExitCompiler64.cpp
@@ -531,7 +531,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         case UInt32InGPR:
         case InFPR:
             m_jit.load64(scratchDataBuffer + poisonIndex(virtualRegister), GPRInfo::regT0);
-            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor(localToOperand(virtualRegister)));
+            m_jit.store64(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)localToOperand(virtualRegister)));
             break;
 
         default:

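The new (VirtualRegister) casts feed AssemblyHelpers' addressing helpers, which
scale an operand by the register size relative to the frame pointer; roughly (a
sketch of the helper's shape, not a verbatim copy):

    static Address addressFor(VirtualRegister virtualRegister)
    {
        // Locals are now negative operands, so this yields negative byte
        // offsets, i.e. addresses below the frame pointer.
        return Address(GPRInfo::callFrameRegister, virtualRegister * sizeof(Register));
    }
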
--- a/dfg/DFGOperations.cpp
+++ b/dfg/DFGOperations.cpp
@@ -1414,7 +1414,7 @@ char* DFG_OPERATION operationNewArray(ExecState* exec, Structure* arrayStructure
     VM* vm = &exec->vm();
     NativeCallFrameTracer tracer(vm, exec);
 
-    return bitwise_cast<char*>(constructArray(exec, arrayStructure, static_cast<JSValue*>(buffer), size));
+    return bitwise_cast<char*>(constructArrayNegativeIndexed(exec, arrayStructure, static_cast<JSValue*>(buffer), size));
 }
 
 char* DFG_OPERATION operationNewEmptyArray(ExecState* exec, Structure* arrayStructure)
@@ -1440,7 +1440,7 @@ char* DFG_OPERATION operationNewArrayBuffer(ExecState* exec, Structure* arrayStr
 {
     VM& vm = exec->vm();
     NativeCallFrameTracer tracer(&vm, exec);
-    return bitwise_cast<char*>(constructArray(exec, arrayStructure, exec->codeBlock()->constantBuffer(start), size));
+    return bitwise_cast<char*>(constructArrayNegativeIndexed(exec, arrayStructure, exec->codeBlock()->constantBuffer(start), size));
 }
 
 char* DFG_OPERATION operationNewInt8ArrayWithSize(
@@ -2258,7 +2258,7 @@ asm (
 ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
 HIDE_SYMBOL(getHostCallReturnValue) "\n"
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
-    "mov -40(%r13), %r13\n"
+    "mov 40(%r13), %r13\n"
     "mov %r13, %rdi\n"
     "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
 );
@@ -2268,7 +2268,7 @@ asm (
 ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
 HIDE_SYMBOL(getHostCallReturnValue) "\n"
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
-    "mov -40(%edi), %edi\n"
+    "mov 40(%edi), %edi\n"
     "mov %edi, 4(%esp)\n"
     "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
 );
@@ -2281,7 +2281,7 @@ HIDE_SYMBOL(getHostCallReturnValue) "\n"
 ".thumb" "\n"
 ".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
-    "ldr r5, [r5, #-40]" "\n"
+    "ldr r5, [r5, #40]" "\n"
     "mov r0, r5" "\n"
     "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
 );
@@ -2292,7 +2292,7 @@ asm (
 HIDE_SYMBOL(getHostCallReturnValue) "\n"
 INLINE_ARM_FUNCTION(getHostCallReturnValue)
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
-    "ldr r5, [r5, #-40]" "\n"
+    "ldr r5, [r5, #40]" "\n"
     "mov r0, r5" "\n"
     "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
 );
@@ -2303,7 +2303,7 @@ asm(
 HIDE_SYMBOL(getHostCallReturnValue) "\n"
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
 LOAD_FUNCTION_TO_T9(getHostCallReturnValueWithExecState)
-    "lw $s0, -40($s0)" "\n"
+    "lw $s0, 40($s0)" "\n"
     "move $a0, $s0" "\n"
     "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
 );
@@ -2313,7 +2313,7 @@ asm(
 ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
 HIDE_SYMBOL(getHostCallReturnValue) "\n"
 SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
-    "add #-40, r14" "\n"
+    "add #40, r14" "\n"
     "mov.l @r14, r14" "\n"
     "mov r14, r4" "\n"
     "mov.l 2f, " SH4_SCRATCH_REGISTER "\n"

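The JSArray.h side of constructArrayNegativeIndexed() is not included in this
excerpt. Per the ChangeLog it builds an array from a register span that grows
down, so a plausible sketch is constructArray() with a negated stride
(hypothetical body; assumes the existing ArgList-based constructArray()
overload):

    inline JSArray* constructArrayNegativeIndexed(ExecState* exec, Structure* structure, const JSValue* values, unsigned length)
    {
        MarkedArgumentBuffer args;
        for (unsigned i = 0; i < length; ++i)
            args.append(values[-static_cast<int>(i)]); // walk the span downward
        return constructArray(exec, structure, args);
    }
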
--- a/dfg/DFGScoreBoard.h
+++ b/dfg/DFGScoreBoard.h
@@ -89,14 +89,14 @@ public:
             // Use count must have hit zero for it to have been added to the free list!
             ASSERT(!m_used[index]);
             m_highWatermark = std::max(m_highWatermark, static_cast<unsigned>(index) + 1);
-            return localToOperand(index);
+            return (VirtualRegister)localToOperand(index);
         }
 
         // Allocate a new VirtualRegister, and add a corresponding entry to m_used.
         size_t next = m_used.size();
         m_used.append(0);
         m_highWatermark = std::max(m_highWatermark, static_cast<unsigned>(next) + 1);
-        return localToOperand(next);
+        return (VirtualRegister)localToOperand(next);