Commit 426f5b02 authored by oliver@apple.com

fourthTier: DFG should refer to BasicBlocks by BasicBlock* and not BlockIndex

https://bugs.webkit.org/show_bug.cgi?id=118339

Reviewed by Michael Saboff.

This accomplishes two goals:

1) Simplifies a bunch of code. You can now get to a successor or predecessor
   directly, since you already have its pointer. The backend(s) always hold onto
   a pointer to the block they're on, so you don't have to do any work to get
   from an index to the block.

2) It makes it possible to insert blocks into the program. Previously, doing so
   would have meant editing every reference to a block, since those references
   would hold stale indices after the insertion. Now, if an insertion changes the
   numbering, you just have to invalidate some analyses and update each block's
   BasicBlock::index accordingly (see the sketch below).
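
A minimal standalone sketch of the new idiom (toy structs, not the real WebKit
classes; only the accessor names Graph::numBlocks(), Graph::block(),
BasicBlock::index and BasicBlock::predecessors are taken from this patch),
showing why pointer-based edges survive a block insertion:

    // Toy illustration only; the real types live in dfg/DFGBasicBlock.h and
    // dfg/DFGGraph.h and carry far more state than this.
    #include <cassert>
    #include <cstdio>
    #include <memory>
    #include <vector>

    struct BasicBlock {
        unsigned index = 0;                    // mirrors BasicBlock::index
        std::vector<BasicBlock*> successors;   // edges are pointers, not indices
        std::vector<BasicBlock*> predecessors; // mirrors BasicBlock::predecessors
    };

    struct Graph {
        std::vector<std::unique_ptr<BasicBlock>> blocks;

        unsigned numBlocks() const { return blocks.size(); }            // mirrors Graph::numBlocks()
        BasicBlock* block(unsigned i) const { return blocks[i].get(); } // mirrors Graph::block()

        BasicBlock* appendBlock()
        {
            blocks.push_back(std::make_unique<BasicBlock>());
            blocks.back()->index = blocks.size() - 1;
            return blocks.back().get();
        }
    };

    int main()
    {
        Graph graph;
        BasicBlock* root = graph.appendBlock();
        BasicBlock* exitBlock = graph.appendBlock();
        root->successors.push_back(exitBlock);
        exitBlock->predecessors.push_back(root);

        // Insert a block between root and exitBlock. Because edges hold
        // BasicBlock*, only the two affected edge lists change; no stored
        // index anywhere else in the graph has to be rewritten.
        BasicBlock* middle = graph.appendBlock();
        root->successors[0] = middle;
        middle->predecessors.push_back(root);
        middle->successors.push_back(exitBlock);
        exitBlock->predecessors[0] = middle;

        for (unsigned i = 0; i < graph.numBlocks(); ++i) {
            BasicBlock* block = graph.block(i);
            assert(block->index == i); // holds here because we only append; a
                                       // mid-vector insertion would just need a
                                       // renumbering pass
            std::printf("block #%u: %zu successor(s), %zu predecessor(s)\n",
                block->index, block->successors.size(), block->predecessors.size());
        }
        return 0;
    }

With index-based edges, the same insertion would have meant rewriting every
stored takenBlockIndex/notTakenBlockIndex and predecessor index in the graph,
which is exactly the editing burden goal 2 removes.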

* dfg/DFGAbstractState.cpp:
(JSC::DFG::AbstractState::initialize):
(JSC::DFG::AbstractState::endBasicBlock):
(JSC::DFG::AbstractState::mergeToSuccessors):
* dfg/DFGAbstractState.h:
(AbstractState):
* dfg/DFGArgumentsSimplificationPhase.cpp:
(JSC::DFG::ArgumentsSimplificationPhase::run):
* dfg/DFGBackwardsPropagationPhase.cpp:
(JSC::DFG::BackwardsPropagationPhase::run):
* dfg/DFGBasicBlock.h:
(DFG):
(JSC::DFG::BasicBlock::BasicBlock):
(JSC::DFG::BasicBlock::size):
(JSC::DFG::BasicBlock::isEmpty):
(JSC::DFG::BasicBlock::at):
(JSC::DFG::BasicBlock::operator[]):
(JSC::DFG::BasicBlock::last):
(JSC::DFG::BasicBlock::resize):
(JSC::DFG::BasicBlock::grow):
(BasicBlock):
(JSC::DFG::BasicBlock::append):
(JSC::DFG::BasicBlock::numSuccessors):
(JSC::DFG::BasicBlock::successor):
(JSC::DFG::BasicBlock::successorForCondition):
(JSC::DFG::BasicBlock::dump):
(UnlinkedBlock):
(JSC::DFG::UnlinkedBlock::UnlinkedBlock):
(JSC::DFG::getBytecodeBeginForBlock):
(JSC::DFG::blockForBytecodeOffset):
* dfg/DFGByteCodeParser.cpp:
(ByteCodeParser):
(InlineStackEntry):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::linkBlock):
(JSC::DFG::ByteCodeParser::linkBlocks):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
* dfg/DFGCFAPhase.cpp:
(JSC::DFG::CFAPhase::performBlockCFA):
(JSC::DFG::CFAPhase::performForwardCFA):
* dfg/DFGCFGSimplificationPhase.cpp:
(JSC::DFG::CFGSimplificationPhase::run):
(JSC::DFG::CFGSimplificationPhase::convertToJump):
* dfg/DFGCPSRethreadingPhase.cpp:
(JSC::DFG::CPSRethreadingPhase::freeUnnecessaryNodes):
(JSC::DFG::CPSRethreadingPhase::canonicalizeLocalsInBlocks):
(JSC::DFG::CPSRethreadingPhase::propagatePhis):
(CPSRethreadingPhase):
* dfg/DFGCSEPhase.cpp:
(JSC::DFG::CSEPhase::run):
* dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::run):
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* dfg/DFGDCEPhase.cpp:
(JSC::DFG::DCEPhase::run):
* dfg/DFGDisassembler.cpp:
(JSC::DFG::Disassembler::Disassembler):
(JSC::DFG::Disassembler::createDumpList):
* dfg/DFGDisassembler.h:
(JSC::DFG::Disassembler::setForBlockIndex):
* dfg/DFGDominators.cpp:
(JSC::DFG::Dominators::compute):
(JSC::DFG::Dominators::iterateForBlock):
* dfg/DFGDominators.h:
(JSC::DFG::Dominators::dominates):
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::run):
(JSC::DFG::FixupPhase::fixupNode):
* dfg/DFGGraph.cpp:
(JSC::DFG::Graph::dump):
(JSC::DFG::Graph::dumpBlockHeader):
(JSC::DFG::Graph::handleSuccessor):
(JSC::DFG::Graph::determineReachability):
(JSC::DFG::Graph::resetReachability):
* dfg/DFGGraph.h:
(JSC::DFG::Graph::numBlocks):
(JSC::DFG::Graph::block):
(JSC::DFG::Graph::lastBlock):
(Graph):
(JSC::DFG::Graph::appendBlock):
(JSC::DFG::Graph::killBlock):
(DFG):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::JITCompiler):
(JSC::DFG::JITCompiler::link):
* dfg/DFGJITCompiler.h:
(JSC::DFG::JITCompiler::setForBlockIndex):
* dfg/DFGNaturalLoops.cpp:
(JSC::DFG::NaturalLoop::dump):
(JSC::DFG::NaturalLoops::compute):
(JSC::DFG::NaturalLoops::loopsOf):
* dfg/DFGNaturalLoops.h:
(JSC::DFG::NaturalLoop::NaturalLoop):
(JSC::DFG::NaturalLoop::addBlock):
(JSC::DFG::NaturalLoop::header):
(JSC::DFG::NaturalLoop::at):
(JSC::DFG::NaturalLoop::operator[]):
(JSC::DFG::NaturalLoop::contains):
(NaturalLoop):
(JSC::DFG::NaturalLoops::headerOf):
(NaturalLoops):
* dfg/DFGNode.h:
(DFG):
(JSC::DFG::SwitchCase::SwitchCase):
(JSC::DFG::SwitchCase::withBytecodeIndex):
(SwitchCase):
(JSC::DFG::SwitchCase::targetBytecodeIndex):
(JSC::DFG::SwitchData::SwitchData):
(JSC::DFG::SwitchData::setFallThroughBytecodeIndex):
(JSC::DFG::SwitchData::fallThroughBytecodeIndex):
(SwitchData):
(JSC::DFG::Node::setTakenBlock):
(JSC::DFG::Node::setNotTakenBlock):
(JSC::DFG::Node::takenBlock):
(JSC::DFG::Node::notTakenBlock):
(JSC::DFG::Node::successor):
(JSC::DFG::Node::successorForCondition):
* dfg/DFGPredictionInjectionPhase.cpp:
(JSC::DFG::PredictionInjectionPhase::run):
* dfg/DFGPredictionPropagationPhase.cpp:
(JSC::DFG::PredictionPropagationPhase::propagateForward):
(JSC::DFG::PredictionPropagationPhase::propagateBackward):
(JSC::DFG::PredictionPropagationPhase::doRoundOfDoubleVoting):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::convertLastOSRExitToForward):
(JSC::DFG::SpeculativeJIT::nonSpeculativeCompare):
(JSC::DFG::SpeculativeJIT::nonSpeculativeStrictEq):
(JSC::DFG::SpeculativeJIT::compilePeepHoleDoubleBranch):
(JSC::DFG::SpeculativeJIT::compilePeepHoleObjectEquality):
(JSC::DFG::SpeculativeJIT::compilePeepHoleIntegerBranch):
(JSC::DFG::SpeculativeJIT::compilePeepHoleBranch):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::compile):
(JSC::DFG::SpeculativeJIT::createOSREntries):
(JSC::DFG::SpeculativeJIT::linkOSREntries):
(JSC::DFG::SpeculativeJIT::compileStrictEqForConstant):
(JSC::DFG::SpeculativeJIT::compileStrictEq):
(JSC::DFG::SpeculativeJIT::compileRegExpExec):
(JSC::DFG::SpeculativeJIT::addBranch):
(JSC::DFG::SpeculativeJIT::linkBranches):
* dfg/DFGSpeculativeJIT.h:
(JSC::DFG::SpeculativeJIT::nextBlock):
(SpeculativeJIT):
(JSC::DFG::SpeculativeJIT::detectPeepHoleBranch):
(JSC::DFG::SpeculativeJIT::branchDouble):
(JSC::DFG::SpeculativeJIT::branchDoubleNonZero):
(JSC::DFG::SpeculativeJIT::branch32):
(JSC::DFG::SpeculativeJIT::branchTest32):
(JSC::DFG::SpeculativeJIT::branch64):
(JSC::DFG::SpeculativeJIT::branch8):
(JSC::DFG::SpeculativeJIT::branchPtr):
(JSC::DFG::SpeculativeJIT::branchTestPtr):
(JSC::DFG::SpeculativeJIT::branchTest8):
(JSC::DFG::SpeculativeJIT::jump):
(JSC::DFG::SpeculativeJIT::addBranch):
(JSC::DFG::SpeculativeJIT::StringSwitchCase::StringSwitchCase):
(StringSwitchCase):
(JSC::DFG::SpeculativeJIT::BranchRecord::BranchRecord):
(BranchRecord):
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
(JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
(JSC::DFG::SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality):
(JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranchNull):
(JSC::DFG::SpeculativeJIT::nonSpeculativeCompareNull):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeBranch):
(JSC::DFG::SpeculativeJIT::nonSpeculativePeepholeStrictEq):
(JSC::DFG::SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality):
(JSC::DFG::SpeculativeJIT::emitObjectOrOtherBranch):
(JSC::DFG::SpeculativeJIT::emitBranch):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGTypeCheckHoistingPhase.cpp:
(JSC::DFG::TypeCheckHoistingPhase::run):
(JSC::DFG::TypeCheckHoistingPhase::identifyRedundantStructureChecks):
(JSC::DFG::TypeCheckHoistingPhase::identifyRedundantArrayChecks):
(JSC::DFG::TypeCheckHoistingPhase::disableHoistingAcrossOSREntries):
* dfg/DFGUnificationPhase.cpp:
(JSC::DFG::UnificationPhase::run):
* dfg/DFGValidate.cpp:
(JSC::DFG::Validate::validate):
(JSC::DFG::Validate::checkOperand):
(JSC::DFG::Validate::reportValidationContext):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
(JSC::DFG::VirtualRegisterAllocationPhase::run):
* ftl/FTLCapabilities.cpp:
(JSC::FTL::canCompile):
* ftl/FTLLowerDFGToLLVM.cpp:
(JSC::FTL::LowerDFGToLLVM::LowerDFGToLLVM):
(JSC::FTL::LowerDFGToLLVM::lower):
(JSC::FTL::LowerDFGToLLVM::compileBlock):
(JSC::FTL::LowerDFGToLLVM::compileJump):
(JSC::FTL::LowerDFGToLLVM::compileBranch):
(JSC::FTL::LowerDFGToLLVM::lowBlock):

git-svn-id: http://svn.webkit.org/repository/webkit/trunk@153267 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 5a554043
@@ -2123,9 +2123,9 @@
034768DFFF38A50411DB9C8B /* Products */ = {
isa = PBXGroup;
children = (
932F5BE10822A1C700736975 /* jsc */,
0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */,
932F5BD90822A1C700736975 /* JavaScriptCore.framework */,
932F5BE10822A1C700736975 /* jsc */,
141211200A48793C00480255 /* minidom */,
14BD59BF0A3E8F9000BAF59C /* testapi */,
6511230514046A4C002B101D /* testRegExp */,
@@ -85,7 +85,7 @@ void AbstractState::beginBasicBlock(BasicBlock* basicBlock)
void AbstractState::initialize(Graph& graph)
{
BasicBlock* root = graph.m_blocks[0].get();
BasicBlock* root = graph.block(0);
root->cfaShouldRevisit = true;
root->cfaHasVisited = false;
root->cfaFoundConstants = false;
@@ -118,8 +118,8 @@ void AbstractState::initialize(Graph& graph)
root->valuesAtHead.local(i).clear();
root->valuesAtTail.local(i).clear();
}
for (BlockIndex blockIndex = 1 ; blockIndex < graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 1 ; blockIndex < graph.numBlocks(); ++blockIndex) {
BasicBlock* block = graph.block(blockIndex);
if (!block)
continue;
if (!block->isReachable)
@@ -200,7 +200,7 @@ bool AbstractState::endBasicBlock(MergeMode mergeMode)
if (mergeMode != MergeToSuccessors)
return changed;
return mergeToSuccessors(m_graph, block);
return mergeToSuccessors(block);
}
void AbstractState::reset()
@@ -1816,7 +1816,7 @@ inline bool AbstractState::merge(BasicBlock* from, BasicBlock* to)
return changed;
}
inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBlock)
inline bool AbstractState::mergeToSuccessors(BasicBlock* basicBlock)
{
Node* terminal = basicBlock->last();
@@ -1826,24 +1826,24 @@ inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBloc
case Jump: {
ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" Merging to block #%u.\n", terminal->takenBlockIndex());
dataLog(" Merging to block ", *terminal->takenBlock(), ".\n");
#endif
return merge(basicBlock, graph.m_blocks[terminal->takenBlockIndex()].get());
return merge(basicBlock, terminal->takenBlock());
}
case Branch: {
ASSERT(basicBlock->cfaBranchDirection != InvalidBranchDirection);
bool changed = false;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" Merging to block #%u.\n", terminal->takenBlockIndex());
dataLog(" Merging to block ", *terminal->takenBlock(), ".\n");
#endif
if (basicBlock->cfaBranchDirection != TakeFalse)
changed |= merge(basicBlock, graph.m_blocks[terminal->takenBlockIndex()].get());
changed |= merge(basicBlock, terminal->takenBlock());
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" Merging to block #%u.\n", terminal->notTakenBlockIndex());
dataLog(" Merging to block ", *terminal->notTakenBlock(), ".\n");
#endif
if (basicBlock->cfaBranchDirection != TakeTrue)
changed |= merge(basicBlock, graph.m_blocks[terminal->notTakenBlockIndex()].get());
changed |= merge(basicBlock, terminal->notTakenBlock());
return changed;
}
@@ -1852,9 +1852,9 @@ inline bool AbstractState::mergeToSuccessors(Graph& graph, BasicBlock* basicBloc
// we're not. However I somehow doubt that this will ever be a big deal.
ASSERT(basicBlock->cfaBranchDirection == InvalidBranchDirection);
SwitchData* data = terminal->switchData();
bool changed = merge(basicBlock, graph.m_blocks[data->fallThrough].get());
bool changed = merge(basicBlock, data->fallThrough);
for (unsigned i = data->cases.size(); i--;)
changed |= merge(basicBlock, graph.m_blocks[data->cases[i].target].get());
changed |= merge(basicBlock, data->cases[i].target);
return changed;
}
@@ -227,7 +227,7 @@ public:
// successors. Returns true if any of the successors' states changed. Note
// that this is automatically called in endBasicBlock() if MergeMode is
// MergeToSuccessors.
bool mergeToSuccessors(Graph&, BasicBlock*);
bool mergeToSuccessors(BasicBlock*);
void dump(PrintStream& out);
@@ -142,8 +142,8 @@ public:
// Figure out which variables are live, using a conservative approximation of
// liveness.
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
@@ -164,8 +164,8 @@ public:
// used only for GetByVal and GetArrayLength accesses. At the same time,
// identify uses of CreateArguments that are not consistent with the arguments
// being aliased only to variables that satisfy these constraints.
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
@@ -352,8 +352,8 @@ public:
// the arguments as requiring creation. This is a property of SetLocals to
// variables that are neither the correct arguments register nor are marked as
// being arguments-aliased.
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
@@ -428,8 +428,8 @@ public:
InsertionSet insertionSet(m_graph);
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = 0; indexInBlock < block->size(); indexInBlock++) {
@@ -661,8 +661,8 @@ public:
insertionSet.execute(block);
}
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
@@ -44,8 +44,8 @@ public:
bool run()
{
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
@@ -39,12 +39,14 @@
namespace JSC { namespace DFG {
class Graph;
class InsertionSet;
typedef Vector<BlockIndex, 2> PredecessorList;
typedef Vector<BasicBlock*, 2> PredecessorList;
struct BasicBlock : Vector<Node*, 8> {
struct BasicBlock : RefCounted<BasicBlock> {
BasicBlock(unsigned bytecodeBegin, unsigned numArguments, unsigned numLocals)
: bytecodeBegin(bytecodeBegin)
, index(NoBlock)
, isOSRTarget(false)
, cfaHasVisited(false)
, cfaShouldRevisit(false)
@@ -74,6 +76,17 @@ struct BasicBlock : Vector<Node*, 8> {
valuesAtTail.ensureLocals(newNumLocals);
}
size_t size() const { return m_nodes.size(); }
bool isEmpty() const { return !size(); }
Node*& at(size_t i) { return m_nodes[i]; }
Node* at(size_t i) const { return m_nodes[i]; }
Node* operator[](size_t i) const { return at(i); }
Node* last() const { return at(size() - 1); }
void resize(size_t size) { m_nodes.resize(size); }
void grow(size_t size) { m_nodes.grow(size); }
void append(Node* node) { m_nodes.append(node); }
size_t numNodes() const { return phis.size() + size(); }
Node* node(size_t i) const
{
@@ -101,15 +114,33 @@ struct BasicBlock : Vector<Node*, 8> {
return false;
}
unsigned numSuccessors() { return last()->numSuccessors(); }
BasicBlock* successor(unsigned index)
{
return last()->successor(index);
}
BasicBlock* successorForCondition(bool condition)
{
return last()->successorForCondition(condition);
}
#define DFG_DEFINE_APPEND_NODE(templatePre, templatePost, typeParams, valueParamsComma, valueParams, valueArgs) \
templatePre typeParams templatePost Node* appendNode(Graph&, SpeculatedType valueParamsComma valueParams);
DFG_VARIADIC_TEMPLATE_FUNCTION(DFG_DEFINE_APPEND_NODE)
#undef DFG_DEFINE_APPEND_NODE
void dump(PrintStream& out) const
{
out.print("#", index);
}
// This value is used internally for block linking and OSR entry. It is mostly meaningless
// for other purposes due to inlining.
unsigned bytecodeBegin;
BlockIndex index;
bool isOSRTarget;
bool cfaHasVisited;
bool cfaShouldRevisit;
@@ -122,30 +153,44 @@ struct BasicBlock : Vector<Node*, 8> {
bool isReachable;
Vector<Node*> phis;
PredecessorList m_predecessors;
PredecessorList predecessors;
Operands<Node*, NodePointerTraits> variablesAtHead;
Operands<Node*, NodePointerTraits> variablesAtTail;
Operands<AbstractValue> valuesAtHead;
Operands<AbstractValue> valuesAtTail;
private:
friend class InsertionSet;
Vector<Node*, 8> m_nodes;
};
struct UnlinkedBlock {
BlockIndex m_blockIndex;
BasicBlock* m_block;
bool m_needsNormalLinking;
bool m_needsEarlyReturnLinking;
UnlinkedBlock() { }
explicit UnlinkedBlock(BlockIndex blockIndex)
: m_blockIndex(blockIndex)
explicit UnlinkedBlock(BasicBlock* block)
: m_block(block)
, m_needsNormalLinking(true)
, m_needsEarlyReturnLinking(false)
{
}
};
static inline unsigned getBytecodeBeginForBlock(BasicBlock** basicBlock)
{
return (*basicBlock)->bytecodeBegin;
}
static inline BasicBlock* blockForBytecodeOffset(Vector<BasicBlock*>& linkingTargets, unsigned bytecodeBegin)
{
return *binarySearch<BasicBlock*, unsigned>(linkingTargets, linkingTargets.size(), bytecodeBegin, getBytecodeBeginForBlock);
}
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)
@@ -79,15 +79,14 @@ public:
}
private:
void performBlockCFA(BlockIndex blockIndex)
void performBlockCFA(BasicBlock* block)
{
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
if (!block)
return;
if (!block->cfaShouldRevisit)
return;
if (verbose)
dataLogF(" Block #%u (bc#%u):\n", blockIndex, block->bytecodeBegin);
dataLog(" Block ", *block, ":\n");
m_state.beginBasicBlock(block);
if (verbose) {
dataLogF(" head vars: ");
@@ -127,8 +126,8 @@ private:
if (verbose)
dataLogF("CFA [%u]\n", ++m_count);
for (BlockIndex block = 0; block < m_graph.m_blocks.size(); ++block)
performBlockCFA(block);
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
performBlockCFA(m_graph.block(blockIndex));
}
private:
@@ -69,8 +69,8 @@ private:
{
SamplingRegion samplingRegion("DFG CPS Rethreading: freeUnnecessaryNodes");
for (BlockIndex blockIndex = m_graph.m_blocks.size(); blockIndex--;) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
ASSERT(block->isReachable);
@@ -387,8 +387,8 @@ private:
{
SamplingRegion samplingRegion("DFG CPS Rethreading: canonicalizeLocalsInBlocks");
for (m_blockIndex = m_graph.m_blocks.size(); m_blockIndex--;) {
m_block = m_graph.m_blocks[m_blockIndex].get();
for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
m_block = m_graph.block(blockIndex);
canonicalizeLocalsInBlock();
}
}
@@ -402,20 +402,19 @@ private:
// Ensure that attempts to use this fail instantly.
m_block = 0;
m_blockIndex = NoBlock;
while (!phiStack.isEmpty()) {
PhiStackEntry entry = phiStack.last();
phiStack.removeLast();
BasicBlock* block = entry.m_block;
PredecessorList& predecessors = block->m_predecessors;
PredecessorList& predecessors = block->predecessors;
Node* currentPhi = entry.m_phi;
VariableAccessData* variable = currentPhi->variableAccessData();
size_t index = entry.m_index;
for (size_t i = predecessors.size(); i--;) {
BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();
BasicBlock* predecessorBlock = predecessors[i];
Node* variableInPrevious = predecessorBlock->variablesAtTail.atFor<operandKind>(index);
if (!variableInPrevious) {
@@ -481,7 +480,6 @@ private:
return m_localPhiStack;
}
BlockIndex m_blockIndex;
BasicBlock* m_block;
Vector<PhiStackEntry, 128> m_argumentPhiStack;
Vector<PhiStackEntry, 128> m_localPhiStack;
......
@@ -52,8 +52,8 @@ public:
m_changed = false;
for (unsigned block = 0; block < m_graph.m_blocks.size(); ++block)
performBlockCSE(m_graph.m_blocks[block].get());
for (unsigned blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
performBlockCSE(m_graph.block(blockIndex));
return m_changed;
}
@@ -52,24 +52,23 @@ public:
{
bool changed = false;
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
if (block->cfaFoundConstants)
changed |= foldConstants(blockIndex);
changed |= foldConstants(block);
}
return changed;
}
private:
bool foldConstants(BlockIndex blockIndex)
bool foldConstants(BasicBlock* block)
{
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF("Constant folding considering Block #%u.\n", blockIndex);
dataLog("Constant folding considering Block ", *block, ".\n");
#endif
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
bool changed = false;
m_state.beginBasicBlock(block);
for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
@@ -46,8 +46,8 @@ public:
bool run()
{
// First reset the counts to 0 for all nodes.
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = block->size(); indexInBlock--;)
@@ -60,8 +60,8 @@ public:
// - Nodes that are must-generate.
// - Nodes that are reachable from type checks.
// Set their ref counts to 1 and put them on the worklist.
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
for (unsigned indexInBlock = block->size(); indexInBlock--;) {
@@ -81,8 +81,8 @@ public:
DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge);
}
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
@@ -36,7 +36,7 @@ namespace JSC { namespace DFG {
Disassembler::Disassembler(Graph& graph)
: m_graph(graph)
{
m_labelForBlockIndex.resize(graph.m_blocks.size());
m_labelForBlockIndex.resize(graph.numBlocks());
}
void Disassembler::dump(PrintStream& out, LinkBuffer& linkBuffer)
@@ -97,13 +97,13 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff
Node* lastNode = 0;
MacroAssembler::Label previousLabel = m_startOfCode;
for (size_t blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
BasicBlock* block = m_graph.m_blocks[blockIndex].get();
for (size_t blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
BasicBlock* block = m_graph.block(blockIndex);
if (!block)
continue;
dumpDisassembly(out, disassemblyPrefix, linkBuffer, previousLabel, m_labelForBlockIndex[blockIndex], lastNode);
append(result, out, previousOrigin);
m_graph.dumpBlockHeader(out, prefix, blockIndex, Graph::DumpLivePhisOnly);
m_graph.dumpBlockHeader(out, prefix, block, Graph::DumpLivePhisOnly);
append(result, out, previousOrigin);
Node* lastNodeForDisassembly = block->at(0);
for (size_t i = 0; i < block->size(); ++i) {
@@ -118,7 +118,7 @@ Vector<Disassembler::DumpedOp> Disassembler::createDumpList(LinkBuffer& linkBuff
// as the end point. This case is hit either during peephole compare
// optimizations (the Branch won't have its own label) or if we have a
// forced OSR exit.
if (blockIndex + 1 < m_graph.m_blocks.size())
if (blockIndex + 1 < m_graph.numBlocks())
currentLabel = m_labelForBlockIndex[blockIndex + 1];
else
currentLabel = m_endOfMainPath;
@@ -47,7 +47,7 @@ public:
Disassembler(Graph&);
void setStartOfCode(MacroAssembler::Label label) { m_startOfCode = label; }
void setForBlock(BlockIndex blockIndex, MacroAssembler::Label label)
void setForBlockIndex(BlockIndex blockIndex, MacroAssembler::Label label)
{
m_labelForBlockIndex[blockIndex] = label;
}
@@ -44,9 +44,9 @@ void Dominators::compute(Graph& graph)
{
// This implements a naive dominator solver.
ASSERT(graph.m_blocks[0]->m_predecessors.isEmpty());
ASSERT(graph.block(0)->predecessors.isEmpty());
unsigned numBlocks = graph.m_blocks.size();
unsigned numBlocks = graph.numBlocks();
if (numBlocks > m_results.size()) {
m_results.grow(numBlocks);
@@ -60,13 +60,13 @@ void Dominators::compute(Graph& graph)
m_scratch.clearAll();
for (unsigned i = numBlocks; i--;) {
if (!graph.m_blocks[i])
if (!graph.block(i))
continue;
m_scratch.set(i);
}
for (unsigned i = numBlocks; i-- > 1;) {
if (!graph.m_blocks[i] || graph.m_blocks[i]->m_predecessors.isEmpty())
if (!graph.block(i) || graph.block(i)->predecessors.isEmpty())
m_results[i].clearAll();
else
m_results[i].set(m_scratch);
@@ -88,14 +88,14 @@ void Dominators::compute(Graph& graph)
bool Dominators::iterateForBlock(Graph& graph, BlockIndex i)
{
BasicBlock* block = graph.m_blocks[i].get();
BasicBlock* block = graph.block(i);
if (!block)
return false;
if (block->m_predecessors.isEmpty())
if (block->predecessors.isEmpty())
return false;
m_scratch.set(m_results[block->m_predecessors[0]]);
for (unsigned j = block->m_predecessors.size(); j-- > 1;)
m_scratch.filter(m_results[block->m_predecessors[j]]);
m_scratch.set(m_results[block->predecessors[0]->index]);
for (unsigned j = block->predecessors.size(); j-- > 1;)
m_scratch.filter(m_results[block->predecessors[j]->index]);
m_scratch.set(i);
return m_results[i].setAndCheck(m_scratch);
}
@@ -31,6 +31,7 @@
#if ENABLE(DFG_JIT)
#include "DFGAnalysis.h"
#include "DFGBasicBlock.h"
#include "DFGCommon.h"
#include <wtf/FastBitVector.h>
@@ -48,7 +49,12 @@ public:
bool dominates(BlockIndex from, BlockIndex to) const
{
ASSERT(isValid());
return m_results[to].get(from);
return m_results[from].get(to);
}
bool dominates(BasicBlock* from, BasicBlock* to) const
{
return dominates(from->index, to->index);
}
private:
@@ -51,8 +51,8 @@ public:
ASSERT(m_graph.m_form == ThreadedCPS);
m_profitabilityChanged = false;
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex)
fixupBlock(m_graph.m_blocks[blockIndex].get());
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
fixupBlock(m_graph.block(blockIndex));
while (m_profitabilityChanged) {
m_profitabilityChanged = false;
@@ -60,8 +60,8 @@ public:
for (unsigned i = m_graph.m_argumentPositions.size(); i--;)
m_graph.m_argumentPositions[i].mergeArgumentUnboxingAwareness();
for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex)
fixupSetLocalsInBlock(m_graph.m_blocks[blockIndex].get());
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
fixupSetLocalsInBlock(m_graph.block(blockIndex));
}
return true;
@@ -589,10 +589,10 @@ private:
if (newChildEdge->hasBooleanResult()) {
node->children.setChild1(newChildEdge);
BlockIndex toBeTaken = node->notTakenBlockIndex();
BlockIndex toBeNotTaken = node->takenBlockIndex();
node->setTakenBlockIndex(toBeTaken);
node->setNotTakenBlockIndex(toBeNotTaken);
BasicBlock* toBeTaken = node->notTakenBlock();
BasicBlock* toBeNotTaken = node->takenBlock();
node->setTakenBlock(toBeTaken);
node->setNotTakenBlock(toBeNotTaken);
}
}
}
@@ -253,15 +253,15 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node)
if (op == WeakJSConstant)
out.print(comma, RawPointer(node->weakConstant()), " (structure: ", *node->weakConstant()->structure(), ")");
if (node->isBranch() || node->isJump())
out.print(comma, "T:#", node->takenBlockIndex());
out.print(comma, "T:", *node->takenBlock());
if (node->isBranch())
out.print(comma, "F:#", node->notTakenBlockIndex());
out.print(comma, "F:", *node->notTakenBlock());
if (node->isSwitch()) {
SwitchData* data = node->switchData();
out.print(comma, data->kind);
for (unsigned i = 0; i < data->cases.size(); ++i)
out.print(comma, data->cases[i].value, ":#", data->cases[i].target);
out.print(comma, "default:#", data->fallThrough);
out.print(comma, data->cases[i].value, ":", *data->cases[i].target);
out.print(comma, "default:", *data->fallThrough);
}
out.print(comma, "bc#", node->codeOrigin.bytecodeIndex);
@@ -277,37 +277,35 @@ void Graph::dump(PrintStream& out, const char* prefix, Node* node)
out.print("\n");