Commit ecd97b0c authored by fpizlo@apple.com
Browse files

Finally remove those DFG_ENABLE things

https://bugs.webkit.org/show_bug.cgi?id=125025

Rubber stamped by Sam Weinig.
        
This removes a bunch of unused and untested insanity.

* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::tallyFrequentExitSites):
* dfg/DFGArgumentsSimplificationPhase.cpp:
(JSC::DFG::ArgumentsSimplificationPhase::run):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::injectLazyOperandSpeculation):
(JSC::DFG::ByteCodeParser::getArrayModeConsideringSlowPath):
(JSC::DFG::ByteCodeParser::makeSafe):
(JSC::DFG::ByteCodeParser::makeDivSafe):
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::linkBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
(JSC::DFG::parse):
* dfg/DFGCFGSimplificationPhase.cpp:
(JSC::DFG::CFGSimplificationPhase::run):
(JSC::DFG::CFGSimplificationPhase::convertToJump):
(JSC::DFG::CFGSimplificationPhase::fixJettisonedPredecessors):
* dfg/DFGCSEPhase.cpp:
(JSC::DFG::CSEPhase::endIndexForPureCSE):
(JSC::DFG::CSEPhase::eliminateIrrelevantPhantomChildren):
(JSC::DFG::CSEPhase::setReplacement):
(JSC::DFG::CSEPhase::eliminate):
(JSC::DFG::CSEPhase::performNodeCSE):
* dfg/DFGCommon.h:
(JSC::DFG::verboseCompilationEnabled):
(JSC::DFG::logCompilationChanges):
(JSC::DFG::shouldDumpGraphAtEachPhase):
* dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
(JSC::DFG::FixupPhase::injectInt32ToDoubleNode):
* dfg/DFGInPlaceAbstractState.cpp:
(JSC::DFG::InPlaceAbstractState::initialize):
(JSC::DFG::InPlaceAbstractState::endBasicBlock):
(JSC::DFG::InPlaceAbstractState::mergeStateAtTail):
(JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileBody):
(JSC::DFG::JITCompiler::link):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::adjustAndJumpToTarget):
* dfg/DFGPredictionInjectionPhase.cpp:
(JSC::DFG::PredictionInjectionPhase::run):
* dfg/DFGPredictionPropagationPhase.cpp:
(JSC::DFG::PredictionPropagationPhase::run):
(JSC::DFG::PredictionPropagationPhase::propagate):
(JSC::DFG::PredictionPropagationPhase::propagateForward):
(JSC::DFG::PredictionPropagationPhase::propagateBackward):
(JSC::DFG::PredictionPropagationPhase::doRoundOfDoubleVoting):
* dfg/DFGScoreBoard.h:
(JSC::DFG::ScoreBoard::use):
* dfg/DFGSlowPathGenerator.h:
(JSC::DFG::SlowPathGenerator::generate):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::terminateSpeculativeExecution):
(JSC::DFG::SpeculativeJIT::runSlowPathGenerators):
(JSC::DFG::SpeculativeJIT::dump):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::checkGeneratedTypeForToInt32):
* dfg/DFGSpeculativeJIT.h:
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGVariableEventStream.cpp:
(JSC::DFG::VariableEventStream::reconstruct):
* dfg/DFGVariableEventStream.h:
(JSC::DFG::VariableEventStream::appendAndLog):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
(JSC::DFG::VirtualRegisterAllocationPhase::run):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@159886 268f45cc-cd09-0410-ab3c-d52691b4dbfc
parent 8fa12745
2013-11-29 Filip Pizlo <fpizlo@apple.com>
Finally remove those DFG_ENABLE things
https://bugs.webkit.org/show_bug.cgi?id=125025
Rubber stamped by Sam Weinig.
This removes a bunch of unused and untested insanity.
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::tallyFrequentExitSites):
* dfg/DFGArgumentsSimplificationPhase.cpp:
(JSC::DFG::ArgumentsSimplificationPhase::run):
* dfg/DFGByteCodeParser.cpp:
(JSC::DFG::ByteCodeParser::injectLazyOperandSpeculation):
(JSC::DFG::ByteCodeParser::getArrayModeConsideringSlowPath):
(JSC::DFG::ByteCodeParser::makeSafe):
(JSC::DFG::ByteCodeParser::makeDivSafe):
(JSC::DFG::ByteCodeParser::handleCall):
(JSC::DFG::ByteCodeParser::handleInlining):
(JSC::DFG::ByteCodeParser::parseBlock):
(JSC::DFG::ByteCodeParser::linkBlock):
(JSC::DFG::ByteCodeParser::InlineStackEntry::InlineStackEntry):
(JSC::DFG::ByteCodeParser::parseCodeBlock):
(JSC::DFG::ByteCodeParser::parse):
(JSC::DFG::parse):
* dfg/DFGCFGSimplificationPhase.cpp:
(JSC::DFG::CFGSimplificationPhase::run):
(JSC::DFG::CFGSimplificationPhase::convertToJump):
(JSC::DFG::CFGSimplificationPhase::fixJettisonedPredecessors):
* dfg/DFGCSEPhase.cpp:
(JSC::DFG::CSEPhase::endIndexForPureCSE):
(JSC::DFG::CSEPhase::eliminateIrrelevantPhantomChildren):
(JSC::DFG::CSEPhase::setReplacement):
(JSC::DFG::CSEPhase::eliminate):
(JSC::DFG::CSEPhase::performNodeCSE):
* dfg/DFGCommon.h:
(JSC::DFG::verboseCompilationEnabled):
(JSC::DFG::logCompilationChanges):
(JSC::DFG::shouldDumpGraphAtEachPhase):
* dfg/DFGConstantFoldingPhase.cpp:
(JSC::DFG::ConstantFoldingPhase::foldConstants):
* dfg/DFGFixupPhase.cpp:
(JSC::DFG::FixupPhase::fixupNode):
(JSC::DFG::FixupPhase::injectInt32ToDoubleNode):
* dfg/DFGInPlaceAbstractState.cpp:
(JSC::DFG::InPlaceAbstractState::initialize):
(JSC::DFG::InPlaceAbstractState::endBasicBlock):
(JSC::DFG::InPlaceAbstractState::mergeStateAtTail):
(JSC::DFG::InPlaceAbstractState::mergeToSuccessors):
* dfg/DFGJITCompiler.cpp:
(JSC::DFG::JITCompiler::compileBody):
(JSC::DFG::JITCompiler::link):
* dfg/DFGOSRExitCompiler.cpp:
* dfg/DFGOSRExitCompiler32_64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompiler64.cpp:
(JSC::DFG::OSRExitCompiler::compileExit):
* dfg/DFGOSRExitCompilerCommon.cpp:
(JSC::DFG::adjustAndJumpToTarget):
* dfg/DFGPredictionInjectionPhase.cpp:
(JSC::DFG::PredictionInjectionPhase::run):
* dfg/DFGPredictionPropagationPhase.cpp:
(JSC::DFG::PredictionPropagationPhase::run):
(JSC::DFG::PredictionPropagationPhase::propagate):
(JSC::DFG::PredictionPropagationPhase::propagateForward):
(JSC::DFG::PredictionPropagationPhase::propagateBackward):
(JSC::DFG::PredictionPropagationPhase::doRoundOfDoubleVoting):
* dfg/DFGScoreBoard.h:
(JSC::DFG::ScoreBoard::use):
* dfg/DFGSlowPathGenerator.h:
(JSC::DFG::SlowPathGenerator::generate):
* dfg/DFGSpeculativeJIT.cpp:
(JSC::DFG::SpeculativeJIT::terminateSpeculativeExecution):
(JSC::DFG::SpeculativeJIT::runSlowPathGenerators):
(JSC::DFG::SpeculativeJIT::dump):
(JSC::DFG::SpeculativeJIT::compileCurrentBlock):
(JSC::DFG::SpeculativeJIT::checkGeneratedTypeForToInt32):
* dfg/DFGSpeculativeJIT.h:
* dfg/DFGSpeculativeJIT32_64.cpp:
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGSpeculativeJIT64.cpp:
(JSC::DFG::SpeculativeJIT::fillSpeculateInt32Internal):
(JSC::DFG::SpeculativeJIT::fillSpeculateDouble):
(JSC::DFG::SpeculativeJIT::fillSpeculateCell):
(JSC::DFG::SpeculativeJIT::fillSpeculateBoolean):
(JSC::DFG::SpeculativeJIT::compile):
* dfg/DFGVariableEventStream.cpp:
(JSC::DFG::VariableEventStream::reconstruct):
* dfg/DFGVariableEventStream.h:
(JSC::DFG::VariableEventStream::appendAndLog):
* dfg/DFGVirtualRegisterAllocationPhase.cpp:
(JSC::DFG::VirtualRegisterAllocationPhase::run):
* jit/JIT.cpp:
(JSC::JIT::privateCompile):
2013-11-29 Filip Pizlo <fpizlo@apple.com>
FTL IC should nop-fill to make up the difference between the actual IC size and the requested patchpoint size
......
......@@ -3260,10 +3260,6 @@ void CodeBlock::tallyFrequentExitSites()
if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
continue;
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("OSR exit #", i, " (bc#", exit.m_codeOrigin.bytecodeIndex, ", ", exit.m_kind, ") for ", *this, " occurred frequently: counting as frequent exit site.\n");
#endif
}
break;
}
......@@ -3279,10 +3275,6 @@ void CodeBlock::tallyFrequentExitSites()
if (!exit.considerAddingAsFrequentExitSite(profiledBlock))
continue;
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("OSR exit #", i, " (bc#", exit.m_codeOrigin.bytecodeIndex, ", ", exit.m_kind, ") for ", *this, " occurred frequently: counting as frequent exit site.\n");
#endif
}
break;
}
......
......@@ -372,53 +372,6 @@ public:
}
}
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF("Arguments aliasing states:\n");
for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
VariableAccessData* variableAccessData = &m_graph.m_variableAccessData[i];
if (!variableAccessData->isRoot())
continue;
dataLog(" r", variableAccessData->local(), "(", VariableAccessDataDump(m_graph, variableAccessData), "): ");
if (variableAccessData->isCaptured())
dataLogF("Captured");
else {
ArgumentsAliasingData& data =
m_argumentsAliasing.find(variableAccessData)->value;
bool first = true;
if (data.callContextIsValid()) {
if (!first)
dataLogF(", ");
dataLogF("Have Call Context: %p", data.callContext);
first = false;
if (!m_createsArguments.contains(data.callContext))
dataLogF(" (Does Not Create Arguments)");
}
if (data.argumentsAssignmentIsValid()) {
if (!first)
dataLogF(", ");
dataLogF("Arguments Assignment Is Valid");
first = false;
}
if (!data.escapes) {
if (!first)
dataLogF(", ");
dataLogF("Does Not Escape");
first = false;
}
if (!first)
dataLogF(", ");
if (data.isValid()) {
if (m_createsArguments.contains(data.callContext))
dataLogF("VALID");
else
dataLogF("INVALID (due to argument creation)");
} else
dataLogF("INVALID (due to bad variable use)");
}
dataLogF("\n");
}
#endif
InsertionSet insertionSet(m_graph);
for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
......
......@@ -255,9 +255,6 @@ private:
ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
LazyOperandValueProfileKey key(m_currentIndex, node->local());
SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("Lazy operand [@", node->index(), ", bc#", m_currentIndex, ", r", node->local(), "] prediction: ", SpeculationDump(prediction), "\n");
#endif
node->variableAccessData()->predict(prediction);
return node;
}
......@@ -852,12 +849,6 @@ private:
profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
if (m_inlineStackTop->m_profiledBlock->numberOfRareCaseProfiles())
dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex, m_inlineStackTop->m_profiledBlock->rareCaseProfileForBytecodeOffset(m_currentIndex)->m_counter);
dataLogF("Array profile for bc#%u: %u %s%s\n", m_currentIndex, profile->observedArrayModes(locker), profile->structureIsPolymorphic(locker) ? " (polymorphic)" : "", profile->mayInterceptIndexedAccesses(locker) ? " (may intercept)" : "");
#endif
bool makeSafe =
m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| profile->outOfBounds(locker);
......@@ -899,18 +890,11 @@ private:
case ArithMul:
if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Making ArithMul @%u take deepest slow case.\n", node->index());
#endif
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
} else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Making ArithMul @%u take faster slow case.\n", node->index());
#endif
else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
|| m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
node->mergeFlags(NodeMayNegZero);
}
break;
default:
......@@ -936,10 +920,6 @@ private:
&& !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
return node;
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(node->op()), node->index(), m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
// FIXME: It might be possible to make this more granular. The DFG certainly can
// distinguish between negative zero and overflow in its exit profiles.
node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
......@@ -1160,10 +1140,6 @@ void ByteCodeParser::handleCall(Instruction* currentInstruction, NodeType op, Co
callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
}
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog("For call at bc#", m_currentIndex, ": ", callLinkStatus, "\n");
#endif
if (!callLinkStatus.canOptimize()) {
// Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
// that we cannot optimize them.
......@@ -1287,10 +1263,6 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
if (!canInlineFunctionFor(codeBlock, kind, callLinkStatus.isClosureCall()))
return false;
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Inlining executable %p.\n", executable);
#endif
// Now we know without a doubt that we are committed to inlining. So begin the process
// by checking the callee (if necessary) and making sure that arguments and the callee
// are flushed.
......@@ -1382,9 +1354,6 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
// If we created new blocks then the last block needs linking, but in the
// caller. It doesn't need to be linked to, but it needs outgoing links.
if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
// For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
// for release builds because this block will never serve as a potential target
// in the linker's binary search.
......@@ -1393,10 +1362,6 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
}
m_currentBlock = m_graph.lastBlock();
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
return true;
}
......@@ -1407,10 +1372,6 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
// Need to create a new basic block for the continuation at the caller.
RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.numBlocks(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
#endif
// Link the early returns to the basic block we're about to create.
for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
......@@ -1436,9 +1397,6 @@ bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, con
// At this point we return and continue to generate code for the caller, but
// in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
return true;
}
......@@ -1901,11 +1859,6 @@ bool ByteCodeParser::parseBlock(unsigned limit)
// to be true.
if (!m_currentBlock->isEmpty())
addToGraph(Jump, OpInfo(m_currentIndex));
else {
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
}
return shouldContinueParsing;
}
......@@ -3325,17 +3278,11 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
switch (node->op()) {
case Jump:
node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Linked basic block %p to %p, #%u.\n", block, node->takenBlock(), node->takenBlock()->index);
#endif
break;
case Branch:
node->setTakenBlock(blockForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
node->setNotTakenBlock(blockForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Linked basic block %p to %p, #%u and %p, #%u.\n", block, node->takenBlock(), node->takenBlock()->index, node->notTakenBlock(), node->notTakenBlock()->index);
#endif
break;
case Switch:
......@@ -3345,9 +3292,6 @@ void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleT
break;
default:
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Marking basic block %p as linked.\n", block);
#endif
break;
}
......@@ -3472,12 +3416,6 @@ ByteCodeParser::InlineStackEntry::InlineStackEntry(
m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal());
}
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Current captured variables: ");
m_inlineCallFrame->capturedVars.dump(WTF::dataFile());
dataLogF("\n");
#endif
byteCodeParser->buildOperandMapsIfNecessary();
m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
......@@ -3570,9 +3508,6 @@ void ByteCodeParser::parseCodeBlock()
}
bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
#if DFG_ENABLE(DEBUG_VERBOSE)
shouldDumpBytecode |= true;
#endif
if (shouldDumpBytecode) {
dataLog("Parsing ", *codeBlock);
if (inlineCallFrame()) {
......@@ -3601,12 +3536,6 @@ void ByteCodeParser::parseCodeBlock()
for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
// The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLog(
"Parsing bytecode with limit ", pointerDump(inlineCallFrame()),
" bc#", limit, " at inline depth ",
CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()), ".\n");
#endif
ASSERT(m_currentIndex < limit);
// Loop until we reach the current limit (i.e. next jump target).
......@@ -3626,15 +3555,9 @@ void ByteCodeParser::parseCodeBlock()
}
// Change its bytecode begin and continue.
m_currentBlock = m_graph.lastBlock();
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
m_currentBlock->bytecodeBegin = m_currentIndex;
} else {
RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
dataLogF("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.numBlocks(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
#endif
m_currentBlock = block.get();
// This assertion checks two things:
// 1) If the bytecodeBegin is greater than currentIndex, then something has gone
......@@ -3681,11 +3604,6 @@ bool ByteCodeParser::parse()
// Set during construction.
ASSERT(!m_currentIndex);
#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
// We should be pretending that the code has an activation.
ASSERT(m_graph.needsActivation());
#endif
InlineStackEntry inlineStackEntry(
this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
m_codeBlock->numParameters(), CodeForCall);
......@@ -3715,12 +3633,7 @@ bool ByteCodeParser::parse()
bool parse(Graph& graph)
{
SamplingRegion samplingRegion("DFG Parsing");
#if DFG_DEBUG_LOCAL_DISBALE
UNUSED_PARAM(graph);
return false;
#else
return ByteCodeParser(graph).parse();
#endif
}
} } // namespace JSC::DFG
......
......@@ -64,25 +64,12 @@ public:
// Successor with one predecessor -> merge.
if (block->successor(0)->predecessors.size() == 1) {
ASSERT(block->successor(0)->predecessors[0] == block);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog("CFGSimplify: Jump merge on Block ", *block, " to Block ", *block->successor(0), ".\n");
#endif
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
mergeBlocks(block, block->successor(0), noBlocks());
innerChanged = outerChanged = true;
break;
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog("CFGSimplify: Not jump merging on Block ", *block, " to Block ", *block->successor(0), " because predecessors = ",);
for (unsigned i = 0; i < block->successor(0)->predecessors.size(); ++i) {
if (i)
dataLogF(", ");
dataLog(*block->successor(0)->predecessors[i]);
}
dataLogF(".\n");
#endif
}
// FIXME: Block only has a jump -> remove. This is tricky though because of
......@@ -102,23 +89,11 @@ public:
BasicBlock* targetBlock = block->successorForCondition(condition);
BasicBlock* jettisonedBlock = block->successorForCondition(!condition);
if (targetBlock->predecessors.size() == 1) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Known condition (", condition, ") branch merge ",
"on Block ", *block, " to Block ", *targetBlock,
", jettisoning Block ", *jettisonedBlock, ".\n");
#endif
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
mergeBlocks(block, targetBlock, oneBlock(jettisonedBlock));
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Known condition (", condition, ") ",
"branch->jump conversion on Block ", *block, " to Block ",
targetBlock, ", jettisoning Block ", jettisonedBlock, ".\n");
#endif
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
......@@ -144,11 +119,6 @@ public:
break;
}
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF("Not branch simplifying on Block #%u because the successors differ and the condition is not known.\n",
blockIndex);
#endif
// Branch to same destination -> jump.
// FIXME: this will currently not be hit because of the lack of jump-only
// block simplification.
......@@ -198,24 +168,12 @@ public:
}
if (targetBlock->predecessors.size() == 1) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Known constant (", value, ") switch merge on ",
"Block ", *block, " to Block ", *targetBlock, ".\n");
#endif
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
mergeBlocks(block, targetBlock, jettisonedBlocks);
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Known constant (", value, ") switch->jump "
"conversion on Block ", *block, " to Block #",
*targetBlock, ".\n");
#endif
if (extremeLogging)
m_graph.dump();
m_graph.dethread();
......@@ -286,19 +244,9 @@ private:
ASSERT(targetBlock);
ASSERT(targetBlock->isReachable);
if (targetBlock->predecessors.size() == 1) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Branch/Switch to same successor merge on Block ", *block,
" to Block ", *targetBlock, ".\n");
#endif
m_graph.dethread();
mergeBlocks(block, targetBlock, noBlocks());
} else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"CFGSimplify: Branch->jump conversion to same successor on Block ",
*block, " to Block ", *targetBlock, ".\n",
#endif
Node* branch = block->last();
ASSERT(branch->isTerminal());
ASSERT(branch->op() == Branch || branch->op() == Switch);
......@@ -335,11 +283,6 @@ private:
void fixJettisonedPredecessors(BasicBlock* block, BasicBlock* jettisonedBlock)
{
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(
"Fixing predecessors and phis due to jettison of Block ", *jettisonedBlock,
" from Block ", *block, ".\n");
#endif
jettisonedBlock->removePredecessor(block);
}
......
......@@ -71,9 +71,6 @@ private:
else
result++;
ASSERT(result <= m_indexInBlock);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" limit %u: ", result);
#endif
return result;
}
......@@ -949,9 +946,6 @@ private:
if (edge->flags() & NodeRelevantToOSR)
continue;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLog(" Eliminating edge @", m_currentNode->index(), " -> @", edge->index());
#endif
node->children.removeEdge(i--);
m_changed = true;
}
......@@ -962,10 +956,6 @@ private:
if (!replacement)
return false;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" Replacing @%u -> @%u", m_currentNode->index(), replacement->index());
#endif
m_currentNode->convertToPhantom();
eliminateIrrelevantPhantomChildren(m_currentNode);
......@@ -979,10 +969,6 @@ private:
void eliminate()
{
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" Eliminating @%u", m_currentNode->index());
#endif
ASSERT(m_currentNode->mustGenerate());
m_currentNode->convertToPhantom();
eliminateIrrelevantPhantomChildren(m_currentNode);
......@@ -1010,10 +996,6 @@ private:
if (node->op() == SetLocal)
node->child1()->mergeFlags(NodeRelevantToOSR);
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF(" %s @%u: ", Graph::opName(node->op()), node->index());
#endif
switch (node->op()) {
case Identity:
......@@ -1337,9 +1319,6 @@ private:
}
m_lastSeen[node->op()] = m_indexInBlock;
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
dataLogF("\n");
#endif
}
void performBlockCSE(BasicBlock* block)
......
......@@ -34,39 +34,6 @@
#include "Options.h"
#include "VirtualRegister.h"
/* DFG_ENABLE() - turn on a specific features in the DFG JIT */
#define DFG_ENABLE(DFG_FEATURE) (defined DFG_ENABLE_##DFG_FEATURE && DFG_ENABLE_##DFG_FEATURE)
// Emit various logging information for debugging, including dumping the dataflow graphs.
#define DFG_ENABLE_DEBUG_VERBOSE 0
// Emit dumps during propagation, in addition to just after.
#define DFG_ENABLE_DEBUG_PROPAGATION_VERBOSE 0