Commit ba83364c authored by fpizlo@apple.com

Get rid of ENABLE(VALUE_PROFILER). It's on all the time now.

Rubber stamped by Mark Hahnenberg.

Source/JavaScriptCore: 

* bytecode/CallLinkStatus.cpp:
(JSC::CallLinkStatus::computeFor):
* bytecode/CodeBlock.cpp:
(JSC::CodeBlock::dumpValueProfiling):
(JSC::CodeBlock::dumpArrayProfiling):
(JSC::CodeBlock::dumpRareCaseProfile):
(JSC::CodeBlock::dumpBytecode):
(JSC::CodeBlock::CodeBlock):
(JSC::CodeBlock::setNumParameters):
(JSC::CodeBlock::shrinkToFit):
(JSC::CodeBlock::shouldOptimizeNow):
* bytecode/CodeBlock.h:
(JSC::CodeBlock::valueProfileForBytecodeOffset):
* bytecode/GetByIdStatus.cpp:
(JSC::GetByIdStatus::computeForChain):
(JSC::GetByIdStatus::computeFor):
* bytecode/LazyOperandValueProfile.cpp:
* bytecode/LazyOperandValueProfile.h:
* bytecode/PutByIdStatus.cpp:
(JSC::PutByIdStatus::computeFor):
* bytecode/ValueProfile.h:
* bytecompiler/BytecodeGenerator.cpp:
(JSC::BytecodeGenerator::newArrayProfile):
(JSC::BytecodeGenerator::newArrayAllocationProfile):
(JSC::BytecodeGenerator::emitProfiledOpcode):
* jit/GPRInfo.h:
* jit/JIT.cpp:
(JSC::JIT::JIT):
(JSC::JIT::privateCompileSlowCases):
(JSC::JIT::privateCompile):
* jit/JIT.h:
* jit/JITArithmetic.cpp:
(JSC::JIT::compileBinaryArithOp):
(JSC::JIT::emit_op_mul):
(JSC::JIT::emit_op_div):
* jit/JITArithmetic32_64.cpp:
(JSC::JIT::emitBinaryDoubleOp):
(JSC::JIT::emit_op_mul):
(JSC::JIT::emitSlow_op_mul):
(JSC::JIT::emit_op_div):
* jit/JITCall.cpp:
(JSC::JIT::emitPutCallResult):
* jit/JITCall32_64.cpp:
(JSC::JIT::emitPutCallResult):
* jit/JITInlines.h:
(JSC::JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile):
(JSC::JIT::emitValueProfilingSite):
(JSC::JIT::emitArrayProfilingSiteForBytecodeIndex):
(JSC::JIT::emitArrayProfileStoreToHoleSpecialCase):
(JSC::JIT::emitArrayProfileOutOfBoundsSpecialCase):
(JSC::arrayProfileSaw):
(JSC::JIT::chooseArrayMode):
* jit/JITOpcodes.cpp:
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITOpcodes32_64.cpp:
(JSC::JIT::emit_op_get_argument_by_val):
* jit/JITPropertyAccess.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_from_scope):
* jit/JITPropertyAccess32_64.cpp:
(JSC::JIT::emit_op_get_by_val):
(JSC::JIT::emitSlow_op_get_by_val):
(JSC::JIT::emit_op_get_by_id):
(JSC::JIT::emit_op_get_from_scope):
* llint/LLIntOfflineAsmConfig.h:
* llint/LLIntSlowPaths.cpp:
(JSC::LLInt::LLINT_SLOW_PATH_DECL):
* llint/LowLevelInterpreter.asm:
* llint/LowLevelInterpreter32_64.asm:
* llint/LowLevelInterpreter64.asm:
* profiler/ProfilerBytecodeSequence.cpp:
(JSC::Profiler::BytecodeSequence::BytecodeSequence):
* runtime/CommonSlowPaths.cpp:

Source/WTF: 

* wtf/Platform.h:



git-svn-id: http://svn.webkit.org/repository/webkit/trunk@161364 268f45cc-cd09-0410-ab3c-d52691b4dbfc
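
The change itself is mechanical: each #if ENABLE(VALUE_PROFILER) guard (and its #else fallback, where one existed) is deleted so the profiling code becomes unconditional, and the flag's definition presumably goes away in wtf/Platform.h. A minimal sketch of the before/after pattern in C++, using CodeBlock::setNumParameters() as it appears in the hunks below; this illustrates the shape of the change only, it is not the complete edit.

// Before: the argument value profiles were only sized when the profiler was compiled in.
void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;
#if ENABLE(VALUE_PROFILER)
    m_argumentValueProfiles.resizeToFit(newValue);
#endif
}

// After: the profiler is always on, so the guard is removed and the body runs unconditionally.
void CodeBlock::setNumParameters(int newValue)
{
    m_numParameters = newValue;
    m_argumentValueProfiles.resizeToFit(newValue);
}
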
......
......@@ -101,7 +101,7 @@ CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned byt
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
#if ENABLE(JIT)
if (!profiledBlock->hasBaselineJITProfiling())
return computeFromLLInt(profiledBlock, bytecodeIndex);
......
......@@ -622,16 +622,11 @@ void CodeBlock::dumpValueProfiling(PrintStream& out, const Instruction*& it, boo
ConcurrentJITLocker locker(m_lock);
++it;
#if ENABLE(VALUE_PROFILER)
CString description = it->u.profile->briefDescription(locker);
if (!description.length())
return;
beginDumpProfiling(out, hasPrintedProfiling);
out.print(description);
#else
UNUSED_PARAM(out);
UNUSED_PARAM(hasPrintedProfiling);
#endif
}
void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, bool& hasPrintedProfiling)
......@@ -639,7 +634,6 @@ void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, boo
ConcurrentJITLocker locker(m_lock);
++it;
#if ENABLE(VALUE_PROFILER)
if (!it->u.arrayProfile)
return;
CString description = it->u.arrayProfile->briefDescription(locker, this);
......@@ -647,13 +641,8 @@ void CodeBlock::dumpArrayProfiling(PrintStream& out, const Instruction*& it, boo
return;
beginDumpProfiling(out, hasPrintedProfiling);
out.print(description);
#else
UNUSED_PARAM(out);
UNUSED_PARAM(hasPrintedProfiling);
#endif
}
#if ENABLE(VALUE_PROFILER)
void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCaseProfile* profile, bool& hasPrintedProfiling)
{
if (!profile || !profile->m_counter)
......@@ -662,7 +651,6 @@ void CodeBlock::dumpRareCaseProfile(PrintStream& out, const char* name, RareCase
beginDumpProfiling(out, hasPrintedProfiling);
out.print(name, profile->m_counter);
}
#endif
void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instruction* begin, const Instruction*& it, const StubInfoMap& map)
{
......@@ -1422,10 +1410,8 @@ void CodeBlock::dumpBytecode(PrintStream& out, ExecState* exec, const Instructio
#endif
}
#if ENABLE(VALUE_PROFILER)
dumpRareCaseProfile(out, "rare case: ", rareCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
dumpRareCaseProfile(out, "special fast case: ", specialFastCaseProfileForBytecodeOffset(location), hasPrintedProfiling);
#endif
#if ENABLE(DFG_JIT)
Vector<DFG::FrequentExitSite> exitSites = exitProfile().exitSitesFor(location);
......@@ -1799,12 +1785,10 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock* unlin
}
case op_get_from_scope: {
#if ENABLE(VALUE_PROFILER)
ValueProfile* profile = &m_valueProfiles[pc[i + opLength - 1].u.operand];
ASSERT(profile->m_bytecodeOffset == -1);
profile->m_bytecodeOffset = i;
instructions[i + opLength - 1] = profile;
#endif
// get_from_scope dst, scope, id, ResolveModeAndType, Structure, Operand
const Identifier& ident = identifier(pc[i + 3].u.operand);
......@@ -1924,9 +1908,7 @@ void CodeBlock::setNumParameters(int newValue)
{
m_numParameters = newValue;
#if ENABLE(VALUE_PROFILER)
m_argumentValueProfiles.resizeToFit(newValue);
#endif
}
void EvalCodeCache::visitAggregate(SlotVisitor& visitor)
......@@ -2596,10 +2578,8 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode)
#if ENABLE(JIT)
m_callLinkInfos.shrinkToFit();
#endif
#if ENABLE(VALUE_PROFILER)
m_rareCaseProfiles.shrinkToFit();
m_specialFastCaseProfiles.shrinkToFit();
#endif
if (shrinkMode == EarlyShrink) {
m_additionalIdentifiers.shrinkToFit();
......@@ -3181,7 +3161,6 @@ bool CodeBlock::shouldReoptimizeFromLoopNow()
}
#endif
#if ENABLE(VALUE_PROFILER)
ArrayProfile* CodeBlock::getArrayProfile(unsigned bytecodeOffset)
{
for (unsigned i = 0; i < m_arrayProfiles.size(); ++i) {
......@@ -3254,10 +3233,6 @@ bool CodeBlock::shouldOptimizeNow()
if (Options::verboseOSR())
dataLog("Considering optimizing ", *this, "...\n");
#if ENABLE(VERBOSE_VALUE_PROFILE)
dumpValueProfiles();
#endif
if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay())
return true;
......@@ -3286,7 +3261,6 @@ bool CodeBlock::shouldOptimizeNow()
optimizeAfterWarmUp();
return false;
}
#endif
#if ENABLE(DFG_JIT)
void CodeBlock::tallyFrequentExitSites()
......
......@@ -277,12 +277,12 @@ public:
return result;
}
#if ENABLE(JIT)
bool hasBaselineJITProfiling() const
{
return jitType() == JITCode::BaselineJIT;
}
#if ENABLE(JIT)
virtual CodeBlock* replacement() = 0;
virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
......@@ -410,7 +410,6 @@ public:
CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif
#if ENABLE(VALUE_PROFILER)
unsigned numberOfArgumentValueProfiles()
{
ASSERT(m_numParameters >= 0);
......@@ -429,13 +428,12 @@ public:
ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
{
ValueProfile* result = binarySearch<ValueProfile, int>(
m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
getValueProfileBytecodeOffset<ValueProfile>);
m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
getValueProfileBytecodeOffset<ValueProfile>);
ASSERT(result->m_bytecodeOffset != -1);
ASSERT(instructions()[bytecodeOffset + opcodeLength(
m_vm->interpreter->getOpcodeID(
instructions()[
bytecodeOffset].u.opcode)) - 1].u.profile == result);
m_vm->interpreter->getOpcodeID(
instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
return result;
}
SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
......@@ -543,7 +541,6 @@ public:
}
ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif
// Exception handling support
......@@ -884,22 +881,11 @@ public:
unsigned numberOfDFGCompiles() { return 0; }
#endif
#if ENABLE(VALUE_PROFILER)
bool shouldOptimizeNow();
void updateAllValueProfilePredictions();
void updateAllArrayPredictions();
void updateAllPredictions();
#else
bool updateAllPredictionsAndCheckIfShouldOptimizeNow() { return false; }
void updateAllValueProfilePredictions() { }
void updateAllArrayPredictions() { }
void updateAllPredictions() { }
#endif
#if ENABLE(VERBOSE_VALUE_PROFILE)
void dumpValueProfiles();
#endif
unsigned frameRegisterCount();
// FIXME: Make these remaining members private.
......@@ -959,9 +945,7 @@ private:
ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif
#if ENABLE(VALUE_PROFILER)
void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);
#endif
void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
{
......@@ -996,9 +980,7 @@ private:
void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
#if ENABLE(VALUE_PROFILER)
void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
#endif
#if ENABLE(DFG_JIT)
bool shouldImmediatelyAssumeLivenessDuringScan()
......@@ -1080,14 +1062,12 @@ private:
DFG::ExitProfile m_exitProfile;
CompressedLazyOperandValueProfileHolder m_lazyOperandValueProfiles;
#endif
#if ENABLE(VALUE_PROFILER)
Vector<ValueProfile> m_argumentValueProfiles;
Vector<ValueProfile> m_valueProfiles;
SegmentedVector<RareCaseProfile, 8> m_rareCaseProfiles;
SegmentedVector<RareCaseProfile, 8> m_specialFastCaseProfiles;
Vector<ArrayAllocationProfile> m_arrayAllocationProfiles;
ArrayProfileVector m_arrayProfiles;
#endif
Vector<ObjectAllocationProfile> m_objectAllocationProfiles;
// Constant Pool
......
......@@ -69,7 +69,7 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned
void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBlock, StringImpl* uid)
{
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
#if ENABLE(JIT)
// Validate the chain. If the chain is invalid, then currently the best thing
// we can do is to assume that TakesSlow is true. In the future, it might be
// worth exploring reifying the structure chain from the structure we've got
......@@ -123,7 +123,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& m
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
#if ENABLE(JIT)
StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
if (!stubInfo || !stubInfo->seen)
return computeFromLLInt(profiledBlock, bytecodeIndex, uid);
......
......@@ -26,8 +26,6 @@
#include "config.h"
#include "LazyOperandValueProfile.h"
#if ENABLE(VALUE_PROFILER)
#include "Operations.h"
namespace JSC {
......@@ -100,5 +98,3 @@ SpeculatedType LazyOperandValueProfileParser::prediction(
} // namespace JSC
#endif // ENABLE(VALUE_PROFILER)
......@@ -26,10 +26,6 @@
#ifndef LazyOperandValueProfile_h
#define LazyOperandValueProfile_h
#include <wtf/Platform.h>
#if ENABLE(VALUE_PROFILER)
#include "ConcurrentJITLock.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
......@@ -188,8 +184,6 @@ private:
} // namespace JSC
#endif // ENABLE(VALUE_PROFILER)
#endif // LazyOperandValueProfile_h
......@@ -88,7 +88,7 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& m
UNUSED_PARAM(profiledBlock);
UNUSED_PARAM(bytecodeIndex);
UNUSED_PARAM(uid);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
#if ENABLE(JIT)
if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
......
......@@ -29,10 +29,6 @@
#ifndef ValueProfile_h
#define ValueProfile_h
#include <wtf/Platform.h>
#if ENABLE(VALUE_PROFILER)
#include "ConcurrentJITLock.h"
#include "Heap.h"
#include "JSArray.h"
......@@ -212,7 +208,5 @@ inline int getRareCaseProfileBytecodeOffset(RareCaseProfile* rareCaseProfile)
} // namespace JSC
#endif // ENABLE(VALUE_PROFILER)
#endif // ValueProfile_h
......@@ -636,20 +636,12 @@ void BytecodeGenerator::emitOpcode(OpcodeID opcodeID)
UnlinkedArrayProfile BytecodeGenerator::newArrayProfile()
{
#if ENABLE(VALUE_PROFILER)
return m_codeBlock->addArrayProfile();
#else
return 0;
#endif
}
UnlinkedArrayAllocationProfile BytecodeGenerator::newArrayAllocationProfile()
{
#if ENABLE(VALUE_PROFILER)
return m_codeBlock->addArrayAllocationProfile();
#else
return 0;
#endif
}
UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
......@@ -659,11 +651,7 @@ UnlinkedObjectAllocationProfile BytecodeGenerator::newObjectAllocationProfile()
UnlinkedValueProfile BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID)
{
#if ENABLE(VALUE_PROFILER)
UnlinkedValueProfile result = m_codeBlock->addValueProfile();
#else
UnlinkedValueProfile result = 0;
#endif
emitOpcode(opcodeID);
return result;
}
......
......@@ -574,10 +574,6 @@ public:
static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2;
#if ENABLE(VALUE_PROFILER)
static const GPRReg bucketCounterRegister = ARM64Registers::x7;
#endif
// GPRReg mapping is direct, the machine register numbers can
// be used directly as indices into the GPR RegisterBank.
COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0);
......
......@@ -81,10 +81,8 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock)
, m_byValInstructionIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
, m_randomGenerator(cryptographicallyRandomNumber())
#if ENABLE(VALUE_PROFILER)
, m_canBeOptimized(false)
, m_shouldEmitProfiling(false)
#endif
{
}
......@@ -329,7 +327,6 @@ void JIT::privateCompileSlowCases()
m_byValInstructionIndex = 0;
m_callLinkInfoIndex = 0;
#if ENABLE(VALUE_PROFILER)
// Use this to assert that slow-path code associates new profiling sites with existing
// ValueProfiles rather than creating new ones. This ensures that for a given instruction
// (say, get_by_id) we get combined statistics for both the fast-path executions of that
......@@ -337,7 +334,6 @@ void JIT::privateCompileSlowCases()
// new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
// which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
#endif
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
m_bytecodeOffset = iter->to;
......@@ -346,11 +342,9 @@ void JIT::privateCompileSlowCases()
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
#if ENABLE(VALUE_PROFILER)
RareCaseProfile* rareCaseProfile = 0;
if (shouldEmitProfiling())
rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
#endif
#if ENABLE(JIT_VERBOSE)
dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
......@@ -431,10 +425,8 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
#if ENABLE(VALUE_PROFILER)
if (shouldEmitProfiling())
add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
#endif
emitJumpSlowToHot(jump(), 0);
}
......@@ -442,9 +434,7 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#if ENABLE(VALUE_PROFILER)
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#endif
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
......@@ -454,7 +444,6 @@ void JIT::privateCompileSlowCases()
CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
#if ENABLE(VALUE_PROFILER)
DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
switch (level) {
case DFG::CannotCompile:
......@@ -489,7 +478,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
break;
}
#endif
if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
......@@ -521,7 +509,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
Jump stackCheck;
if (m_codeBlock->codeType() == FunctionCode) {
#if ENABLE(VALUE_PROFILER)
ASSERT(m_bytecodeOffset == (unsigned)-1);
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
......@@ -536,10 +523,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
load32(Address(callFrameRegister, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
#endif
emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument), regT4);
emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
}
}
#endif
addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
......
......@@ -331,16 +331,11 @@ namespace JSC {
template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
// scratch.
void emitValueProfilingSite(ValueProfile*, RegisterID);
void emitValueProfilingSite(unsigned bytecodeOffset, RegisterID);
void emitValueProfilingSite(RegisterID);
#else
void emitValueProfilingSite(unsigned, RegisterID) { }
void emitValueProfilingSite(RegisterID) { }
#endif
void emitValueProfilingSite(ValueProfile*);
void emitValueProfilingSite(unsigned bytecodeOffset);
void emitValueProfilingSite();
void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
......@@ -827,11 +822,9 @@ namespace JSC {
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(VM*);
#if ENABLE(VALUE_PROFILER)
bool m_canBeOptimized;
bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
#endif
} JIT_CLASS_ALIGNMENT;
} // namespace JSC
......
......@@ -668,16 +668,13 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, Operand
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if ENABLE(VALUE_PROFILER)
RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
if (opcodeID == op_add)
addSlowCase(branchAdd32(Overflow, regT1, regT0));
else if (opcodeID == op_sub)
addSlowCase(branchSub32(Overflow, regT1, regT0));
else {
ASSERT(opcodeID == op_mul);
#if ENABLE(VALUE_PROFILER)
if (shouldEmitProfiling()) {
// We want to be able to measure if this is taking the slow case just
// because of negative zero. If this produces positive zero, then we
......@@ -701,10 +698,6 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, Operand
addSlowCase(branchMul32(Overflow, regT1, regT0));
addSlowCase(branchTest32(Zero, regT0));
}
#else
addSlowCase(branchMul32(Overflow, regT1, regT0));
addSlowCase(branchTest32(Zero, regT0));
#endif
}
emitFastArithIntToImmNoCheck(regT0, regT0);
}
......@@ -849,19 +842,15 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
// For now, only plant a fast int case if the constant operand is greater than zero.
int32_t value;
if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
#if ENABLE(VALUE_PROFILER)
// Add a special fast case profile because the DFG JIT will expect one.
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
emitFastArithReTagImmediate(regT1, regT0);
} else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
#if ENABLE(VALUE_PROFILER)
// Add a special fast case profile because the DFG JIT will expect one.
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
......@@ -930,7 +919,6 @@ void JIT::emit_op_div(Instruction* currentInstruction)
}
divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this together with the slow case counter
// are below threshold then the DFG JIT will compile this division with a speculation
......@@ -957,11 +945,6 @@ void JIT::emit_op_div(Instruction* currentInstruction)
move(tagTypeNumberRegister, regT0);
trueDouble.link(this);
isInteger.link(this);
#else
// Double result.
moveDoubleTo64(fpRegT0, regT0);
sub64(tagTypeNumberRegister, regT0);
#endif
emitPutVirtualRegister(dst, regT0);
}
......
......@@ -742,7 +742,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
emitLoadDouble(op1, fpRegT1);
divDouble(fpRegT0, fpRegT1);
#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this together with the slow case counter
// are below threshold then the DFG JIT will compile this division with a speculation
......@@ -766,9 +765,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT1);
isInteger.link(this);
#else
emitStoreDouble(dst, fpRegT1);
#endif
break;
}
case op_jless:
......@@ -846,7 +842,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
case op_div: {
emitLoadDouble(op2, fpRegT2);
divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this together with the slow case counter
// are below threshold then the DFG JIT will compile this division with a speculation
......@@ -870,9 +865,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
isInteger.link(this);
#else
emitStoreDouble(dst, fpRegT0);
#endif
break;
}
case op_jless:
......@@ -924,9 +916,7 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
#if ENABLE(VALUE_PROFILER)
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
JumpList notInt32Op1;
JumpList notInt32Op2;
......@@ -969,12 +959,10 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
negZero.link(this);
#if ENABLE(VALUE_PROFILER)
// We only get here if we have a genuine negative zero. Record this,
// so that the speculative JIT knows that we failed speculation
// because of a negative zero.
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
overflow.link(this);
if (!supportsFloatingPoint()) {
......@@ -1005,9 +993,7 @@ void JIT::emit_op_div(Instruction* currentInstruction)
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
#if ENABLE(VALUE_PROFILER)
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
if (!supportsFloatingPoint()) {
addSlowCase(jump());
......@@ -1028,7 +1014,6 @@ void JIT::emit_op_div(Instruction* currentInstruction)
convertInt32ToDouble(regT0, fpRegT0);
convertInt32ToDouble(regT2, fpRegT1);
divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this together with the slow case counter
// are below threshold then the DFG JIT will compile this division with a speculation
......@@ -1051,9 +1036,6 @@ void JIT::emit_op_div(Instruction* currentInstruction)
notInteger.link(this);
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
#else
emitStoreDouble(dst, fpRegT0);
#endif
end.append(jump());
// Double divide.
......
......@@ -48,7 +48,7 @@ namespace JSC {
void JIT::emitPutCallResult(Instruction* instruction)
{
int dst = instruction[1