/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Operations.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>
namespace JSC {
class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;
inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }
static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }
class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
        
    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);
    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;
public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();
    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }
    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;
    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);
    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }
    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    
    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }
    
    CodeBlock* baselineVersion();
    void visitAggregate(SlotVisitor&);
    static void dumpStatistics();
    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();
        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);
#if ENABLE(JIT)

    StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
    }
    StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
    }
    void resetStub(StructureStubInfo&);
    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }
    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }
    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(!JITCode::isOptimizingJIT(jitType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)
    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();
        
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
        
    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
#endif // ENABLE(LLINT)
    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
    
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }
    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }
    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }
    size_t predictedMachineCodeSize();
    bool usesOpcode(OpcodeID);
    unsigned instructionCount() { return m_instructions.size(); }
    int argumentIndexAfterCapture(size_t argument);
    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();
    
    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    
    void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }
#if ENABLE(JIT)
    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }
    void jettison();
    virtual CodeBlock* replacement() = 0;
    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel()
    {
        DFG::CapabilityLevel result = capabilityLevelInternal();
        m_capabilityLevelState = result;
        return result;
    }
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }
    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }
    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }
    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }
    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }
    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return VirtualRegister();
        return activationRegister();
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }
    bool needsActivation() const
    {
        return m_needsActivation;
    }
    bool isCaptured(VirtualRegister operand, InlineCallFrame* inlineCallFrame = 0) const
    {
        if (operand.isArgument())
            return operand.toArgument() && usesArguments();
        if (inlineCallFrame)
            return inlineCallFrame->capturedVars.get(operand.toLocal());
        // The activation object isn't in the captured region, but it's "captured"
        // in the sense that stores to its location can be observed indirectly.
        if (needsActivation() && operand == activationRegister())
            return true;
        // Ditto for the arguments object.
        if (usesArguments() && operand == argumentsRegister())
            return true;
        // Ditto for the unmodified arguments register, which aliases the arguments object.
        if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
            return true;
        // We're in global code, so there are no locals to capture.
        if (!symbolTable())
            return false;
        return operand.offset() <= symbolTable()->captureStart()
            && operand.offset() > symbolTable()->captureEnd();
    }
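    // Illustrative sketch, not part of the original header: a client deciding
    // whether a local can live outside the call frame might ask, given a
    // CodeBlock* codeBlock and a VirtualRegister operand:
    //
    //     if (!codeBlock->isCaptured(operand))
    //         keepInRegister(operand); // hypothetical helper
    //     else
    //         spillToFrame(operand);   // hypothetical helper; stores must stay observable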
    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }
    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }
    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }
    void createActivation(CallFrame*);
    void clearEvalCache();
    String nameForRegister(VirtualRegister);
#if ENABLE(JIT)
    void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
    void sortStructureStubInfos();
    size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
    StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }
    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }
    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif
#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }
    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }
    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
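    // Illustrative sketch (assumption about typical usage, not taken from this
    // file): a pass that refreshes every prediction might walk the combined
    // argument and bytecode profiles like so, holding a ConcurrentJITLocker:
    //
    //     for (unsigned i = 0; i < codeBlock->totalNumberOfValueProfiles(); ++i)
    //         codeBlock->getFromAllValueProfiles(i)->computeUpdatedPrediction(locker);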
    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }
    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }
    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }
    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }
    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }
    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
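    // Illustrative sketch, not from the original source: an optimizing
    // compiler front end might consult these counters per bytecode to choose
    // between a speculative and a generic code path, e.g.
    //
    //     if (codeBlock->likelyToTakeSlowCase(bytecodeOffset))
    //         emitGenericArithmetic();        // hypothetical emitter
    //     else
    //         emitSpeculatedIntArithmetic();  // hypothetical emitter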
    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif

    // Exception handling support
    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        if (!count)
            return;
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }
    }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }
#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }
    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }
    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }
    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }
    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }
        
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.hasExitSite(locker, site);
    }
    DFG::ExitProfile& exitProfile() { return m_exitProfile; }
    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif
    Vector<WriteBarrier<Unknown> >& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }
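    // Illustrative note (assumption, not from this file): the value returned
    // by addConstant() is a pool index; the operand-encoded form adds
    // FirstConstantRegisterIndex, which is what getConstant() below expects:
    //
    //     unsigned poolIndex = codeBlock->addConstant(jsNumber(42)); // hypothetical value
    //     JSValue v = codeBlock->getConstant(poolIndex + FirstConstantRegisterIndex);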
    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }
    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }
    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }
    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }
    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }
    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }
    JSGlobalObject* globalObject() { return m_globalObject.get(); }
    JSGlobalObject* globalObjectFor(CodeOrigin);
    // Jump Tables
    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }
    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }
    SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }
    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }
    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,
        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
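    // Illustrative sketch (assumption about typical call sites, not from this
    // file): metadata vectors are trimmed once before any machine code can
    // point into them, and again after code generation finishes appending:
    //
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink); // before codegen
    //     ...
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);  // after codegen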
    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();
    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.
    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }
    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }
    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }
    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }
    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
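    // Illustrative sketch, not part of the original header: the LLInt's
    // prologue and loop hooks conceptually drive tier-up with something like
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         tryCompileWithBaselineJIT(codeBlock); // hypothetical helper
    //     // otherwise keep interpreting and let the counter accumulate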
    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
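    // Illustrative sketch (assumption about the backoff policy, not taken from
    // this file): the retry counter feeds an exponential backoff, so a
    // CodeBlock that keeps deoptimizing waits on the order of
    //
    //     threshold << reoptimizationRetryCounter()
    //
    // executions (clamped) before the next attempt, rather than thrashing
    // between tiers.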
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();
    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);
    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }
    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
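    // Illustrative sketch, not from the original source: a loop back edge in
    // baseline code conceptually checks the counter and attempts OSR entry:
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
    //         if (void* entry = tryTierUpAndOSREnter(codeBlock, bytecodeOffset)) // hypothetical helper
    //             jumpTo(entry); // continue in optimized code
    //     }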
    uint32_t osrExitCounter() const { return m_osrExitCounter; }
    void countOSRExit() { m_osrExitCounter++; }
    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }
    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }
    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
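    // Illustrative note (assumption, not from this file): OSR exit stubs bump
    // countOSRExit(); once shouldReoptimizeNow() (or the FromLoop variant)
    // answers true, the owner jettisons the optimized CodeBlock, falls back to
    // baseline, and recompiles later with the extra profiling gathered since.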
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif
#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();
#else
    bool updateAllPredictionsAndCheckIfShouldOptimizeNow() { return false; }
    void updateAllValueProfilePredictions() { }
    void updateAllArrayPredictions() { }
    void updateAllPredictions() { }
#endif
#if ENABLE(JIT)
    void reoptimize();
#endif
#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
#endif