/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Operations.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "Watchpoint.h"
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;

inline int unmodifiedArgumentsRegister(int argumentsRegister) { return argumentsRegister + 1; }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
        
    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    
    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }
    
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    static void dumpStatistics();

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);

#if ENABLE(JIT)

    StructureStubInfo& getStubInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<StructureStubInfo, void*>(m_structureStubInfos, m_structureStubInfos.size(), returnAddress.value(), getStructureStubInfoReturnLocation));
    }

    StructureStubInfo& getStubInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<StructureStubInfo, unsigned>(m_structureStubInfos, m_structureStubInfos.size(), bytecodeIndex, getStructureStubInfoBytecodeIndex));
    }

    void resetStub(StructureStubInfo&);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(!JITCode::isOptimizingJIT(jitType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();
        
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
        
    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
#endif // ENABLE(LLINT)

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
    
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }
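    // Illustrative sketch (added commentary, not original to this header): the offset
    // recovered above can be fed back into the per-offset queries declared earlier, e.g.
    //
    //     unsigned offset = codeBlock->bytecodeOffset(savedInstruction); // savedInstruction is a hypothetical Instruction*
    //     unsigned line = codeBlock->lineNumberForBytecodeOffset(offset);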

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();
    
    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    
    void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

#if ENABLE(JIT)
    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }
    void jettison();
    
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel()
    {
        DFG::CapabilityLevel result = capabilityLevelInternal();
        m_capabilityLevelState = result;
        return result;
    }
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(int thisRegister) { m_thisRegister = thisRegister; }
    int thisRegister() const { return m_thisRegister; }

    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(int argumentsRegister)
    {
        ASSERT(argumentsRegister != (int)InvalidVirtualRegister);
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    int argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    int uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return InvalidVirtualRegister;
        return argumentsRegister();
    }
    void setActivationRegister(int activationRegister)
    {
        m_activationRegister = activationRegister;
    }
    int activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }
    int uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return InvalidVirtualRegister;
        return activationRegister();
    }
    bool usesArguments() const { return m_argumentsRegister != (int)InvalidVirtualRegister; }

    bool needsActivation() const
    {
        return m_needsActivation;
    }

    bool isCaptured(int operand, InlineCallFrame* inlineCallFrame = 0) const
    {
        if (operandIsArgument(operand))
            return operandToArgument(operand) && usesArguments();

        if (inlineCallFrame)
            return inlineCallFrame->capturedVars.get(operandToLocal(operand));

        // The activation object isn't in the captured region, but it's "captured"
        // in the sense that stores to its location can be observed indirectly.
        if (needsActivation() && operand == activationRegister())
            return true;

        // Ditto for the arguments object.
        if (usesArguments() && operand == argumentsRegister())
            return true;

        // Ditto for the arguments object.
        if (usesArguments() && operand == unmodifiedArgumentsRegister(argumentsRegister()))
            return true;

        // We're in global code so there are no locals to capture
        if (!symbolTable())
            return false;

        return operand <= symbolTable()->captureStart()
            && operand > symbolTable()->captureEnd();
    }

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void createActivation(CallFrame*);

    void clearEvalCache();

    String nameForRegister(int registerNumber);

#if ENABLE(JIT)
    void setNumberOfStructureStubInfos(size_t size) { m_structureStubInfos.grow(size); }
    void sortStructureStubInfos();
    size_t numberOfStructureStubInfos() const { return m_structureStubInfos.size(); }
    StructureStubInfo& structureStubInfo(int index) { return m_structureStubInfos[index]; }

    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif

#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }
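    // Illustrative sketch (added commentary): callers are expected to sample predictions
    // under this block's concurrent-JIT lock; assuming m_lock is the publicly accessible
    // ConcurrentJITLock member declared later in this class, that looks roughly like:
    //
    //     ConcurrentJITLocker locker(profiledBlock->m_lock);
    //     SpeculatedType prediction = profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeOffset);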

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }
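    // Illustrative sketch (added commentary): the two accessors above expose one dense
    // index space covering argument profiles followed by bytecode value profiles:
    //
    //     for (unsigned i = 0; i < codeBlock->totalNumberOfValueProfiles(); ++i)
    //         visit(codeBlock->getFromAllValueProfiles(i)); // visit() is a hypothetical callback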

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }
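    // Added commentary: the likelyToTake*/couldTake* predicates above compare baseline
    // rare-case counters against Options thresholds; an optimizing compiler can consult
    // them to decide whether speculating on the fast path is worthwhile.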

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        if (!count)
            return;
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }

    }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }
    
    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }
        
    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }
        
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown> >& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }
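    // Illustrative sketch (added commentary): constant registers occupy the operand range
    // starting at FirstConstantRegisterIndex, so an index returned by addConstant() maps
    // back to its value roughly like this:
    //
    //     unsigned index = codeBlock->addConstant(jsNumber(42));
    //     JSValue value = codeBlock->getConstant(FirstConstantRegisterIndex + index); // yields jsNumber(42)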

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }


    SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
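    // Usage sketch (added commentary, an assumption about callers): a code generator
    // would call shrinkToFit(EarlyShrink) before emitting machine code that points into
    // these vectors, and shrinkToFit(LateShrink) once code generation has finished.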

    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
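    // Illustrative sketch (added commentary): an LLInt tier-up check might drive this
    // counter roughly as follows, with triggerBaselineCompile() standing in for whatever
    // actually invokes the JIT:
    //
    //     if (codeBlock->checkIfJITThresholdReached())
    //         triggerBaselineCompile(codeBlock); // hypothetical helper
    //     // otherwise keep running in the LLInt; jitAfterWarmUp()/jitSoon() reset the
    //     // threshold if compilation must be retried later.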

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
    
    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();
#else
    bool updateAllPredictionsAndCheckIfShouldOptimizeNow() { return false; }
    void updateAllValueProfilePredictions() { }
    void updateAllArrayPredictions() { }
    void updateAllPredictions() { }
#endif

#if ENABLE(JIT)
    void reoptimize();
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
#endif
