/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "BytecodeLivenessAnalysis.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Operations.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;

inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

enum ReoptimizationMode { DontCountReoptimization, CountReoptimization };

class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class BytecodeLivenessAnalysis;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
        
    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    
    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }
    
    CodeBlock* baselineAlternative();
    
    // FIXME: Get rid of this.
    // https://bugs.webkit.org/show_bug.cgi?id=123677
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }
    ECMAMode ecmaMode() const { return isStrictMode() ? StrictMode : NotStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);

#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }

    void resetStub(StructureStubInfo&);
    
    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(!JITCode::isOptimizingJIT(jitType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();
        
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
        
    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
#endif // ENABLE(LLINT)

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
    
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() const { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);
    
    bool hasSlowArguments();
    const SlowArgument* machineSlowArguments();

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();
    
    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    
    void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }
    
#if ENABLE(JIT)
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel()
    {
        DFG::CapabilityLevel result = capabilityLevelInternal();
        m_capabilityLevelState = result;
        return result;
    }
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    void jettison(ReoptimizationMode = DontCountReoptimization);
    
    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return VirtualRegister();
        return activationRegister();
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }

    bool needsActivation() const
    {
        return m_needsActivation;
    }
    
    unsigned captureCount() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureCount();
    }
    
    int captureStart() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureStart();
    }
    
    int captureEnd() const
    {
        if (!symbolTable())
            return 0;
        return symbolTable()->captureEnd();
    }

    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
    
    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
    int framePointerOffsetToGetActivationRegisters();

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void createActivation(CallFrame*);

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

#if ENABLE(JIT)
    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif

    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        if (!count)
            return;
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }

    }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }
    
    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }
        
    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }
        
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#else // ENABLE(DFG_JIT)
    bool addFrequentExitSite(const DFG::FrequentExitSite&)
    {
        return false;
    }
#endif // ENABLE(DFG_JIT)

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    BytecodeLivenessAnalysis& livenessAnalysis()
    {
        if (!m_livenessAnalysis)
            m_livenessAnalysis = std::make_unique<BytecodeLivenessAnalysis>(this);
        return *m_livenessAnalysis;
    }
    
    void validate();

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }


    SymbolTable* symbolTable() const { return m_symbolTable.get(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
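
    // A hedged sketch of the intended shrink protocol, inferred from the
    // comments above (the actual call sites are not in this header):
    //
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
    //     /* ...generate machine code that may point into the vectors... */
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink); // safe vectors only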

    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
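
    // A hedged sketch of how an LLInt tier-up check might use the hooks
    // above; the real interpreter slow paths live elsewhere and may differ.
    // compileWithBaselineJIT() is a hypothetical helper, not a real API:
    //
    //     if (codeBlock->checkIfJITThresholdReached()) {
    //         if (!compileWithBaselineJIT(codeBlock))
    //             codeBlock->dontJITAnytimeSoon(); // back off after failure
    //     }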

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
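
    // A hedged sketch of a call site for the trigger API above (assumed
    // shape; startDFGCompile() is a hypothetical helper, and the real
    // optimization slow paths are defined outside this header):
    //
    //     if (codeBlock->checkIfOptimizationThresholdReached()) {
    //         CompilationResult result = startDFGCompile(codeBlock);
    //         codeBlock->setOptimizationThresholdBasedOnCompilationResult(result);
    //     }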
    
    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
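
    // A hedged sketch of the reoptimization feedback loop (assumed call site
    // in the OSR exit machinery, not shown here): each exit bumps the
    // counter, and once the adjusted threshold is crossed the optimized
    // block is jettisoned and the retry counter incremented.
    //
    //     codeBlock->countOSRExit();
    //     if (codeBlock->shouldReoptimizeNow())
    //         codeBlock->jettison(CountReoptimization);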
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();

    unsigned frameRegisterCount();

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor;
    
    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
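
    // For example, a concurrent compiler thread reading inline-cache state
    // would take this lock first (a sketch using getStubInfoMap() above):
    //
    //     StubInfoMap map;
    //     {
    //         ConcurrentJITLocker locker(codeBlock->m_lock);
    //         codeBlock->getStubInfoMap(locker, map);
    //     }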
    
    bool m_shouldAlwaysBeInlined;
    bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
    
    bool m_didFailFTLCompilation;

    // Internal methods for use by validation code. They would be private if it
    // weren't for the fact that we use them from anonymous namespaces.
    void beginValidationDidFail();
    NO_RETURN_DUE_TO_CRASH void endValidationDidFail();

protected:
    virtual void visitWeakReferences(SlotVisitor&) OVERRIDE;
    virtual void finalizeUnconditionally() OVERRIDE;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;
    
    CodeBlock* specialOSREntryBlockOrNull();
    
    void noticeIncomingCall(ExecState* callerFrame);
    
    double optimizationThresholdScalingFactor();

#if ENABLE(JIT)
    ClosureCallStubRoutine* findClosureCallForReturnPC(ReturnAddressPtr);
#endif
        
    void updateAllPredictionsAndCountLiveness(unsigned& numberOfLiveNonArgumentValueProfiles, unsigned& numberOfSamplesInProfiles);

    void setConstantRegisters(const Vector<WriteBarrier<Unknown>>& constants)
    {
        size_t count = constants.size();
        m_constantRegisters.resize(count);
        for (size_t i = 0; i < count; i++)
            m_constantRegisters[i].set(*m_vm, ownerExecutable(), constants[i].get());
    }

    void dumpBytecode(PrintStream&, ExecState*, const Instruction* begin, const Instruction*&, const StubInfoMap& = StubInfoMap());

    CString registerName(int r) const;
    void printUnaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printBinaryOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printConditionalJump(PrintStream&, ExecState*, const Instruction*, const Instruction*&, int location, const char* op);
    void printGetByIdOp(PrintStream&, ExecState*, int location, const Instruction*&);
    void printGetByIdCacheStatus(PrintStream&, ExecState*, int location, const StubInfoMap&);
    enum CacheDumpMode { DumpCaches, DontDumpCaches };
    void printCallOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op, CacheDumpMode, bool& hasPrintedProfiling);
    void printPutByIdOp(PrintStream&, ExecState*, int location, const Instruction*&, const char* op);
    void printLocationAndOp(PrintStream& out, ExecState*, int location, const Instruction*&, const char* op)
    {
        out.printf("[%4d] %-17s ", location, op);
    }

    void printLocationOpAndRegisterOperand(PrintStream& out, ExecState* exec, int location, const Instruction*& it, const char* op, int operand)
    {
        printLocationAndOp(out, exec, location, it, op);
        out.printf("%s", registerName(operand).data());
    }

    void beginDumpProfiling(PrintStream&, bool& hasPrintedProfiling);
    void dumpValueProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpArrayProfiling(PrintStream&, const Instruction*&, bool& hasPrintedProfiling);
    void dumpRareCaseProfile(PrintStream&, const char* name, RareCaseProfile*, bool& hasPrintedProfiling);
        
#if ENABLE(DFG_JIT)
    bool shouldImmediatelyAssumeLivenessDuringScan()
    {
        // Interpreter and Baseline JIT CodeBlocks don't need to be jettisoned when
        // their weak references go stale. So if a baseline JIT CodeBlock gets
        // scanned, we can assume that this means that it's live.
        if (!JITCode::isOptimizingJIT(jitType()))
            return true;

        // For simplicity, we don't attempt to jettison code blocks during GC if
        // they are executing. Instead we strongly mark their weak references to
        // allow them to continue to execute soundly.
        if (m_mayBeExecuting)
            return true;

        if (Options::forceDFGCodeBlockLiveness())
            return true;

        return false;
    }
#else
    bool shouldImmediatelyAssumeLivenessDuringScan() { return true; }
#endif
    
    void propagateTransitions(SlotVisitor&);
    void determineLiveness(SlotVisitor&);
        
    void stronglyVisitStrongReferences(SlotVisitor&);
    void stronglyVisitWeakReferences(SlotVisitor&);

    void createRareDataIfNecessary()
    {
        if (!m_rareData)
            m_rareData = adoptPtr(new RareData);
    }