/*
 * Copyright (C) 2008, 2009, 2010, 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1.  Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 * 2.  Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *     its contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef CodeBlock_h
#define CodeBlock_h

#include "ArrayProfile.h"
#include "ByValInfo.h"
#include "BytecodeConventions.h"
#include "CallLinkInfo.h"
#include "CallReturnOffsetToBytecodeOffset.h"
#include "CodeBlockHash.h"
#include "CodeBlockSet.h"
#include "ConcurrentJITLock.h"
#include "CodeOrigin.h"
#include "CodeType.h"
#include "CompactJITCodeMap.h"
#include "DFGCommon.h"
#include "DFGCommonData.h"
#include "DFGExitProfile.h"
#include "DFGMinifiedGraph.h"
#include "DFGOSREntry.h"
#include "DFGOSRExit.h"
#include "DFGVariableEventStream.h"
#include "DeferredCompilationCallback.h"
#include "EvalCodeCache.h"
#include "ExecutionCounter.h"
#include "ExpressionRangeInfo.h"
#include "HandlerInfo.h"
#include "ObjectAllocationProfile.h"
#include "Options.h"
#include "Operations.h"
#include "PutPropertySlot.h"
#include "Instruction.h"
#include "JITCode.h"
#include "JITWriteBarrier.h"
#include "JSGlobalObject.h"
#include "JumpReplacementWatchpoint.h"
#include "JumpTable.h"
#include "LLIntCallLinkInfo.h"
#include "LazyOperandValueProfile.h"
#include "LineInfo.h"
#include "ProfilerCompilation.h"
#include "RegExpObject.h"
#include "StructureStubInfo.h"
#include "UnconditionalFinalizer.h"
#include "ValueProfile.h"
#include "VirtualRegister.h"
#include "Watchpoint.h"
#include <wtf/Bag.h>
#include <wtf/FastMalloc.h>
#include <wtf/PassOwnPtr.h>
#include <wtf/RefCountedArray.h>
#include <wtf/RefPtr.h>
#include <wtf/SegmentedVector.h>
#include <wtf/Vector.h>
#include <wtf/text/WTFString.h>

namespace JSC {

class ExecState;
class LLIntOffsetsExtractor;
class RepatchBuffer;

inline VirtualRegister unmodifiedArgumentsRegister(VirtualRegister argumentsRegister) { return VirtualRegister(argumentsRegister.offset() + 1); }

static ALWAYS_INLINE int missingThisObjectMarker() { return std::numeric_limits<int>::max(); }

class CodeBlock : public ThreadSafeRefCounted<CodeBlock>, public UnconditionalFinalizer, public WeakReferenceHarvester {
    WTF_MAKE_FAST_ALLOCATED;
    friend class JIT;
    friend class LLIntOffsetsExtractor;
public:
    enum CopyParsedBlockTag { CopyParsedBlock };
protected:
    CodeBlock(CopyParsedBlockTag, CodeBlock& other);
        
    CodeBlock(ScriptExecutable* ownerExecutable, UnlinkedCodeBlock*, JSScope*, PassRefPtr<SourceProvider>, unsigned sourceOffset, unsigned firstLineColumnOffset);

    WriteBarrier<JSGlobalObject> m_globalObject;
    Heap* m_heap;

public:
    JS_EXPORT_PRIVATE virtual ~CodeBlock();

    UnlinkedCodeBlock* unlinkedCodeBlock() const { return m_unlinkedCode.get(); }

    CString inferredName() const;
    CodeBlockHash hash() const;
    bool hasHash() const;
    bool isSafeToComputeHash() const;
    CString sourceCodeForTools() const; // Not quite the actual source we parsed; this will do things like prefix the source for a function with a reified signature.
    CString sourceCodeOnOneLine() const; // As sourceCodeForTools(), but replaces all whitespace runs with a single space.
    void dumpAssumingJITType(PrintStream&, JITCode::JITType) const;
    void dump(PrintStream&) const;

    int numParameters() const { return m_numParameters; }
    void setNumParameters(int newValue);

    int* addressOfNumParameters() { return &m_numParameters; }
    static ptrdiff_t offsetOfNumParameters() { return OBJECT_OFFSETOF(CodeBlock, m_numParameters); }

    CodeBlock* alternative() { return m_alternative.get(); }
    PassRefPtr<CodeBlock> releaseAlternative() { return m_alternative.release(); }
    void setAlternative(PassRefPtr<CodeBlock> alternative) { m_alternative = alternative; }
    
    CodeSpecializationKind specializationKind() const
    {
        return specializationFromIsConstruct(m_isConstructor);
    }
    
    CodeBlock* baselineVersion();

    void visitAggregate(SlotVisitor&);

    static void dumpStatistics();

    void dumpBytecode(PrintStream& = WTF::dataFile());
    void dumpBytecode(PrintStream&, unsigned bytecodeOffset);
    void printStructures(PrintStream&, const Instruction*);
    void printStructure(PrintStream&, const char* name, const Instruction*, int operand);

    bool isStrictMode() const { return m_isStrictMode; }

    inline bool isKnownNotImmediate(int index)
    {
        if (index == m_thisRegister.offset() && !m_isStrictMode)
            return true;

        if (isConstantRegisterIndex(index))
            return getConstant(index).isCell();

        return false;
    }

    ALWAYS_INLINE bool isTemporaryRegisterIndex(int index)
    {
        return index >= m_numVars;
    }

    HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset);
    unsigned lineNumberForBytecodeOffset(unsigned bytecodeOffset);
    unsigned columnNumberForBytecodeOffset(unsigned bytecodeOffset);
    void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot,
                                          int& startOffset, int& endOffset, unsigned& line, unsigned& column);

#if ENABLE(JIT)
    StructureStubInfo* addStubInfo();
    Bag<StructureStubInfo>::iterator begin() { return m_stubInfos.begin(); }
    Bag<StructureStubInfo>::iterator end() { return m_stubInfos.end(); }

    void resetStub(StructureStubInfo&);
    
    void getStubInfoMap(const ConcurrentJITLocker&, StubInfoMap& result);

    ByValInfo& getByValInfo(unsigned bytecodeIndex)
    {
        return *(binarySearch<ByValInfo, unsigned>(m_byValInfos, m_byValInfos.size(), bytecodeIndex, getByValInfoBytecodeIndex));
    }

    CallLinkInfo& getCallLinkInfo(ReturnAddressPtr returnAddress)
    {
        return *(binarySearch<CallLinkInfo, void*>(m_callLinkInfos, m_callLinkInfos.size(), returnAddress.value(), getCallLinkInfoReturnLocation));
    }

    CallLinkInfo& getCallLinkInfo(unsigned bytecodeIndex)
    {
        ASSERT(!JITCode::isOptimizingJIT(jitType()));
        return *(binarySearch<CallLinkInfo, unsigned>(m_callLinkInfos, m_callLinkInfos.size(), bytecodeIndex, getCallLinkInfoBytecodeIndex));
    }
#endif // ENABLE(JIT)

    void unlinkIncomingCalls();

#if ENABLE(JIT)
    void unlinkCalls();
        
    void linkIncomingCall(ExecState* callerFrame, CallLinkInfo*);
        
    bool isIncomingCallAlreadyLinked(CallLinkInfo* incoming)
    {
        return m_incomingCalls.isOnList(incoming);
    }
#endif // ENABLE(JIT)

#if ENABLE(LLINT)
    void linkIncomingCall(ExecState* callerFrame, LLIntCallLinkInfo*);
#endif // ENABLE(LLINT)

    void setJITCodeMap(PassOwnPtr<CompactJITCodeMap> jitCodeMap)
    {
        m_jitCodeMap = jitCodeMap;
    }
    CompactJITCodeMap* jitCodeMap()
    {
        return m_jitCodeMap.get();
    }
    
    unsigned bytecodeOffset(Instruction* returnAddress)
    {
        RELEASE_ASSERT(returnAddress >= instructions().begin() && returnAddress < instructions().end());
        return static_cast<Instruction*>(returnAddress) - instructions().begin();
    }

    bool isNumericCompareFunction() { return m_unlinkedCode->isNumericCompareFunction(); }

    unsigned numberOfInstructions() const { return m_instructions.size(); }
    RefCountedArray<Instruction>& instructions() { return m_instructions; }
    const RefCountedArray<Instruction>& instructions() const { return m_instructions; }

    size_t predictedMachineCodeSize();

    bool usesOpcode(OpcodeID);

    unsigned instructionCount() { return m_instructions.size(); }

    int argumentIndexAfterCapture(size_t argument);
    
    bool hasSlowArguments();
    const SlowArgument* machineSlowArguments();

    // Exactly equivalent to codeBlock->ownerExecutable()->installCode(codeBlock);
    void install();
    
    // Exactly equivalent to codeBlock->ownerExecutable()->newReplacementCodeBlockFor(codeBlock->specializationKind())
    PassRefPtr<CodeBlock> newReplacement();
    
    void setJITCode(PassRefPtr<JITCode> code, MacroAssemblerCodePtr codeWithArityCheck)
    {
        ASSERT(m_heap->isDeferred());
        m_heap->reportExtraMemoryCost(code->size());
        ConcurrentJITLocker locker(m_lock);
        WTF::storeStoreFence(); // This is probably not needed because the lock will also do something similar, but it's good to be paranoid.
        m_jitCode = code;
        m_jitCodeWithArityCheck = codeWithArityCheck;
    }
    PassRefPtr<JITCode> jitCode() { return m_jitCode; }
    MacroAssemblerCodePtr jitCodeWithArityCheck() { return m_jitCodeWithArityCheck; }
    JITCode::JITType jitType() const
    {
        JITCode* jitCode = m_jitCode.get();
        WTF::loadLoadFence();
        JITCode::JITType result = JITCode::jitTypeFor(jitCode);
        WTF::loadLoadFence(); // This probably isn't needed. Oh well, paranoia is good.
        return result;
    }

#if ENABLE(JIT)
    bool hasBaselineJITProfiling() const
    {
        return jitType() == JITCode::BaselineJIT;
    }
    void jettison();
    
    virtual CodeBlock* replacement() = 0;

    virtual DFG::CapabilityLevel capabilityLevelInternal() = 0;
    DFG::CapabilityLevel capabilityLevel()
    {
        DFG::CapabilityLevel result = capabilityLevelInternal();
        m_capabilityLevelState = result;
        return result;
    }
    DFG::CapabilityLevel capabilityLevelState() { return m_capabilityLevelState; }

    bool hasOptimizedReplacement(JITCode::JITType typeToReplace);
    bool hasOptimizedReplacement(); // the typeToReplace is my JITType
#endif

    ScriptExecutable* ownerExecutable() const { return m_ownerExecutable.get(); }

    void setVM(VM* vm) { m_vm = vm; }
    VM* vm() { return m_vm; }

    void setThisRegister(VirtualRegister thisRegister) { m_thisRegister = thisRegister; }
    VirtualRegister thisRegister() const { return m_thisRegister; }

    bool needsFullScopeChain() const { return m_unlinkedCode->needsFullScopeChain(); }
    bool usesEval() const { return m_unlinkedCode->usesEval(); }

    void setArgumentsRegister(VirtualRegister argumentsRegister)
    {
        ASSERT(argumentsRegister.isValid());
        m_argumentsRegister = argumentsRegister;
        ASSERT(usesArguments());
    }
    VirtualRegister argumentsRegister() const
    {
        ASSERT(usesArguments());
        return m_argumentsRegister;
    }
    VirtualRegister uncheckedArgumentsRegister()
    {
        if (!usesArguments())
            return VirtualRegister();
        return argumentsRegister();
    }
    void setActivationRegister(VirtualRegister activationRegister)
    {
        m_activationRegister = activationRegister;
    }

    VirtualRegister activationRegister() const
    {
        ASSERT(needsFullScopeChain());
        return m_activationRegister;
    }

    VirtualRegister uncheckedActivationRegister()
    {
        if (!needsFullScopeChain())
            return VirtualRegister();
        return activationRegister();
    }

    bool usesArguments() const { return m_argumentsRegister.isValid(); }

    bool needsActivation() const
    {
        return m_needsActivation;
    }

    bool isCaptured(VirtualRegister operand, InlineCallFrame* = 0) const;
    
    int framePointerOffsetToGetActivationRegisters(int machineCaptureStart);
    int framePointerOffsetToGetActivationRegisters();

    CodeType codeType() const { return m_unlinkedCode->codeType(); }
    PutPropertySlot::Context putByIdContext() const
    {
        if (codeType() == EvalCode)
            return PutPropertySlot::PutByIdEval;
        return PutPropertySlot::PutById;
    }

    SourceProvider* source() const { return m_source.get(); }
    unsigned sourceOffset() const { return m_sourceOffset; }
    unsigned firstLineColumnOffset() const { return m_firstLineColumnOffset; }

    size_t numberOfJumpTargets() const { return m_unlinkedCode->numberOfJumpTargets(); }
    unsigned jumpTarget(int index) const { return m_unlinkedCode->jumpTarget(index); }

    void createActivation(CallFrame*);

    void clearEvalCache();

    String nameForRegister(VirtualRegister);

#if ENABLE(JIT)
    void setNumberOfByValInfos(size_t size) { m_byValInfos.grow(size); }
    size_t numberOfByValInfos() const { return m_byValInfos.size(); }
    ByValInfo& byValInfo(size_t index) { return m_byValInfos[index]; }

    void setNumberOfCallLinkInfos(size_t size) { m_callLinkInfos.grow(size); }
    size_t numberOfCallLinkInfos() const { return m_callLinkInfos.size(); }
    CallLinkInfo& callLinkInfo(int index) { return m_callLinkInfos[index]; }
#endif

#if ENABLE(VALUE_PROFILER)
    unsigned numberOfArgumentValueProfiles()
    {
        ASSERT(m_numParameters >= 0);
        ASSERT(m_argumentValueProfiles.size() == static_cast<unsigned>(m_numParameters));
        return m_argumentValueProfiles.size();
    }
    ValueProfile* valueProfileForArgument(unsigned argumentIndex)
    {
        ValueProfile* result = &m_argumentValueProfiles[argumentIndex];
        ASSERT(result->m_bytecodeOffset == -1);
        return result;
    }

    unsigned numberOfValueProfiles() { return m_valueProfiles.size(); }
    ValueProfile* valueProfile(int index) { return &m_valueProfiles[index]; }
    ValueProfile* valueProfileForBytecodeOffset(int bytecodeOffset)
    {
        ValueProfile* result = binarySearch<ValueProfile, int>(
            m_valueProfiles, m_valueProfiles.size(), bytecodeOffset,
            getValueProfileBytecodeOffset<ValueProfile>);
        ASSERT(result->m_bytecodeOffset != -1);
        ASSERT(instructions()[bytecodeOffset + opcodeLength(
            m_vm->interpreter->getOpcodeID(
                instructions()[bytecodeOffset].u.opcode)) - 1].u.profile == result);
        return result;
    }
    SpeculatedType valueProfilePredictionForBytecodeOffset(const ConcurrentJITLocker& locker, int bytecodeOffset)
    {
        return valueProfileForBytecodeOffset(bytecodeOffset)->computeUpdatedPrediction(locker);
    }

    unsigned totalNumberOfValueProfiles()
    {
        return numberOfArgumentValueProfiles() + numberOfValueProfiles();
    }
    ValueProfile* getFromAllValueProfiles(unsigned index)
    {
        if (index < numberOfArgumentValueProfiles())
            return valueProfileForArgument(index);
        return valueProfile(index - numberOfArgumentValueProfiles());
    }

    RareCaseProfile* addRareCaseProfile(int bytecodeOffset)
    {
        m_rareCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_rareCaseProfiles.last();
    }
    unsigned numberOfRareCaseProfiles() { return m_rareCaseProfiles.size(); }
    RareCaseProfile* rareCaseProfile(int index) { return &m_rareCaseProfiles[index]; }
    RareCaseProfile* rareCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_rareCaseProfiles, m_rareCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return value >= Options::couldTakeSlowCaseMinimumCount();
    }

    RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset)
    {
        m_specialFastCaseProfiles.append(RareCaseProfile(bytecodeOffset));
        return &m_specialFastCaseProfiles.last();
    }
    unsigned numberOfSpecialFastCaseProfiles() { return m_specialFastCaseProfiles.size(); }
    RareCaseProfile* specialFastCaseProfile(int index) { return &m_specialFastCaseProfiles[index]; }
    RareCaseProfile* specialFastCaseProfileForBytecodeOffset(int bytecodeOffset)
    {
        return tryBinarySearch<RareCaseProfile, int>(
            m_specialFastCaseProfiles, m_specialFastCaseProfiles.size(), bytecodeOffset,
            getRareCaseProfileBytecodeOffset);
    }

    bool likelyToTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool couldTakeSpecialFastCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeDeepestSlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount - specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    bool likelyToTakeAnySlowCase(int bytecodeOffset)
    {
        if (!hasBaselineJITProfiling())
            return false;
        unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter;
        unsigned value = slowCaseCount + specialFastCaseCount;
        return value >= Options::likelyToTakeSlowCaseMinimumCount();
    }

    unsigned numberOfArrayProfiles() const { return m_arrayProfiles.size(); }
    const ArrayProfileVector& arrayProfiles() { return m_arrayProfiles; }
    ArrayProfile* addArrayProfile(unsigned bytecodeOffset)
    {
        m_arrayProfiles.append(ArrayProfile(bytecodeOffset));
        return &m_arrayProfiles.last();
    }
    ArrayProfile* getArrayProfile(unsigned bytecodeOffset);
    ArrayProfile* getOrAddArrayProfile(unsigned bytecodeOffset);
#endif

    // Exception handling support

    size_t numberOfExceptionHandlers() const { return m_rareData ? m_rareData->m_exceptionHandlers.size() : 0; }
    void allocateHandlers(const Vector<UnlinkedHandlerInfo>& unlinkedHandlers)
    {
        size_t count = unlinkedHandlers.size();
        if (!count)
            return;
        createRareDataIfNecessary();
        m_rareData->m_exceptionHandlers.resize(count);
        for (size_t i = 0; i < count; ++i) {
            m_rareData->m_exceptionHandlers[i].start = unlinkedHandlers[i].start;
            m_rareData->m_exceptionHandlers[i].end = unlinkedHandlers[i].end;
            m_rareData->m_exceptionHandlers[i].target = unlinkedHandlers[i].target;
            m_rareData->m_exceptionHandlers[i].scopeDepth = unlinkedHandlers[i].scopeDepth;
        }

    }
    HandlerInfo& exceptionHandler(int index) { RELEASE_ASSERT(m_rareData); return m_rareData->m_exceptionHandlers[index]; }

    bool hasExpressionInfo() { return m_unlinkedCode->hasExpressionInfo(); }

#if ENABLE(DFG_JIT)
    Vector<CodeOrigin, 0, UnsafeVectorOverflow>& codeOrigins()
    {
        return m_jitCode->dfgCommon()->codeOrigins;
    }
    
    // Having code origins implies that there has been some inlining.
    bool hasCodeOrigins()
    {
        return JITCode::isOptimizingJIT(jitType());
    }
        
    bool canGetCodeOrigin(unsigned index)
    {
        if (!hasCodeOrigins())
            return false;
        return index < codeOrigins().size();
    }

    CodeOrigin codeOrigin(unsigned index)
    {
        return codeOrigins()[index];
    }

    bool addFrequentExitSite(const DFG::FrequentExitSite& site)
    {
        ASSERT(JITCode::isBaselineCode(jitType()));
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.add(locker, site);
    }
        
    bool hasExitSite(const DFG::FrequentExitSite& site) const
    {
        ConcurrentJITLocker locker(m_lock);
        return m_exitProfile.hasExitSite(locker, site);
    }

    DFG::ExitProfile& exitProfile() { return m_exitProfile; }

    CompressedLazyOperandValueProfileHolder& lazyOperandValueProfiles()
    {
        return m_lazyOperandValueProfiles;
    }
#endif

    // Constant Pool
#if ENABLE(DFG_JIT)
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers() + numberOfDFGIdentifiers(); }
    size_t numberOfDFGIdentifiers() const
    {
        if (!JITCode::isOptimizingJIT(jitType()))
            return 0;

        return m_jitCode->dfgCommon()->dfgIdentifiers.size();
    }

    const Identifier& identifier(int index) const
    {
        size_t unlinkedIdentifiers = m_unlinkedCode->numberOfIdentifiers();
        if (static_cast<unsigned>(index) < unlinkedIdentifiers)
            return m_unlinkedCode->identifier(index);
        ASSERT(JITCode::isOptimizingJIT(jitType()));
        return m_jitCode->dfgCommon()->dfgIdentifiers[index - unlinkedIdentifiers];
    }
#else
    size_t numberOfIdentifiers() const { return m_unlinkedCode->numberOfIdentifiers(); }
    const Identifier& identifier(int index) const { return m_unlinkedCode->identifier(index); }
#endif

    Vector<WriteBarrier<Unknown>>& constants() { return m_constantRegisters; }
    size_t numberOfConstantRegisters() const { return m_constantRegisters.size(); }
    unsigned addConstant(JSValue v)
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        m_constantRegisters.last().set(m_globalObject->vm(), m_ownerExecutable.get(), v);
        return result;
    }

    unsigned addConstantLazily()
    {
        unsigned result = m_constantRegisters.size();
        m_constantRegisters.append(WriteBarrier<Unknown>());
        return result;
    }

    bool findConstant(JSValue, unsigned& result);
    unsigned addOrFindConstant(JSValue);
    WriteBarrier<Unknown>& constantRegister(int index) { return m_constantRegisters[index - FirstConstantRegisterIndex]; }
    ALWAYS_INLINE bool isConstantRegisterIndex(int index) const { return index >= FirstConstantRegisterIndex; }
    ALWAYS_INLINE JSValue getConstant(int index) const { return m_constantRegisters[index - FirstConstantRegisterIndex].get(); }

    FunctionExecutable* functionDecl(int index) { return m_functionDecls[index].get(); }
    int numberOfFunctionDecls() { return m_functionDecls.size(); }
    FunctionExecutable* functionExpr(int index) { return m_functionExprs[index].get(); }

    RegExp* regexp(int index) const { return m_unlinkedCode->regexp(index); }

    unsigned numberOfConstantBuffers() const
    {
        if (!m_rareData)
            return 0;
        return m_rareData->m_constantBuffers.size();
    }
    unsigned addConstantBuffer(const Vector<JSValue>& buffer)
    {
        createRareDataIfNecessary();
        unsigned size = m_rareData->m_constantBuffers.size();
        m_rareData->m_constantBuffers.append(buffer);
        return size;
    }

    Vector<JSValue>& constantBufferAsVector(unsigned index)
    {
        ASSERT(m_rareData);
        return m_rareData->m_constantBuffers[index];
    }
    JSValue* constantBuffer(unsigned index)
    {
        return constantBufferAsVector(index).data();
    }

    JSGlobalObject* globalObject() { return m_globalObject.get(); }

    JSGlobalObject* globalObjectFor(CodeOrigin);

    // Jump Tables

    size_t numberOfSwitchJumpTables() const { return m_rareData ? m_rareData->m_switchJumpTables.size() : 0; }
    SimpleJumpTable& addSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_switchJumpTables.append(SimpleJumpTable()); return m_rareData->m_switchJumpTables.last(); }
    SimpleJumpTable& switchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_switchJumpTables[tableIndex]; }
    void clearSwitchJumpTables()
    {
        if (!m_rareData)
            return;
        m_rareData->m_switchJumpTables.clear();
    }

    size_t numberOfStringSwitchJumpTables() const { return m_rareData ? m_rareData->m_stringSwitchJumpTables.size() : 0; }
    StringJumpTable& addStringSwitchJumpTable() { createRareDataIfNecessary(); m_rareData->m_stringSwitchJumpTables.append(StringJumpTable()); return m_rareData->m_stringSwitchJumpTables.last(); }
    StringJumpTable& stringSwitchJumpTable(int tableIndex) { RELEASE_ASSERT(m_rareData); return m_rareData->m_stringSwitchJumpTables[tableIndex]; }


    SharedSymbolTable* symbolTable() const { return m_unlinkedCode->symbolTable(); }

    EvalCodeCache& evalCodeCache() { createRareDataIfNecessary(); return m_rareData->m_evalCodeCache; }

    enum ShrinkMode {
        // Shrink prior to generating machine code that may point directly into vectors.
        EarlyShrink,

        // Shrink after generating machine code, and after possibly creating new vectors
        // and appending to others. At this time it is not safe to shrink certain vectors
        // because we would have generated machine code that references them directly.
        LateShrink
    };
    void shrinkToFit(ShrinkMode);
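    // Illustrative sketch (comment only, not part of this interface): a caller that
    // has finished linking bytecode but has not yet generated machine code could use
    //     codeBlock->shrinkToFit(CodeBlock::EarlyShrink);
    // whereas a JIT's final linking step, which may already hold direct pointers
    // into some of these vectors, would be restricted to
    //     codeBlock->shrinkToFit(CodeBlock::LateShrink);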

    void copyPostParseDataFrom(CodeBlock* alternative);
    void copyPostParseDataFromAlternative();

    // Functions for controlling when JITting kicks in, in a mixed mode
    // execution world.

    bool checkIfJITThresholdReached()
    {
        return m_llintExecuteCounter.checkIfThresholdCrossedAndSet(this);
    }

    void dontJITAnytimeSoon()
    {
        m_llintExecuteCounter.deferIndefinitely();
    }

    void jitAfterWarmUp()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this);
    }

    void jitSoon()
    {
        m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this);
    }

    const ExecutionCounter& llintExecuteCounter() const
    {
        return m_llintExecuteCounter;
    }
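    // Hedged usage sketch (the surrounding control flow is assumed; only the
    // methods named here are declared above): an interpreter slow path would
    // typically test the counter on function entry or at a loop back-edge, roughly
    //     if (codeBlock->checkIfJITThresholdReached())
    //         /* compile with the baseline JIT and switch to machine code */;
    //     else
    //         /* keep running in the LLInt */;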

    // Functions for controlling when tiered compilation kicks in. This
    // controls both when the optimizing compiler is invoked and when OSR
    // entry happens. Two triggers exist: the loop trigger and the return
    // trigger. In either case, when an addition to m_jitExecuteCounter
    // causes it to become non-negative, the optimizing compiler is
    // invoked. This includes a fast check to see if this CodeBlock has
    // already been optimized (i.e. replacement() returns a CodeBlock
    // that was optimized with a higher tier JIT than this one). In the
    // case of the loop trigger, if the optimized compilation succeeds
    // (or has already succeeded in the past) then OSR is attempted to
    // redirect program flow into the optimized code.

    // These functions are called from within the optimization triggers,
    // and are used as a single point at which we define the heuristics
    // for how much warm-up is mandated before the next optimization
    // trigger fires. All CodeBlocks start out with optimizeAfterWarmUp(),
    // as this is called from the CodeBlock constructor.

    // When we observe a lot of speculation failures, we trigger a
    // reoptimization. But each time, we increase the optimization trigger
    // to avoid thrashing.
    unsigned reoptimizationRetryCounter() const;
    void countReoptimization();
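    // Hedged sketch of the flow described above (the control flow around the
    // calls is assumed; the member functions are the ones declared in this class):
    // a counting slow path in baseline code behaves roughly like
    //     if (codeBlock->checkIfOptimizationThresholdReached())
    //         /* request an optimizing compile; on the loop trigger, also attempt OSR entry */;
    // and a caller that abandons the optimized code after repeated speculation
    // failures calls codeBlock->countReoptimization() before reoptimizing, so the
    // next threshold is scaled up to avoid thrashing.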
#if ENABLE(JIT)
    unsigned numberOfDFGCompiles();

    int32_t codeTypeThresholdMultiplier() const;

    int32_t adjustedCounterValue(int32_t desiredThreshold);

    int32_t* addressOfJITExecuteCounter()
    {
        return &m_jitExecuteCounter.m_counter;
    }

    static ptrdiff_t offsetOfJITExecuteCounter() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_counter); }
    static ptrdiff_t offsetOfJITExecutionActiveThreshold() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_activeThreshold); }
    static ptrdiff_t offsetOfJITExecutionTotalCount() { return OBJECT_OFFSETOF(CodeBlock, m_jitExecuteCounter) + OBJECT_OFFSETOF(ExecutionCounter, m_totalCount); }

    const ExecutionCounter& jitExecuteCounter() const { return m_jitExecuteCounter; }

    unsigned optimizationDelayCounter() const { return m_optimizationDelayCounter; }

    // Check if the optimization threshold has been reached, and if not,
    // adjust the heuristics accordingly. Returns true if the threshold has
    // been reached.
    bool checkIfOptimizationThresholdReached();

    // Call this to force the next optimization trigger to fire. This is
    // rarely wise, since optimization triggers are typically more
    // expensive than executing baseline code.
    void optimizeNextInvocation();

    // Call this to prevent optimization from happening again. Note that
    // optimization will still happen after roughly 2^29 invocations,
    // so this is really meant to delay that as much as possible. This
    // is called if optimization failed, and we expect it to fail in
    // the future as well.
    void dontOptimizeAnytimeSoon();

    // Call this to reinitialize the counter to its starting state,
    // forcing a warm-up to happen before the next optimization trigger
    // fires. This is called in the CodeBlock constructor. It also
    // makes sense to call this if an OSR exit occurred. Note that
    // OSR exit code is code generated, so the value of the execute
    // counter that this corresponds to is also available directly.
    void optimizeAfterWarmUp();

    // Call this to force an optimization trigger to fire only after
    // a lot of warm-up.
    void optimizeAfterLongWarmUp();

    // Call this to cause an optimization trigger to fire soon, but
    // not necessarily the next one. This makes sense if optimization
    // succeeds. Successful optimization means that all calls are
    // relinked to the optimized code, so this only affects call
    // frames that are still executing this CodeBlock. The value here
    // is tuned to strike a balance between the cost of OSR entry
    // (which is too high to warrant making every loop back edge to
    // trigger OSR immediately) and the cost of executing baseline
    // code (which is high enough that we don't necessarily want to
    // have a full warm-up). The intuition for calling this instead of
    // optimizeNextInvocation() is for the case of recursive functions
    // with loops. Consider that there may be N call frames of some
    // recursive function, for a reasonably large value of N. The top
    // one triggers optimization, and then returns, and then all of
    // the others return. We don't want optimization to be triggered on
    // each return, as that would be superfluous. It only makes sense
    // to trigger optimization if one of those functions becomes hot
    // in the baseline code.
    void optimizeSoon();

    void forceOptimizationSlowPathConcurrently();

    void setOptimizationThresholdBasedOnCompilationResult(CompilationResult);
    
    uint32_t osrExitCounter() const { return m_osrExitCounter; }

    void countOSRExit() { m_osrExitCounter++; }

    uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; }

    static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); }

    uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold);
    uint32_t exitCountThresholdForReoptimization();
    uint32_t exitCountThresholdForReoptimizationFromLoop();
    bool shouldReoptimizeNow();
    bool shouldReoptimizeFromLoopNow();
#else // No JIT
    void optimizeAfterWarmUp() { }
    unsigned numberOfDFGCompiles() { return 0; }
#endif

#if ENABLE(VALUE_PROFILER)
    bool shouldOptimizeNow();
    void updateAllValueProfilePredictions();
    void updateAllArrayPredictions();
    void updateAllPredictions();
#else
    bool updateAllPredictionsAndCheckIfShouldOptimizeNow() { return false; }
    void updateAllValueProfilePredictions() { }
    void updateAllArrayPredictions() { }
    void updateAllPredictions() { }
#endif

#if ENABLE(JIT)
    void reoptimize();
#endif

#if ENABLE(VERBOSE_VALUE_PROFILE)
    void dumpValueProfiles();
#endif

    // FIXME: Make these remaining members private.

    int m_numCalleeRegisters;
    int m_numVars;
    bool m_isConstructor;
    
    // This is intentionally public; it's the responsibility of anyone doing any
    // of the following to hold the lock:
    //
    // - Modifying any inline cache in this code block.
    //
    // - Querying any inline cache in this code block, from a thread other than
    //   the main thread.
    //
    // Additionally, it's only legal to modify the inline cache on the main
    // thread. This means that the main thread can query the inline cache without
    // locking. This is crucial since executing the inline cache is effectively
    // "querying" it.
    //
    // Another exception to the rules is that the GC can do whatever it wants
    // without holding any locks, because the GC is guaranteed to wait until any
    // concurrent compilation threads finish what they're doing.
    mutable ConcurrentJITLock m_lock;
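    // Hedged example of the discipline above (the locals are illustrative): a
    // concurrent compiler thread querying the inline caches would do roughly
    //     ConcurrentJITLocker locker(codeBlock->m_lock);
    //     StubInfoMap map;
    //     codeBlock->getStubInfoMap(locker, map);
    // while the main thread, the only thread allowed to modify these caches,
    // may read them without taking the lock.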
    
    bool m_shouldAlwaysBeInlined;
    bool m_allTransitionsHaveBeenMarked; // Initialized and used on every GC.
    
    bool m_didFailFTLCompilation;
    
protected:
    virtual void visitWeakReferences(SlotVisitor&) OVERRIDE;
    virtual void finalizeUnconditionally() OVERRIDE;

#if ENABLE(DFG_JIT)
    void tallyFrequentExitSites();
#else
    void tallyFrequentExitSites() { }
#endif

private:
    friend class CodeBlockSet;