DFGSpeculativeJIT.h 110 KB
Newer Older
1
/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#ifndef DFGSpeculativeJIT_h
#define DFGSpeculativeJIT_h

29 30
#include <wtf/Platform.h>

31 32
#if ENABLE(DFG_JIT)

33
#include "DFGAbstractState.h"
34 35
#include "DFGGenerationInfo.h"
#include "DFGJITCompiler.h"
36
#include "DFGOSRExit.h"
37
#include "DFGOSRExitJumpPlaceholder.h"
38
#include "DFGOperations.h"
39
#include "DFGSilentRegisterSavePlan.h"
40
#include "DFGValueSource.h"
41
#include "MarkedAllocator.h"
42
#include "ValueRecovery.h"
43 44 45

namespace JSC { namespace DFG {

46
class GPRTemporary;
47
class JSValueOperand;
48
class SlowPathGenerator;
49
class SpeculativeJIT;
50 51 52 53 54 55
class SpeculateIntegerOperand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;

56 57
enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue};

58 59 60 61 62 63 64 65
// Overloads that normalize the representation of an operation's result:
// a plain GPR passes through; a JSValueRegs collapses to its single GPR on
// 64-bit (where a JSValue fits in one register) but stays a tag/payload pair
// on 32-bit; the NoResult tag passes through for value-less operations.
inline GPRReg extractResult(GPRReg result) { return result; }
#if USE(JSVALUE64)
inline GPRReg extractResult(JSValueRegs result) { return result.gpr(); }
#else
inline JSValueRegs extractResult(JSValueRegs result) { return result; }
#endif
inline NoResultTag extractResult(NoResultTag) { return NoResult; }

66 67 68 69
// === SpeculativeJIT ===
//
// The SpeculativeJIT is used to generate a fast, but potentially
// incomplete code path for the dataflow. When code generating
70
// we may make assumptions about operand types, dynamically check,
71 72
// and bail-out to an alternate code path if these checks fail.
// Importantly, the speculative code path cannot be reentered once
73
// a speculative check has failed. This allows the SpeculativeJIT
74 75
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
76
class SpeculativeJIT {
77
    friend struct OSRExit;
78 79 80 81 82
private:
    typedef JITCompiler::TrustedImm32 TrustedImm32;
    typedef JITCompiler::Imm32 Imm32;
    typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
    typedef JITCompiler::ImmPtr ImmPtr;
83 84
    typedef JITCompiler::TrustedImm64 TrustedImm64;
    typedef JITCompiler::Imm64 Imm64;
85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113

    // These constants are used to set priorities for spill order for
    // the register allocator. Lower numbers are cheaper to spill: constants
    // and already-spilled values need no store; plain values need a store;
    // the highest-priority formats also need boxing or conversion on refill.
#if USE(JSVALUE64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled  = 2, // no spill
        SpillOrderJS       = 4, // needs spill
        SpillOrderCell     = 4, // needs spill
        SpillOrderStorage  = 4, // needs spill
        SpillOrderInteger  = 5, // needs spill and box
        SpillOrderBoolean  = 5, // needs spill and box
        SpillOrderDouble   = 6, // needs spill and convert
    };
#elif USE(JSVALUE32_64)
    enum SpillOrder {
        SpillOrderConstant = 1, // no spill, and cheap fill
        SpillOrderSpilled  = 2, // no spill
        SpillOrderJS       = 4, // needs spill
        SpillOrderStorage  = 4, // needs spill
        SpillOrderDouble   = 4, // needs spill
        SpillOrderInteger  = 5, // needs spill and box
        SpillOrderCell     = 5, // needs spill and box
        SpillOrderBoolean  = 5, // needs spill and box
    };
#endif

    // Whether compiling a node should implicitly call useChildren(), or the
    // node's code generator has already consumed its children explicitly.
    enum UseChildrenMode { CallUseChildren, UseChildrenCalledExplicitly };
    
114
public:
115
    SpeculativeJIT(JITCompiler&);
116
    ~SpeculativeJIT();
117

118
    bool compile();
119
    void createOSREntries();
120
    void linkOSREntries(LinkBuffer&);
121

122 123 124 125 126 127 128 129 130 131
    // Returns the index of the next non-null basic block after m_block,
    // or NoBlock if none remains (removed blocks leave null slots).
    BlockIndex nextBlock()
    {
        BlockIndex candidate = m_block + 1;
        while (candidate < m_jit.graph().m_blocks.size()) {
            if (m_jit.graph().m_blocks[candidate])
                return candidate;
            ++candidate;
        }
        return NoBlock;
    }
    
132
    GPRReg fillInteger(Edge, DataFormat& returnFormat);
133
#if USE(JSVALUE64)
134
    GPRReg fillJSValue(Edge);
135
#elif USE(JSVALUE32_64)
136
    bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
137
#endif
138
    GPRReg fillStorage(Edge);
139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159

    // lock and unlock GPR & FPR registers.
    // Locking pins a register in the corresponding register bank so the
    // allocator will not hand it out again until it is unlocked; these are
    // thin forwarders to the per-bank maps.
    void lock(GPRReg reg)
    {
        m_gprs.lock(reg);
    }
    void lock(FPRReg reg)
    {
        m_fprs.lock(reg);
    }
    void unlock(GPRReg reg)
    {
        m_gprs.unlock(reg);
    }
    void unlock(FPRReg reg)
    {
        m_fprs.unlock(reg);
    }

    // Used to check whether a child node is on its last use,
    // and its machine registers may be reused.
160
    bool canReuse(Node* node)
161
    {
162
        VirtualRegister virtualRegister = node->virtualRegister();
163 164 165
        GenerationInfo& info = m_generationInfo[virtualRegister];
        return info.canReuse();
    }
166
    bool canReuse(Edge nodeUse)
167
    {
168
        return canReuse(nodeUse.node());
169
    }
170 171 172 173 174 175 176 177 178 179 180 181 182 183
    // Re-lock a register that is being reused (e.g. an operand's register
    // becoming the result register) and hand it back to the caller.
    GPRReg reuse(GPRReg reg)
    {
        m_gprs.lock(reg);
        return reg;
    }
    FPRReg reuse(FPRReg reg)
    {
        m_fprs.lock(reg);
        return reg;
    }

    // Allocate a gpr/fpr.
    // Allocate any free GPR; if none is free the bank nominates a victim
    // virtual register, which we spill to free its machine register.
    GPRReg allocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister victim;
        GPRReg gpr = m_gprs.allocate(victim);
        if (victim == InvalidVirtualRegister)
            return gpr;
#if USE(JSVALUE32_64)
        // A boxed JS value occupies two GPRs; release the sibling half too.
        GenerationInfo& info = m_generationInfo[victim];
        RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
        if ((info.registerFormat() & DataFormatJS))
            m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
        spill(victim);
        return gpr;
    }
    // Allocate a specific GPR, spilling whatever virtual register currently
    // occupies it (if any).
    GPRReg allocate(GPRReg specific)
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister victim = m_gprs.allocateSpecific(specific);
        if (victim == InvalidVirtualRegister)
            return specific;
#if USE(JSVALUE32_64)
        // A boxed JS value occupies two GPRs; release the sibling half too.
        GenerationInfo& info = m_generationInfo[victim];
        RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
        if ((info.registerFormat() & DataFormatJS))
            m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
        spill(victim);
        return specific;
    }
    // Allocate a GPR without spilling: forwards to the bank's tryAllocate,
    // so unlike allocate() this never evicts a live value.
    GPRReg tryAllocate()
    {
        return m_gprs.tryAllocate();
    }
    // Allocate any free FPR, spilling a victim virtual register if the bank
    // is full.
    FPRReg fprAllocate()
    {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
        m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
#endif
        VirtualRegister victim;
        FPRReg fpr = m_fprs.allocate(victim);
        if (victim != InvalidVirtualRegister)
            spill(victim);
        return fpr;
    }

    // Check whether a VirtualRegister is currently in a machine register.
    // We use this when filling operands to fill those that are already in
    // machine registers first (by locking VirtualRegisters that are already
    // in machine registers before filling those that are not we attempt to
    // avoid spilling values we will need immediately).
238
    // True when the node's value currently lives in some machine register
    // (i.e. its GenerationInfo records any format other than DataFormatNone).
    bool isFilled(Node* node)
    {
        return m_generationInfo[node->virtualRegister()].registerFormat() != DataFormatNone;
    }
244
    // True when the node's value is currently held in an FPR as an unboxed
    // double (DataFormatDouble).
    bool isFilledDouble(Node* node)
    {
        return m_generationInfo[node->virtualRegister()].registerFormat() == DataFormatDouble;
    }

    // Called on an operand once it has been consumed by a parent node.
252
    void use(Node* node)
253
    {
254
        if (!node->hasResult())
255
            return;
256
        VirtualRegister virtualRegister = node->virtualRegister();
257 258 259 260
        GenerationInfo& info = m_generationInfo[virtualRegister];

        // use() returns true when the value becomes dead, and any
        // associated resources may be freed.
261
        if (!info.use(*m_stream))
262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280
            return;

        // Release the associated machine registers.
        DataFormat registerFormat = info.registerFormat();
#if USE(JSVALUE64)
        if (registerFormat == DataFormatDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
        if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
            m_fprs.release(info.fpr());
        else if (registerFormat & DataFormatJS) {
            m_gprs.release(info.tagGPR());
            m_gprs.release(info.payloadGPR());
        } else if (registerFormat != DataFormatNone)
            m_gprs.release(info.gpr());
#endif
    }
281
    // Edge convenience overload: consume the edge's underlying node.
    void use(Edge nodeUse)
    {
        use(nodeUse.node());
    }
285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300
    
    // Collect the set of machine registers (both banks) currently in use.
    RegisterSet usedRegisters()
    {
        RegisterSet result;
        for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
            GPRReg gpr = GPRInfo::toRegister(i);
            if (m_gprs.isInUse(gpr))
                result.set(gpr);
        }
        for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
            FPRReg fpr = FPRInfo::toRegister(i);
            if (m_fprs.isInUse(fpr))
                result.set(fpr);
        }
        return result;
    }
301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319
    
    // Watchpoint helpers for the MasqueradesAsUndefined optimization; all
    // forward to the graph / JIT. The zero-argument forms use the current
    // node's code origin.
    bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
    {
        return m_jit.graph().masqueradesAsUndefinedWatchpointIsStillValid(codeOrigin);
    }
    void speculationWatchpointForMasqueradesAsUndefined(const CodeOrigin& codeOrigin)
    {
        // Lazily registers a speculation watchpoint on the global object's
        // masquerades-as-undefined watchpoint set.
        m_jit.addLazily(
            speculationWatchpoint(),
            m_jit.graph().globalObjectFor(codeOrigin)->masqueradesAsUndefinedWatchpoint());
    }
    bool masqueradesAsUndefinedWatchpointIsStillValid()
    {
        return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->codeOrigin);
    }
    void speculationWatchpointForMasqueradesAsUndefined()
    {
        speculationWatchpointForMasqueradesAsUndefined(m_currentNode->codeOrigin);
    }
320 321 322

    static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);

323
    void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
324
    void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
325
    void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
326

327
    // Pick a scratch GPR distinct from up to four registers to preserve;
    // forwards to AssemblyHelpers.
    static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
    {
        return AssemblyHelpers::selectScratchGPR(preserve1, preserve2, preserve3, preserve4);
    }

332 333
    // Called by the speculative operand types, below, to fill operand to
    // machine registers, implicitly generating speculation checks as needed.
334
    GPRReg fillSpeculateInt(Edge, DataFormat& returnFormat);
335
    GPRReg fillSpeculateIntStrict(Edge);
336 337 338
    FPRReg fillSpeculateDouble(Edge);
    GPRReg fillSpeculateCell(Edge);
    GPRReg fillSpeculateBoolean(Edge);
339
    GeneratedOperandType checkGeneratedTypeForToInt32(Node*);
340

341 342 343
    void addSlowPathGenerator(PassOwnPtr<SlowPathGenerator>);
    void runSlowPathGenerators();
    
344 345
    void compile(Node*);
    void noticeOSRBirth(Node*);
346
    void bail();
347
    void compile(BasicBlock&);
348

349 350
    void checkArgumentTypes();

351 352 353 354 355
    void clearGenerationInfo();

    // These methods are used when generating 'unexpected'
    // calls out from JIT code to C++ helper routines -
    // they spill all live values to the appropriate
356
    // slots in the JSStack without changing any state
357
    // in the GenerationInfo.
358 359 360 361
    SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
    SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
    void silentSpill(const SilentRegisterSavePlan&);
    void silentFill(const SilentRegisterSavePlan&, GPRReg canTrample);
362 363 364
    
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
365
    {
366
        ASSERT(plans.isEmpty());
367
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
368 369 370 371 372 373 374
            GPRReg gpr = iter.regID();
            if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2) {
                SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
375 376
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
377 378 379 380 381 382
            if (iter.name() != InvalidVirtualRegister && iter.regID() != fprExclude) {
                SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
                if (doSpill)
                    silentSpill(plan);
                plans.append(plan);
            }
383 384
        }
    }
385
    template<typename CollectionType>
386 387 388 389 390
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
    {
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
    }
    template<typename CollectionType>
391
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
392
    {
393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422
        silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
    }
#if USE(JSVALUE32_64)
    // JSValueRegs-result variant (32-bit): exclude both the tag and payload GPRs.
    template<typename CollectionType>
    void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
    {
        silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
    }
#endif
    
    // Spill every live register (except the excluded result registers),
    // recording the plans in m_plans for a later silentFillAllRegisters().
    void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
    {
        silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
    }
    void silentSpillAllRegisters(FPRReg exclude)
    {
        silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
    }
    
    // Choose a GPR the fill code may clobber: the lowest of regT0/regT1 that
    // is not the excluded result register.
    static GPRReg pickCanTrample(GPRReg exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude)
            result = GPRInfo::regT1;
        return result;
    }
    // An FPR result never conflicts with a GPR scratch.
    static GPRReg pickCanTrample(FPRReg)
    {
        return GPRInfo::regT0;
    }
423 424 425 426
    // With no result registers to protect, regT0 is always safe to trample.
    static GPRReg pickCanTrample(NoResultTag)
    {
        return GPRInfo::regT0;
    }
427 428 429 430 431 432 433 434 435 436 437 438 439

#if USE(JSVALUE32_64)
    // 32-bit variant: pick the lowest of regT0-regT2 that collides with
    // neither the tag nor the payload register of the excluded result.
    static GPRReg pickCanTrample(JSValueRegs exclude)
    {
        GPRReg result = GPRInfo::regT0;
        if (result == exclude.tagGPR() || result == exclude.payloadGPR()) {
            result = GPRInfo::regT1;
            if (result == exclude.tagGPR() || result == exclude.payloadGPR())
                result = GPRInfo::regT2;
        }
        return result;
    }
#endif
    
    template<typename RegisterType>
    void silentFillAllRegisters(RegisterType exclude)
447
    {
448
        GPRReg canTrample = pickCanTrample(exclude);
449
        
450 451 452 453
        while (!m_plans.isEmpty()) {
            SilentRegisterSavePlan& plan = m_plans.last();
            silentFill(plan, canTrample);
            m_plans.removeLast();
454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481
        }
    }

    // These methods convert between doubles, and doubles boxed and JSValues.
    // All forward to the JITCompiler; the register shapes differ per value
    // representation (single GPR on 64-bit, tag/payload pair on 32-bit).
#if USE(JSVALUE64)
    GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
    {
        return m_jit.boxDouble(fpr, gpr);
    }
    FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
    {
        return m_jit.unboxDouble(gpr, fpr);
    }
    // Convenience form that allocates the destination GPR itself.
    GPRReg boxDouble(FPRReg fpr)
    {
        return boxDouble(fpr, allocate());
    }
#elif USE(JSVALUE32_64)
    void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
    {
        m_jit.boxDouble(fpr, tagGPR, payloadGPR);
    }
    void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
    {
        m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
    }
#endif

482
    // Spill a VirtualRegister to the JSStack.
483 484 485 486 487 488 489 490 491
    void spill(VirtualRegister spillMe)
    {
        GenerationInfo& info = m_generationInfo[spillMe];

#if USE(JSVALUE32_64)
        if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
            return;
#endif
        // Check the GenerationInfo to see if this value need writing
492
        // to the JSStack - if not, mark it as spilled & return.
493
        if (!info.needsSpill()) {
494
            info.setSpilled(*m_stream, spillMe);
495 496 497 498 499 500 501 502 503
            return;
        }

        DataFormat spillFormat = info.registerFormat();
        switch (spillFormat) {
        case DataFormatStorage: {
            // This is special, since it's not a JS value - as in it's not visible to JS
            // code.
            m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
504
            info.spill(*m_stream, spillMe, DataFormatStorage);
505 506 507
            return;
        }

508 509
        case DataFormatInteger: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
510
            info.spill(*m_stream, spillMe, DataFormatInteger);
511 512 513
            return;
        }

514 515 516
#if USE(JSVALUE64)
        case DataFormatDouble: {
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
517
            info.spill(*m_stream, spillMe, DataFormatDouble);
518 519 520 521 522
            return;
        }
            
        default:
            // The following code handles JSValues, int32s, and cells.
523
            RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
524 525 526 527 528
            
            GPRReg reg = info.gpr();
            // We need to box int32 and cell values ...
            // but on JSVALUE64 boxing a cell is a no-op!
            if (spillFormat == DataFormatInteger)
529
                m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);
530 531
            
            // Spill the value, and record it as spilled in its boxed form.
532
            m_jit.store64(reg, JITCompiler::addressFor(spillMe));
533
            info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
534 535
            return;
#elif USE(JSVALUE32_64)
536 537 538
        case DataFormatCell:
        case DataFormatBoolean: {
            m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
539
            info.spill(*m_stream, spillMe, spillFormat);
540 541 542
            return;
        }

543 544 545 546
        case DataFormatDouble:
        case DataFormatJSDouble: {
            // On JSVALUE32_64 boxing a double is a no-op.
            m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
547
            info.spill(*m_stream, spillMe, DataFormatJSDouble);
548 549
            return;
        }
550

551
        default:
552
            // The following code handles JSValues.
553
            RELEASE_ASSERT(spillFormat & DataFormatJS);
554 555
            m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
            m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
556
            info.spill(*m_stream, spillMe, spillFormat);
557 558 559 560 561
            return;
#endif
        }
    }
    
562 563
    // Abstract-state queries. A node is "known X" when its proven type set
    // is contained in X; "known not X" when it is disjoint from X.
    bool isKnownInteger(Node* node)
    {
        return !(m_state.forNode(node).m_type & ~SpecInt32);
    }
    bool isKnownCell(Node* node)
    {
        return !(m_state.forNode(node).m_type & ~SpecCell);
    }

    bool isKnownNotInteger(Node* node)
    {
        return !(m_state.forNode(node).m_type & SpecInt32);
    }
    bool isKnownNotNumber(Node* node)
    {
        return !(m_state.forNode(node).m_type & SpecNumber);
    }
    bool isKnownNotCell(Node* node)
    {
        return !(m_state.forNode(node).m_type & SpecCell);
    }
568 569
    
    // Checks/accessors for constant values.
570 571 572 573 574 575 576 577 578
    bool isConstant(Node* node) { return m_jit.graph().isConstant(node); }
    bool isJSConstant(Node* node) { return m_jit.graph().isJSConstant(node); }
    bool isInt32Constant(Node* node) { return m_jit.graph().isInt32Constant(node); }
    bool isDoubleConstant(Node* node) { return m_jit.graph().isDoubleConstant(node); }
    bool isNumberConstant(Node* node) { return m_jit.graph().isNumberConstant(node); }
    bool isBooleanConstant(Node* node) { return m_jit.graph().isBooleanConstant(node); }
    bool isFunctionConstant(Node* node) { return m_jit.graph().isFunctionConstant(node); }
    int32_t valueOfInt32Constant(Node* node) { return m_jit.graph().valueOfInt32Constant(node); }
    double valueOfNumberConstant(Node* node) { return m_jit.graph().valueOfNumberConstant(node); }
579
#if USE(JSVALUE32_64)
580
    void* addressOfDoubleConstant(Node* node) { return m_jit.addressOfDoubleConstant(node); }
581
#endif
582 583 584 585
    JSValue valueOfJSConstant(Node* node) { return m_jit.graph().valueOfJSConstant(node); }
    bool valueOfBooleanConstant(Node* node) { return m_jit.graph().valueOfBooleanConstant(node); }
    JSFunction* valueOfFunctionConstant(Node* node) { return m_jit.graph().valueOfFunctionConstant(node); }
    bool isNullConstant(Node* node)
586
    {
587
        if (!isConstant(node))
588
            return false;
589
        return valueOfJSConstant(node).isNull();
590 591
    }

592
    // Look up the StringImpl for an identifier number in the graph's table.
    StringImpl* identifierUID(unsigned index)
    {
        return m_jit.graph().identifiers()[index];
    }

597
    // Spill all VirtualRegisters back to the JSStack.
598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631
    // Spill every live value in both register banks back to the JSStack and
    // release the machine registers, leaving both banks empty (see isFlushed).
    void flushRegisters()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister) {
                spill(iter.name());
                iter.release();
            }
        }
    }

#ifndef NDEBUG
    // Used to ASSERT flushRegisters() has been called prior to
    // calling out from JIT code to a C helper function.
    // Returns true when neither bank has any register bound to a name.
    bool isFlushed()
    {
        for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
            if (iter.name() != InvalidVirtualRegister)
                return false;
        }
        return true;
    }
#endif

#if USE(JSVALUE64)
    // Encode a node's constant JSValue as a 64-bit immediate operand.
    MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
    {
        return MacroAssembler::Imm64(JSValue::encode(valueOfJSConstant(node)));
    }
#endif

    // Helper functions to enable code sharing in implementations of bit/shift ops.
    // Immediate-operand form: dispatch on the node's opcode.
    void bitOp(NodeType op, int32_t imm, GPRReg op1, GPRReg result)
    {
        if (op == BitAnd)
            m_jit.and32(Imm32(imm), op1, result);
        else if (op == BitOr)
            m_jit.or32(Imm32(imm), op1, result);
        else if (op == BitXor)
            m_jit.xor32(Imm32(imm), op1, result);
        else
            RELEASE_ASSERT_NOT_REACHED();
    }
    // Register-operand form of the bitwise-op dispatcher.
    void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
    {
        if (op == BitAnd)
            m_jit.and32(op1, op2, result);
        else if (op == BitOr)
            m_jit.or32(op1, op2, result);
        else if (op == BitXor)
            m_jit.xor32(op1, op2, result);
        else
            RELEASE_ASSERT_NOT_REACHED();
    }
    // Immediate-amount form of the shift-op dispatcher.
    void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
    {
        if (op == BitRShift)
            m_jit.rshift32(op1, Imm32(shiftAmount), result);
        else if (op == BitLShift)
            m_jit.lshift32(op1, Imm32(shiftAmount), result);
        else if (op == BitURShift)
            m_jit.urshift32(op1, Imm32(shiftAmount), result);
        else
            RELEASE_ASSERT_NOT_REACHED();
    }
    // Register-amount form of the shift-op dispatcher.
    void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
    {
        if (op == BitRShift)
            m_jit.rshift32(op1, shiftAmount, result);
        else if (op == BitLShift)
            m_jit.lshift32(op1, shiftAmount, result);
        else if (op == BitURShift)
            m_jit.urshift32(op1, shiftAmount, result);
        else
            RELEASE_ASSERT_NOT_REACHED();
    }
    
704 705
    // Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
    unsigned detectPeepHoleBranch()
706
    {
707
        BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
708 709

        // Check that no intervening nodes will be generated.
710
        for (unsigned index = m_indexInBlock + 1; index < block->size() - 1; ++index) {
711 712
            Node* node = block->at(index);
            if (node->shouldGenerate())
713
                return UINT_MAX;
714 715 716
        }

        // Check if the lastNode is a branch on this node.
717 718
        Node* lastNode = block->last();
        return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? block->size() - 1 : UINT_MAX;
719 720
    }
    
721 722 723 724
    void compileMovHint(Node*);
    void compileMovHintAndCheck(Node*);
    void compileInlineStart(Node*);

725
    void nonSpeculativeUInt32ToNumber(Node*);
726 727

#if USE(JSVALUE64)
728
    void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
729
    void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
730
#elif USE(JSVALUE32_64)
731
    void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
732
    void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
733
#endif
734 735 736
    
    void compileIn(Node*);
    
737
    void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
738 739
    void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false);
    bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false);
740
    
741 742 743
    void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
    bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
744
    
745 746 747
    void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
    void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
    bool nonSpeculativeStrictEq(Node*, bool invert = false);
748
    
749 750
    void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
    void compileInstanceOf(Node*);
751
    
752 753
    // Access to our fixed callee CallFrame.
    MacroAssembler::Address callFrameSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
    }

    // Access to an argument slot in our fixed callee CallFrame.
    MacroAssembler::Address argumentSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
    }

    // Address of the tag component of a callee CallFrame slot.
    MacroAssembler::Address callFrameTagSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    // Address of the payload component of a callee CallFrame slot.
    MacroAssembler::Address callFramePayloadSlot(int slot)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    // Address of the tag component of an argument slot in the callee CallFrame.
    MacroAssembler::Address argumentTagSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
    }

    // Address of the payload component of an argument slot in the callee CallFrame.
    MacroAssembler::Address argumentPayloadSlot(int argument)
    {
        return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
    }

    void emitCall(Node*);
785 786 787 788 789
    
    // Called once a node has completed code generation but prior to setting
    // its result, to free up its children. (This must happen prior to setting
    // the nodes result, since the node may have the same VirtualRegister as
    // a child, and as such will use the same GeneratioInfo).
790
    void useChildren(Node*);
791 792 793

    // These method called to initialize the the GenerationInfo
    // to describe the result of an operation.
794
    // Record that node's result is the int32 held in reg. On 64-bit, format may
    // be DataFormatJSInteger for a boxed int32; on 32-bit only DataFormatInteger
    // is legal here.
    void integerResult(GPRReg reg, Node* node, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        GenerationInfo& info = m_generationInfo[virtualRegister];

        if (format == DataFormatInteger) {
            m_jit.jitAssertIsInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
            info.initInteger(node, node->refCount(), reg);
        } else {
#if USE(JSVALUE64)
            RELEASE_ASSERT(format == DataFormatJSInteger);
            m_jit.jitAssertIsJSInt32(reg);
            m_gprs.retain(reg, virtualRegister, SpillOrderJS);
            info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
            RELEASE_ASSERT_NOT_REACHED();
#endif
        }
    }
    // Convenience overload: default DataFormatInteger with an explicit mode.
    void integerResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        integerResult(reg, node, DataFormatInteger, mode);
    }
    // For nodes that produce no value: just release the node's children
    // (unless the caller already did so explicitly).
    void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == UseChildrenCalledExplicitly)
            return;
        useChildren(node);
    }
    // Record that node's result is the cell pointer held in reg.
    void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderCell);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initCell(node, node->refCount(), reg);
    }
    // Record that node's result is the boolean held in reg.
    void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initBoolean(node, node->refCount(), reg);
    }
#if USE(JSVALUE64)
848
    // Record that node's result is the JSValue held in reg (64-bit boxing).
    void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (format == DataFormatJSInteger)
            m_jit.jitAssertIsJSInt32(reg);
        
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), reg, format);
    }
    // Convenience overload: default DataFormatJS with an explicit mode.
    void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
    {
        jsValueResult(reg, node, DataFormatJS, mode);
    }
#elif USE(JSVALUE32_64)
866
    // Record that node's result is the JSValue held in the tag/payload register
    // pair (32-bit representation).
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(tag, virtualRegister, SpillOrderJS);
        m_gprs.retain(payload, virtualRegister, SpillOrderJS);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initJSValue(node, node->refCount(), tag, payload, format);
    }
    // Convenience overload: default DataFormatJS with an explicit mode.
    void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
    {
        jsValueResult(tag, payload, node, DataFormatJS, mode);
    }
#endif
882
    // Record that node's result is the storage pointer held in reg.
    void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);
        
        VirtualRegister virtualRegister = node->virtualRegister();
        m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initStorage(node, node->refCount(), reg);
    }
    // Record that node's result is the double held in FPR reg.
    void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
    {
        if (mode == CallUseChildren)
            useChildren(node);

        VirtualRegister virtualRegister = node->virtualRegister();
        m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
        GenerationInfo& info = m_generationInfo[virtualRegister];
        info.initDouble(node, node->refCount(), reg);
    }
    // Initialize the GenerationInfo for a constant node; no register is
    // retained since the value can always be rematerialized.
    void initConstantInfo(Node* node)
    {
        ASSERT(isInt32Constant(node) || isNumberConstant(node) || isJSConstant(node));
        m_generationInfo[node->virtualRegister()].initConstant(node, node->refCount());
    }
    
908 909 910 911 912
    // These methods add calls to C++ helper functions.
    // These methods are broadly value representation specific (i.e.
    // they deal with the fact that a JSValue may be passed in one or two
    // machine registers, and delegate the calling-convention-specific
    // decision as to how to fill the registers to setupArguments* methods).

    // Pointer-returning helper calls: set up arguments, call, check for an
    // exception, and move the returned pointer into the result register.
    JITCompiler::Call callOperation(P_DFGOperation_E operation, GPRReg result)
    {
        m_jit.setupArgumentsExecState();
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EC operation, GPRReg result, GPRReg cell)
    {
        m_jit.setupArgumentsWithExecState(cell);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EO operation, GPRReg result, GPRReg object)
    {
        m_jit.setupArgumentsWithExecState(object);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    // Helper calls taking an object plus a size/length immediate.
    JITCompiler::Call callOperation(P_DFGOperation_EOS operation, GPRReg result, GPRReg object, size_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(P_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
    JITCompiler::Call callOperation(C_DFGOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
    {
        m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size)));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
944 945 946 947 948
    JITCompiler::Call callOperation(P_DFGOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
    {
        m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
949 950 951 952 953
    JITCompiler::Call callOperation(P_DFGOperation_ES operation, GPRReg result, size_t size)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
954
    JITCompiler::Call callOperation(P_DFGOperation_ESt operation, GPRReg result, Structure* structure)
955 956 957 958
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
959
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
960 961 962 963
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
964 965 966 967 968
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
969
    JITCompiler::Call callOperation(P_DFGOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
970 971 972 973
    {
        m_jit.setupArgumentsWithExecState(arg1, arg2);
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
974
    JITCompiler::Call callOperation(P_DFGOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
975 976 977 978
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
979
    JITCompiler::Call callOperation(P_DFGOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
980 981 982 983
    {
        m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
        return appendCallWithExceptionCheckSetResult(operation, result);
    }
984
    JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)