/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)

#include "Arguments.h"
#include "DFGAbstractInterpreterInlines.h"
#include "DFGArrayifySlowPathGenerator.h"
#include "DFGBinarySwitch.h"
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
#include "DFGSaneStringGetByValSlowPathGenerator.h"
#include "DFGSlowPathGenerator.h"
#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.graph().frameRegisterCount())
    , m_state(m_jit.graph())
    , m_interpreter(m_jit.graph(), m_state)
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

SpeculativeJIT::~SpeculativeJIT()
{
}

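// Emits an inline fast path that allocates a JSArray together with its butterfly
// storage. If the fast-path allocation fails, the CallArrayAllocatorSlowPathGenerator
// added at the end falls back to operationNewArrayWithSize.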
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
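    // A new double array must not expose garbage to JS: fill the unused part of
    // the vector with quiet NaN, the representation of a hole in a double array.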
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}

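// Speculation checks emit a branch that is taken if speculation fails and record
// it as an OSR exit. The backward* variants exit with the state recorded so far;
// the speculationCheck() wrappers additionally convert the last exit to a forward
// exit when the current speculation direction is ForwardSpeculation.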
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
}

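// An invalidation point is an OSR exit with no branch of its own: we record a
// label at which a jump to the exit can later be patched in if a watchpoint
// fires and this code is invalidated.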
void SpeculativeJIT::emitInvalidationPoint(Node* node)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_canExit);
    ASSERT(m_speculationDirection == BackwardSpeculation);
    OSRExitCompilationInfo& info = m_jit.appendExitInfo(JITCompiler::JumpList());
    m_jit.jitCode()->appendOSRExit(OSRExit(
        UncountableInvalidation, JSValueSource(),
        m_jit.graph().methodOfGettingAValueProfileFor(node),
        this, m_stream->size()));
    info.m_replacementSource = m_jit.watchpointLabel();
    ASSERT(info.m_replacementSource.isSet());
    noResult(node);
}

void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    m_jit.jitCode()->lastOSRExit().convertToForward(
        m_block, m_currentNode, m_indexInBlock, valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    convertLastOSRExitToForward(valueRecovery);
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    if (!m_compileOkay)
        return;
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}

void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
}

void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_interpreter.filter(edge, typesPassedThrough);
    backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
}

void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
}

void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

RegisterSet SpeculativeJIT::usedRegisters()
{
    RegisterSet result;
    
    for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
        GPRReg gpr = GPRInfo::toRegister(i);
        if (m_gprs.isInUse(gpr))
            result.set(gpr);
    }
    for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
        FPRReg fpr = FPRInfo::toRegister(i);
        if (m_fprs.isInUse(fpr))
            result.set(fpr);
    }
    
    result.merge(RegisterSet::specialRegisters());
    
    return result;
}

void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}

void SpeculativeJIT::runSlowPathGenerators()
{
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to Thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
static double JIT_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned i = 0; i < m_generationInfo.size(); ++i)
        m_generationInfo[i] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

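// Silent spill/fill saves live registers around a call without changing the
// recorded generation info. The silentSavePlanFor* methods compute, per register,
// the cheapest way to save a value (often not at all, e.g. for constants) and to
// rematerialize it after the call.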
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInt32)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else if (registerFormat == DataFormatInt52 || registerFormat == DataFormatStrictInt52)
            spillAction = Store64;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    if (registerFormat == DataFormatInt32) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInt32(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(isInt32Constant(node));
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        RELEASE_ASSERT_NOT_REACHED();
        fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(isBooleanConstant(node));
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            JSValue value = valueOfJSConstant(node);
            ASSERT_UNUSED(value, value.isCell());
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else if (registerFormat == DataFormatInt52) {
        if (node->hasConstant())
            fillAction = SetInt52Constant;
        else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
            fillAction = Load32PayloadConvertToInt52;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64ShiftInt52Left;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            // Should never happen. Anything that qualifies as an int32 will never
            // be turned into a cell (immediate spec fail) or a double (to-double
            // conversions involve a separate node).
            RELEASE_ASSERT_NOT_REACHED();
            fillAction = Load64; // Make GCC happy.
        }
    } else if (registerFormat == DataFormatStrictInt52) {
        if (node->hasConstant())
            fillAction = SetStrictInt52Constant;
        else if (isJSInt32(info.spillFormat()) || info.spillFormat() == DataFormatJS)
            fillAction = Load32PayloadSignExtend;
        else if (info.spillFormat() == DataFormatInt52)
            fillAction = Load64ShiftInt52Right;
        else if (info.spillFormat() == DataFormatStrictInt52)
            fillAction = Load64;
        else if (info.spillFormat() == DataFormatNone)
            fillAction = Load64;
        else {
            // Should never happen. Anything that qualifies as an int32 will never
            // be turned into a cell (immediate spec fail) or a double (to-double
            // conversions involve a separate node).
            RELEASE_ASSERT_NOT_REACHED();
            fillAction = Load64; // Make GCC happy.
        }
    } else {
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (valueOfJSConstant(node).isCell())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInt32) {
            ASSERT(registerFormat == DataFormatJSInt32);
            fillAction = Load32PayloadBoxInt;
        } else if (info.spillFormat() == DataFormatDouble) {
            ASSERT(registerFormat == DataFormatJSDouble);
            fillAction = LoadDoubleBoxDouble;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInt32:
                ASSERT(registerFormat == DataFormatJSInt32);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
        // it was already spilled previously and not as a double, which means we need unboxing.
        ASSERT(info.spillFormat() & DataFormatJS);
        fillAction = LoadJSUnboxDouble;
    } else
        fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetInt52Constant:
        m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt() << JSValue::int52ShiftAmount), plan.gpr());
        break;
    case SetStrictInt52Constant:
        m_jit.move(Imm64(valueOfJSConstant(plan.node()).asMachineInt()), plan.gpr());
        break;
#endif // USE(JSVALUE64)
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case Load32PayloadConvertToInt52:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load32PayloadSignExtend:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.signExtend32ToPtr(plan.gpr(), plan.gpr());
        break;
    case LoadDoubleBoxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadJSUnboxDouble:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
        unboxDouble(canTrample, plan.fpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load64ShiftInt52Right:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.rshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
    case Load64ShiftInt52Left:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.lshift64(TrustedImm32(JSValue::int52ShiftAmount), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
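// Expects tempGPR to hold the indexing type byte of the base object's structure;
// emits a branch that is taken if the indexing type does not match what the
// given array mode requires.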
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }
        
    case Array::Array:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    case Array::NonArray:
    case Array::OriginalNonArray:
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
        
    case Array::PossiblyArray:
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
    
    RELEASE_ASSERT_NOT_REACHED();
    return JITCompiler::Jump();
}

JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}

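// CheckArray: speculate that the base object already has the layout the array
// mode expects. Butterfly-based shapes are checked via the indexing type; the
// Arguments and typed array cases are checked via the structure's ClassInfo.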
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::String:
        RELEASE_ASSERT_NOT_REACHED(); // Should have been a Phantom(String:)
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = Arguments::info();
        break;
    default:
        expectedClassInfo = classInfoForType(node->arrayMode().typedArrayType());
        break;
    }
    
    RELEASE_ASSERT(expectedClassInfo);
    
    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}

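// Arrayify converts the base object to the desired indexing shape (or, for
// ArrayifyToStructure, to an exact structure). The inline code only checks
// whether conversion is still needed; the conversion itself always happens on
// the slow path.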
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());
    
    GPRTemporary temp(this);
    GPRTemporary structure;
    GPRReg tempGPR = temp.gpr();
    GPRReg structureGPR = InvalidGPRReg;
    
    if (node->op() != ArrayifyToStructure) {
        GPRTemporary realStructure(this);
        structure.adopt(realStructure);
        structureGPR = structure.gpr();
    }
        
    // We can skip all that comes next if we already have array storage.
    MacroAssembler::JumpList slowPath;
    
    if (node->op() == ArrayifyToStructure) {
        slowPath.append(m_jit.branchWeakPtr(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node->structure()));
    } else {
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
        
        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
        
        slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
    }
    
    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
    
    noResult(m_currentNode);
}

void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    
    SpeculateCellOperand base(this, node->child1());
    
    if (!node->child2()) {
        arrayify(node, base.gpr(), InvalidGPRReg);
        return;
    }
    
    SpeculateInt32Operand property(this, node->child2());
    
    arrayify(node, base.gpr(), property.gpr());
}

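// Fill the storage (butterfly) pointer for the given edge into a GPR, reloading
// it from the stack if it was spilled. Anything not already in storage format is
// expected to be a cell and is filled via fillSpeculateCell.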
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
    
    switch (info.registerFormat()) {
    case DataFormatNone: {
        if (info.spillFormat() == DataFormatStorage) {
            GPRReg gpr = allocate();
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
            info.fillStorage(*m_stream, gpr);
            return gpr;
        }
        
        // Must be a cell; fill it as a cell and then return the pointer.
        return fillSpeculateCell(edge);
    }
        
    case DataFormatStorage: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }
        
    default:
        return fillSpeculateCell(edge);