DFGSpeculativeJIT.cpp 176 KB
Newer Older
1
/*
2
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)
30

31
#include "Arguments.h"
32
#include "DFGArrayifySlowPathGenerator.h"
33
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
34
#include "DFGSlowPathGenerator.h"
35
#include "JSCJSValueInlines.h"
36
#include "LinkBuffer.h"
37

38
namespace JSC { namespace DFG {
39

40 41 42
// Constructs a SpeculativeJIT bound to the given JITCompiler. Sizes the
// per-value GenerationInfo table and the argument/local tables from the
// code block being compiled, and wires up the variable event stream and
// minified graph used to reconstruct state at OSR exits.
SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
    : m_compileOkay(true)
    , m_jit(jit)
    , m_currentNode(0)
    , m_indexInBlock(0)
    , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
    , m_arguments(jit.codeBlock()->numParameters())
    , m_variables(jit.graph().m_localVars)
    , m_lastSetOperand(std::numeric_limits<int>::max()) // sentinel: no operand set yet
    , m_state(m_jit.graph())
    , m_stream(&jit.jitCode()->variableEventStream)
    , m_minifiedGraph(&jit.jitCode()->minifiedDFG)
    , m_isCheckingArgumentTypes(false)
{
}

// Out-of-line destructor; all members clean up via their own destructors.
SpeculativeJIT::~SpeculativeJIT()
{
}

60
// Inline-allocates a JSArray with the given structure and room for at least
// numElements. On the fast path, resultGPR receives the new array and
// storageGPR its butterfly; any fast-path failure falls back to
// operationNewArrayWithSize via a CallArrayAllocatorSlowPathGenerator.
void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
    ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
    
    GPRTemporary scratch(this);
    GPRTemporary scratch2(this);
    GPRReg scratchGPR = scratch.gpr();
    GPRReg scratch2GPR = scratch2.gpr();
    
    // Never allocate a vector smaller than the baseline, even for tiny arrays.
    unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
    
    JITCompiler::JumpList slowCases;
    
    // Allocate indexing header + vector as one chunk, then rewind the pointer
    // so storageGPR points at the butterfly's element base.
    slowCases.append(
        emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
    m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
    emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
    
    // Initialize the butterfly's public and vector lengths.
    m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
    m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
    
    // Double arrays fill unused slots with QNaN (the hole representation for
    // double indexing shape) rather than leaving garbage.
    if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
#if USE(JSVALUE64)
        m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
        for (unsigned i = numElements; i < vectorLength; ++i)
            m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
#else
        EncodedValueDescriptor value;
        value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
        for (unsigned i = numElements; i < vectorLength; ++i) {
            m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
            m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
        }
#endif
    }
    
    // I want a slow path that also loads out the storage pointer, and that's
    // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
    // of work for a very small piece of functionality. :-/
    addSlowPathGenerator(adoptPtr(
        new CallArrayAllocatorSlowPathGenerator(
            slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
            structure, numElements)));
}

105
// Records a backward OSR exit taken when jumpToFail fires: execution bails
// to the baseline JIT using the machine state as of the current node.
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

114 115 116 117 118 119
// JumpList overload: records one backward OSR exit shared by all the jumps
// in jumpsToFail.
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    m_jit.appendExitInfo(jumpsToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}

// Direction-aware speculation check: records a backward OSR exit, then
// rewrites it as a forward exit if we are currently speculating forward.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
    if (!m_compileOkay)
        return;

    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);

    bool wantsForwardExit = m_speculationDirection == ForwardSpeculation;
    if (wantsForwardExit)
        convertLastOSRExitToForward();
}

132 133
// Edge overload: delegates to the Node* overload on the edge's node.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    Node* node = nodeUse.node();
    speculationCheck(kind, jsValueSource, node, jumpToFail);
}

138
// Records an OSR exit whose triggering jump(s) will be linked later.
// Returns a placeholder that identifies the exit by index so the caller can
// attach jumps to it; returns an empty placeholder if compilation failed.
OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return OSRExitJumpPlaceholder();
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned index = m_jit.jitCode()->osrExit.size();
    m_jit.appendExitInfo();
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
    return OSRExitJumpPlaceholder(index);
}

149
// Edge overload: delegates to the Node* placeholder overload.
OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    Node* node = nodeUse.node();
    return backwardSpeculationCheck(kind, jsValueSource, node);
}

155
// Direction-aware speculation check for a list of failure jumps.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
    if (!m_compileOkay)
        return;

    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);

    bool wantsForwardExit = m_speculationDirection == ForwardSpeculation;
    if (wantsForwardExit)
        convertLastOSRExitToForward();
}

164
// Edge overload: delegates to the Node* JumpList overload.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    Node* node = nodeUse.node();
    speculationCheck(kind, jsValueSource, node, jumpsToFail);
}

170
// Like the plain backward check, but also registers a SpeculationRecovery
// that undoes speculative mutations of machine state before the exit's
// value reconstruction runs.
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    unsigned recoveryIndex = m_jit.jitCode()->appendSpeculationRecovery(recovery);
    m_jit.appendExitInfo(jumpToFail);
    m_jit.jitCode()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), recoveryIndex));
}

180
// Edge overload: delegates to the Node* recovery overload.
void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    Node* node = nodeUse.node();
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
}

186
// Direction-aware speculation check with a SpeculationRecovery attached.
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    if (!m_compileOkay)
        return;

    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);

    bool wantsForwardExit = m_speculationDirection == ForwardSpeculation;
    if (wantsForwardExit)
        convertLastOSRExitToForward();
}

195
// Edge overload: delegates to the Node* recovery overload. (Intentionally
// no ASSERT here: the delegate performs the direction handling itself.)
void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
    Node* node = edge.node();
    speculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
}

200
// Creates an OSR exit that fires via a jump-replacement watchpoint rather
// than an inline branch, and returns the watchpoint so the caller can
// register it. Returns 0 if compilation has already failed.
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
    if (!m_compileOkay)
        return 0;
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    // No inline jumps: the exit will be entered by patching the code at the
    // watchpoint label.
    m_jit.appendExitInfo(JITCompiler::JumpList());
    OSRExit& exit = m_jit.jitCode()->osrExit[
        m_jit.jitCode()->appendOSRExit(OSRExit(
            kind, jsValueSource,
            m_jit.graph().methodOfGettingAValueProfileFor(node),
            this, m_stream->size()))];
    exit.m_watchpointIndex = m_jit.jitCode()->appendWatchpoint(
        JumpReplacementWatchpoint(m_jit.watchpointLabel()));
    if (m_speculationDirection == ForwardSpeculation)
        convertLastOSRExitToForward();
    return &m_jit.jitCode()->watchpoints[exit.m_watchpointIndex];
}

// Convenience overload: a watchpoint exit with no value source and no node.
JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
    JSValueSource noValue;
    return speculationWatchpoint(kind, noValue, 0);
}

223
// Rewrites the most recently appended OSR exit to resume after the current
// node instead of before it, using valueRecovery to reconstruct the value
// the node was producing.
void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
{
    m_jit.jitCode()->lastOSRExit().convertToForward(
        m_jit.graph().m_blocks[m_block].get(), m_currentNode, m_indexInBlock, valueRecovery);
}

229
// Emits a speculation check whose exit resumes past the current node,
// reconstructing the node's result from valueRecovery.
void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    // Record as a backward exit, then rewrite it as forward.
    backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

236
// JumpList variant of the forward-exiting speculation check.
void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
    convertLastOSRExitToForward(valueRecovery);
}

243
// Gives up speculative compilation at this point: emits an unconditional
// OSR exit and marks the remainder of this code path as dead so no further
// code is generated for it.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("SpeculativeJIT was terminated.\n");
#endif
    if (!m_compileOkay)
        return;
    // Unconditional jump to the exit.
    speculationCheck(kind, jsValueRegs, node, m_jit.jump());
    m_compileOkay = false;
}

// Edge overload: delegates to the Node* overload.
void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
    ASSERT(m_isCheckingArgumentTypes || m_canExit);
    Node* node = nodeUse.node();
    terminateSpeculativeExecution(kind, jsValueRegs, node);
}

261
// Emits a backward type check: narrows the abstract state of edge to
// typesPassedThrough and records a BadType exit taken via jumpToFail.
// Must only be called when the check is actually necessary.
void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    ASSERT(needsTypeCheck(edge, typesPassedThrough));
    m_state.filter(edge, typesPassedThrough);
    backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
}

268 269
// Direction-aware type check: performs the backward check, then converts
// the recorded exit to a forward exit when speculating forward.
void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);

    bool wantsForwardExit = m_speculationDirection == ForwardSpeculation;
    if (wantsForwardExit)
        convertLastOSRExitToForward();
}

// Forward-exiting type check: the recorded exit resumes after the current
// node, reconstructing its value from valueRecovery.
void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
    backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
    convertLastOSRExitToForward(valueRecovery);
}

281 282
// Defers a slow path to be emitted after the main (fast) path; ownership of
// the generator transfers to this SpeculativeJIT.
void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
{
    m_slowPathGenerators.append(slowPathGenerator);
}

// Emits all deferred slow paths. Note: generating one slow path may append
// more, so re-read size() every iteration rather than caching it.
void SpeculativeJIT::runSlowPathGenerators()
{
#if DFG_ENABLE(DEBUG_VERBOSE)
    // size() returns size_t; "%lu" expects unsigned long, which mismatches
    // on LLP64 targets (e.g. 64-bit Windows). Cast explicitly.
    dataLogF("Running %lu slow path generators.\n", static_cast<unsigned long>(m_slowPathGenerators.size()));
#endif
    for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
        m_slowPathGenerators[i]->generate(this);
}

295 296 297 298 299 300 301 302 303 304 305
// On Windows we need to wrap fmod; on other platforms we can call it directly.
// On ARMv7 we assert that all function pointers have the low bit set (point to thumb code).
#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
// NOTE(review): DFG_OPERATION presumably pins the calling convention /
// annotation the JIT expects for callable operations — confirm against
// its definition in DFGOperations.h.
static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
    return fmod(x, y);
}
#else
#define fmodAsDFGOperation fmod
#endif

306 307 308 309 310 311 312 313
// Resets all value-tracking state: forgets every GenerationInfo entry and
// returns both register banks to their freshly-constructed state.
void SpeculativeJIT::clearGenerationInfo()
{
    for (unsigned index = 0; index < m_generationInfo.size(); ++index)
        m_generationInfo[index] = GenerationInfo();
    m_gprs = RegisterBank<GPRInfo>();
    m_fprs = RegisterBank<FPRInfo>();
}

314 315 316
// Computes how to save (spill) and later restore (fill) the value currently
// held in a GPR so the register can be handed to a call without disturbing
// the register allocator's bookkeeping. Constants are never stored: they
// are rematerialized on fill.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    DataFormat registerFormat = info.registerFormat();
    ASSERT(registerFormat != DataFormatNone);
    ASSERT(registerFormat != DataFormatDouble);
        
    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    // Decide the spill action. Values already backed by the stack (or
    // rematerializable) need no store.
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (registerFormat == DataFormatInteger)
            spillAction = Store32Payload;
        else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
            spillAction = StorePtr;
        else {
            ASSERT(registerFormat & DataFormatJS);
            spillAction = Store64;
        }
#elif USE(JSVALUE32_64)
        if (registerFormat & DataFormatJS) {
            // A boxed value spans two GPRs; spill only the half that lives
            // in this register.
            ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
            spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
        } else {
            ASSERT(info.gpr() == source);
            spillAction = Store32Payload;
        }
#endif
    }
        
    // Decide the fill action, by register format.
    if (registerFormat == DataFormatInteger) {
        ASSERT(info.gpr() == source);
        ASSERT(isJSInteger(info.registerFormat()));
        if (node->hasConstant()) {
            ASSERT(isInt32Constant(node));
            fillAction = SetInt32Constant;
        } else
            fillAction = Load32Payload;
    } else if (registerFormat == DataFormatBoolean) {
#if USE(JSVALUE64)
        // On 64-bit, booleans are never held in DataFormatBoolean registers.
        RELEASE_ASSERT_NOT_REACHED();
        fillAction = DoNothingForFill;
#elif USE(JSVALUE32_64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            ASSERT(isBooleanConstant(node));
            fillAction = SetBooleanConstant;
        } else
            fillAction = Load32Payload;
#endif
    } else if (registerFormat == DataFormatCell) {
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            JSValue value = valueOfJSConstant(node);
            ASSERT_UNUSED(value, value.isCell());
            fillAction = SetCellConstant;
        } else {
#if USE(JSVALUE64)
            fillAction = LoadPtr;
#else
            fillAction = Load32Payload;
#endif
        }
    } else if (registerFormat == DataFormatStorage) {
        ASSERT(info.gpr() == source);
        fillAction = LoadPtr;
    } else {
        // Remaining formats are the boxed JSValue family.
        ASSERT(registerFormat & DataFormatJS);
#if USE(JSVALUE64)
        ASSERT(info.gpr() == source);
        if (node->hasConstant()) {
            if (valueOfJSConstant(node).isCell())
                fillAction = SetTrustedJSConstant;
            else
                fillAction = SetJSConstant;
        } else if (info.spillFormat() == DataFormatInteger) {
            // Spilled as a raw int32; rebox while loading.
            ASSERT(registerFormat == DataFormatJSInteger);
            fillAction = Load32PayloadBoxInt;
        } else if (info.spillFormat() == DataFormatDouble) {
            // Spilled as a raw double; rebox while loading.
            ASSERT(registerFormat == DataFormatJSDouble);
            fillAction = LoadDoubleBoxDouble;
        } else
            fillAction = Load64;
#else
        ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
        if (node->hasConstant())
            fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
        else if (info.payloadGPR() == source)
            fillAction = Load32Payload;
        else { // Fill the Tag
            switch (info.spillFormat()) {
            case DataFormatInteger:
                ASSERT(registerFormat == DataFormatJSInteger);
                fillAction = SetInt32Tag;
                break;
            case DataFormatCell:
                ASSERT(registerFormat == DataFormatJSCell);
                fillAction = SetCellTag;
                break;
            case DataFormatBoolean:
                ASSERT(registerFormat == DataFormatJSBoolean);
                fillAction = SetBooleanTag;
                break;
            default:
                fillAction = Load32Tag;
                break;
            }
        }
#endif
    }
        
    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
// Computes spill/fill actions for a value held in an FPR. Doubles spill as
// a full 8-byte store; number constants are rematerialized on fill instead
// of being stored.
SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
    GenerationInfo& info = m_generationInfo[spillMe];
    Node* node = info.node();
    ASSERT(info.registerFormat() == DataFormatDouble);

    SilentSpillAction spillAction;
    SilentFillAction fillAction;
        
    if (!info.needsSpill())
        spillAction = DoNothingForSpill;
    else {
        // Only non-constant, not-yet-spilled values ever need a store.
        ASSERT(!node->hasConstant());
        ASSERT(info.spillFormat() == DataFormatNone);
        ASSERT(info.fpr() == source);
        spillAction = StoreDouble;
    }
        
#if USE(JSVALUE64)
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
        // it was already spilled previously and not as a double, which means we need unboxing.
        ASSERT(info.spillFormat() & DataFormatJS);
        fillAction = LoadJSUnboxDouble;
    } else
        fillAction = LoadDouble;
#elif USE(JSVALUE32_64)
    ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
    if (node->hasConstant()) {
        ASSERT(isNumberConstant(node));
        fillAction = SetDoubleConstant;
    } else
        fillAction = LoadDouble;
#endif

    return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
    
// Performs the spill half of a SilentRegisterSavePlan: stores the register
// into the node's stack slot as dictated by the plan's spill action.
void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
    switch (plan.spillAction()) {
    case DoNothingForSpill:
        break;
    case Store32Tag:
        m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
        break;
    case Store32Payload:
        m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
        break;
    case StorePtr:
        m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#if USE(JSVALUE64)
    case Store64:
        m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
#endif
    case StoreDouble:
        m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
// Performs the fill half of a SilentRegisterSavePlan: reloads the value
// from the stack or rematerializes a constant into its register.
// canTrample is a scratch GPR that some 64-bit fill actions may clobber.
void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
#if USE(JSVALUE32_64)
    UNUSED_PARAM(canTrample);
#endif
    switch (plan.fillAction()) {
    case DoNothingForFill:
        break;
    case SetInt32Constant:
        m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
        break;
    case SetBooleanConstant:
        m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
        break;
    case SetCellConstant:
        m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case SetTrustedJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
        break;
    case SetJSConstant:
        m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
        break;
    case SetDoubleConstant:
        // Materialize the double's bit pattern in the scratch GPR, then
        // transfer it to the FPR.
        m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
        m_jit.move64ToDouble(canTrample, plan.fpr());
        break;
    case Load32PayloadBoxInt:
        // Spilled as a raw int32: load and rebox with the number tag.
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadDoubleBoxDouble:
        // Spilled as a raw double: load and rebox into JSValue encoding.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
        break;
    case LoadJSUnboxDouble:
        // Spilled as a boxed JSValue: load into the scratch GPR and unbox
        // into the FPR.
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
        unboxDouble(canTrample, plan.fpr());
        break;
#else
    case SetJSConstantTag:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
        break;
    case SetJSConstantPayload:
        m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
        break;
    case SetInt32Tag:
        m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
        break;
    case SetCellTag:
        m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
        break;
    case SetBooleanTag:
        m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
        break;
    case SetDoubleConstant:
        m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
        break;
#endif
    case Load32Tag:
        m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case Load32Payload:
        m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
        break;
    case LoadPtr:
        m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#if USE(JSVALUE64)
    case Load64:
        m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
        break;
#endif
    case LoadDouble:
        m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
    }
}
    
582
// Maps a typed-array ArrayMode to the VM's descriptor for that view type.
// Returns 0 for modes that are not typed arrays.
const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
{
    switch (arrayMode.type()) {
    case Array::Int8Array:
        return &m_jit.vm()->int8ArrayDescriptor();
    case Array::Int16Array:
        return &m_jit.vm()->int16ArrayDescriptor();
    case Array::Int32Array:
        return &m_jit.vm()->int32ArrayDescriptor();
    case Array::Uint8Array:
        return &m_jit.vm()->uint8ArrayDescriptor();
    case Array::Uint8ClampedArray:
        return &m_jit.vm()->uint8ClampedArrayDescriptor();
    case Array::Uint16Array:
        return &m_jit.vm()->uint16ArrayDescriptor();
    case Array::Uint32Array:
        return &m_jit.vm()->uint32ArrayDescriptor();
    case Array::Float32Array:
        return &m_jit.vm()->float32ArrayDescriptor();
    case Array::Float64Array:
        return &m_jit.vm()->float64ArrayDescriptor();
    default:
        return 0;
    }
}

608
// Emits the indexing-type test for a single expected shape. tempGPR must
// hold the object's indexing type byte and is clobbered. Returns the jump
// taken when the object does NOT have the wanted shape.
JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
{
    switch (arrayMode.arrayClass()) {
    case Array::OriginalArray: {
        // Original-array modes should never reach this dynamic check.
        CRASH();
        JITCompiler::Jump result; // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
        return result;
    }
        
    case Array::Array:
        // Must both be an array and have the wanted shape.
        m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
        return m_jit.branch32(
            MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
        
    default:
        // Shape only; the IsArray bit is irrelevant.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
    }
}

628
// Emits the indexing-type tests for the given array mode. tempGPR must hold
// the object's indexing type byte and is clobbered. Returns the jumps taken
// when the object does NOT match the mode.
JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
{
    JITCompiler::JumpList result;
    
    switch (arrayMode.type()) {
    case Array::Int32:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);

    case Array::Double:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);

    case Array::Contiguous:
        return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);

    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
        
        if (arrayMode.isJSArray()) {
            if (arrayMode.isSlowPut()) {
                // Must be an array whose shape lies in the range
                // [ArrayStorageShape, SlowPutArrayStorageShape]; the unsigned
                // Above comparison after the subtraction does the range test.
                result.append(
                    m_jit.branchTest32(
                        MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
                m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
                m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
                result.append(
                    m_jit.branch32(
                        MacroAssembler::Above, tempGPR,
                        TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
                break;
            }
            // Must be an array with exactly ArrayStorageShape.
            m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
            result.append(
                m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
            break;
        }
        // Non-array object: only the shape bits matter.
        m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
        if (arrayMode.isSlowPut()) {
            m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
            result.append(
                m_jit.branch32(
                    MacroAssembler::Above, tempGPR,
                    TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
            break;
        }
        result.append(
            m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
        break;
    }
    default:
        CRASH();
        break;
    }
    
    return result;
}

685
// Emits the runtime check that node's base object matches the array mode
// the node was planned with. JS array shapes are checked via the structure's
// indexing type; strings, arguments, and typed arrays via ClassInfo. This
// never converts the object — see arrayify for modes that do.
void SpeculativeJIT::checkArray(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());
    ASSERT(!node->arrayMode().doesConversion());
    
    SpeculateCellOperand base(this, node->child1());
    GPRReg baseReg = base.gpr();
    
    // Non-null only for typed-array modes; used below for the ClassInfo.
    const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());
    
    // If abstract interpretation already proved the mode, emit nothing.
    if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
        noResult(m_currentNode);
        return;
    }
    
    const ClassInfo* expectedClassInfo = 0;
    
    switch (node->arrayMode().type()) {
    case Array::String:
        expectedClassInfo = &JSString::s_info;
        break;
    case Array::Int32:
    case Array::Double:
    case Array::Contiguous:
    case Array::ArrayStorage:
    case Array::SlowPutArrayStorage: {
        // JS array shapes: load the indexing type byte from the structure
        // and branch to an OSR exit on mismatch.
        GPRTemporary temp(this);
        GPRReg tempGPR = temp.gpr();
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
        m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
        speculationCheck(
            BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
            jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
        
        noResult(m_currentNode);
        return;
    }
    case Array::Arguments:
        expectedClassInfo = &Arguments::s_info;
        break;
    case Array::Int8Array:
    case Array::Int16Array:
    case Array::Int32Array:
    case Array::Uint8Array:
    case Array::Uint8ClampedArray:
    case Array::Uint16Array:
    case Array::Uint32Array:
    case Array::Float32Array:
    case Array::Float64Array:
        expectedClassInfo = result->m_classInfo;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    
    // Remaining cases: compare the structure's ClassInfo pointer.
    GPRTemporary temp(this);
    m_jit.loadPtr(
        MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
    speculationCheck(
        BadType, JSValueSource::unboxedCell(baseReg), node,
        m_jit.branchPtr(
            MacroAssembler::NotEqual,
            MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
            MacroAssembler::TrustedImmPtr(expectedClassInfo)));
    
    noResult(m_currentNode);
}

755
// Emit the fast-path check that the object at baseReg already has the array
// shape this node wants, falling back to an out-of-line ArrayifySlowPathGenerator
// that performs the actual conversion when it does not.
//
// node        - the Arrayify / ArrayifyToStructure node being compiled.
// baseReg     - GPR holding the (already cell-speculated) base object.
// propertyReg - GPR holding the index operand, or InvalidGPRReg if absent.
void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
    ASSERT(node->arrayMode().doesConversion());

    GPRTemporary scratch(this);
    GPRTemporary structureTemp;
    GPRReg scratchGPR = scratch.gpr();
    GPRReg structureGPR = InvalidGPRReg;

    const bool targetsExactStructure = node->op() == ArrayifyToStructure;

    // Only the general Arrayify path needs a second temporary to hold the
    // structure pointer; ArrayifyToStructure compares against memory directly.
    if (!targetsExactStructure) {
        GPRTemporary realStructure(this);
        structureTemp.adopt(realStructure);
        structureGPR = structureTemp.gpr();
    }

    // Fast path: if the object already has the wanted shape, skip conversion.
    MacroAssembler::JumpList slowPath;

    if (targetsExactStructure) {
        // One weak pointer compare against the expected structure suffices.
        slowPath.append(m_jit.branchWeakPtr(
            JITCompiler::NotEqual,
            JITCompiler::Address(baseReg, JSCell::structureOffset()),
            node->structure()));
    } else {
        // Load the structure, then its indexing type byte, and branch to the
        // slow path if the indexing type doesn't satisfy the array mode.
        m_jit.loadPtr(
            MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
        m_jit.load8(
            MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), scratchGPR);
        slowPath.append(jumpSlowForUnwantedArrayMode(scratchGPR, node->arrayMode()));
    }

    // The slow path generator re-checks and performs the conversion out of line.
    addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
        slowPath, this, node, baseReg, propertyReg, scratchGPR, structureGPR)));

    noResult(m_currentNode);
}

794
// Operand-materializing wrapper: speculates the base (and, when present, the
// index) into registers, then delegates to the three-argument arrayify above.
void SpeculativeJIT::arrayify(Node* node)
{
    ASSERT(node->arrayMode().isSpecific());

    SpeculateCellOperand base(this, node->child1());

    if (node->child2()) {
        // An index operand exists: speculate it as an integer so the slow
        // path can use it when converting (e.g. to size new storage).
        SpeculateIntegerOperand property(this, node->child2());
        arrayify(node, base.gpr(), property.gpr());
        return;
    }

    // No index operand; signal its absence with InvalidGPRReg.
    arrayify(node, base.gpr(), InvalidGPRReg);
}

810
// Materialize the value of `edge` as a storage (butterfly/backing-store)
// pointer in a GPR and return that register, locked for the caller's use.
// Falls back to filling the edge as a cell when it is not already known to
// hold a storage pointer.
GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
    VirtualRegister reg = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[reg];

    // Already live in a register as a storage pointer: just lock and reuse it.
    if (info.registerFormat() == DataFormatStorage) {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        return gpr;
    }

    // Dead in registers but spilled on the stack as a storage pointer:
    // allocate a GPR and reload it.
    if (info.registerFormat() == DataFormatNone
        && info.spillFormat() == DataFormatStorage) {
        GPRReg gpr = allocate();
        m_gprs.retain(gpr, reg, SpillOrderSpilled);
        m_jit.loadPtr(JITCompiler::addressFor(reg), gpr);
        info.fillStorage(*m_stream, gpr);
        return gpr;
    }

    // Anything else must be a cell; fill it as a cell and return that pointer.
    return fillSpeculateCell(edge);
}

840
void SpeculativeJIT::useChildren(Node* node)
841
{
842 843
    if (node->flags() & NodeHasVarArgs) {
        for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
844 845 846
            if (!!m_jit.graph().m_varArgChildren[childIdx])
                use(m_jit.graph().m_varArgChildren[childIdx]);
        }
847
    } else {
848
        Edge child1 = node->child1();
849
        if (!child1) {
850
            ASSERT(!node->child2() && !node->child3());
851 852 853 854
            return;
        }
        use(child1);
        
855
        Edge child2 = node->child2();
856
        if (!child2) {
857
            ASSERT(!node->child3());
858 859 860 861
            return;
        }
        use(child2);