/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 * Copyright (C) 2011 Intel Corporation. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGSpeculativeJIT.h"

#if ENABLE(DFG_JIT)
31

32
#include "ArrayPrototype.h"
33
#include "DFGCallArrayAllocatorSlowPathGenerator.h"
34
#include "DFGSlowPathGenerator.h"
35
#include "JSActivation.h"
36
#include "ObjectPrototype.h"
37
#include "Operations.h"
38

39
40
namespace JSC { namespace DFG {

41
42
#if USE(JSVALUE32_64)

43
// Fills the value named by |edge| into a GPR as a raw (untagged) int32 and
// returns that register. |returnFormat| is always set to DataFormatInteger.
// Callers must already know the operand is an integer (see the ASSERT below);
// this routine never emits type checks, only fills/moves.
GPRReg SpeculativeJIT::fillInteger(Edge edge, DataFormat& returnFormat)
{
    ASSERT(!needsTypeCheck(edge, SpecInt32));
    
    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    if (info.registerFormat() == DataFormatNone) {
        // Not currently in a register: materialize from a constant or reload
        // the payload word from the stack slot.
        GPRReg gpr = allocate();

        if (edge->hasConstant()) {
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
            if (isInt32Constant(edge.node()))
                m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
            else if (isNumberConstant(edge.node()))
                // A non-int32 number constant can never be filled as an integer.
                RELEASE_ASSERT_NOT_REACHED();
            else {
                ASSERT(isJSConstant(edge.node()));
                JSValue jsValue = valueOfJSConstant(edge.node());
                // Only the payload half is needed for an integer fill.
                m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr);
            }
        } else {
            // Spilled value: must already be an integer (possibly boxed as JS).
            ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger || info.spillFormat() == DataFormatInteger);
            m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
            m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
        }

        info.fillInteger(*m_stream, gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }

    switch (info.registerFormat()) {
    case DataFormatNone:
        // Should have filled, above.
    case DataFormatJSDouble:
    case DataFormatDouble:
    case DataFormatJS:
    case DataFormatCell:
    case DataFormatJSCell:
    case DataFormatBoolean:
    case DataFormatJSBoolean:
    case DataFormatStorage:
        // Should only be calling this function if we know this operand to be integer.
        RELEASE_ASSERT_NOT_REACHED();

    case DataFormatJSInteger: {
        // Boxed int32 in a tag/payload register pair: drop the tag register and
        // re-register the payload register as holding a raw integer.
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_jit.jitAssertIsJSInt32(tagGPR); // debug-build sanity check on the tag
        m_gprs.unlock(tagGPR);
        m_gprs.lock(payloadGPR);
        m_gprs.release(tagGPR);
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
        info.fillInteger(*m_stream, payloadGPR);
        returnFormat = DataFormatInteger;
        return payloadGPR;
    }

    case DataFormatInteger: {
        // Already a raw int32; just lock it for the caller.
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        m_jit.jitAssertIsInt32(gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return InvalidGPRReg;
    }
}

118
// Fills the value named by |edge| into a tag/payload GPR pair (the 32-bit
// JSValue representation). Always returns true in this implementation; the
// |fpr| out-param is currently unused (see FIXME).
bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
{
    // FIXME: For double we could fill with a FPR.
    UNUSED_PARAM(fpr);

    VirtualRegister virtualRegister = edge->virtualRegister();
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {

        if (edge->hasConstant()) {
            // Materialize both halves of the constant into fresh registers.
            tagGPR = allocate();
            payloadGPR = allocate();
            m_jit.move(Imm32(valueOfJSConstant(edge.node()).tag()), tagGPR);
            m_jit.move(Imm32(valueOfJSConstant(edge.node()).payload()), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, isInt32Constant(edge.node()) ? DataFormatJSInteger : DataFormatJS);
        } else {
            // Reload from the stack slot. If the spill format records a raw
            // (untagged) value, synthesize the matching tag as an immediate
            // instead of loading it.
            DataFormat spillFormat = info.spillFormat();
            ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
            tagGPR = allocate();
            payloadGPR = allocate();
            switch (spillFormat) {
            case DataFormatInteger:
                m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
                spillFormat = DataFormatJSInteger; // This will be used as the new register format.
                break;
            case DataFormatCell:
                m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
                spillFormat = DataFormatJSCell; // This will be used as the new register format.
                break;
            case DataFormatBoolean:
                m_jit.move(TrustedImm32(JSValue::BooleanTag), tagGPR);
                spillFormat = DataFormatJSBoolean; // This will be used as the new register format.
                break;
            default:
                // Tag is stored in the slot; load it.
                m_jit.load32(JITCompiler::tagFor(virtualRegister), tagGPR);
                break;
            }
            m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
            m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
            m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
            // A spilled JSDouble is just a generic JSValue once in registers.
            info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
        }

        return true;
    }

    case DataFormatInteger:
    case DataFormatCell:
    case DataFormatBoolean: {
        // Raw (untagged) value already in a GPR: promote to a boxed pair by
        // materializing the appropriate tag in a second register.
        GPRReg gpr = info.gpr();
        // If the register has already been locked we need to take a copy.
        if (m_gprs.isLocked(gpr)) {
            payloadGPR = allocate();
            m_jit.move(gpr, payloadGPR);
        } else {
            payloadGPR = gpr;
            m_gprs.lock(gpr);
        }
        tagGPR = allocate();
        uint32_t tag = JSValue::EmptyValueTag;
        DataFormat fillFormat = DataFormatJS;
        switch (info.registerFormat()) {
        case DataFormatInteger:
            tag = JSValue::Int32Tag;
            fillFormat = DataFormatJSInteger;
            break;
        case DataFormatCell:
            tag = JSValue::CellTag;
            fillFormat = DataFormatJSCell;
            break;
        case DataFormatBoolean:
            tag = JSValue::BooleanTag;
            fillFormat = DataFormatJSBoolean;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.move(TrustedImm32(tag), tagGPR);
        m_gprs.release(gpr);
        m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
        info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat);
        return true;
    }

    case DataFormatJSDouble:
    case DataFormatDouble: {
        // Box the double out of its FPR into a fresh tag/payload pair and
        // release the FPR — the value now lives in GPRs as DataFormatJS.
        FPRReg oldFPR = info.fpr();
        m_fprs.lock(oldFPR);
        tagGPR = allocate();
        payloadGPR = allocate();
        boxDouble(oldFPR, tagGPR, payloadGPR);
        m_fprs.unlock(oldFPR);
        m_fprs.release(oldFPR);
        m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
        info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
        return true;
    }

    case DataFormatJS:
    case DataFormatJSInteger:
    case DataFormatJSCell:
    case DataFormatJSBoolean: {
        // Already boxed in a register pair; just lock both halves.
        tagGPR = info.tagGPR();
        payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
        return true;
    }
        
    case DataFormatStorage:
        // this type currently never occurs
        RELEASE_ASSERT_NOT_REACHED();

    default:
        RELEASE_ASSERT_NOT_REACHED();
        return true;
    }
}

244
// Converts an int32 that is semantically an unsigned 32-bit quantity into a
// JS number without speculation. Values with the sign bit clear are emitted
// directly as boxed int32s; values with the sign bit set are converted to a
// double and corrected by adding 2^32 before boxing.
void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node* node)
{
    IntegerOperand op1(this, node->child1());
    FPRTemporary doubleScratch(this);
    GPRTemporary resultTag(this, op1);
    GPRTemporary resultPayload(this);
        
    // Sign bit clear means the int32 payload is already the correct number.
    JITCompiler::Jump isNonNegative = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
        
    // Negative as int32: reinterpret as unsigned by converting to double and
    // adding 2^32 (loaded from a static constant via the payload register,
    // which is free to reuse as a scratch here).
    m_jit.convertInt32ToDouble(op1.gpr(), doubleScratch.fpr());
    m_jit.move(JITCompiler::TrustedImmPtr(&AssemblyHelpers::twoToThe32), resultPayload.gpr()); // reuse resultPayload register here.
    m_jit.addDouble(JITCompiler::Address(resultPayload.gpr(), 0), doubleScratch.fpr());
        
    boxDouble(doubleScratch.fpr(), resultTag.gpr(), resultPayload.gpr());
        
    JITCompiler::Jump doubleDone = m_jit.jump();
        
    // Fast path: box the value as an int32 tag/payload pair.
    isNonNegative.link(&m_jit);
        
    m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTag.gpr());
    m_jit.move(op1.gpr(), resultPayload.gpr());
        
    doubleDone.link(&m_jit);

    jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
}

271
// Emits an inline-cached property load (get_by_id). Lays down the patchable
// structure check, butterfly load, and compact tag/payload loads, then wires a
// slow-path call to operationGetByIdOptimize and records the whole shape in a
// PropertyAccessRecord so the IC can be repatched later.
// |baseTagGPROrNone| == InvalidGPRReg means the base is statically known to be
// a cell, so the slow path passes a constant CellTag instead of a register.
void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
    // Patchable structure check: the immediate starts as unusedPointer and is
    // repatched to the expected Structure* when the IC is primed.
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
    
    // Load the butterfly (out-of-line property storage) pointer; convertible so
    // the IC can turn it into inline-storage addressing when repatching.
    JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), resultPayloadGPR);
    // Tag must be loaded before payload: the payload load clobbers the base
    // register (resultPayloadGPR) that both loads address through.
    JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
    JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
    
    JITCompiler::Label doneLabel = m_jit.label();

    // Build the slow path: structure-check failure (plus any caller-supplied
    // extra slow case) falls through to the optimizing C++ getter.
    OwnPtr<SlowPathGenerator> slowPath;
    if (baseTagGPROrNone == InvalidGPRReg) {
        if (!slowPathTarget.isSet()) {
            slowPath = slowPathCall(
                structureCheck.m_jump, this, operationGetByIdOptimize,
                JSValueRegs(resultTagGPR, resultPayloadGPR),
                static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
                identifier(identifierNumber));
        } else {
            JITCompiler::JumpList slowCases;
            slowCases.append(structureCheck.m_jump);
            slowCases.append(slowPathTarget);
            slowPath = slowPathCall(
                slowCases, this, operationGetByIdOptimize,
                JSValueRegs(resultTagGPR, resultPayloadGPR),
                static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
                identifier(identifierNumber));
        }
    } else {
        if (!slowPathTarget.isSet()) {
            slowPath = slowPathCall(
                structureCheck.m_jump, this, operationGetByIdOptimize,
                JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR,
                identifier(identifierNumber));
        } else {
            JITCompiler::JumpList slowCases;
            slowCases.append(structureCheck.m_jump);
            slowCases.append(slowPathTarget);
            slowPath = slowPathCall(
                slowCases, this, operationGetByIdOptimize,
                JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR,
                identifier(identifierNumber));
        }
    }
    // Record every label/register the repatching code needs.
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
            tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel,
            safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR),
            safeCast<int8_t>(resultPayloadGPR), usedRegisters(),
            spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
    addSlowPathGenerator(slowPath.release());
}

326
// Emits an inline-cached property store (put_by_id): patchable structure
// check, write barrier, butterfly load into |scratchGPR|, then patchable
// tag/payload stores. The slow path dispatches to one of four optimizing
// C++ helpers chosen by strict mode and direct-vs-normal put kind.
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
{
    // Patchable structure check, primed with unusedPointer until the IC is set.
    JITCompiler::DataLabelPtr structureToCompare;
    JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));

    // GC write barrier for storing |value| into |base|.
    writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);

    // Load property storage, then store both halves with patchable offsets.
    JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), scratchGPR);
    JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

    JITCompiler::Label doneLabel = m_jit.label();
    // Pick the slow-path helper matching the put's semantics.
    V_DFGOperation_EJCI optimizedCall;
    if (m_jit.strictModeFor(m_currentNode->codeOrigin)) {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectStrictOptimize;
        else
            optimizedCall = operationPutByIdStrictOptimize;
    } else {
        if (putKind == Direct)
            optimizedCall = operationPutByIdDirectNonStrictOptimize;
        else
            optimizedCall = operationPutByIdNonStrictOptimize;
    }
    OwnPtr<SlowPathGenerator> slowPath;
    if (!slowPathTarget.isSet()) {
        slowPath = slowPathCall(
            structureCheck.m_jump, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
            basePayloadGPR, identifier(identifierNumber));
    } else {
        JITCompiler::JumpList slowCases;
        slowCases.append(structureCheck.m_jump);
        slowCases.append(slowPathTarget);
        slowPath = slowPathCall(
            slowCases, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
            basePayloadGPR, identifier(identifierNumber));
    }
    // Debug-only sanity checks: the scratch must not be a live operand, while
    // base and both value halves must still be in use.
    RegisterSet currentlyUsedRegisters = usedRegisters();
    currentlyUsedRegisters.clear(scratchGPR);
    ASSERT(currentlyUsedRegisters.get(basePayloadGPR));
    ASSERT(currentlyUsedRegisters.get(valueTagGPR));
    ASSERT(currentlyUsedRegisters.get(valuePayloadGPR));
    // Record all repatching metadata. The store labels are narrowed to
    // DataLabelCompact to fit the shared PropertyAccessRecord layout.
    m_jit.addPropertyAccess(
        PropertyAccessRecord(
            codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
            JITCompiler::DataLabelCompact(tagStoreWithPatch.label()),
            JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()),
            slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR),
            safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR),
            usedRegisters()));
    addSlowPathGenerator(slowPath.release());
}

379
// Emits a boolean-producing "== null" / "!= null" comparison (the
// non-branch-fused form). |invert| selects !=. The fast structure of the code
// depends on whether the masquerades-as-undefined watchpoint is still intact:
// if so, cells can never compare equal to null and we just plant a watchpoint;
// otherwise we must inspect the cell's type-info flags and global object.
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
{
    JSValueOperand arg(this, operand);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();

    GPRTemporary resultPayload(this, arg, false);
    GPRReg resultPayloadGPR = resultPayload.gpr();

    JITCompiler::Jump notCell;
    JITCompiler::Jump notMasqueradesAsUndefined;   
    if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        // Watchpoint valid: no object masquerades as undefined, so any cell
        // compares unequal to null. Register the watchpoint so this code is
        // invalidated if that ever changes.
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));

        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();
    } else {
        // Watchpoint fired: must check the MasqueradesAsUndefined flag, and if
        // set, compare the cell's global object against ours.
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));

        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR);
        JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined));
        
        // Ordinary cell: never equal to null.
        m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
        notMasqueradesAsUndefined = m_jit.jump();

        // Masquerading cell: it equals null/undefined only when observed from
        // its own global object.
        isMasqueradesAsUndefined.link(&m_jit);
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);
    }
 
    if (!isKnownCell(operand.node())) {
        // Non-cell path: only null and undefined compare equal to null.
        JITCompiler::Jump done = m_jit.jump();
        
        notCell.link(&m_jit);
        // null or undefined?
        // Exploits the tag encoding: UndefinedTag | 1 == NullTag, so OR-ing the
        // tag with 1 collapses both cases into a single compare against NullTag.
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
        m_jit.move(argTagGPR, resultPayloadGPR);
        m_jit.or32(TrustedImm32(1), resultPayloadGPR);
        m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);

        done.link(&m_jit);
    }
    
    notMasqueradesAsUndefined.link(&m_jit);
 
    booleanResult(resultPayloadGPR, m_currentNode);
}

436
// Branch-fused form of the null/undefined comparison: instead of producing a
// boolean, jumps directly to the fused branch's taken/notTaken blocks.
// Mirrors nonSpeculativeNonPeepholeCompareNull's watchpoint-dependent logic.
void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();
    
    // If the taken block is the fall-through block, flip the sense of the
    // comparison and swap the targets so we can fall through to it.
    if (taken == nextBlock()) {
        invert = !invert;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg(this, operand);
    GPRReg argTagGPR = arg.tagGPR();
    GPRReg argPayloadGPR = arg.payloadGPR();
    
    GPRTemporary result(this, arg);
    GPRReg resultGPR = result.gpr();

    JITCompiler::Jump notCell;

    if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
        // Watchpoint valid: cells never equal null; plant the watchpoint and
        // take the "not equal to null" side unconditionally for cells.
        if (!isKnownCell(operand.node()))
            notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));

        m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
        jump(invert ? taken : notTaken, ForceJump);
    } else {
        // Watchpoint fired: check MasqueradesAsUndefined and, if set, compare
        // the cell's global object with ours to decide the branch.
        GPRTemporary localGlobalObject(this);
        GPRTemporary remoteGlobalObject(this);

        if (!isKnownCell(operand.node()))
            notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));

        m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
        branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken);
   
        GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
        GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
        m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR);
        m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
        branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
    }
 
    if (!isKnownCell(operand.node())) {
        // Cell path finished without branching to taken: go to notTaken before
        // handling the non-cell case.
        jump(notTaken, ForceJump);
        
        notCell.link(&m_jit);
        // null or undefined?
        // Same tag trick as the non-peephole version: OR with 1 maps both
        // UndefinedTag and NullTag onto NullTag.
        COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
        m_jit.move(argTagGPR, resultGPR);
        m_jit.or32(TrustedImm32(1), resultGPR);
        branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
    }
    
    jump(notTaken);
}

494
// Compiles a null comparison, fusing it with an immediately following Branch
// node when the peephole detector finds one. Returns true when the compare
// was fused (the branch is consumed here too), false when a plain
// boolean-producing compare was emitted instead.
bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
{
    unsigned branchIdx = detectPeepHoleBranch();
    if (branchIdx == UINT_MAX) {
        // No fusable branch follows; emit the boolean-producing form.
        nonSpeculativeNonPeepholeCompareNull(operand, invert);
        return false;
    }

    Node* fusedBranch = m_jit.graph().m_blocks[m_block]->at(branchIdx);

    // Fusion is only legal when the branch is the compare's sole user.
    ASSERT(node->adjustedRefCount() == 1);

    nonSpeculativePeepholeBranchNull(operand, fusedBranch, invert);

    // Consume the compare's children and skip ahead past the fused branch.
    use(node->child1());
    use(node->child2());
    m_indexInBlock = branchIdx;
    m_currentNode = fusedBranch;

    return true;
}

517
// Emits a relational comparison fused with a following Branch node. Integer
// operands take a fast inline compare; anything else (or a slow-path fall-in)
// calls |helperFunction| and branches on its boolean result.
void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        cond = JITCompiler::invert(cond);
        callResultCondition = JITCompiler::Zero;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }

    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    
    JITCompiler::JumpList slowPath;
    
    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        // At least one operand is definitely not an int32: skip the fast path
        // entirely and call the helper with both boxed values.
        GPRResult result(this);
        GPRReg resultGPR = result.gpr();

        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);

        branchTest32(callResultCondition, resultGPR, taken);
    } else {
        GPRTemporary result(this);
        GPRReg resultGPR = result.gpr();
    
        arg1.use();
        arg2.use();

        // Guard each operand that is not statically known to be an int32.
        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
    
        // Fast path: direct int32 payload compare.
        branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);
    
        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            // Slow path: silent spill around the helper call so register state
            // is preserved for the fall-through to notTaken.
            jump(notTaken, ForceJump);
    
            slowPath.link(&m_jit);
    
            silentSpillAllRegisters(resultGPR);
            callOperation(helperFunction, resultGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
            silentFillAllRegisters(resultGPR);
        
            branchTest32(callResultCondition, resultGPR, taken);
        }
    }

    jump(notTaken);
    
    // The fused branch ends the block: advance past all remaining nodes.
    m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1;
    m_currentNode = branchNode;
}

587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
// Slow-path generator for comparisons whose result must be boxed as a
// boolean: calls the comparison helper with both JSValue halves and masks the
// returned int down to 0/1 (the and32 with 1) so it is a valid boolean
// payload.
template<typename JumpType>
class CompareAndBoxBooleanSlowPathGenerator
    : public CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg> {
public:
    CompareAndBoxBooleanSlowPathGenerator(
        JumpType from, SpeculativeJIT* jit,
        S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
        GPRReg arg2Tag, GPRReg arg2Payload)
        : CallSlowPathGenerator<JumpType, S_DFGOperation_EJJ, GPRReg>(
            from, jit, function, NeedToSpill, result)
        , m_arg1Tag(arg1Tag)
        , m_arg1Payload(arg1Payload)
        , m_arg2Tag(arg2Tag)
        , m_arg2Payload(arg2Payload)
    {
    }
    
protected:
    // Emits the deferred slow path: spill, call, normalize result to 0/1, fill.
    virtual void generateInternal(SpeculativeJIT* jit)
    {
        this->setUp(jit);
        this->recordCall(
            jit->callOperation(
                this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag,
                m_arg2Payload));
        // Helper returns an int; keep only the low bit for a boolean payload.
        jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
        this->tearDown(jit);
    }
   
private:
    // Tag/payload register pairs for the two boxed operands, captured at
    // fast-path emission time.
    GPRReg m_arg1Tag;
    GPRReg m_arg1Payload;
    GPRReg m_arg2Tag;
    GPRReg m_arg2Payload;
};

623
// Emits a relational comparison that produces a boolean result (the
// non-branch-fused form). Int32 operands compare inline; other cases go
// through |helperFunction|, either directly or via a deferred
// CompareAndBoxBooleanSlowPathGenerator.
void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    
    JITCompiler::JumpList slowPath;
    
    if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
        // At least one operand is definitely not an int32: call the helper
        // directly with both boxed operands.
        GPRResult result(this);
        GPRReg resultPayloadGPR = result.gpr();
    
        arg1.use();
        arg2.use();

        flushRegisters();
        callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        
        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
    } else {
        GPRTemporary resultPayload(this, arg1, false);
        GPRReg resultPayloadGPR = resultPayload.gpr();

        arg1.use();
        arg2.use();
    
        // Guard each operand that is not statically known to be an int32.
        if (!isKnownInteger(node->child1().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
        if (!isKnownInteger(node->child2().node()))
            slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));

        // Fast path: direct int32 payload compare into the result register.
        m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
    
        if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
            // Deferred slow path calls the helper and masks the result to 0/1.
            addSlowPathGenerator(adoptPtr(
                new CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>(
                    slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
                    arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR)));
        }
        
        booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
    }
}

670
// Emits a strict-equality comparison fused with a following Branch node.
// When both operands are known cells, identical payload pointers prove strict
// equality; otherwise the C++ helper decides and we branch on its result.
void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
    BlockIndex taken = branchNode->takenBlockIndex();
    BlockIndex notTaken = branchNode->notTakenBlockIndex();

    // The branch instruction will branch to the taken block.
    // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
    if (taken == nextBlock()) {
        invert = !invert;
        BlockIndex tmp = taken;
        taken = notTaken;
        notTaken = tmp;
    }
    
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    
    GPRTemporary resultPayload(this, arg1, false);
    GPRReg resultPayloadGPR = resultPayload.gpr();
    
    arg1.use();
    arg2.use();
    
    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);
        
        // Different cells may still be strictly equal (e.g. equal strings);
        // defer to the cell-specialized helper.
        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);
        
        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
    } else {
        // FIXME: Add fast paths for twoCells, number etc.

        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);
        
        branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, resultPayloadGPR, taken);
    }
    
    jump(notTaken);
}

720
// Emits code for a non-peephole strict-equality comparison (===) whose result
// is materialized as a boolean value rather than fused into a branch.
// `invert` flips the sense of the comparison on the known-cells fast path;
// NOTE(review): `invert` is only applied on the equal-pointer fast path here —
// the slow-call results are masked with `& 1` but not visibly inverted. This
// matches the upstream source; presumably callers pass invert == false on the
// non-peephole path — confirm against nonSpeculativeStrictEq's call sites.
void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
    JSValueOperand arg1(this, node->child1());
    JSValueOperand arg2(this, node->child2());
    GPRReg arg1TagGPR = arg1.tagGPR();
    GPRReg arg1PayloadGPR = arg1.payloadGPR();
    GPRReg arg2TagGPR = arg2.tagGPR();
    GPRReg arg2PayloadGPR = arg2.payloadGPR();
    
    GPRTemporary resultPayload(this, arg1, false);
    GPRReg resultPayloadGPR = resultPayload.gpr();
    
    // Children are consumed explicitly; see UseChildrenCalledExplicitly below.
    arg1.use();
    arg2.use();
    
    if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
        // see if we get lucky: if the arguments are cells and they reference the same
        // cell, then they must be strictly equal.
        // FIXME: this should flush registers instead of silent spill/fill.
        JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
        
        // Same cell pointer: result is true (or false when inverted).
        m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
        JITCompiler::Jump done = m_jit.jump();

        notEqualCase.link(&m_jit);
        
        // Different cells: fall back to the runtime cell comparison.
        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEqCell, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);
        
        // Normalize the returned value to a 0/1 boolean payload.
        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
        
        done.link(&m_jit);
    } else {
        // FIXME: Add fast paths.

        // Fully generic case: call out to the runtime for the comparison.
        silentSpillAllRegisters(resultPayloadGPR);
        callOperation(operationCompareStrictEq, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
        silentFillAllRegisters(resultPayloadGPR);
        
        m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
    }

    booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}

766
// Emits a JS function call (or construct) for the JSVALUE32_64 value
// representation: stores the argument count, caller frame, callee, and
// arguments into the callee frame, then emits an inline fast path guarded by
// a patchable callee check, with a slow path that routes through the call
// link stub. The fast/slow call sites are registered via addJSCall for later
// linking/patching.
void SpeculativeJIT::emitCall(Node* node)
{
    if (node->op() != Call)
        ASSERT(node->op() == Construct);

    // For constructors, the this argument is not passed but we have to make space
    // for it.
    int dummyThisArgument = node->op() == Call ? 0 : 1;

    CallLinkInfo::CallType callType = node->op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;

    Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()];
    JSValueOperand callee(this, calleeEdge);
    GPRReg calleeTagGPR = callee.tagGPR();
    GPRReg calleePayloadGPR = callee.payloadGPR();
    use(calleeEdge);

    // The call instruction's first child is either the function (normal call) or the
    // receiver (method call). subsequent children are the arguments.
    int numPassedArgs = node->numChildren() - 1;

    // Populate the header slots of the callee's frame.
    m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount));
    m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(JSStack::CallerFrame));
    m_jit.store32(calleePayloadGPR, callFramePayloadSlot(JSStack::Callee));
    m_jit.store32(calleeTagGPR, callFrameTagSlot(JSStack::Callee));

    // Store each argument (tag and payload separately on 32-bit) into the
    // callee frame's argument slots.
    for (int i = 0; i < numPassedArgs; i++) {
        Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
        JSValueOperand arg(this, argEdge);
        GPRReg argTagGPR = arg.tagGPR();
        GPRReg argPayloadGPR = arg.payloadGPR();
        use(argEdge);

        m_jit.store32(argTagGPR, argumentTagSlot(i + dummyThisArgument));
        m_jit.store32(argPayloadGPR, argumentPayloadSlot(i + dummyThisArgument));
    }

    flushRegisters();

    GPRResult resultPayload(this);
    GPRResult2 resultTag(this);
    GPRReg resultPayloadGPR = resultPayload.gpr();
    GPRReg resultTagGPR = resultTag.gpr();

    JITCompiler::DataLabelPtr targetToCheck;
    JITCompiler::JumpList slowPath;

    CallBeginToken token;
    m_jit.beginCall(node->codeOrigin, token);
    
    // Move the frame pointer down to the callee's frame.
    m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
    
    // Fast path guard: callee must be a cell and match the patchable target.
    slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag)));
    slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
    // Install the callee's scope chain into the new frame.
    m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR);
    m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));

    CodeOrigin codeOrigin = node->codeOrigin;
    JITCompiler::Call fastCall = m_jit.nearCall();
    m_jit.notifyCall(fastCall, codeOrigin, token);

    JITCompiler::Jump done = m_jit.jump();

    slowPath.link(&m_jit);

    // Slow path: the call link stub expects the callee payload in nonArgGPR0
    // and the tag in nonArgGPR1; shuffle carefully in case the live registers
    // already alias the destinations.
    if (calleeTagGPR == GPRInfo::nonArgGPR0) {
        if (calleePayloadGPR == GPRInfo::nonArgGPR1)
            m_jit.swap(GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR0);
        else {
            m_jit.move(calleeTagGPR, GPRInfo::nonArgGPR1);
            m_jit.move(calleePayloadGPR, GPRInfo::nonArgGPR0);
        }
    } else {
        m_jit.move(calleePayloadGPR, GPRInfo::nonArgGPR0);
        m_jit.move(calleeTagGPR, GPRInfo::nonArgGPR1);
    }
    m_jit.prepareForExceptionCheck();
    JITCompiler::Call slowCall = m_jit.nearCall();
    m_jit.notifyCall(slowCall, codeOrigin, token);

    done.link(&m_jit);

    m_jit.setupResults(resultPayloadGPR, resultTagGPR);

    jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);

    // Record both call sites for the linker/patcher.
    m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleePayloadGPR, node->codeOrigin);
}

856
template<bool strict>
857
GPRReg SpeculativeJIT::fillSpeculateIntInternal(Edge edge, DataFormat& returnFormat)
858
{
859
#if DFG_ENABLE(DEBUG_VERBOSE)
860
    dataLogF("SpecInt@%d   ", edge->index());
861
#endif
862
863
864
865
866
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
    value.filter(SpecInt32);
    VirtualRegister virtualRegister = edge->virtualRegister();
867
868
869
870
    GenerationInfo& info = m_generationInfo[virtualRegister];

    switch (info.registerFormat()) {
    case DataFormatNone: {
871
        if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) {
872
            terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
873
874
875
876
            returnFormat = DataFormatInteger;
            return allocate();
        }
        
877
878
        if (edge->hasConstant()) {
            ASSERT(isInt32Constant(edge.node()));
879
            GPRReg gpr = allocate();
880
            m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
881
            m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
882
            info.fillInteger(*m_stream, gpr);
883
            returnFormat = DataFormatInteger;
884
            return gpr;
885
886
        }

887
        DataFormat spillFormat = info.spillFormat();
888
        ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
889
890

        // If we know this was spilled as an integer we can fill without checking.
891
        if (type & ~SpecInt32)
892
            speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
893

894
        GPRReg gpr = allocate();
895
        m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
896
        m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
897
        info.fillInteger(*m_stream, gpr);
898
899
900
901
902
903
904
905
906
907
908
        returnFormat = DataFormatInteger;
        return gpr;
    }

    case DataFormatJSInteger:
    case DataFormatJS: {
        // Check the value is an integer.
        GPRReg tagGPR = info.tagGPR();
        GPRReg payloadGPR = info.payloadGPR();
        m_gprs.lock(tagGPR);
        m_gprs.lock(payloadGPR);
909
        if (type & ~SpecInt32)
910
            speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
911
912
        m_gprs.unlock(tagGPR);
        m_gprs.release(tagGPR);
913
914
        m_gprs.release(payloadGPR);
        m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
915
        info.fillInteger(*m_stream, payloadGPR);
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
        // If !strict we're done, return.
        returnFormat = DataFormatInteger;
        return payloadGPR;
    }

    case DataFormatInteger: {
        GPRReg gpr = info.gpr();
        m_gprs.lock(gpr);
        returnFormat = DataFormatInteger;
        return gpr;
    }

    case DataFormatDouble:
    case DataFormatCell:
    case DataFormatBoolean:
    case DataFormatJSDouble:
932
    case DataFormatJSCell:
933
    case DataFormatJSBoolean:
934
        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
935
936
        returnFormat = DataFormatInteger;
        return allocate();
937

938
    case DataFormatStorage:
939
    default:
940
        RELEASE_ASSERT_NOT_REACHED();
941
942
        return InvalidGPRReg;
    }
943
944
}

945
// Non-strict variant of the speculative int32 fill; see
// fillSpeculateIntInternal for the actual logic.
GPRReg SpeculativeJIT::fillSpeculateInt(Edge edge, DataFormat& returnFormat)
{
    return fillSpeculateIntInternal<false>(edge, returnFormat);
}

950
// Strict variant of the speculative int32 fill. The resulting format must be
// DataFormatInteger, which the assert verifies in debug builds.
GPRReg SpeculativeJIT::fillSpeculateIntStrict(Edge edge)
{
    DataFormat mustBeDataFormatInteger;
    GPRReg result = fillSpeculateIntInternal<true>(edge, mustBeDataFormatInteger);
    ASSERT(mustBeDataFormatInteger == DataFormatInteger);
    return result;
}

958
FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
959
{
960
#if DFG_ENABLE(DEBUG_VERBOSE)
961
    dataLogF("SpecDouble@%d   ", edge->index());
962
#endif
963
964
965
966
967
    AbstractValue& value = m_state.forNode(edge);
    SpeculatedType type = value.m_type;
    ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecNumber));
    value.filter(SpecNumber);
    VirtualRegister virtualRegister = edge->virtualRegister();
968
969
970
971
    GenerationInfo& info = m_generationInfo[virtualRegister];

    if (info.registerFormat() == DataFormatNone) {

972
973
        if (edge->hasConstant()) {
            if (isInt32Constant(edge.node())) {
974
                GPRReg gpr = allocate();
975
                m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr);
976
                m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
977
                info.fillInteger(*m_stream, gpr);
978
                unlock(gpr);
979
            } else if (isNumberConstant(edge.node())) {
980
                FPRReg fpr = fprAllocate();
981
                m_jit.loadDouble(addressOfDoubleConstant(edge.node()), fpr);
982
                m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
983
                info.fillDouble(*m_stream, fpr);
984
                return fpr;
985
            } else {
986
                terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
987
988
                return fprAllocate();
            }
989
990
        } else {
            DataFormat spillFormat = info.spillFormat();
991
            ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
oliver@apple.com's avatar