/*
 * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "config.h"

#if ENABLE(JIT)
#include "JIT.h"

#include "CodeBlock.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {
#if USE(JSVALUE64)

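// Shared thunk used by get_by_val's slow path: given a JSString in regT0 and an integer index in
// regT1, return the corresponding single-character string in regT0, or 0 if the access cannot be
// handled here (wrong structure, rope, out-of-bounds index, or a character code >= 0x100).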
JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
{
    JSInterfaceJIT jit;
    JumpList failures;
    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(globalData->stringStructure.get())));

    // Load string length to regT2, and start the process of loading the data pointer into regT0
    jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
    failures.append(jit.branchTest32(Zero, regT0));

    // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
    failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
    
    // Load the character
    JumpList is16Bit;
    JumpList cont8Bit;
    // Load the string flags
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
    jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
    is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
    jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
    cont8Bit.append(jit.jump());
    is16Bit.link(&jit);
    jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
    cont8Bit.link(&jit);

    failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
    jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
    jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
    jit.ret();
    
    failures.link(&jit);
    jit.move(TrustedImm32(0), regT0);
    jit.ret();
    
    LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}

void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);

    // This is technically incorrect - we're zero-extending an int32.  On the hot path this doesn't matter.
    // We check the value as if it were a uint32 against the m_vectorLength - which will always fail if the
    // number was signed since m_vectorLength is always less than intmax (since the total allocation
    // size is always less than 4Gb).  As such zero extending will have been correct (and extending the value
    // to 64 bits is necessary since it's used in the address calculation).  We zero-extend rather than sign-
    // extend since it makes it easier to re-tag the value in the slow case.
    zeroExtend32ToPtr(regT1, regT1);

    emitJumpSlowCaseIfNotJSCell(regT0, base);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    emitArrayProfilingSite(regT2, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);

    PatchableJump badType;
    JumpList slowCases;

    JITArrayMode mode = chooseArrayMode(profile);
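    // Compile a fast path specialized for the indexing shape the array profile predicts;
    // badType and slowCases catch every other case.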
    switch (mode) {
    case JITContiguous:
        slowCases = emitContiguousGetByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }
    
    addSlowCase(badType);
    addSlowCase(slowCases);
    
    Label done = label();
    
#if !ASSERT_DISABLED
    Jump resultOK = branchTest64(NonZero, regT0);
    breakpoint();
    resultOK.link(this);
#endif

    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
    
    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}

JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;
    
    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
    load64(BaseIndex(regT2, regT1, TimesEight), regT0);
    slowCases.append(branchTest64(Zero, regT0));
    
    return slowCases;
}

JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
    JumpList slowCases;

    add32(TrustedImm32(-ArrayStorageShape), regT2, regT3);
    badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));

    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
    slowCases.append(branchTest64(Zero, regT0));
    
    return slowCases;
}

void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    
    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    Jump nonCell = jump();
    linkSlowCase(iter); // base array check
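    // If the base turned out to be a string, try the shared string get_by_val stub before
    // falling back to the generic C++ slow path.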
    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_globalData->stringStructure.get()));
    emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
    Jump failed = branchTest64(Zero, regT0);
    emitPutVirtualRegister(dst, regT0);
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
    failed.link(this);
    notString.link(this);
    nonCell.link(this);
    
    linkSlowCase(iter); // vector length check
    linkSlowCase(iter); // empty value
    
    Label slowPath = label();
    
    JITStubCall stubCall(this, cti_op_get_by_val);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    Call call = stubCall.call(dst);

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;

    emitValueProfilingSite();
}

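// Compile a load of the property at the dynamic PropertyOffset held in 'offset'. Out-of-line offsets
// are read from the butterfly with the offset negated; inline offsets (only possible when
// finalObjectMode is MayBeFinal) are read from the object's inline storage. Both cases funnel into
// the single BaseIndex load at the end.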
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
{
    ASSERT(sizeof(JSValue) == 8);
    
    if (finalObjectMode == MayBeFinal) {
        Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
        Jump done = jump();
        isInline.link(this);
        addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
        done.link(this);
    } else {
#if !ASSERT_DISABLED
        Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
        breakpoint();
        isOutOfLine.link(this);
#endif
        loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
        neg32(offset);
    }
    signExtend32ToPtr(offset, offset);
    load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
}

void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;
    unsigned expected = currentInstruction[4].u.operand;
    unsigned iter = currentInstruction[5].u.operand;
    unsigned i = currentInstruction[6].u.operand;

    emitGetVirtualRegister(property, regT0);
    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
    emitGetVirtualRegisters(base, regT0, iter, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);

    // Test base's structure
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
    load32(addressFor(i), regT3);
    sub32(TrustedImm32(1), regT3);
    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
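    // Convert the iterator's index in regT3 into a PropertyOffset: indices past the inline
    // capacity are rebased into the out-of-line range starting at firstOutOfLineOffset.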
    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
    add32(TrustedImm32(firstOutOfLineOffset), regT3);
    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
    inlineProperty.link(this);
    compileGetDirectOffset(regT0, regT0, regT3, regT1);

    emitPutVirtualRegister(dst, regT0);
}

void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned base = currentInstruction[2].u.operand;
    unsigned property = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCaseIfNotJSCell(iter, base);
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_get_by_val_generic);
    stubCall.addArgument(base, regT2);
    stubCall.addArgument(property, regT2);
    stubCall.call(dst);
}

void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;

    emitGetVirtualRegisters(base, regT0, property, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    // See comment in op_get_by_val.
    zeroExtend32ToPtr(regT1, regT1);
    emitJumpSlowCaseIfNotJSCell(regT0, base);
    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
    emitArrayProfilingSite(regT2, regT3, profile);
    and32(TrustedImm32(IndexingShapeMask), regT2);
    
    PatchableJump badType;
    JumpList slowCases;
    
    JITArrayMode mode = chooseArrayMode(profile);
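    // As in op_get_by_val, specialize on the indexing shape the array profile predicts.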
    switch (mode) {
    case JITContiguous:
        slowCases = emitContiguousPutByVal(currentInstruction, badType);
        break;
    case JITArrayStorage:
        slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
        break;
    default:
        CRASH();
        break;
    }
    
    addSlowCase(badType);
    addSlowCase(slowCases);
    
    Label done = label();
    
    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));

    emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
}

JIT::JumpList JIT::emitContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    
    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ContiguousShape));
    
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));

    Label storeResult = label();
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight));
    
    Jump done = jump();
    outOfBounds.link(this);
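    // Out of bounds of the public length: if the index is also past the vector length, take the
    // slow path; otherwise this is a store to a hole within the allocated vector, so note it in
    // the profile, grow the public length to index + 1, and retry the store.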
    
    JumpList slowCases;
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfVectorLength())));
    
    emitArrayProfileStoreToHoleSpecialCase(profile);
    
    add32(TrustedImm32(1), regT1, regT3);
    store32(regT3, Address(regT2, Butterfly::offsetOfPublicLength()));
    jump().linkTo(storeResult, this);
    
    done.link(this);
    
    return slowCases;
}

JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
    unsigned value = currentInstruction[3].u.operand;
    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
    
    JumpList slowCases;
    
    badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));

    Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));

    Label storeResult(this);
    emitGetVirtualRegister(value, regT3);
    store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
    Jump end = jump();
    
    empty.link(this);
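    // The slot held a hole: record that in the profile, bump numValuesInVector, and if the index
    // is at or beyond the array's length, grow the length to index + 1 before storing.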
    emitArrayProfileStoreToHoleSpecialCase(profile);
    add32(TrustedImm32(1), Address(regT2, ArrayStorage::numValuesInVectorOffset()));
    branch32(Below, regT1, Address(regT2, ArrayStorage::lengthOffset())).linkTo(storeResult, this);

    add32(TrustedImm32(1), regT1);
    store32(regT1, Address(regT2, ArrayStorage::lengthOffset()));
    sub32(TrustedImm32(1), regT1);
    jump().linkTo(storeResult, this);

    end.link(this);
    
    return slowCases;
}

void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned base = currentInstruction[1].u.operand;
    unsigned property = currentInstruction[2].u.operand;
    unsigned value = currentInstruction[3].u.operand;

    linkSlowCase(iter); // property int32 check
    linkSlowCaseIfNotJSCell(iter, base); // base cell check
    linkSlowCase(iter); // base not array check
    linkSlowCase(iter); // out of bounds
    
    Label slowPath = label();

    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
    stubPutByValCall.addArgument(regT0);
    stubPutByValCall.addArgument(property, regT2);
    stubPutByValCall.addArgument(value, regT2);
    Call call = stubPutByValCall.call();

    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
    m_byValInstructionIndex++;
}

void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_by_index);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_put_getter_setter);
    stubCall.addArgument(currentInstruction[1].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
    stubCall.addArgument(currentInstruction[3].u.operand, regT2);
    stubCall.addArgument(currentInstruction[4].u.operand, regT2);
    stubCall.call();
}

void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
    JITStubCall stubCall(this, cti_op_del_by_id);
    stubCall.addArgument(currentInstruction[2].u.operand, regT2);
    stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
    stubCall.call(currentInstruction[1].u.operand);
}

void JIT::emit_op_method_check(Instruction* currentInstruction)
{
    // Assert that the following instruction is a get_by_id.
    ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id
        || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line);

    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);

    // Do the method check - check the object & its prototype's structure inline (this is the common case).
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();

    Jump notCell = emitJumpIfNotJSCell(regT0);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
    Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));

    // This will be relinked to load the function without doing a load.
    DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);

    END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);

    Jump match = jump();

    // Link the failure cases here.
    notCell.link(this);
    structureCheck.link(this);
    protoStructureCheck.link(this);

    // Do a regular(ish) get_by_id (the slow case will be linked to
    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
    compileGetByIdHotPath(baseVReg, ident);

    match.link(this);
    emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
    emitPutVirtualRegister(resultVReg);

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);

    m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction);
}

void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    currentInstruction += OPCODE_LENGTH(op_method_check);
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
    emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));

    // We've already generated the following get_by_id, so make sure it's skipped over.
    m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}

void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    emitGetVirtualRegister(baseVReg, regT0);
    compileGetByIdHotPath(baseVReg, ident);
    emitValueProfilingSite();
    emitPutVirtualRegister(resultVReg);
}

void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.

    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
    
    if (*ident == m_globalData->propertyNames->length && canBeOptimized()) {
        loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
        emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
    }

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    Label hotPathBegin(this);

    DataLabelPtr structureToCompare;
    PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
    addSlowCase(structureCheck);

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
    DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);

    Label putResult(this);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
}

void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned resultVReg = currentInstruction[1].u.operand;
    unsigned baseVReg = currentInstruction[2].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));

    compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
    emitValueProfilingSite();
}

void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the patch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here.  To do so we can subtract back
    // the distance from the call to the head of the slow case.

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    Label coldPathBegin(this);
    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    Call call = stubCall.call(resultVReg);

    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}

void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    unsigned valueVReg = currentInstruction[3].u.operand;

    // In order to be able to patch both the Structure and the object offset, we store one pointer,
    // 'hotPathBegin', to just after the point at which the arguments have been loaded into registers,
    // and we generate code such that the Structure & offset are always at the same distance from this.

    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

    // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);

    Label hotPathBegin(this);

    // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
    DataLabelPtr structureToCompare;
    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));

    ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
    DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));

    END_UNINTERRUPTED_SEQUENCE(sequencePutById);

    emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);

    m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
}

void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned baseVReg = currentInstruction[1].u.operand;
    Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
    unsigned direct = currentInstruction[8].u.operand;

    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
    stubCall.addArgument(regT0);
    stubCall.addArgument(TrustedImmPtr(ident));
    stubCall.addArgument(regT1);
    move(regT0, nonArgGPR1);
    Call call = stubCall.call();

    // Track the location of the call; this will be used to recover patch information.
    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}

// Compile a store into an object's property storage.  May overwrite base.
void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
        return;
    }
    
    loadPtr(Address(base, JSObject::butterflyOffset()), base);
    store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
}

// Compile a load from an object's property storage.  May overwrite base.
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
        return;
    }
    
    loadPtr(Address(base, JSObject::butterflyOffset()), result);
    load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
}

void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
{
    if (isInlineOffset(cachedOffset)) {
        load64(base->locationForOffset(cachedOffset), result);
        return;
    }
    
    loadPtr(base->butterflyAddress(), result);
    load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
}

void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
    move(nonArgGPR1, regT0);

    JumpList failureCases;
    // Check eax is an object of the right Structure.
    failureCases.append(emitJumpIfNotJSCell(regT0));
    failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
    
    testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);
    
    ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());

    // ecx = baseObject->m_structure
    if (!direct) {
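        // For a normal (non-direct) put we must also guard against the prototype chain having
        // changed since the transition was cached, so test each prototype's Structure in turn.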
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
            testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
        }
    }

    // If we succeed in all of our checks, and the code was optimizable, then make sure we
    // decrement the rare case counter.
#if ENABLE(VALUE_PROFILER)
    if (m_codeBlock->canCompileWithDFG() >= DFG::ShouldProfile) {
        sub32(
            TrustedImm32(1),
            AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
    }
#endif
    
    // emit a call only if storage realloc is needed
    bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
    if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
        preserveReturnAddressAfterCall(regT3);
 
        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
        stubCall.skipArgument(); // base
        stubCall.skipArgument(); // ident
        stubCall.skipArgument(); // value
        stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
        stubCall.addArgument(TrustedImmPtr(newStructure));
        stubCall.call(regT0);
        emitGetJITStubArg(2, regT1);

        restoreReturnAddressBeforeReturn(regT3);
    }

    // Planting the new structure triggers the write barrier so we need
    // an unconditional barrier here.
    emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);

    ASSERT(newStructure->classInfo() == oldStructure->classInfo());
    storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
    compilePutDirectOffset(regT0, regT1, cachedOffset);

    ret();
    
    ASSERT(!failureCases.empty());
    failureCases.link(this);
    restoreArgumentReferenceForTrampoline();
    Call failureCall = tailRecursiveCall();

    LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);

    patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));

    if (willNeedStorageRealloc) {
        ASSERT(m_calls.size() == 1);
        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
    }
    
    stubInfo->stubRoutine = createJITStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline put_by_id transition for CodeBlock %p, return point %p",
             m_codeBlock, returnAddress.value())),
        *m_globalData,
        m_codeBlock->ownerExecutable(),
        willNeedStorageRealloc,
        newStructure);
    RepatchBuffer repatchBuffer(m_codeBlock);
    repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}

void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
    // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}

void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
    RepatchBuffer repatchBuffer(codeBlock);

    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));

    // Patch the offset into the property map to load from, then patch the Structure to look for.
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
    repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
}

void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

    // Check eax is an array