/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {

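// Each block is a single blockSize-aligned allocation: the MarkedBlock header is
// constructed in place at the base and cells are carved out of the remainder, so a
// cell pointer can be mapped back to its owning block by masking off the low bits.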
MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (allocation.base()) MarkedBlock(allocation, heap, cellSize);
}

void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}

MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize)
    : m_inNewSpace(false)
    , m_allocation(allocation)
    , m_heap(heap)
{
    initForCellSize(cellSize);
}

void MarkedBlock::initForCellSize(size_t cellSize)
{
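    // Cells are allocated in units of atomSize-byte atoms: m_atomsPerCell is the
    // cell size rounded up to whole atoms, and m_endAtom is the first atom index
    // at which a complete cell no longer fits in the block.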
    m_atomsPerCell = (cellSize + atomSize - 1) / atomSize;
    m_endAtom = atomsPerBlock - m_atomsPerCell + 1;
    setDestructorState(SomeFreeCellsStillHaveObjects);
}

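// The destructor state is a template parameter so the checks below constant-fold
// away in each specialization. A free cell that holds no object has a null vptr
// and is skipped, unless every free cell is known to hold an object.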
template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::callDestructor(JSCell* cell, void* jsFinalObjectVPtr)
{
    if (specializedDestructorState == FreeCellsDontHaveObjects)
        return;
    void* vptr = cell->vptr();
    if (specializedDestructorState == AllFreeCellsHaveObjects || vptr) {
#if ENABLE(SIMPLE_HEAP_PROFILING)
        m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
        if (vptr == jsFinalObjectVPtr) {
            JSFinalObject* object = reinterpret_cast<JSFinalObject*>(cell);
            object->JSFinalObject::~JSFinalObject();
        } else
            cell->~JSCell();
    }
}

template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::specializedReset()
{
    void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;

    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
        callDestructor<specializedDestructorState>(reinterpret_cast<JSCell*>(&atoms()[i]), jsFinalObjectVPtr);
}

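// Runs destructors for every cell slot in the block, without consulting the mark bits.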
void MarkedBlock::reset()
{
    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
    case SomeFreeCellsStillHaveObjects:
        specializedReset<SomeFreeCellsStillHaveObjects>();
        break;
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        specializedReset<AllFreeCellsHaveObjects>();
        break;
    }
}

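// Destroys each unmarked cell and clears its vptr so later sweeps treat the slot as empty.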
template<MarkedBlock::DestructorState specializedDestructorState>
void MarkedBlock::specializedSweep()
{
    if (specializedDestructorState != FreeCellsDontHaveObjects) {
        void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;
        
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            if (m_marks.get(i))
                continue;
            
            JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
            callDestructor<specializedDestructorState>(cell, jsFinalObjectVPtr);
            cell->setVPtr(0);
        }
        
        setDestructorState(FreeCellsDontHaveObjects);
    }
}

void MarkedBlock::sweep()
{
    HEAP_DEBUG_BLOCK(this);
    
    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
        break;
    case SomeFreeCellsStillHaveObjects:
        specializedSweep<SomeFreeCellsStillHaveObjects>();
        break;
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        specializedSweep<AllFreeCellsHaveObjects>();
        break;
    }
}

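// Collects every unmarked cell into a singly linked free list. testAndSet() also
// sets the mark bit for each cell handed out, so a later sweep of this block will
// not reclaim the same cell again.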
template<MarkedBlock::DestructorState specializedDestructorState>
ALWAYS_INLINE MarkedBlock::FreeCell* MarkedBlock::produceFreeList()
{
    // This returns a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    
    void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;
    
    FreeCell* result = 0;
    
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (!m_marks.testAndSet(i)) {
            JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
            if (specializedDestructorState != FreeCellsDontHaveObjects)
                callDestructor<specializedDestructorState>(cell, jsFinalObjectVPtr);
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = result;
            result = freeCell;
        }
    }
    
    // This is sneaky: if we're producing a free list then we intend to
    // fill up the free cells in the block with objects, which means that
    // if we have a new GC then all of the free stuff in this block will
    // comprise objects rather than empty cells.
    setDestructorState(AllFreeCellsHaveObjects);

    return result;
}

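// Dispatches on the block's destructor state so that produceFreeList() can skip
// destructor work entirely when no free cell holds an object.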
MarkedBlock::FreeCell* MarkedBlock::lazySweep()
{
    // This returns a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    
    HEAP_DEBUG_BLOCK(this);
    
    switch (destructorState()) {
    case FreeCellsDontHaveObjects:
        return produceFreeList<FreeCellsDontHaveObjects>();
    case SomeFreeCellsStillHaveObjects:
        return produceFreeList<SomeFreeCellsStillHaveObjects>();
    default:
        ASSERT(destructorState() == AllFreeCellsHaveObjects);
        return produceFreeList<AllFreeCellsHaveObjects>();
    }
}

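// For a brand-new block every cell is free, so no destructors need to run: the loop
// just sets each mark bit and threads the cells onto the free list.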
MarkedBlock::FreeCell* MarkedBlock::blessNewBlockForFastPath()
{
    // This returns a free list that is ordered in reverse through the block,
    // as in lazySweep() above.
    
    HEAP_DEBUG_BLOCK(this);

    FreeCell* result = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        m_marks.set(i);
        FreeCell* freeCell = reinterpret_cast<FreeCell*>(&atoms()[i]);
        freeCell->next = result;
        result = freeCell;
    }
    
    // See produceFreeList(). If we're here then we intend to fill the
    // block with objects, so once a GC happens, all free cells will be
    // occupied by objects.
    setDestructorState(AllFreeCellsHaveObjects);

    return result;
}

void MarkedBlock::blessNewBlockForSlowPath()
{
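    // A fresh block prepared for the slow path starts with all mark bits clear and
    // every cell tagged as holding no object.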
    HEAP_DEBUG_BLOCK(this);

    m_marks.clearAll();
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell)
        reinterpret_cast<FreeCell*>(&atoms()[i])->setNoObject();
    
    setDestructorState(FreeCellsDontHaveObjects);
}

void MarkedBlock::canonicalizeBlock(FreeCell* firstFreeCell)
{
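    // Returns the unused remainder of a free list to the block: each cell still on
    // the list gets its mark bit cleared and is tagged as holding no object.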
    HEAP_DEBUG_BLOCK(this);
    
    ASSERT(destructorState() == AllFreeCellsHaveObjects);
    
    if (firstFreeCell) {
        for (FreeCell* current = firstFreeCell; current;) {
            FreeCell* next = current->next;
            size_t i = atomNumber(current);
            
            m_marks.clear(i);
            
            current->setNoObject();
            
            current = next;
        }
        
        setDestructorState(SomeFreeCellsStillHaveObjects);
    }
}

} // namespace JSC