/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {

MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction);
}

MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
{
    return new (NotNull, block) MarkedBlock(block->m_allocation, heap, cellSize, cellsNeedDestruction);
}

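// create() requests storage aligned to blockSize (see the allocate() call
// above). One consequence worth noting (an inference from this file, not a
// statement in it) is that alignment allows mapping any interior cell pointer
// back to its containing block with a single mask, along the lines of:
//
//     MarkedBlock* block = reinterpret_cast<MarkedBlock*>(
//         reinterpret_cast<uintptr_t>(cell) & ~(blockSize - 1));
//
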
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}

MarkedBlock::MarkedBlock(PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction)
    : HeapBlock(allocation)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_cellsNeedDestruction(cellsNeedDestruction)
    , m_state(New) // All cells start out unmarked.
    , m_heap(heap)
{
    ASSERT(heap);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

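// Worked example for the constructor arithmetic above (illustrative values;
// atomSize is defined elsewhere): assuming atomSize == 8, a 24-byte cell
// yields m_atomsPerCell == (24 + 8 - 1) / 8 == 3 (ceiling division, so each
// cell spans whole atoms), and m_endAtom == atomsPerBlock - 3 + 1 is the
// first atom index past which a complete cell no longer fits, which is the
// bound the sweep loop below checks against.
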
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
    cell->methodTable()->destroy(cell);

    cell->zap();
}

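// Per the comments in callDestructor() above and zapFreeList() below, a
// "zapped" cell is one whose first word has been cleared (dead objects
// "have 0 in their vtables"), so isZapped() doubles as a cheap
// "destructor already ran" check.
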
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
MarkedBlock::FreeCell* MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(destructorCallNeeded || sweepMode != SweepOnly);

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && m_marks.get(i))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (blockState == Zapped && !cell->isZapped())
            continue;

        if (destructorCallNeeded && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
        }
    }

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
    return head;
}

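// Illustration of the "reverse order" comment in specializedSweep(): if free
// cells are visited at atoms A < B < C, head insertion links them C -> B -> A.
// That keeps the sweep O(1) per cell, and the comment above notes that the
// allocation code makes no assumption about free-list order, so the reversal
// is harmless.
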
MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
        return 0;

    if (m_cellsNeedDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

template<bool destructorCallNeeded>
MarkedBlock::FreeCell* MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return 0;
    case Allocated:
        ASSERT_NOT_REACHED();
        return 0;
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
    case Zapped:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
    }

    ASSERT_NOT_REACHED();
    return 0;
}

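// State transitions performed by sweepHelper()/specializedSweep(), summarized
// from the switch above:
//
//     New        -> FreeListed               (SweepToFreeList only)
//     FreeListed -> FreeListed               (nothing to sweep)
//     Marked     -> FreeListed or Zapped     (by sweepMode)
//     Zapped     -> FreeListed or Zapped     (by sweepMode)
//     Allocated  -> never swept directly     (ASSERT_NOT_REACHED)
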
void MarkedBlock::zapFreeList(FreeCell* firstFreeCell)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!firstFreeCell);

        return;
    }

    if (m_state == Zapped) {
        // If the block is in the Zapped state then we know that someone already
        // zapped it for us. This could not have happened during a GC, but might
        // be the result of someone having done a GC scan to perform some operation
        // over all live objects (or all live blocks). It also means that somebody
        // had allocated in this block since the last GC, swept all dead objects
        // onto the free list, left the block in the FreeListed state, then the heap
        // scan happened, and canonicalized the block, leading to all dead objects
        // being zapped. Therefore, it is safe for us to simply do nothing, since
        // dead objects will have 0 in their vtables and live objects will have
        // non-zero vtables, which is consistent with the block being zapped.

        ASSERT(!firstFreeCell);

        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. We use zapping for that.
    FreeCell* next;
    for (FreeCell* current = firstFreeCell; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    m_state = Zapped;
}

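// Caller-side sketch (illustrative only; the owning allocator lives outside
// this file, and this sequence is inferred from the state machine above, not
// defined here):
//
//     MarkedBlock::FreeCell* freeList = block->sweep(MarkedBlock::SweepToFreeList);
//     // ... pop cells off freeList to service allocations ...
//     block->zapFreeList(freeList); // canonicalize leftovers before a GC scan
//
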
} // namespace JSC