/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {

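// A MarkedBlock is constructed in place at the base of its aligned page
// allocation, so creating a block allocates no memory beyond the pages
// themselves.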
MarkedBlock* MarkedBlock::create(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures)
{
    return new (NotNull, allocation.base()) MarkedBlock(allocation, heap, cellSize, cellsNeedDestruction, onlyContainsStructures);
}

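// Sizes within a block are measured in atoms. The cell size is rounded up to
// a whole number of atoms (e.g. with 8-byte atoms, a 20-byte cell occupies
// (20 + 7) / 8 == 3 atoms), and m_endAtom is the last atom index at which a
// full cell can still start, which is what bounds the sweep loops below.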
MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize, bool cellsNeedDestruction, bool onlyContainsStructures)
    : HeapBlock<MarkedBlock>(allocation)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_cellsNeedDestruction(cellsNeedDestruction)
    , m_onlyContainsStructures(onlyContainsStructures)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(heap)
{
    ASSERT(heap);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

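// Runs the cell's destructor through its method table, then zaps the cell so
// that a later sweep of the same block will not destroy it a second time.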
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

#if !ASSERT_DISABLED || ENABLE(GC_VALIDATION)
    cell->clearStructure();
#endif

    cell->methodTable()->destroy(cell);
    cell->zap();
}

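// The sweep loop is specialized on the block's current state, the sweep mode,
// and whether destructors must run, so those per-cell checks are template
// constants rather than runtime branches inside the loop.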
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(destructorCallNeeded || sweepMode != SweepOnly);

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && m_marks.get(i))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);
        if (blockState == Zapped && !cell->isZapped())
            continue;

        if (destructorCallNeeded && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
    return FreeList(head, count * cellSize());
}

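// Public sweep entry point: sweeps weak references first, then dispatches to
// the helper instantiated for whether cells in this block need destruction.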
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && !m_cellsNeedDestruction)
        return FreeList();

    if (m_cellsNeedDestruction)
        return sweepHelper<true>(sweepMode);
    return sweepHelper<false>(sweepMode);
}

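// Dispatches on the block's lifecycle state to the matching specializedSweep
// instantiation; only New, Marked, and Zapped blocks are actually walked.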
template<bool destructorCallNeeded>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, destructorCallNeeded>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Marked, SweepOnly, destructorCallNeeded>();
    case Zapped:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Zapped, SweepToFreeList, destructorCallNeeded>()
            : specializedSweep<Zapped, SweepOnly, destructorCallNeeded>();
    }

    ASSERT_NOT_REACHED();
    return FreeList();
}

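// Retires a free list previously produced by sweep(SweepToFreeList): any cell
// still sitting on the list is dead, so it is zapped and the block moves to
// the Zapped state. Marked and Zapped blocks must arrive here with an empty
// free list.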
void MarkedBlock::zapFreeList(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.

        ASSERT(!head);
        return;
    }

    if (m_state == Zapped) {
        // If the block is in the Zapped state then we know that someone already
        // zapped it for us. This could not have happened during a GC, but might
        // be the result of someone having done a GC scan to perform some operation
        // over all live objects (or all live blocks). It also means that somebody
        // had allocated in this block since the last GC, swept all dead objects
        // onto the free list, left the block in the FreeListed state, then the heap
        // scan happened, and canonicalized the block, leading to all dead objects
        // being zapped. Therefore, it is safe for us to simply do nothing, since
        // dead objects will have 0 in their vtables and live objects will have
        // non-zero vtables, which is consistent with the block being zapped.

        ASSERT(!head);
        return;
    }

    ASSERT(m_state == FreeListed);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. We use zapping for that.

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    m_state = Zapped;
}

} // namespace JSC