/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "IncrementalSweeper.h"
#include "JSCell.h"
#include "JSDestructibleObject.h"
#include "Operations.h"

namespace JSC {

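// A MarkedBlock is constructed in place over the memory of a DeadBlock handed back
// by the block allocator, reusing that block's Region rather than allocating fresh storage.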
MarkedBlock* MarkedBlock::create(DeadBlock* block, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
{
    Region* region = block->region();
    return new (NotNull, block) MarkedBlock(region, allocator, cellSize, destructorType);
}

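// m_endAtom is one past the last atom at which a cell of this size can begin. When
// the allocator reports a cellSize of 0, the block's capacity is taken from the
// Region's block size rather than the fixed atomsPerBlock.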
MarkedBlock::MarkedBlock(Region* region, MarkedAllocator* allocator, size_t cellSize, DestructorType destructorType)
    : HeapBlock<MarkedBlock>(region)
    , m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom((allocator->cellSize() ? atomsPerBlock : region->blockSize() / atomSize) - m_atomsPerCell + 1)
    , m_destructorType(destructorType)
    , m_allocator(allocator)
    , m_state(New) // All cells start out unmarked.
    , m_weakSet(allocator->heap()->globalData())
{
    ASSERT(allocator);
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

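// Runs the cell's destructor at most once: a zapped cell has already been destroyed
// (for example by an earlier eager sweep), and zapping it afterwards records that
// its destructor has now run.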
inline void MarkedBlock::callDestructor(JSCell* cell)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif

    cell->methodTable()->destroy(cell);
    cell->zap();
}

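// All three template parameters are compile-time constants, so each instantiation of
// specializedSweep() gets a sweep loop whose state/mode/destructor branches are
// resolved statically rather than tested per cell.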
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode, MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);
    ASSERT(!(dtorType == MarkedBlock::None && sweepMode == SweepOnly));

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    size_t count = 0;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && (m_marks.get(i) || (m_newlyAllocated && m_newlyAllocated->get(i))))
            continue;

        JSCell* cell = reinterpret_cast_ptr<JSCell*>(&atoms()[i]);

        if (dtorType != MarkedBlock::None && blockState != New)
            callDestructor(cell);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
            ++count;
        }
    }

    // We only want to discard the newlyAllocated bits if we're creating a FreeList,
    // otherwise we would lose information on what's currently alive.
    if (sweepMode == SweepToFreeList && m_newlyAllocated)
        m_newlyAllocated.clear();

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Marked);
    return FreeList(head, count * cellSize());
}

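// Sweeps the weak set first, then dispatches to the sweepHelper instantiation that
// matches this block's destructor type. A SweepOnly pass over a block with no
// destructors has nothing to do.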
MarkedBlock::FreeList MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    m_weakSet.sweep();

    if (sweepMode == SweepOnly && m_destructorType == MarkedBlock::None)
        return FreeList();

    if (m_destructorType == MarkedBlock::ImmortalStructure)
        return sweepHelper<MarkedBlock::ImmortalStructure>(sweepMode);
    if (m_destructorType == MarkedBlock::Normal)
        return sweepHelper<MarkedBlock::Normal>(sweepMode);
    return sweepHelper<MarkedBlock::None>(sweepMode);
}

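// Selects the specializedSweep() instantiation for the block's current state.
// FreeListed blocks yield an empty free list, and Allocated blocks are never
// legitimately swept.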
template<MarkedBlock::DestructorType dtorType>
MarkedBlock::FreeList MarkedBlock::sweepHelper(SweepMode sweepMode)
{
    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList, dtorType>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return FreeList();
    case Allocated:
        ASSERT_NOT_REACHED();
        return FreeList();
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList, dtorType>()
            : specializedSweep<Marked, SweepOnly, dtorType>();
    }

    ASSERT_NOT_REACHED();
    return FreeList();
}

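// Helper functor for canonicalizeCellLivenessData() below: sets the newlyAllocated
// bit for each cell it is applied to.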
class SetNewlyAllocatedFunctor : public MarkedBlock::VoidFunctor {
public:
    SetNewlyAllocatedFunctor(MarkedBlock* block)
        : m_block(block)
    {
    }

    void operator()(JSCell* cell)
    {
        ASSERT(MarkedBlock::blockFor(cell) == m_block);
        m_block->setNewlyAllocated(cell);
    }

private:
    MarkedBlock* m_block;
};

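// Returns a FreeListed block to the Marked state so the Heap can reason about
// liveness between allocation cycles: every cell is first recorded as newly
// allocated, then cells still sitting on the free list are zapped and have their
// newlyAllocated bits cleared again, leaving the bitmap describing what is live.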
void MarkedBlock::canonicalizeCellLivenessData(const FreeList& freeList)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
    FreeCell* head = freeList.head;

    if (m_state == Marked) {
        // If the block is in the Marked state then we know that:
        // 1) It was not used for allocation during the previous allocation cycle.
        // 2) It may have dead objects, and we only know them to be dead by the
        //    fact that their mark bits are unset.
        // Hence if the block is Marked we need to leave it Marked.
        
        ASSERT(!head);
        return;
    }
   
    ASSERT(m_state == FreeListed);
    
    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. 
    
    ASSERT(!m_newlyAllocated);
    m_newlyAllocated = adoptPtr(new WTF::Bitmap<atomsPerBlock>());

    SetNewlyAllocatedFunctor functor(this);
    forEachCell(functor);

    FreeCell* next;
    for (FreeCell* current = head; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
        clearNewlyAllocated(current);
    }
    
    m_state = Marked;
}

} // namespace JSC