/*
 * Copyright (C) 2011 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "MarkedBlock.h"

#include "JSCell.h"
#include "JSObject.h"
#include "ScopeChain.h"

namespace JSC {

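// Blocks are allocated from the OS aligned to their own size, the usual
// arrangement that lets a cell's owning block be recovered by masking the
// cell's address. Allocation failure here is fatal: the heap cannot make
// progress without the new block.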
MarkedBlock* MarkedBlock::create(Heap* heap, size_t cellSize)
{
    PageAllocationAligned allocation = PageAllocationAligned::allocate(blockSize, blockSize, OSAllocator::JSGCHeapPages);
    if (!static_cast<bool>(allocation))
        CRASH();
    return new (allocation.base()) MarkedBlock(allocation, heap, cellSize);
}

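// Reuses an already-allocated block for a new (possibly different) cell size
// by re-running the constructor in place; the page allocation and heap
// pointer carry over unchanged.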
MarkedBlock* MarkedBlock::recycle(MarkedBlock* block, size_t cellSize)
{
    return new (block) MarkedBlock(block->m_allocation, block->m_heap, cellSize);
}

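// Returns the block's pages to the OS. No destructor runs here; any cell
// destructors are assumed to have been run by an earlier sweep.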
void MarkedBlock::destroy(MarkedBlock* block)
{
    block->m_allocation.deallocate();
}

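// Cell sizes are rounded up to a whole number of atoms; m_endAtom is the
// first atom index at which a full cell no longer fits, so sweep loops can
// stop there without running past the end of the block.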
MarkedBlock::MarkedBlock(const PageAllocationAligned& allocation, Heap* heap, size_t cellSize)
    : m_atomsPerCell((cellSize + atomSize - 1) / atomSize)
    , m_endAtom(atomsPerBlock - m_atomsPerCell + 1)
    , m_state(New) // All cells start out unmarked.
    , m_allocation(allocation)
    , m_heap(heap)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);
}

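// jsFinalObjectVPtr identifies plain JSFinalObjects, the common case, so the
// virtual destructor call can be skipped for them. Zapping afterwards records
// that this cell's destructor no longer needs to run.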
inline void MarkedBlock::callDestructor(JSCell* cell, void* jsFinalObjectVPtr)
{
    // A previous eager sweep may already have run cell's destructor.
    if (cell->isZapped())
        return;

    void* vptr = cell->vptr();
#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_heap->m_destroyedTypeCounts.countVPtr(vptr);
#endif
    if (vptr != jsFinalObjectVPtr)
        cell->~JSCell();

    cell->zap();
}

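// The block state and sweep mode are template parameters so that the per-cell
// checks on blockState and sweepMode below are resolved at compile time; each
// state/mode pairing used by sweep() gets its own streamlined loop.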
template<MarkedBlock::BlockState blockState, MarkedBlock::SweepMode sweepMode>
MarkedBlock::FreeCell* MarkedBlock::specializedSweep()
{
    ASSERT(blockState != Allocated && blockState != FreeListed);

    // This produces a free list that is ordered in reverse through the block.
    // This is fine, since the allocation code makes no assumptions about the
    // order of the free list.
    FreeCell* head = 0;
    void* jsFinalObjectVPtr = m_heap->globalData()->jsFinalObjectVPtr;
    for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
        if (blockState == Marked && m_marks.get(i))
            continue;

        JSCell* cell = reinterpret_cast<JSCell*>(&atoms()[i]);
        if (blockState == Zapped && !cell->isZapped())
            continue;

        if (blockState != New)
            callDestructor(cell, jsFinalObjectVPtr);

        if (sweepMode == SweepToFreeList) {
            FreeCell* freeCell = reinterpret_cast<FreeCell*>(cell);
            freeCell->next = head;
            head = freeCell;
        }
    }

    m_state = ((sweepMode == SweepToFreeList) ? FreeListed : Zapped);
    return head;
}

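// Dispatches to the specializedSweep() instantiation matching the block's
// current state. Sweeping an Allocated block is a logic error, and a
// FreeListed block yields no new cells because it has already been swept
// onto a free list.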
MarkedBlock::FreeCell* MarkedBlock::sweep(SweepMode sweepMode)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    switch (m_state) {
    case New:
        ASSERT(sweepMode == SweepToFreeList);
        return specializedSweep<New, SweepToFreeList>();
    case FreeListed:
        // Happens when a block transitions to fully allocated.
        ASSERT(sweepMode == SweepToFreeList);
        return 0;
    case Allocated:
        ASSERT_NOT_REACHED();
        return 0;
    case Marked:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Marked, SweepToFreeList>()
            : specializedSweep<Marked, SweepOnly>();
    case Zapped:
        return sweepMode == SweepToFreeList
            ? specializedSweep<Zapped, SweepToFreeList>()
            : specializedSweep<Zapped, SweepOnly>();
    }

    ASSERT_NOT_REACHED();
    return 0;
}

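// firstFreeCell is the unconsumed remainder of the free list produced by
// sweep(). Zapping it distinguishes still-free cells from cells that were
// allocated from the list in the meantime.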
void MarkedBlock::zapFreeList(FreeCell* firstFreeCell)
{
    HEAP_LOG_BLOCK_STATE_TRANSITION(this);

    // Roll back to a coherent state for Heap introspection. Cells newly
    // allocated from our free list are not currently marked, so we need another
    // way to tell what's live vs dead. We use zapping for that.
    FreeCell* next;
    for (FreeCell* current = firstFreeCell; current; current = next) {
        next = current->next;
        reinterpret_cast<JSCell*>(current)->zap();
    }

    m_state = Zapped;
}

} // namespace JSC