/*
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
 *  Copyright (C) 2007 Eric Seidel <eric@webkit.org>
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#include "config.h"
#include "Heap.h"

#include "CodeBlock.h"
#include "ConservativeRoots.h"
#include "GCActivityCallback.h"
#include "HeapRootVisitor.h"
#include "Interpreter.h"
#include "JSGlobalData.h"
#include "JSGlobalObject.h"
#include "JSLock.h"
#include "JSONObject.h"
#include "Tracing.h"
#include <algorithm>

#define COLLECT_ON_EVERY_ALLOCATION 0

using namespace std;
using namespace JSC;

namespace JSC {

namespace {
static const size_t largeHeapSize = 16 * 1024 * 1024;
static const size_t smallHeapSize = 512 * 1024;

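// heapSizeForHint() converts the client's HeapSize hint into the initial
// number of bytes to allocate between collections. The result seeds
// m_minBytesPerCycle and the NewSpace high water mark in the Heap constructor
// below: 16MB when a LARGE_HEAP build asks for LargeHeap, 512KB otherwise.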
static size_t heapSizeForHint(HeapSize heapSize)
{
#if ENABLE(LARGE_HEAP)
    if (heapSize == LargeHeap)
        return largeHeapSize;
    ASSERT(heapSize == SmallHeap);
    return smallHeapSize;
#else
    ASSERT_UNUSED(heapSize, heapSize == LargeHeap || heapSize == SmallHeap);
    return smallHeapSize;
#endif
}

static inline bool isValidSharedInstanceThreadState()
{
    if (!JSLock::lockCount())
        return false;

    if (!JSLock::currentThreadIsHoldingLock())
        return false;

    return true;
}

static inline bool isValidThreadState(JSGlobalData* globalData)
{
    if (globalData->identifierTable != wtfThreadData().currentIdentifierTable())
        return false;

    if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState())
        return false;

    return true;
}

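// Each functor below is applied to every block (via forEachBlock) or every
// live cell (via forEachCell / forEachProtectedCell). They share a simple
// protocol: operator() visits one item, and returnValue() (where present)
// reports the accumulated result.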
class CountFunctor {
public:
    typedef size_t ReturnType;

    CountFunctor();
    void count(size_t);
    ReturnType returnValue();

private:
    ReturnType m_count;
};

inline CountFunctor::CountFunctor()
    : m_count(0)
{
}

inline void CountFunctor::count(size_t count)
{
    m_count += count;
}

inline CountFunctor::ReturnType CountFunctor::returnValue()
{
    return m_count;
}

struct ClearMarks : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void ClearMarks::operator()(MarkedBlock* block)
{
    block->clearMarks();
    block->notifyMayHaveFreshFreeCells();
}

struct Sweep : MarkedBlock::VoidFunctor {
    void operator()(MarkedBlock*);
};

inline void Sweep::operator()(MarkedBlock* block)
{
    block->sweep();
}

struct MarkCount : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void MarkCount::operator()(MarkedBlock* block)
{
    count(block->markCount());
}

struct Size : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Size::operator()(MarkedBlock* block)
{
    count(block->markCount() * block->cellSize());
}

struct Capacity : CountFunctor {
    void operator()(MarkedBlock*);
};

inline void Capacity::operator()(MarkedBlock* block)
{
    count(block->capacity());
}

struct Count : public CountFunctor {
    void operator()(JSCell*);
};

inline void Count::operator()(JSCell*)
{
    count(1);
}

struct CountIfGlobalObject : CountFunctor {
    void operator()(JSCell*);
};

inline void CountIfGlobalObject::operator()(JSCell* cell)
{
    if (!cell->isObject())
        return;
    if (!asObject(cell)->isGlobalObject())
        return;
    count(1);
}

class TakeIfEmpty {
public:
    typedef MarkedBlock* ReturnType;

    TakeIfEmpty(NewSpace*);
    void operator()(MarkedBlock*);
    ReturnType returnValue();

private:
    NewSpace* m_newSpace;
    DoublyLinkedList<MarkedBlock> m_empties;
};

inline TakeIfEmpty::TakeIfEmpty(NewSpace* newSpace)
    : m_newSpace(newSpace)
{
}

inline void TakeIfEmpty::operator()(MarkedBlock* block)
{
    if (!block->isEmpty())
        return;

    m_newSpace->removeBlock(block);
    m_empties.append(block);
}

inline TakeIfEmpty::ReturnType TakeIfEmpty::returnValue()
{
    return m_empties.head();
}

class RecordType {
public:
    typedef PassOwnPtr<TypeCountSet> ReturnType;

    RecordType();
    void operator()(JSCell*);
    ReturnType returnValue();

private:
    const char* typeName(JSCell*);
    OwnPtr<TypeCountSet> m_typeCountSet;
};

inline RecordType::RecordType()
    : m_typeCountSet(adoptPtr(new TypeCountSet))
{
}

inline const char* RecordType::typeName(JSCell* cell)
{
    const ClassInfo* info = cell->classInfo();
    if (!info || !info->className)
        return "[unknown]";
    return info->className;
}

inline void RecordType::operator()(JSCell* cell)
{
    m_typeCountSet->add(typeName(cell));
}

inline PassOwnPtr<TypeCountSet> RecordType::returnValue()
{
    return m_typeCountSet.release();
}

} // anonymous namespace

Heap::Heap(JSGlobalData* globalData, HeapSize heapSize)
    : m_heapSize(heapSize)
    , m_minBytesPerCycle(heapSizeForHint(heapSize))
    , m_operationInProgress(NoOperation)
    , m_newSpace(this)
    , m_extraCost(0)
    , m_markListSet(0)
    , m_activityCallback(DefaultGCActivityCallback::create(this))
    , m_machineThreads(this)
    , m_slotVisitor(globalData->jsArrayVPtr)
    , m_handleHeap(globalData)
    , m_isSafeToCollect(false)
    , m_globalData(globalData)
{
    m_newSpace.setHighWaterMark(m_minBytesPerCycle);
    (*m_activityCallback)();
#if ENABLE(LAZY_BLOCK_FREEING)
    m_numberOfFreeBlocks = 0;
    m_blockFreeingThread = createThread(blockFreeingThreadStartFunc, this, "JavaScriptCore::BlockFree");
    ASSERT(m_blockFreeingThread);
#endif
}

Heap::~Heap()
{
#if ENABLE(LAZY_BLOCK_FREEING)
    // Signal the block-freeing thread to quit, then wait for it to exit.
    {
        MutexLocker locker(m_freeBlockLock);
        m_blockFreeingThreadShouldQuit = true;
        m_freeBlockCondition.broadcast();
    }
    waitForThreadCompletion(m_blockFreeingThread, 0);
#endif
    
    // The destroy function must already have been called, so assert this.
    ASSERT(!m_globalData);
}

void Heap::destroy()
{
    JSLock lock(SilenceAssertionsOnly);

    if (!m_globalData)
        return;

    ASSERT(!m_globalData->dynamicGlobalObject);
    ASSERT(m_operationInProgress == NoOperation);

    // The global object is not GC protected at this point, so sweeping may delete it
    // (and thus the global data) before other objects that may use the global data.
    RefPtr<JSGlobalData> protect(m_globalData);

#if ENABLE(JIT)
    m_globalData->jitStubs->clearHostFunctionStubs();
#endif

    delete m_markListSet;
    m_markListSet = 0;

    clearMarks();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    shrink();
    ASSERT(!size());

#if ENABLE(SIMPLE_HEAP_PROFILING)
    m_slotVisitor.m_visitedTypeCounts.dump(stderr, "Visited Type Counts");
    m_destroyedTypeCounts.dump(stderr, "Destroyed Type Counts");
#endif
    
#if ENABLE(LAZY_BLOCK_FREEING)
    releaseFreeBlocks();
#endif

    m_globalData = 0;
}

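// With lazy block freeing enabled, empty MarkedBlocks are parked on
// m_freeBlocks instead of being destroyed immediately. The background thread
// below wakes roughly once per second and frees until half of the blocks
// currently on the list are gone, while allocateBlock() may recycle them
// first.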
#if ENABLE(LAZY_BLOCK_FREEING)
void Heap::waitForRelativeTimeWhileHoldingLock(double relative)
{
    if (m_blockFreeingThreadShouldQuit)
        return;
    m_freeBlockCondition.timedWait(m_freeBlockLock, currentTime() + relative);
}

void Heap::waitForRelativeTime(double relative)
{
    // If this returns early, that's fine, so long as it doesn't do it too
    // frequently. It would only be a bug if this function failed to return
    // when it was asked to do so.
    
    MutexLocker locker(m_freeBlockLock);
    waitForRelativeTimeWhileHoldingLock(relative);
}

void* Heap::blockFreeingThreadStartFunc(void* heap)
{
    static_cast<Heap*>(heap)->blockFreeingThreadMain();
    return 0;
}

void Heap::blockFreeingThreadMain()
{
    while (!m_blockFreeingThreadShouldQuit) {
        // Generally wait for one second before scavenging free blocks. This
        // may return early, particularly when we're being asked to quit.
        waitForRelativeTime(1.0);
        if (m_blockFreeingThreadShouldQuit)
            break;
        
        // Now process the list of free blocks. Keep freeing until half of the
        // blocks that are currently on the list are gone. Assume that a size_t
        // field can be accessed atomically.
        size_t currentNumberOfFreeBlocks = m_numberOfFreeBlocks;
        if (!currentNumberOfFreeBlocks)
            continue;
        
        size_t desiredNumberOfFreeBlocks = currentNumberOfFreeBlocks / 2;
        
        while (!m_blockFreeingThreadShouldQuit) {
            MarkedBlock* block;
            {
                MutexLocker locker(m_freeBlockLock);
                if (m_numberOfFreeBlocks <= desiredNumberOfFreeBlocks)
                    block = 0;
                else {
                    block = m_freeBlocks.removeHead();
                    ASSERT(block);
                    m_numberOfFreeBlocks--;
                }
            }
            
            if (!block)
                break;
            
            MarkedBlock::destroy(block);
        }
    }
}
#endif // ENABLE(LAZY_BLOCK_FREEING)

void Heap::reportExtraMemoryCostSlowCase(size_t cost)
{
    // Our frequency of garbage collection tries to balance memory use against speed
    // by collecting based on the number of newly created values. However, for values
    // that hold on to a great deal of memory that's not in the form of other JS values,
    // that is not good enough - in some cases a lot of those objects can pile up and
    // use crazy amounts of memory without a GC happening. So we track these extra
    // memory costs. Only unusually large objects are noted, and we only keep track
    // of this extra cost until the next GC. In garbage collected languages, most values
    // are either very short lived temporaries, or have extremely long lifetimes. So
    // if a large value survives one garbage collection, there is not much point to
    // collecting more frequently as long as it stays alive.

    if (m_extraCost > maxExtraCost && m_extraCost > m_newSpace.highWaterMark() / 2)
        collectAllGarbage();
    m_extraCost += cost;
}

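// A minimal usage sketch (hypothetical caller; the inline fast path,
// Heap::reportExtraMemoryCost() in Heap.h, forwards unusually large costs to
// the slow case above):
//
//     void JSBigBuffer::setStorage(JSGlobalData& globalData, size_t bytes)
//     {
//         // Tell the GC this cell keeps `bytes` of non-GC memory alive,
//         // so a collection can be scheduled sooner.
//         globalData.heap.reportExtraMemoryCost(bytes);
//     }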
inline void* Heap::tryAllocate(NewSpace::SizeClass& sizeClass)
{
    m_operationInProgress = Allocation;
    void* result = m_newSpace.allocate(sizeClass);
    m_operationInProgress = NoOperation;
    return result;
}

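// Slow path, taken when the size class's free list is empty. Roughly: retry
// the allocation first; if we are under the high water mark (or it is not yet
// safe to collect), add a fresh block; otherwise collect without sweeping,
// retry, and as a last resort add a block with AllocationMustSucceed.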
void* Heap::allocateSlowCase(NewSpace::SizeClass& sizeClass)
{
#if COLLECT_ON_EVERY_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    void* result = tryAllocate(sizeClass);

    if (LIKELY(result != 0))
        return result;

    AllocationEffort allocationEffort;

    if (m_newSpace.waterMark() < m_newSpace.highWaterMark() || !m_isSafeToCollect)
        allocationEffort = AllocationMustSucceed;
    else
        allocationEffort = AllocationCanFail;

    MarkedBlock* block = allocateBlock(sizeClass.cellSize, allocationEffort);
    if (block) {
        m_newSpace.addBlock(sizeClass, block);
        void* result = tryAllocate(sizeClass);
        ASSERT(result);
        return result;
    }

    collect(DoNotSweep);

    result = tryAllocate(sizeClass);

    if (result)
        return result;

    ASSERT(m_newSpace.waterMark() < m_newSpace.highWaterMark());

    m_newSpace.addBlock(sizeClass, allocateBlock(sizeClass.cellSize, AllocationMustSucceed));

    result = tryAllocate(sizeClass);
    ASSERT(result);
    return result;
}

void Heap::protect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return;

    m_protectedValues.add(k.asCell());
}

bool Heap::unprotect(JSValue k)
{
    ASSERT(k);
    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());

    if (!k.isCell())
        return false;

    return m_protectedValues.remove(k.asCell());
}

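// A minimal usage sketch (hypothetical embedder code). m_protectedValues is a
// counted set (ProtectCountSet), so protect()/unprotect() calls nest and the
// cell stays alive until the last unprotect:
//
//     void PinnedValue::set(JSGlobalData& globalData, JSValue value)
//     {
//         globalData.heap.protect(value);   // increments the protect count
//         m_value = value;
//     }
//
//     void PinnedValue::clear(JSGlobalData& globalData)
//     {
//         globalData.heap.unprotect(m_value); // decrements; unpins at zero
//         m_value = JSValue();
//     }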
void Heap::markProtectedObjects(HeapRootVisitor& heapRootVisitor)
{
    ProtectCountSet::iterator end = m_protectedValues.end();
    for (ProtectCountSet::iterator it = m_protectedValues.begin(); it != end; ++it)
        heapRootVisitor.visit(&it->first);
}

void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector)
{
    m_tempSortingVectors.append(tempVector);
}

void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector)
{
    ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last());
    m_tempSortingVectors.removeLast();
}

void Heap::markTempSortVectors(HeapRootVisitor& heapRootVisitor)
{
    typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors;

    VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end();
    for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) {
        Vector<ValueStringPair>* tempSortingVector = *it;

        Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end();
        for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) {
            if (vectorIt->first)
                heapRootVisitor.visit(&vectorIt->first);
        }
    }
}

inline RegisterFile& Heap::registerFile()
{
    return m_globalData->interpreter->registerFile();
}

void Heap::getConservativeRegisterRoots(HashSet<JSCell*>& roots)
{
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;
    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);
    size_t registerFileRootCount = registerFileRoots.size();
    JSCell** registerRoots = registerFileRoots.roots();
    for (size_t i = 0; i < registerFileRootCount; i++) {
        setMarked(registerRoots[i]);
        roots.add(registerRoots[i]);
    }
    m_operationInProgress = NoOperation;
}

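// markRoots() drives the mark phase. Conservative roots (machine stacks and
// the register file) are gathered before mark bits are cleared, because
// conservative gathering consults the old mark bits to tell valid cells from
// stray pointers. Each precise root set is then appended and drained in turn.
// Weak handles come last and iterate to a fixed point, since visiting them
// can grow the set of opaque roots that other weak handles' owners consult.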
void Heap::markRoots()
{
    ASSERT(isValidThreadState(m_globalData));
    if (m_operationInProgress != NoOperation)
        CRASH();
    m_operationInProgress = Collection;

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_blocks);
    m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);

    ConservativeRoots registerFileRoots(&m_blocks);
    registerFile().gatherConservativeRoots(registerFileRoots);

    clearMarks();

    SlotVisitor& visitor = m_slotVisitor;
    HeapRootVisitor heapRootVisitor(visitor);

    visitor.append(machineThreadRoots);
    visitor.drain();

    visitor.append(registerFileRoots);
    visitor.drain();

    markProtectedObjects(heapRootVisitor);
    visitor.drain();

    markTempSortVectors(heapRootVisitor);
    visitor.drain();

    if (m_markListSet && m_markListSet->size())
        MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
    if (m_globalData->exception)
        heapRootVisitor.visit(&m_globalData->exception);
    visitor.drain();

    m_handleHeap.visitStrongHandles(heapRootVisitor);
    visitor.drain();

    m_handleStack.visit(heapRootVisitor);
    visitor.drain();

    // Weak handles must be marked last, because their owners use the set of
    // opaque roots to determine reachability.
    int lastOpaqueRootCount;
    do {
        lastOpaqueRootCount = visitor.opaqueRootCount();
        m_handleHeap.visitWeakHandles(heapRootVisitor);
        visitor.drain();
    // If the set of opaque roots has grown, more weak handles may have become reachable.
    } while (lastOpaqueRootCount != visitor.opaqueRootCount());

    visitor.reset();

    m_operationInProgress = NoOperation;
}

void Heap::clearMarks()
{
    forEachBlock<ClearMarks>();
}

void Heap::sweep()
{
    forEachBlock<Sweep>();
}

size_t Heap::objectCount()
{
    return forEachBlock<MarkCount>();
}

size_t Heap::size()
{
    return forEachBlock<Size>();
}

size_t Heap::capacity()
{
    return forEachBlock<Capacity>();
}

size_t Heap::protectedGlobalObjectCount()
{
    return forEachProtectedCell<CountIfGlobalObject>();
}

size_t Heap::globalObjectCount()
{
    return forEachCell<CountIfGlobalObject>();
}

size_t Heap::protectedObjectCount()
{
    return forEachProtectedCell<Count>();
}

PassOwnPtr<TypeCountSet> Heap::protectedObjectTypeCounts()
{
    return forEachProtectedCell<RecordType>();
}

PassOwnPtr<TypeCountSet> Heap::objectTypeCounts()
{
    return forEachCell<RecordType>();
}

void Heap::collectAllGarbage()
{
    if (!m_isSafeToCollect)
        return;
    if (!m_globalData->dynamicGlobalObject)
        m_globalData->recompileAllJSFunctions();

    collect(DoSweep);
}

void Heap::collect(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();

    canonicalizeBlocks();

    markRoots();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    JAVASCRIPTCORE_GC_MARKED();

    resetAllocator();

    if (sweepToggle == DoSweep) {
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * size();
    m_newSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}

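// Worked example of the sizing rule above: if 8MB of cells survive a
// collection, size() is 8MB, so the high water mark becomes 16MB and roughly
// 8MB of fresh allocation happens before the next collection. A nearly empty
// heap falls back to m_minBytesPerCycle (the 512KB / 16MB construction-time
// hint) instead.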
void Heap::canonicalizeBlocks()
{
    m_newSpace.canonicalizeBlocks();
}

void Heap::resetAllocator()
{
    m_extraCost = 0;
    m_newSpace.resetAllocator();
}

void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
{
    m_activityCallback = activityCallback;
}

GCActivityCallback* Heap::activityCallback()
{
    return m_activityCallback.get();
}

bool Heap::isValidAllocation(size_t bytes)
{
    if (!isValidThreadState(m_globalData))
        return false;

    if (bytes > NewSpace::maxCellSize)
        return false;

    if (m_operationInProgress != NoOperation)
        return false;
    
    return true;
}

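// allocateBlock() prefers recycling a block from m_freeBlocks (when lazy
// block freeing is enabled) over asking MarkedBlock::create() for fresh
// memory. AllocationCanFail lets allocateSlowCase() trigger a collection
// instead of growing the heap.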
MarkedBlock* Heap::allocateBlock(size_t cellSize, Heap::AllocationEffort allocationEffort)
{
    MarkedBlock* block;

#if !ENABLE(LAZY_BLOCK_FREEING)
    if (allocationEffort == AllocationCanFail)
        return 0;

    block = MarkedBlock::create(this, cellSize);
#else
    {
        MutexLocker locker(m_freeBlockLock);
        if (m_numberOfFreeBlocks) {
            block = m_freeBlocks.removeHead();
            ASSERT(block);
            m_numberOfFreeBlocks--;
        } else
            block = 0;
    }
    if (block)
        block->initForCellSize(cellSize);
    else if (allocationEffort == AllocationCanFail)
        return 0;
    else
        block = MarkedBlock::create(this, cellSize);
#endif

    m_blocks.add(block);

    return block;
}

void Heap::freeBlocks(MarkedBlock* head)
{
    MarkedBlock* next;
    for (MarkedBlock* block = head; block; block = next) {
        next = block->next();

        m_blocks.remove(block);
        block->reset();
#if !ENABLE(LAZY_BLOCK_FREEING)
        MarkedBlock::destroy(block);
#else
        MutexLocker locker(m_freeBlockLock);
        m_freeBlocks.append(block);
        m_numberOfFreeBlocks++;
#endif
    }
}

void Heap::shrink()
{
    // We record a temporary list of empties to avoid modifying m_blocks while iterating it.
    TakeIfEmpty takeIfEmpty(&m_newSpace);
    freeBlocks(forEachBlock(takeIfEmpty));
}

#if ENABLE(LAZY_BLOCK_FREEING)
void Heap::releaseFreeBlocks()
{
    while (true) {
        MarkedBlock* block;
        {
            MutexLocker locker(m_freeBlockLock);
            if (!m_numberOfFreeBlocks)
                block = 0;
            else {
                block = m_freeBlocks.removeHead();
                ASSERT(block);
                m_numberOfFreeBlocks--;
            }
        }
        
        if (!block)
            break;
        
        MarkedBlock::destroy(block);
    }
}
#endif

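// With generational GC (GGC) enabled, the write barrier's slow case records
// the owning object with the block containing the pointed-to cell
// (addOldSpaceOwner), in the spirit of a remembered set, so old-to-new
// pointers can be found without scanning the whole heap. Without GGC the
// barrier is an intentional no-op.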
#if ENABLE(GGC)
void Heap::writeBarrierSlowCase(const JSCell* owner, JSCell* cell)
{
    if (!cell)
        return;
    MarkedBlock::blockFor(cell)->addOldSpaceOwner(owner, cell);
}

#else

void Heap::writeBarrierSlowCase(const JSCell*, JSCell*)
{
}
#endif

} // namespace JSC