/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
 */

#include "config.h"

#include "ExecutableAllocator.h"

#include <errno.h>

#if ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)

#include "TCSpinLock.h"
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/VMTags.h>

using namespace WTF;

namespace JSC {

#define TWO_GB (2u * 1024u * 1024u * 1024u)
#define SIXTEEN_MB (16u * 1024u * 1024u)
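// TWO_GB is the size of the virtual address range reserved up front for the
// pool; SIXTEEN_MB is the amount of freed memory that triggers a coalescing
// sweep of the free list (see coalesceFreeSpace below).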

// FreeListEntry describes a free chunk of memory, stored in the freeList.
struct FreeListEntry {
    FreeListEntry(void* pointer, size_t size)
        : pointer(pointer)
        , size(size)
        , nextEntry(0)
        , less(0)
        , greater(0)
        , balanceFactor(0)
    {
    }

    // All entries of the same size share a single entry
    // in the AVLTree, and are linked together in a linked
    // list, using nextEntry.
    void* pointer;
    size_t size;
    FreeListEntry* nextEntry;

    // These fields are used by AVLTree.
    FreeListEntry* less;
    FreeListEntry* greater;
    int balanceFactor;
};
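// For example (illustrative only): if three equally sized chunks have been
// freed, the tree holds a single FreeListEntry keyed on that size, with the
// other two chunks reachable from it through the nextEntry chain.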

// Abstractor class for use in AVLTree.
// Nodes in the AVLTree are of type FreeListEntry, keyed on
// (and thus sorted by) their size.
struct AVLTreeAbstractorForFreeList {
    typedef FreeListEntry* handle;
    typedef int32_t size;
    typedef size_t key;

    handle get_less(handle h) { return h->less; }
    void set_less(handle h, handle lh) { h->less = lh; }
    handle get_greater(handle h) { return h->greater; }
    void set_greater(handle h, handle gh) { h->greater = gh; }
    int get_balance_factor(handle h) { return h->balanceFactor; }
    void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }

    static handle null() { return 0; }

    int compare_key_key(key va, key vb) { return va - vb; }
    int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
    int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
};
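// Illustrative sketch only (it mirrors the calls made further down in this
// file): a best-fit lookup through WTF::AVLTree using this abstractor looks
// like
//     AVLTree<AVLTreeAbstractorForFreeList, 40> tree;
//     FreeListEntry* bestFit = tree.search(requestedSize, tree.GREATER_EQUAL);
// where 'requestedSize' and 'bestFit' are hypothetical names.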

// Used to reverse sort an array of FreeListEntry pointers.
static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
{
    FreeListEntry* left = *(FreeListEntry**)leftPtr;
    FreeListEntry* right = *(FreeListEntry**)rightPtr;

    return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
}

// Used to reverse sort an array of pointers.
static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
{
    void* left = *(void**)leftPtr;
    void* right = *(void**)rightPtr;

    return (intptr_t)right - (intptr_t)left;
}

class FixedVMPoolAllocator
{
    // The free list is stored in a sorted tree.
    typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;

    // Use madvise as appropriate to prevent freed pages from being spilled,
    // and to attempt to ensure that used memory is reported correctly.
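    // (On Darwin, MADV_FREE_REUSABLE tells the kernel that the pages may be
    // discarded and dropped from the process's reported footprint, while
    // MADV_FREE_REUSE marks them as in use again; MADV_DONTNEED is the more
    // portable fallback, and on platforms with neither we simply do nothing.)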
#if HAVE(MADV_FREE_REUSE)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    void reuse(void* position, size_t size)
    {
        while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
#elif HAVE(MADV_DONTNEED)
    void release(void* position, size_t size)
    {
        while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
    }

    void reuse(void*, size_t) {}
#else
    void release(void*, size_t) {}
    void reuse(void*, size_t) {}
#endif

    // All additions to the free list should go through this method, rather than
    // calling insert directly, to avoid multiple entries being added with the
    // same key.  All nodes being added should be singletons; they should not
    // already be part of a chain.
    void addToFreeList(FreeListEntry* entry)
    {
        ASSERT(!entry->nextEntry);

        if (entry->size == m_commonSize) {
            m_commonSizedAllocations.append(entry->pointer);
            delete entry;
        } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
            // m_freeList already contains an entry for this size - insert this node into the chain.
            entry->nextEntry = entryInFreeList->nextEntry;
            entryInFreeList->nextEntry = entry;
        } else
            m_freeList.insert(entry);
    }

    // We do not attempt to coalesce additions, which may lead to fragmentation;
    // instead we periodically perform a sweep to try to coalesce neighboring
    // entries in m_freeList.  Presently this is triggered at the point that 16MB
    // of memory has been released.
    void coalesceFreeSpace()
    {
        Vector<FreeListEntry*> freeListEntries;
        SizeSortedFreeTree::Iterator iter;
        iter.start_iter_least(m_freeList);

        // Empty m_freeList into a Vector.
        for (FreeListEntry* entry; (entry = *iter); ++iter) {
            // Each entry in m_freeList might correspond to multiple
            // free chunks of memory (of the same size).  Walk the chain
            // (though of course it is likely to be only one entry long!), adding
            // each entry to the Vector (resetting the next-in-chain
            // pointer to separate each node out).
            FreeListEntry* next;
            do {
                next = entry->nextEntry;
                entry->nextEntry = 0;
                freeListEntries.append(entry);
            } while ((entry = next));
        }
        // All entries are now in the Vector; purge the tree.
        m_freeList.purge();

        // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
        // We reverse-sort so that we can logically work forwards through memory,
        // whilst popping items off the end of the Vectors using last() and removeLast().
        qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
        qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);

        // The entries from m_commonSizedAllocations that cannot be
        // coalesced into larger chunks will be temporarily stored here.
        Vector<void*> newCommonSizedAllocations;

        // Keep processing so long as entries remain in either of the vectors.
        while (freeListEntries.size() || m_commonSizedAllocations.size()) {
            // We're going to try to find a FreeListEntry node that we can coalesce onto.
            FreeListEntry* coalescionEntry = 0;

            // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
            if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
                // Pop an item from the m_commonSizedAllocations vector - this is the lowest
                // addressed free chunk.  Find out the begin and end addresses of the memory chunk.
                void* begin = m_commonSizedAllocations.last();
                void* end = (void*)((intptr_t)begin + m_commonSize);
                m_commonSizedAllocations.removeLast();

                // Try to find another free chunk abutting onto the end of the one we have already found.
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // There is an existing FreeListEntry for the next chunk of memory!
                    // we can reuse this.  Pop it off the end of m_freeList.
                    coalescionEntry = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Update the existing node to include the common-sized chunk that we also found.
                    coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
                    coalescionEntry->size += m_commonSize;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // There is a second common-sized chunk that can be coalesced.
                    // Allocate a new node.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
                } else {
                    // Nope - this poor little guy is all on his own. :-(
                    // Add him into the newCommonSizedAllocations vector for now, we're
                    // going to end up adding him back into the m_commonSizedAllocations
                    // list when we're done.
                    newCommonSizedAllocations.append(begin);
                    continue;
                }
            } else {
                ASSERT(freeListEntries.size());
                ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
                // The lowest addressed item is from m_freeList; pop it from the Vector.
                coalescionEntry = freeListEntries.last();
                freeListEntries.removeLast();
            }
            
            // Right, we have a FreeListEntry, we just need to check if there is anything else
            // to coalesce onto the end.
            ASSERT(coalescionEntry);
            while (true) {
                // Calculate the end address of the chunk we have found so far.
                void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);

                // Is there another chunk adjacent to the one we already have?
                if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
                    // Yes - another FreeListEntry - pop it from the list.
                    FreeListEntry* coalescee = freeListEntries.last();
                    freeListEntries.removeLast();
                    // Add its size onto our existing node.
                    coalescionEntry->size += coalescee->size;
                    delete coalescee;
                } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
                    // We can coalesce the next common-sized chunk.
                    m_commonSizedAllocations.removeLast();
                    coalescionEntry->size += m_commonSize;
                } else
                    break; // Nope, nothing to be added - stop here.
            }

            // We've coalesced everything we can onto the current chunk.
            // Add it back into m_freeList.
            addToFreeList(coalescionEntry);
        }

        // All chunks of free memory larger than m_commonSize should be
        // back in m_freeList by now.  All that remains to be done is to
        // copy the contents of the newCommonSizedAllocations Vector back
        // into the m_commonSizedAllocations Vector.
        ASSERT(m_commonSizedAllocations.size() == 0);
        m_commonSizedAllocations.append(newCommonSizedAllocations);
    }

public:

    FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
        : m_commonSize(commonSize)
        , m_countFreedSinceLastCoalesce(0)
        , m_totalHeapSize(totalHeapSize)
    {
        // Cook up an address to allocate at, using the following recipe:
        //   17 bits of zero, stay in userspace kids.
        //   26 bits of randomness for ASLR.
        //   21 bits of zero, at least stay aligned within one level of the pagetables.
        //
        // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
        // for now, instead of the full 26 bits of ASLR, let's stick with 25 bits of randomization
        // plus 2^24, which should put us somewhere in the middle of userspace (in the address
        // range 0x200000000000 .. 0x5fffffffffff).
        intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
        randomLocation += (1 << 24);
        randomLocation <<= 21;
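        // A quick sanity check of the arithmetic above (a sketch, not normative):
        // randomLocation is in the range [2^24, 2^24 + 2^25), i.e. roughly
        // [0x1000000, 0x3000000); shifting left by 21 bits then yields base
        // addresses in [0x200000000000, 0x600000000000), matching the range
        // quoted in the comment above.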
        m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
        // mmap reports failure by returning MAP_FAILED rather than null.
        if (m_base == MAP_FAILED)
            CRASH();

        // For simplicity, we keep all memory in m_freeList in a 'released' state.
        // This means that we can simply reuse all memory when allocating, without
        // worrying about its previous state, and also makes coalescing m_freeList
        // simpler since we need not worry about the possibility of coalescing released
        // chunks with non-released ones.
        release(m_base, m_totalHeapSize);
        m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
    }

    void* alloc(size_t size)
    {
        void* result;

        // Freed allocations of the common size are not stored back into the main
        // m_freeList, but are instead stored in a separate vector.  If the request
        // is for a common sized allocation, check this list.
        if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
            result = m_commonSizedAllocations.last();
            m_commonSizedAllocations.removeLast();
        } else {
            // Search m_freeList for a suitably sized chunk to allocate memory from.
            FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);

            // This would be bad news.
            if (!entry) {
                // Errk!  Let's take a last-ditch desperation attempt at defragmentation...
                coalesceFreeSpace();
                // Did that free up a large enough chunk?
                entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
                // No?...  *BOOM!*
                if (!entry)
                    CRASH();
            }
            ASSERT(entry->size != m_commonSize);

            // Remove the entry from m_freeList.  But! -
            // Each entry in the tree may represent a chain of multiple chunks of the
            // same size, and we only want to remove one of them.  So, if this entry
            // does have a chain, just remove the first-but-one item from the chain.
            if (FreeListEntry* next = entry->nextEntry) {
                // We're going to leave 'entry' in the tree; remove 'next' from its chain.
                entry->nextEntry = next->nextEntry;
                next->nextEntry = 0;
                entry = next;
            } else
                m_freeList.remove(entry->size);

            // Whoo!, we have a result!
            ASSERT(entry->size >= size);
            result = entry->pointer;

            // If the allocation exactly fits the chunk we found in
            // m_freeList, then the FreeListEntry node is no longer needed.
            if (entry->size == size)
                delete entry;
            else {
                // There is memory left over, and it is not of the common size.
                // We can reuse the existing FreeListEntry node to add this back
                // into m_freeList.
                entry->pointer = (void*)((intptr_t)entry->pointer + size);
                entry->size -= size;
                addToFreeList(entry);
            }
        }

        // Call reuse to report to the operating system that this memory is in use.
        ASSERT(isWithinVMPool(result, size));
        reuse(result, size);
        return result;
    }

    void free(void* pointer, size_t size)
    {
        // Call release to report to the operating system that this
        // memory is no longer in use, and need not be paged out.
        ASSERT(isWithinVMPool(pointer, size));
        release(pointer, size);

        // Common-sized allocations are stored in the m_commonSizedAllocations
        // vector; all other freed chunks are added to m_freeList.
        if (size == m_commonSize)
            m_commonSizedAllocations.append(pointer);
        else
            addToFreeList(new FreeListEntry(pointer, size));

        // Do some housekeeping.  Every time we reach a point that
        // 16MB of allocations have been freed, sweep m_freeList
        // coalescing any neighboring fragments.
        m_countFreedSinceLastCoalesce += size;
        if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
            m_countFreedSinceLastCoalesce = 0;
            coalesceFreeSpace();
        }
    }

private:

#ifndef NDEBUG
    bool isWithinVMPool(void* pointer, size_t size)
    {
        return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
    }
#endif

    // Freed space from the most common sized allocations will be held in this list, ...
    const size_t m_commonSize;
    Vector<void*> m_commonSizedAllocations;

    // ... and all other freed allocations are held in m_freeList.
    SizeSortedFreeTree m_freeList;

    // This is used for housekeeping, to trigger defragmentation of the freed lists.
    size_t m_countFreedSinceLastCoalesce;

    void* m_base;
    size_t m_totalHeapSize;
};

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

static FixedVMPoolAllocator* allocator = 0;
static SpinLock spinlock = SPINLOCK_INITIALIZER;

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lock_holder(&spinlock);

    if (!allocator)
        allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
    ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
    return alloc;
}

void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation) 
{
    SpinLockHolder lock_holder(&spinlock);

    ASSERT(allocator);
    allocator->free(allocation.pages, allocation.size);
}

}

#endif // ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)