summaryrefslogtreecommitdiff
path: root/js/src/gc
diff options
context:
space:
mode:
Diffstat (limited to 'js/src/gc')
-rw-r--r--js/src/gc/Allocator.cpp628
-rw-r--r--js/src/gc/Allocator.h35
-rw-r--r--js/src/gc/Barrier.cpp210
-rw-r--r--js/src/gc/Barrier.h968
-rw-r--r--js/src/gc/FindSCCs.h214
-rw-r--r--js/src/gc/GCInternals.h175
-rw-r--r--js/src/gc/GCRuntime.h1467
-rw-r--r--js/src/gc/GCTrace.cpp243
-rw-r--r--js/src/gc/GCTrace.h55
-rw-r--r--js/src/gc/GCTraceFormat.h56
-rw-r--r--js/src/gc/Heap-inl.h29
-rw-r--r--js/src/gc/Heap.h1385
-rw-r--r--js/src/gc/Iteration.cpp142
-rw-r--r--js/src/gc/Marking.cpp3019
-rw-r--r--js/src/gc/Marking.h477
-rw-r--r--js/src/gc/Memory.cpp901
-rw-r--r--js/src/gc/Memory.h53
-rw-r--r--js/src/gc/MemoryProfiler.cpp49
-rw-r--r--js/src/gc/Nursery-inl.h88
-rw-r--r--js/src/gc/Nursery.cpp1025
-rw-r--r--js/src/gc/Nursery.h471
-rw-r--r--js/src/gc/NurseryAwareHashMap.h178
-rw-r--r--js/src/gc/Policy.h159
-rw-r--r--js/src/gc/RootMarking.cpp543
-rw-r--r--js/src/gc/Rooting.h82
-rw-r--r--js/src/gc/Statistics.cpp1383
-rw-r--r--js/src/gc/Statistics.h505
-rw-r--r--js/src/gc/StoreBuffer-inl.h75
-rw-r--r--js/src/gc/StoreBuffer.cpp153
-rw-r--r--js/src/gc/StoreBuffer.h499
-rw-r--r--js/src/gc/Tracer.cpp432
-rw-r--r--js/src/gc/Tracer.h159
-rw-r--r--js/src/gc/Verifier.cpp569
-rw-r--r--js/src/gc/Zone.cpp471
-rw-r--r--js/src/gc/Zone.h743
35 files changed, 17641 insertions, 0 deletions
diff --git a/js/src/gc/Allocator.cpp b/js/src/gc/Allocator.cpp
new file mode 100644
index 000000000..f7dc50d02
--- /dev/null
+++ b/js/src/gc/Allocator.cpp
@@ -0,0 +1,628 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Allocator.h"
+
+#include "jscntxt.h"
+
+#include "gc/GCInternals.h"
+#include "gc/GCTrace.h"
+#include "gc/Nursery.h"
+#include "jit/JitCompartment.h"
+#include "vm/Runtime.h"
+#include "vm/String.h"
+
+#include "jsobjinlines.h"
+
+#include "gc/Heap-inl.h"
+
+using namespace js;
+using namespace gc;
+
+template <typename T, AllowGC allowGC /* = CanGC */>
+JSObject*
+js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
+ const Class* clasp)
+{
+ static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
+ MOZ_ASSERT(IsObjectAllocKind(kind));
+ size_t thingSize = Arena::thingSize(kind);
+
+ MOZ_ASSERT(thingSize == Arena::thingSize(kind));
+ MOZ_ASSERT(thingSize >= sizeof(JSObject_Slots0));
+ static_assert(sizeof(JSObject_Slots0) >= CellSize,
+ "All allocations must be at least the allocator-imposed minimum size.");
+
+ MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());
+
+ // Off-main-thread alloc cannot trigger GC or make runtime assertions.
+ if (!cx->isJSContext())
+ return GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
+
+ JSContext* ncx = cx->asJSContext();
+ JSRuntime* rt = ncx->runtime();
+ if (!rt->gc.checkAllocatorState<allowGC>(ncx, kind))
+ return nullptr;
+
+ if (ncx->nursery().isEnabled() && heap != TenuredHeap) {
+ JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(ncx, thingSize, nDynamicSlots, clasp);
+ if (obj)
+ return obj;
+
+ // Our most common non-jit allocation path is NoGC; thus, if we fail the
+ // alloc and cannot GC, we *must* return nullptr here so that the caller
+ // will do a CanGC allocation to clear the nursery. Failing to do so will
+ // cause all allocations on this path to land in Tenured, and we will not
+ // get the benefit of the nursery.
+ if (!allowGC)
+ return nullptr;
+ }
+
+ return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
+}
+template JSObject* js::Allocate<JSObject, NoGC>(ExclusiveContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots, gc::InitialHeap heap,
+ const Class* clasp);
+template JSObject* js::Allocate<JSObject, CanGC>(ExclusiveContext* cx, gc::AllocKind kind,
+ size_t nDynamicSlots, gc::InitialHeap heap,
+ const Class* clasp);
+
+// Attempt to allocate a new GC thing out of the nursery. If there is not enough
+// room in the nursery or there is an OOM, this method will return nullptr.
+template <AllowGC allowGC>
+JSObject*
+GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots, const Class* clasp)
+{
+ MOZ_ASSERT(isNurseryAllocAllowed());
+ MOZ_ASSERT(!cx->zone()->usedByExclusiveThread);
+ MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
+ JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
+ if (obj)
+ return obj;
+
+ if (allowGC && !rt->mainThread.suppressGC) {
+ minorGC(JS::gcreason::OUT_OF_NURSERY);
+
+ // Exceeding gcMaxBytes while tenuring can disable the Nursery.
+ if (nursery.isEnabled()) {
+ JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
+ MOZ_ASSERT(obj);
+ return obj;
+ }
+ }
+ return nullptr;
+}
+
+template <AllowGC allowGC>
+JSObject*
+GCRuntime::tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
+ size_t nDynamicSlots)
+{
+ HeapSlot* slots = nullptr;
+ if (nDynamicSlots) {
+ slots = cx->zone()->pod_malloc<HeapSlot>(nDynamicSlots);
+ if (MOZ_UNLIKELY(!slots)) {
+ if (allowGC)
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+ Debug_SetSlotRangeToCrashOnTouch(slots, nDynamicSlots);
+ }
+
+ JSObject* obj = tryNewTenuredThing<JSObject, allowGC>(cx, kind, thingSize);
+
+ if (obj)
+ obj->setInitialSlotsMaybeNonNative(slots);
+ else
+ js_free(slots);
+
+ return obj;
+}
+
+template <typename T, AllowGC allowGC /* = CanGC */>
+T*
+js::Allocate(ExclusiveContext* cx)
+{
+ static_assert(!mozilla::IsConvertible<T*, JSObject*>::value, "must not be JSObject derived");
+ static_assert(sizeof(T) >= CellSize,
+ "All allocations must be at least the allocator-imposed minimum size.");
+
+ AllocKind kind = MapTypeToFinalizeKind<T>::kind;
+ size_t thingSize = sizeof(T);
+ MOZ_ASSERT(thingSize == Arena::thingSize(kind));
+
+ if (cx->isJSContext()) {
+ JSContext* ncx = cx->asJSContext();
+ if (!ncx->runtime()->gc.checkAllocatorState<allowGC>(ncx, kind))
+ return nullptr;
+ }
+
+ return GCRuntime::tryNewTenuredThing<T, allowGC>(cx, kind, thingSize);
+}
+
+#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType) \
+ template type* js::Allocate<type, NoGC>(ExclusiveContext* cx);\
+ template type* js::Allocate<type, CanGC>(ExclusiveContext* cx);
+FOR_EACH_NONOBJECT_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
+#undef DECL_ALLOCATOR_INSTANCES
+
+template <typename T, AllowGC allowGC>
+/* static */ T*
+GCRuntime::tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize)
+{
+ // Bump allocate in the arena's current free-list span.
+ T* t = reinterpret_cast<T*>(cx->arenas()->allocateFromFreeList(kind, thingSize));
+ if (MOZ_UNLIKELY(!t)) {
+ // Get the next available free list and allocate out of it. This may
+ // acquire a new arena, which will lock the chunk list. If there are no
+ // chunks available it may also allocate new memory directly.
+ t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind, thingSize));
+
+ if (MOZ_UNLIKELY(!t && allowGC && cx->isJSContext())) {
+ // We have no memory available for a new chunk; perform an
+ // all-compartments, non-incremental, shrinking GC and wait for
+ // sweeping to finish.
+ JS::PrepareForFullGC(cx->asJSContext());
+ AutoKeepAtoms keepAtoms(cx->perThreadData);
+ cx->asJSContext()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
+ cx->asJSContext()->gc.waitBackgroundSweepOrAllocEnd();
+
+ t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
+ if (!t)
+ ReportOutOfMemory(cx);
+ }
+ }
+
+ checkIncrementalZoneState(cx, t);
+ TraceTenuredAlloc(t, kind);
+ return t;
+}
+
+template <AllowGC allowGC>
+bool
+GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
+{
+ if (allowGC) {
+ if (!gcIfNeededPerAllocation(cx))
+ return false;
+ }
+
+#if defined(JS_GC_ZEAL) || defined(DEBUG)
+ MOZ_ASSERT_IF(cx->compartment()->isAtomsCompartment(),
+ kind == AllocKind::ATOM ||
+ kind == AllocKind::FAT_INLINE_ATOM ||
+ kind == AllocKind::SYMBOL ||
+ kind == AllocKind::JITCODE ||
+ kind == AllocKind::SCOPE);
+ MOZ_ASSERT_IF(!cx->compartment()->isAtomsCompartment(),
+ kind != AllocKind::ATOM &&
+ kind != AllocKind::FAT_INLINE_ATOM);
+ MOZ_ASSERT(!rt->isHeapBusy());
+ MOZ_ASSERT(isAllocAllowed());
+#endif
+
+ // Crash if we perform a GC action when it is not safe.
+ if (allowGC && !rt->mainThread.suppressGC)
+ rt->gc.verifyIsSafeToGC();
+
+ // For testing out of memory conditions
+ if (js::oom::ShouldFailWithOOM()) {
+ // If we are doing a fallible allocation, percolate up the OOM
+ // instead of reporting it.
+ if (allowGC)
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+bool
+GCRuntime::gcIfNeededPerAllocation(JSContext* cx)
+{
+#ifdef JS_GC_ZEAL
+ if (needZealousGC())
+ runDebugGC();
+#endif
+
+ // Invoking the interrupt callback can fail and we can't usefully
+ // handle that here. Just check in case we need to collect instead.
+ if (rt->hasPendingInterrupt())
+ gcIfRequested();
+
+ // If we have grown past our GC heap threshold while in the middle of
+ // an incremental GC, we're growing faster than we're GCing, so stop
+ // the world and do a full, non-incremental GC right now, if possible.
+ if (isIncrementalGCInProgress() &&
+ cx->zone()->usage.gcBytes() > cx->zone()->threshold.gcTriggerBytes())
+ {
+ PrepareZoneForGC(cx->zone());
+ AutoKeepAtoms keepAtoms(cx->perThreadData);
+ gc(GC_NORMAL, JS::gcreason::INCREMENTAL_TOO_SLOW);
+ }
+
+ return true;
+}
+
+template <typename T>
+/* static */ void
+GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
+{
+#ifdef DEBUG
+ if (!cx->isJSContext())
+ return;
+
+ Zone* zone = cx->asJSContext()->zone();
+ MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
+ t->asTenured().arena()->allocatedDuringIncremental);
+#endif
+}
+
+
+// /////////// Arena -> Thing Allocator //////////////////////////////////////
+
+void
+GCRuntime::startBackgroundAllocTaskIfIdle()
+{
+ AutoLockHelperThreadState helperLock;
+ if (allocTask.isRunningWithLockHeld(helperLock))
+ return;
+
+ // Join the previous invocation of the task. This will return immediately
+ // if the thread has never been started.
+ allocTask.joinWithLockHeld(helperLock);
+ allocTask.startWithLockHeld(helperLock);
+}
+
+/* static */ TenuredCell*
+GCRuntime::refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind, size_t thingSize)
+{
+ cx->arenas()->checkEmptyFreeList(thingKind);
+
+ if (cx->isJSContext())
+ return refillFreeListFromMainThread(cx->asJSContext(), thingKind, thingSize);
+
+ return refillFreeListOffMainThread(cx, thingKind);
+}
+
+/* static */ TenuredCell*
+GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
+{
+ // It should not be possible to allocate on the main thread while we are
+ // inside a GC.
+ Zone *zone = cx->zone();
+ MOZ_ASSERT(!cx->runtime()->isHeapBusy(), "allocating while under GC");
+
+ AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
+ return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
+}
+
+/* static */ TenuredCell*
+GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
+{
+ // A GC may be happening on the main thread, but zones used by exclusive
+ // contexts are never collected.
+ Zone* zone = cx->zone();
+ MOZ_ASSERT(!zone->wasGCStarted());
+
+ AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
+ return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
+}
+
+/* static */ TenuredCell*
+GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
+{
+ /*
+ * Called by compacting GC to refill a free list while we are in a GC.
+ */
+
+ zone->arenas.checkEmptyFreeList(thingKind);
+ mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
+ MOZ_ASSERT(rt->isHeapCollecting());
+ MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
+
+ AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
+ return zone->arenas.allocateFromArena(zone, thingKind, DontCheckThresholds,
+ maybeStartBackgroundAllocation);
+}
+
+TenuredCell*
+ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds,
+ AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc)
+{
+ JSRuntime* rt = zone->runtimeFromAnyThread();
+
+ mozilla::Maybe<AutoLockGC> maybeLock;
+
+ // See if we can proceed without taking the GC lock.
+ if (backgroundFinalizeState[thingKind] != BFS_DONE)
+ maybeLock.emplace(rt);
+
+ ArenaList& al = arenaLists[thingKind];
+ Arena* arena = al.takeNextArena();
+ if (arena) {
+ // Empty arenas should be immediately freed.
+ MOZ_ASSERT(!arena->isEmpty());
+
+ return allocateFromArenaInner(zone, arena, thingKind);
+ }
+
+ // Parallel threads have their own ArenaLists, but chunks are shared;
+ // if we haven't already, take the GC lock now to avoid racing.
+ if (maybeLock.isNothing())
+ maybeLock.emplace(rt);
+
+ Chunk* chunk = rt->gc.pickChunk(maybeLock.ref(), maybeStartBGAlloc);
+ if (!chunk)
+ return nullptr;
+
+ // Although our chunk should definitely have enough space for another arena,
+ // there are other valid reasons why Chunk::allocateArena() may fail.
+ arena = rt->gc.allocateArena(chunk, zone, thingKind, checkThresholds, maybeLock.ref());
+ if (!arena)
+ return nullptr;
+
+ MOZ_ASSERT(al.isCursorAtEnd());
+ al.insertBeforeCursor(arena);
+
+ return allocateFromArenaInner(zone, arena, thingKind);
+}
+
+inline TenuredCell*
+ArenaLists::allocateFromArenaInner(JS::Zone* zone, Arena* arena, AllocKind kind)
+{
+ size_t thingSize = Arena::thingSize(kind);
+
+ freeLists[kind] = arena->getFirstFreeSpan();
+
+ if (MOZ_UNLIKELY(zone->wasGCStarted()))
+ zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, arena);
+ TenuredCell* thing = freeLists[kind]->allocate(thingSize);
+ MOZ_ASSERT(thing); // This allocation is infallible.
+ return thing;
+}
+
+void
+GCRuntime::arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena)
+{
+ if (zone->needsIncrementalBarrier()) {
+ arena->allocatedDuringIncremental = true;
+ marker.delayMarkingArena(arena);
+ } else if (zone->isGCSweeping()) {
+ arena->setNextAllocDuringSweep(arenasAllocatedDuringSweep);
+ arenasAllocatedDuringSweep = arena;
+ }
+}
+
+
+// /////////// Chunk -> Arena Allocator //////////////////////////////////////
+
+bool
+GCRuntime::wantBackgroundAllocation(const AutoLockGC& lock) const
+{
+ // To minimize memory waste, we do not want to run the background chunk
+ // allocation if we already have some empty chunks or when the runtime has
+ // a small heap size (and therefore likely has a small growth rate).
+ return allocTask.enabled() &&
+ emptyChunks(lock).count() < tunables.minEmptyChunkCount(lock) &&
+ (fullChunks(lock).count() + availableChunks(lock).count()) >= 4;
+}
+
+Arena*
+GCRuntime::allocateArena(Chunk* chunk, Zone* zone, AllocKind thingKind,
+ ShouldCheckThresholds checkThresholds, const AutoLockGC& lock)
+{
+ MOZ_ASSERT(chunk->hasAvailableArenas());
+
+ // Fail the allocation if we are over our heap size limits.
+ if (checkThresholds && usage.gcBytes() >= tunables.gcMaxBytes())
+ return nullptr;
+
+ Arena* arena = chunk->allocateArena(rt, zone, thingKind, lock);
+ zone->usage.addGCArena();
+
+ // Trigger an incremental slice if needed.
+ if (checkThresholds)
+ maybeAllocTriggerZoneGC(zone, lock);
+
+ return arena;
+}
+
+Arena*
+Chunk::allocateArena(JSRuntime* rt, Zone* zone, AllocKind thingKind, const AutoLockGC& lock)
+{
+ Arena* arena = info.numArenasFreeCommitted > 0
+ ? fetchNextFreeArena(rt)
+ : fetchNextDecommittedArena();
+ arena->init(zone, thingKind);
+ updateChunkListAfterAlloc(rt, lock);
+ return arena;
+}
+
+inline void
+GCRuntime::updateOnFreeArenaAlloc(const ChunkInfo& info)
+{
+ MOZ_ASSERT(info.numArenasFreeCommitted <= numArenasFreeCommitted);
+ --numArenasFreeCommitted;
+}
+
+Arena*
+Chunk::fetchNextFreeArena(JSRuntime* rt)
+{
+ MOZ_ASSERT(info.numArenasFreeCommitted > 0);
+ MOZ_ASSERT(info.numArenasFreeCommitted <= info.numArenasFree);
+
+ Arena* arena = info.freeArenasHead;
+ info.freeArenasHead = arena->next;
+ --info.numArenasFreeCommitted;
+ --info.numArenasFree;
+ rt->gc.updateOnFreeArenaAlloc(info);
+
+ return arena;
+}
+
+Arena*
+Chunk::fetchNextDecommittedArena()
+{
+ MOZ_ASSERT(info.numArenasFreeCommitted == 0);
+ MOZ_ASSERT(info.numArenasFree > 0);
+
+ unsigned offset = findDecommittedArenaOffset();
+ info.lastDecommittedArenaOffset = offset + 1;
+ --info.numArenasFree;
+ decommittedArenas.unset(offset);
+
+ Arena* arena = &arenas[offset];
+ MarkPagesInUse(arena, ArenaSize);
+ arena->setAsNotAllocated();
+
+ return arena;
+}
+
+/*
+ * Search for and return the next decommitted Arena. Our goal is to keep
+ * lastDecommittedArenaOffset "close" to a free arena. We do this by setting
+ * it to the most recently freed arena when we free, and forcing it to
+ * the last alloc + 1 when we allocate.
+ */
+uint32_t
+Chunk::findDecommittedArenaOffset()
+{
+ /* Note: lastFreeArenaOffset can be past the end of the list. */
+ for (unsigned i = info.lastDecommittedArenaOffset; i < ArenasPerChunk; i++) {
+ if (decommittedArenas.get(i))
+ return i;
+ }
+ for (unsigned i = 0; i < info.lastDecommittedArenaOffset; i++) {
+ if (decommittedArenas.get(i))
+ return i;
+ }
+ MOZ_CRASH("No decommitted arenas found.");
+}
+
+
+// /////////// System -> Chunk Allocator /////////////////////////////////////
+
+Chunk*
+GCRuntime::getOrAllocChunk(const AutoLockGC& lock,
+ AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
+{
+ Chunk* chunk = emptyChunks(lock).pop();
+ if (!chunk) {
+ chunk = Chunk::allocate(rt);
+ if (!chunk)
+ return nullptr;
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ }
+
+ if (wantBackgroundAllocation(lock))
+ maybeStartBackgroundAllocation.tryToStartBackgroundAllocation(rt->gc);
+
+ return chunk;
+}
+
+void
+GCRuntime::recycleChunk(Chunk* chunk, const AutoLockGC& lock)
+{
+ emptyChunks(lock).push(chunk);
+}
+
+Chunk*
+GCRuntime::pickChunk(const AutoLockGC& lock,
+ AutoMaybeStartBackgroundAllocation& maybeStartBackgroundAllocation)
+{
+ if (availableChunks(lock).count())
+ return availableChunks(lock).head();
+
+ Chunk* chunk = getOrAllocChunk(lock, maybeStartBackgroundAllocation);
+ if (!chunk)
+ return nullptr;
+
+ chunk->init(rt);
+ MOZ_ASSERT(chunk->info.numArenasFreeCommitted == 0);
+ MOZ_ASSERT(chunk->unused());
+ MOZ_ASSERT(!fullChunks(lock).contains(chunk));
+ MOZ_ASSERT(!availableChunks(lock).contains(chunk));
+
+ chunkAllocationSinceLastGC = true;
+
+ availableChunks(lock).push(chunk);
+
+ return chunk;
+}
+
+BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
+ : runtime(rt),
+ chunkPool_(pool),
+ enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
+{
+}
+
+/* virtual */ void
+BackgroundAllocTask::run()
+{
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);
+
+ AutoLockGC lock(runtime);
+ while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
+ Chunk* chunk;
+ {
+ AutoUnlockGC unlock(lock);
+ chunk = Chunk::allocate(runtime);
+ if (!chunk)
+ break;
+ chunk->init(runtime);
+ }
+ chunkPool_.push(chunk);
+ }
+}
+
+/* static */ Chunk*
+Chunk::allocate(JSRuntime* rt)
+{
+ Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
+ if (!chunk)
+ return nullptr;
+ rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
+ return chunk;
+}
+
+void
+Chunk::init(JSRuntime* rt)
+{
+ JS_POISON(this, JS_FRESH_TENURED_PATTERN, ChunkSize);
+
+ /*
+ * We clear the bitmap to guard against JS::GCThingIsMarkedGray being called
+ * on uninitialized data, which would happen before the first GC cycle.
+ */
+ bitmap.clear();
+
+ /*
+ * Decommit the arenas. We do this after poisoning so that if the OS does
+ * not have to recycle the pages, we still get the benefit of poisoning.
+ */
+ decommitAllArenas(rt);
+
+ /* Initialize the chunk info. */
+ info.init();
+ new (&trailer) ChunkTrailer(rt);
+
+ /* The rest of info fields are initialized in pickChunk. */
+}
+
+void Chunk::decommitAllArenas(JSRuntime* rt)
+{
+ decommittedArenas.clear(true);
+ MarkPagesUnused(&arenas[0], ArenasPerChunk * ArenaSize);
+
+ info.freeArenasHead = nullptr;
+ info.lastDecommittedArenaOffset = 0;
+ info.numArenasFree = ArenasPerChunk;
+ info.numArenasFreeCommitted = 0;
+}
diff --git a/js/src/gc/Allocator.h b/js/src/gc/Allocator.h
new file mode 100644
index 000000000..fd530cadf
--- /dev/null
+++ b/js/src/gc/Allocator.h
@@ -0,0 +1,35 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Allocator_h
+#define gc_Allocator_h
+
+#include "gc/Heap.h"
+#include "js/RootingAPI.h"
+
+namespace js {
+struct Class;
+
+// Allocate a new GC thing. After a successful allocation the caller must
+// fully initialize the thing before calling any function that can potentially
+// trigger GC. This will ensure that GC tracing never sees junk values stored
+// in the partially initialized thing.
+//
+// Note that JSObject allocation must use the longer signature below that
+// includes slot, heap, and finalizer information in support of various
+// object-specific optimizations.
+template <typename T, AllowGC allowGC = CanGC>
+T*
+Allocate(ExclusiveContext* cx);
+
+template <typename, AllowGC allowGC = CanGC>
+JSObject*
+Allocate(ExclusiveContext* cx, gc::AllocKind kind, size_t nDynamicSlots, gc::InitialHeap heap,
+ const Class* clasp);
+
+} // namespace js
+
+#endif // gc_Allocator_h
diff --git a/js/src/gc/Barrier.cpp b/js/src/gc/Barrier.cpp
new file mode 100644
index 000000000..f19f6f046
--- /dev/null
+++ b/js/src/gc/Barrier.cpp
@@ -0,0 +1,210 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Barrier.h"
+
+#include "jscompartment.h"
+#include "jsobj.h"
+
+#include "builtin/TypedObject.h"
+#include "gc/Policy.h"
+#include "gc/Zone.h"
+#include "js/HashTable.h"
+#include "js/Value.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/SharedArrayObject.h"
+#include "vm/Symbol.h"
+#include "wasm/WasmJS.h"
+
+namespace js {
+
+bool
+RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
+{
+ return shadowZone->runtimeFromMainThread()->isHeapMajorCollecting();
+}
+
+#ifdef DEBUG
+
+bool
+IsMarkedBlack(NativeObject* obj)
+{
+ // Note: we assume conservatively that Nursery things will be live.
+ if (!obj->isTenured())
+ return true;
+
+ gc::TenuredCell& tenured = obj->asTenured();
+ return (tenured.isMarked(gc::BLACK) && !tenured.isMarked(gc::GRAY)) ||
+ tenured.arena()->allocatedDuringIncremental;
+}
+
+bool
+HeapSlot::preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const
+{
+ return kind == Slot
+ ? &owner->getSlotRef(slot) == this
+ : &owner->getDenseElement(slot) == (const Value*)this;
+}
+
+bool
+HeapSlot::preconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
+ const Value& target) const
+{
+ bool isCorrectSlot = kind == Slot
+ ? obj->getSlotAddressUnchecked(slot)->get() == target
+ : static_cast<HeapSlot*>(obj->getDenseElements() + slot)->get() == target;
+ bool isBlackToGray = target.isMarkable() &&
+ IsMarkedBlack(obj) && JS::GCThingIsMarkedGray(JS::GCCellPtr(target));
+ return isCorrectSlot && !isBlackToGray;
+}
+
+bool
+CurrentThreadIsIonCompiling()
+{
+ return TlsPerThreadData.get()->ionCompiling;
+}
+
+bool
+CurrentThreadIsIonCompilingSafeForMinorGC()
+{
+ return TlsPerThreadData.get()->ionCompilingSafeForMinorGC;
+}
+
+bool
+CurrentThreadIsGCSweeping()
+{
+ return TlsPerThreadData.get()->gcSweeping;
+}
+
+#endif // DEBUG
+
+template <typename S>
+template <typename T>
+void
+ReadBarrierFunctor<S>::operator()(T* t)
+{
+ InternalBarrierMethods<T*>::readBarrier(t);
+}
+
+// All GC things may be held in a Value, either publicly or as a private GC
+// thing.
+#define JS_EXPAND_DEF(name, type, _) \
+template void ReadBarrierFunctor<JS::Value>::operator()<type>(type*);
+JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+
+template <typename S>
+template <typename T>
+void
+PreBarrierFunctor<S>::operator()(T* t)
+{
+ InternalBarrierMethods<T*>::preBarrier(t);
+}
+
+// All GC things may be held in a Value, either publicly or as a private GC
+// thing.
+#define JS_EXPAND_DEF(name, type, _) \
+template void PreBarrierFunctor<JS::Value>::operator()<type>(type*);
+JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+
+template void PreBarrierFunctor<jsid>::operator()<JS::Symbol>(JS::Symbol*);
+template void PreBarrierFunctor<jsid>::operator()<JSString>(JSString*);
+
+template <typename T>
+/* static */ bool
+MovableCellHasher<T>::hasHash(const Lookup& l)
+{
+ if (!l)
+ return true;
+
+ return l->zoneFromAnyThread()->hasUniqueId(l);
+}
+
+template <typename T>
+/* static */ bool
+MovableCellHasher<T>::ensureHash(const Lookup& l)
+{
+ if (!l)
+ return true;
+
+ uint64_t unusedId;
+ return l->zoneFromAnyThread()->getUniqueId(l, &unusedId);
+}
+
+template <typename T>
+/* static */ HashNumber
+MovableCellHasher<T>::hash(const Lookup& l)
+{
+ if (!l)
+ return 0;
+
+ // We have to access the zone from-any-thread here: a worker thread may be
+ // cloning a self-hosted object from the main-thread-runtime-owned self-
+ // hosting zone into the off-main-thread runtime. The zone's uid lock will
+ // protect against multiple workers doing this simultaneously.
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ l->zoneFromAnyThread()->isSelfHostingZone());
+
+ return l->zoneFromAnyThread()->getHashCodeInfallible(l);
+}
+
+template <typename T>
+/* static */ bool
+MovableCellHasher<T>::match(const Key& k, const Lookup& l)
+{
+ // Return true if both are null or false if only one is null.
+ if (!k)
+ return !l;
+ if (!l)
+ return false;
+
+ MOZ_ASSERT(k);
+ MOZ_ASSERT(l);
+ MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
+ l->zoneFromAnyThread()->isSelfHostingZone());
+
+ Zone* zone = k->zoneFromAnyThread();
+ if (zone != l->zoneFromAnyThread())
+ return false;
+ MOZ_ASSERT(zone->hasUniqueId(k));
+ MOZ_ASSERT(zone->hasUniqueId(l));
+
+ // Since both already have a uid (from hash), the get is infallible.
+ return zone->getUniqueIdInfallible(k) == zone->getUniqueIdInfallible(l);
+}
+
+#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wattributes"
+#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
+
+template struct JS_PUBLIC_API(MovableCellHasher<JSObject*>);
+template struct JS_PUBLIC_API(MovableCellHasher<GlobalObject*>);
+template struct JS_PUBLIC_API(MovableCellHasher<SavedFrame*>);
+template struct JS_PUBLIC_API(MovableCellHasher<EnvironmentObject*>);
+template struct JS_PUBLIC_API(MovableCellHasher<WasmInstanceObject*>);
+template struct JS_PUBLIC_API(MovableCellHasher<JSScript*>);
+
+#ifdef JS_BROKEN_GCC_ATTRIBUTE_WARNING
+#pragma GCC diagnostic pop
+#endif // JS_BROKEN_GCC_ATTRIBUTE_WARNING
+
+} // namespace js
+
+JS_PUBLIC_API(void)
+JS::HeapObjectPostBarrier(JSObject** objp, JSObject* prev, JSObject* next)
+{
+ MOZ_ASSERT(objp);
+ js::InternalBarrierMethods<JSObject*>::postBarrier(objp, prev, next);
+}
+
+JS_PUBLIC_API(void)
+JS::HeapValuePostBarrier(JS::Value* valuep, const Value& prev, const Value& next)
+{
+ MOZ_ASSERT(valuep);
+ js::InternalBarrierMethods<JS::Value>::postBarrier(valuep, prev, next);
+}
diff --git a/js/src/gc/Barrier.h b/js/src/gc/Barrier.h
new file mode 100644
index 000000000..950c96314
--- /dev/null
+++ b/js/src/gc/Barrier.h
@@ -0,0 +1,968 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Barrier_h
+#define gc_Barrier_h
+
+#include "NamespaceImports.h"
+
+#include "gc/Heap.h"
+#include "gc/StoreBuffer.h"
+#include "js/HeapAPI.h"
+#include "js/Id.h"
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+
+/*
+ * A write barrier is a mechanism used by incremental or generation GCs to
+ * ensure that every value that needs to be marked is marked. In general, the
+ * write barrier should be invoked whenever a write can cause the set of things
+ * traced through by the GC to change. This includes:
+ * - writes to object properties
+ * - writes to array slots
+ * - writes to fields like JSObject::shape_ that we trace through
+ * - writes to fields in private data
+ * - writes to non-markable fields like JSObject::private that point to
+ * markable data
+ * The last category is the trickiest. Even though the private pointer does not
+ * point to a GC thing, changing the private pointer may change the set of
+ * objects that are traced by the GC. Therefore it needs a write barrier.
+ *
+ * Every barriered write should have the following form:
+ * <pre-barrier>
+ * obj->field = value; // do the actual write
+ * <post-barrier>
+ * The pre-barrier is used for incremental GC and the post-barrier is for
+ * generational GC.
+ *
+ * PRE-BARRIER
+ *
+ * To understand the pre-barrier, let's consider how incremental GC works. The
+ * GC itself is divided into "slices". Between each slice, JS code is allowed to
+ * run. Each slice should be short so that the user doesn't notice the
+ * interruptions. In our GC, the structure of the slices is as follows:
+ *
+ * 1. ... JS work, which leads to a request to do GC ...
+ * 2. [first GC slice, which performs all root marking and possibly more marking]
+ * 3. ... more JS work is allowed to run ...
+ * 4. [GC mark slice, which runs entirely in drainMarkStack]
+ * 5. ... more JS work ...
+ * 6. [GC mark slice, which runs entirely in drainMarkStack]
+ * 7. ... more JS work ...
+ * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
+ * 9. ... JS continues uninterrupted now that the GC is finished ...
+ *
+ * Of course, there may be a different number of slices depending on how much
+ * marking is to be done.
+ *
+ * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
+ * might change the heap in a way that causes the GC to collect an object that
+ * is actually reachable. The write barrier prevents this from happening. We use
+ * a variant of incremental GC called "snapshot at the beginning." This approach
+ * guarantees the invariant that if an object is reachable in step 2, then we
+ * will mark it eventually. The name comes from the idea that we take a
+ * theoretical "snapshot" of all reachable objects in step 2; all objects in
+ * that snapshot should eventually be marked. (Note that the write barrier
+ * verifier code takes an actual snapshot.)
+ *
+ * The basic correctness invariant of a snapshot-at-the-beginning collector is
+ * that any object reachable at the end of the GC (step 9) must either:
+ * (1) have been reachable at the beginning (step 2) and thus in the snapshot
+ * (2) or must have been newly allocated, in steps 3, 5, or 7.
+ * To deal with case (2), any objects allocated during an incremental GC are
+ * automatically marked black.
+ *
+ * This strategy is actually somewhat conservative: if an object becomes
+ * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
+ * mainly for simplicity. (Also, note that the snapshot is entirely
+ * theoretical. We don't actually do anything special in step 2 that we wouldn't
+ * do in a non-incremental GC.)
+ *
+ * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
+ * write "obj->field = value". Let the prior value of obj->field be
+ * value0. Since it's possible that value0 may have been what obj->field
+ * contained in step 2, when the snapshot was taken, the barrier marks
+ * value0. Note that it only does this if we're in the middle of an incremental
+ * GC. Since this is rare, the cost of the write barrier is usually just an
+ * extra branch.
+ *
+ * In practice, we implement the pre-barrier differently based on the type of
+ * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
+ * a JSObject*. It takes value0 as a parameter.
+ *
+ * READ-BARRIER
+ *
+ * Incremental GC requires that weak pointers have read barriers. The problem
+ * happens when, during an incremental GC, some code reads a weak pointer and
+ * writes it somewhere on the heap that has been marked black in a previous
+ * slice. Since the weak pointer will not otherwise be marked and will be swept
+ * and finalized in the last slice, this will leave the pointer just written
+ * dangling after the GC. To solve this, we immediately mark black all weak
+ * pointers that get read between slices so that it is safe to store them in an
+ * already marked part of the heap, e.g. in Rooted.
+ *
+ * POST-BARRIER
+ *
+ * For generational GC, we want to be able to quickly collect the nursery in a
+ * minor collection. Part of the way this is achieved is to only mark the
+ * nursery itself; tenured things, which may form the majority of the heap, are
+ * not traced through or marked. This leads to the problem of what to do about
+ * tenured objects that have pointers into the nursery: if such things are not
+ * marked, they may be discarded while there are still live objects which
+ * reference them. The solution is to maintain information about these pointers,
+ * and mark their targets when we start a minor collection.
+ *
+ * The pointers can be thought of as edges in the object graph, and the set of
+ * edges from the tenured generation into the nursery is known as the remembered set.
+ * Post barriers are used to track this remembered set.
+ *
+ * Whenever a slot which could contain such a pointer is written, we use a write
+ * barrier to check if the edge created is in the remembered set, and if so we
+ * insert it into the store buffer, which is the collector's representation of
+ * the remembered set. This means that when we come to do a minor collection we
+ * can examine the contents of the store buffer and mark any edge targets that
+ * are in the nursery.
+ *
+ * IMPLEMENTATION DETAILS
+ *
+ * Since it would be awkward to change every write to memory into a function
+ * call, this file contains a bunch of C++ classes and templates that use
+ * operator overloading to take care of barriers automatically. In many cases,
+ * all that's necessary to make some field be barriered is to replace
+ * Type* field;
+ * with
+ * GCPtr<Type> field;
+ *
+ * One additional note: not all object writes need to be pre-barriered. Writes
+ * to newly allocated objects do not need a pre-barrier. In these cases, we use
+ * the "obj->field.init(value)" method instead of "obj->field = value". We use
+ * the init naming idiom in many places to signify that a field is being
+ * assigned for the first time.
+ *
+ * This file implements four classes, illustrated here:
+ *
+ * BarrieredBase base class of all barriers
+ * | |
+ * | WriteBarrieredBase base class which provides common write operations
+ * | | | | |
+ * | | | | PreBarriered provides pre-barriers only
+ * | | | |
+ * | | | GCPtr provides pre- and post-barriers
+ * | | |
+ * | | HeapPtr provides pre- and post-barriers; is relocatable
+ * | | and deletable for use inside C++ managed memory
+ * | |
+ * | HeapSlot similar to GCPtr, but tailored to slots storage
+ * |
+ * ReadBarrieredBase base class which provides common read operations
+ * |
+ * ReadBarriered provides read barriers only
+ *
+ *
+ * The barrier logic itself is implemented on T::writeBarrier.*,
+ * via:
+ *
+ * WriteBarrieredBase<T>::pre
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> T::writeBarrierPre
+ * -> InternalBarrierMethods<Value>::preBarrier
+ * -> InternalBarrierMethods<jsid>::preBarrier
+ * -> InternalBarrierMethods<T*>::preBarrier
+ * -> T::writeBarrierPre
+ *
+ * GCPtr<T>::post and HeapPtr<T>::post
+ * -> InternalBarrierMethods<T*>::postBarrier
+ * -> T::writeBarrierPost
+ * -> InternalBarrierMethods<Value>::postBarrier
+ * -> StoreBuffer::put
+ *
+ * These classes are designed to be used by the internals of the JS engine.
+ * Barriers designed to be used externally are provided in js/RootingAPI.h.
+ * These external barriers call into the same post-barrier implementations at
+ * InternalBarrierMethods<T>::post via an indirect call to Heap(.+)Barrier.
+ *
+ * These classes are designed to be used to wrap GC thing pointers or values that
+ * act like them (i.e. JS::Value and jsid). It is possible to use them for
+ * other types by supplying the necessary barrier implementations but this
+ * is not usually necessary and should be done with caution.
+ */
+
+class JSAtom;
+struct JSCompartment;
+class JSFlatString;
+class JSLinearString;
+
+namespace JS {
+class Symbol;
+} // namespace JS
+
+namespace js {
+
+class AccessorShape;
+class ArrayObject;
+class ArgumentsObject;
+class ArrayBufferObjectMaybeShared;
+class ArrayBufferObject;
+class ArrayBufferViewObject;
+class SharedArrayBufferObject;
+class BaseShape;
+class DebugEnvironmentProxy;
+class GlobalObject;
+class LazyScript;
+class ModuleObject;
+class ModuleEnvironmentObject;
+class ModuleNamespaceObject;
+class NativeObject;
+class PlainObject;
+class PropertyName;
+class SavedFrame;
+class EnvironmentObject;
+class ScriptSourceObject;
+class Shape;
+class UnownedBaseShape;
+class ObjectGroup;
+
+namespace jit {
+class JitCode;
+} // namespace jit
+
+#ifdef DEBUG
+// Barriers can't be triggered during backend Ion compilation, which may run on
+// a helper thread.
+bool
+CurrentThreadIsIonCompiling();
+
+bool
+CurrentThreadIsIonCompilingSafeForMinorGC();
+
+bool
+CurrentThreadIsGCSweeping();
+
+bool
+IsMarkedBlack(NativeObject* obj);
+#endif
+
+namespace gc {
+
+// Marking.h depends on these barrier definitions, so we need a separate
+// entry point for marking to implement the pre-barrier.
+void MarkValueForBarrier(JSTracer* trc, Value* v, const char* name);
+void MarkIdForBarrier(JSTracer* trc, jsid* idp, const char* name);
+
+} // namespace gc
+
+template <typename T>
+struct InternalBarrierMethods {};
+
+template <typename T>
+struct InternalBarrierMethods<T*>
+{
+ static bool isMarkable(T* v) { return v != nullptr; }
+
+ static bool isMarkableTaggedPointer(T* v) { return !IsNullTaggedPointer(v); }
+
+ static void preBarrier(T* v) { T::writeBarrierPre(v); }
+
+ static void postBarrier(T** vp, T* prev, T* next) { T::writeBarrierPost(vp, prev, next); }
+
+ static void readBarrier(T* v) { T::readBarrier(v); }
+};
+
+template <typename S> struct PreBarrierFunctor : public VoidDefaultAdaptor<S> {
+ template <typename T> void operator()(T* t);
+};
+
+template <typename S> struct ReadBarrierFunctor : public VoidDefaultAdaptor<S> {
+ template <typename T> void operator()(T* t);
+};
+
+template <>
+struct InternalBarrierMethods<Value>
+{
+ static bool isMarkable(const Value& v) { return v.isMarkable(); }
+ static bool isMarkableTaggedPointer(const Value& v) { return isMarkable(v); }
+
+ static void preBarrier(const Value& v) {
+ DispatchTyped(PreBarrierFunctor<Value>(), v);
+ }
+
+ static void postBarrier(Value* vp, const Value& prev, const Value& next) {
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(vp);
+
+ // If the target needs an entry, add it.
+ js::gc::StoreBuffer* sb;
+ if (next.isObject() && (sb = reinterpret_cast<gc::Cell*>(&next.toObject())->storeBuffer())) {
+ // If we know that the prev has already inserted an entry, we can
+ // skip doing the lookup to add the new entry. Note that we cannot
+ // safely assert the presence of the entry because it may have been
+ // added via a different store buffer.
+ if (prev.isObject() && reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer())
+ return;
+ sb->putValue(vp);
+ return;
+ }
+ // Remove the prev entry if the new value does not need it.
+ if (prev.isObject() && (sb = reinterpret_cast<gc::Cell*>(&prev.toObject())->storeBuffer()))
+ sb->unputValue(vp);
+ }
+
+ static void readBarrier(const Value& v) {
+ DispatchTyped(ReadBarrierFunctor<Value>(), v);
+ }
+};
+
+template <>
+struct InternalBarrierMethods<jsid>
+{
+ static bool isMarkable(jsid id) { return JSID_IS_GCTHING(id); }
+ static bool isMarkableTaggedPointer(jsid id) { return isMarkable(id); }
+
+ static void preBarrier(jsid id) { DispatchTyped(PreBarrierFunctor<jsid>(), id); }
+ static void postBarrier(jsid* idp, jsid prev, jsid next) {}
+};
+
+// Barrier classes can use Mixins to add methods to a set of barrier
+// instantiations, to make the barriered thing look and feel more like the
+// thing itself.
+template <typename T>
+class BarrieredBaseMixins {};
+
+// Base class of all barrier types.
+//
+// This is marked non-memmovable since post barriers added by derived classes
+// can add pointers to class instances to the store buffer.
+template <typename T>
+class MOZ_NON_MEMMOVABLE BarrieredBase : public BarrieredBaseMixins<T>
+{
+ protected:
+ // BarrieredBase is not directly instantiable.
+ explicit BarrieredBase(const T& v) : value(v) {}
+
+ // Storage for all barrier classes. |value| must be a GC thing reference
+ // type: either a direct pointer to a GC thing or a supported tagged
+ // pointer that can reference GC things, such as JS::Value or jsid. Nested
+ // barrier types are NOT supported. See assertTypeConstraints.
+ T value;
+
+ public:
+ // Note: this is public because C++ cannot friend to a specific template instantiation.
+ // Friending to the generic template leads to a number of unintended consequences, including
+ // template resolution ambiguity and a circular dependency with Tracing.h.
+ T* unsafeUnbarrieredForTracing() { return &value; }
+};
+
+// Base class for barriered pointer types that intercept only writes.
+template <class T>
+class WriteBarrieredBase : public BarrieredBase<T>
+{
+ protected:
+ // WriteBarrieredBase is not directly instantiable.
+ explicit WriteBarrieredBase(const T& v) : BarrieredBase<T>(v) {}
+
+ public:
+ DECLARE_POINTER_COMPARISON_OPS(T);
+ DECLARE_POINTER_CONSTREF_OPS(T);
+
+ // Use this if the automatic coercion to T isn't working.
+ const T& get() const { return this->value; }
+
+ // Use this if you want to change the value without invoking barriers.
+ // Obviously this is dangerous unless you know the barrier is not needed.
+ void unsafeSet(const T& v) { this->value = v; }
+
+ // For users who need to manually barrier the raw types.
+ static void writeBarrierPre(const T& v) { InternalBarrierMethods<T>::preBarrier(v); }
+
+ protected:
+ void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
+ void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+/*
+ * PreBarriered only automatically handles pre-barriers. Post-barriers must be
+ * manually implemented when using this class. GCPtr and HeapPtr should be used
+ * in all cases that do not require explicit low-level control of moving
+ * behavior, e.g. for HashMap keys.
+ */
+template <class T>
+class PreBarriered : public WriteBarrieredBase<T>
+{
+ public:
+ PreBarriered() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
+ /*
+ * Allow implicit construction for use in generic contexts, such as
+ * DebuggerWeakMap::markKeys.
+ */
+ MOZ_IMPLICIT PreBarriered(const T& v) : WriteBarrieredBase<T>(v) {}
+ explicit PreBarriered(const PreBarriered<T>& v) : WriteBarrieredBase<T>(v.value) {}
+ ~PreBarriered() { this->pre(); }
+
+ void init(const T& v) {
+ this->value = v;
+ }
+
+ /* Use to set the pointer to nullptr. */
+ void clear() {
+ this->pre();
+ this->value = nullptr;
+ }
+
+ DECLARE_POINTER_ASSIGN_OPS(PreBarriered, T);
+
+ private:
+ void set(const T& v) {
+ this->pre();
+ this->value = v;
+ }
+};
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine.
+ *
+ * It must only be stored in memory that has GC lifetime. GCPtr must not be
+ * used in contexts where it may be implicitly moved or deleted, e.g. most
+ * containers.
+ *
+ * The post-barriers implemented by this class are faster than those
+ * implemented by js::HeapPtr<T> or JS::Heap<T> at the cost of not
+ * automatically handling deletion or movement.
+ */
+template <class T>
+class GCPtr : public WriteBarrieredBase<T>
+{
+ public:
+ GCPtr() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
+ explicit GCPtr(const T& v) : WriteBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), v);
+ }
+ explicit GCPtr(const GCPtr<T>& v) : WriteBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), v);
+ }
+#ifdef DEBUG
+ ~GCPtr() {
+ // No prebarrier necessary as this only happens when we are sweeping or
+ // after we have just collected the nursery. Note that the wrapped
+ // pointer may already have been freed by this point.
+ MOZ_ASSERT(CurrentThreadIsGCSweeping());
+ Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this));
+ }
+#endif
+
+ void init(const T& v) {
+ this->value = v;
+ this->post(JS::GCPolicy<T>::initial(), v);
+ }
+
+ DECLARE_POINTER_ASSIGN_OPS(GCPtr, T);
+
+ T unbarrieredGet() const {
+ return this->value;
+ }
+
+ private:
+ void set(const T& v) {
+ this->pre();
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+
+ /*
+ * Unlike HeapPtr<T>, GCPtr<T> must be managed with GC lifetimes.
+ * Specifically, the memory used by the pointer itself must be live until
+ * at least the next minor GC. For that reason, move semantics are invalid
+ * and are deleted here. Please note that not all containers support move
+ * semantics, so this does not completely prevent invalid uses.
+ */
+ GCPtr(GCPtr<T>&&) = delete;
+ GCPtr<T>& operator=(GCPtr<T>&&) = delete;
+};
+
+/*
+ * A pre- and post-barriered heap pointer, for use inside the JS engine. These
+ * heap pointers can be stored in C++ containers like GCVector and GCHashMap.
+ *
+ * The GC sometimes keeps pointers to pointers to GC things --- for example, to
+ * track references into the nursery. However, C++ containers like GCVector and
+ * GCHashMap usually reserve the right to relocate their elements any time
+ * they're modified, invalidating all pointers to the elements. HeapPtr
+ * has a move constructor which knows how to keep the GC up to date if it is
+ * moved to a new location.
+ *
+ * However, because of this additional communication with the GC, HeapPtr
+ * is somewhat slower, so it should only be used in contexts where this ability
+ * is necessary.
+ *
+ * Obviously, JSObjects, JSStrings, and the like get tenured and compacted, so
+ * whatever pointers they contain get relocated, in the sense used here.
+ * However, since the GC itself is moving those values, it takes care of its
+ * internal pointers to those pointers itself. HeapPtr is only necessary
+ * when the relocation would otherwise occur without the GC's knowledge.
+ */
+template <class T>
+class HeapPtr : public WriteBarrieredBase<T>
+{
+ public:
+ HeapPtr() : WriteBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
+
+ // Implicitly adding barriers is a reasonable default.
+ MOZ_IMPLICIT HeapPtr(const T& v) : WriteBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), this->value);
+ }
+
+ /*
+ * For HeapPtr, move semantics are equivalent to copy semantics. In
+ * C++, a copy constructor taking const-ref is the way to get a single
+ * function that will be used for both lvalue and rvalue copies, so we can
+ * simply omit the rvalue variant.
+ */
+ MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& v) : WriteBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), this->value);
+ }
+
+ ~HeapPtr() {
+ this->pre();
+ this->post(this->value, JS::GCPolicy<T>::initial());
+ }
+
+ void init(const T& v) {
+ this->value = v;
+ this->post(JS::GCPolicy<T>::initial(), this->value);
+ }
+
+ DECLARE_POINTER_ASSIGN_OPS(HeapPtr, T);
+
+ /* Make this friend so it can access pre() and post(). */
+ template <class T1, class T2>
+ friend inline void
+ BarrieredSetPair(Zone* zone,
+ HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2);
+
+ protected:
+ void set(const T& v) {
+ this->pre();
+ postBarrieredSet(v);
+ }
+
+ void postBarrieredSet(const T& v) {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, this->value);
+ }
+};
+
+// Base class for barriered pointer types that intercept reads and writes.
+template <typename T>
+class ReadBarrieredBase : public BarrieredBase<T>
+{
+ protected:
+ // ReadBarrieredBase is not directly instantiable.
+ explicit ReadBarrieredBase(const T& v) : BarrieredBase<T>(v) {}
+
+ protected:
+ void read() const { InternalBarrierMethods<T>::readBarrier(this->value); }
+ void post(const T& prev, const T& next) {
+ InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
+ }
+};
+
+// Incremental GC requires that weak pointers have read barriers. See the block
+// comment at the top of Barrier.h for a complete discussion of why.
+//
+// Note that this class also has post-barriers, so is safe to use with nursery
+// pointers. However, when used as a hashtable key, care must still be taken to
+// insert manual post-barriers on the table for rekeying if the key is based in
+// any way on the address of the object.
+template <typename T>
+class ReadBarriered : public ReadBarrieredBase<T>
+{
+ public:
+ ReadBarriered() : ReadBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
+
+ // It is okay to add barriers implicitly.
+ MOZ_IMPLICIT ReadBarriered(const T& v) : ReadBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), v);
+ }
+
+ // Copy is creating a new edge, so we must read barrier the source edge.
+ explicit ReadBarriered(const ReadBarriered& v) : ReadBarrieredBase<T>(v) {
+ this->post(JS::GCPolicy<T>::initial(), v.get());
+ }
+
+ // Move retains the lifetime status of the source edge, so does not fire
+ // the read barrier of the defunct edge.
+ ReadBarriered(ReadBarriered&& v)
+ : ReadBarrieredBase<T>(mozilla::Move(v))
+ {
+ this->post(JS::GCPolicy<T>::initial(), v.value);
+ }
+
+ ~ReadBarriered() {
+ this->post(this->value, JS::GCPolicy<T>::initial());
+ }
+
+ ReadBarriered& operator=(const ReadBarriered& v) {
+ T prior = this->value;
+ this->value = v.value;
+ this->post(prior, v.value);
+ return *this;
+ }
+
+ const T get() const {
+ if (!InternalBarrierMethods<T>::isMarkable(this->value))
+ return JS::GCPolicy<T>::initial();
+ this->read();
+ return this->value;
+ }
+
+ const T unbarrieredGet() const {
+ return this->value;
+ }
+
+ explicit operator bool() const {
+ return bool(this->value);
+ }
+
+ operator const T() const { return get(); }
+
+ const T operator->() const { return get(); }
+
+ T* unsafeGet() { return &this->value; }
+ T const* unsafeGet() const { return &this->value; }
+
+ void set(const T& v)
+ {
+ T tmp = this->value;
+ this->value = v;
+ this->post(tmp, v);
+ }
+};
+
+// A WeakRef pointer does not hold its target live and is automatically nulled
+// out when the GC discovers that it is not reachable from any other path.
+template <typename T>
+using WeakRef = ReadBarriered<T>;
+
+// Add Value operations to all Barrier types. Note, this must be defined before
+// HeapSlot for HeapSlot's base to get these operations.
+template <>
+class BarrieredBaseMixins<JS::Value> : public ValueOperations<WriteBarrieredBase<JS::Value>>
+{};
+
+// A pre- and post-barriered Value that is specialized to be aware that it
+// resides in a slots or elements vector. This allows it to be relocated in
+// memory, but with substantially less overhead than a HeapPtr.
+class HeapSlot : public WriteBarrieredBase<Value>
+{
+ public:
+ enum Kind {
+ Slot = 0,
+ Element = 1
+ };
+
+ explicit HeapSlot() = delete;
+
+ explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const Value& v)
+ : WriteBarrieredBase<Value>(v)
+ {
+ post(obj, kind, slot, v);
+ }
+
+ explicit HeapSlot(NativeObject* obj, Kind kind, uint32_t slot, const HeapSlot& s)
+ : WriteBarrieredBase<Value>(s.value)
+ {
+ post(obj, kind, slot, s);
+ }
+
+ ~HeapSlot() {
+ pre();
+ }
+
+ void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+#ifdef DEBUG
+ bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const;
+ bool preconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
+ const Value& target) const;
+#endif
+
+ void set(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
+ MOZ_ASSERT(preconditionForSet(owner, kind, slot));
+ pre();
+ value = v;
+ post(owner, kind, slot, v);
+ }
+
+ /* For users who need to manually barrier the raw types. */
+ static void writeBarrierPost(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
+ reinterpret_cast<HeapSlot*>(const_cast<Value*>(&target))->post(owner, kind, slot, target);
+ }
+
+ private:
+ void post(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
+ MOZ_ASSERT(preconditionForWriteBarrierPost(owner, kind, slot, target));
+ if (this->value.isObject()) {
+ gc::Cell* cell = reinterpret_cast<gc::Cell*>(&this->value.toObject());
+ if (cell->storeBuffer())
+ cell->storeBuffer()->putSlot(owner, kind, slot, 1);
+ }
+ }
+};
+
+class HeapSlotArray
+{
+ HeapSlot* array;
+
+ // Whether writes may be performed to the slots in this array. This helps
+ // to control how object elements which may be copy on write are used.
+#ifdef DEBUG
+ bool allowWrite_;
+#endif
+
+ public:
+ explicit HeapSlotArray(HeapSlot* array, bool allowWrite)
+ : array(array)
+#ifdef DEBUG
+ , allowWrite_(allowWrite)
+#endif
+ {}
+
+ operator const Value*() const {
+ JS_STATIC_ASSERT(sizeof(GCPtr<Value>) == sizeof(Value));
+ JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
+ return reinterpret_cast<const Value*>(array);
+ }
+ operator HeapSlot*() const { MOZ_ASSERT(allowWrite()); return array; }
+
+ HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset, allowWrite()); }
+ HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset, allowWrite()); }
+
+ private:
+ bool allowWrite() const {
+#ifdef DEBUG
+ return allowWrite_;
+#else
+ return true;
+#endif
+ }
+};
+
+/*
+ * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
+ * barriers with only one branch to check if we're in an incremental GC.
+ */
+template <class T1, class T2>
+static inline void
+BarrieredSetPair(Zone* zone,
+ HeapPtr<T1*>& v1, T1* val1,
+ HeapPtr<T2*>& v2, T2* val2)
+{
+ if (T1::needWriteBarrierPre(zone)) {
+ v1.pre();
+ v2.pre();
+ }
+ v1.postBarrieredSet(val1);
+ v2.postBarrieredSet(val2);
+}
+
+/*
+ * ImmutableTenuredPtr is designed for one very narrow case: replacing
+ * immutable raw pointers to GC-managed things, implicitly converting to a
+ * handle type for ease of use. Pointers encapsulated by this type must:
+ *
+ * be immutable (no incremental write barriers),
+ * never point into the nursery (no generational write barriers), and
+ * be traced via MarkRuntime (we use fromMarkedLocation).
+ *
+ * In short: you *really* need to know what you're doing before you use this
+ * class!
+ */
+template <typename T>
+class ImmutableTenuredPtr
+{
+ T value;
+
+ public:
+ operator T() const { return value; }
+ T operator->() const { return value; }
+
+ operator Handle<T>() const {
+ return Handle<T>::fromMarkedLocation(&value);
+ }
+
+ void init(T ptr) {
+ MOZ_ASSERT(ptr->isTenured());
+ value = ptr;
+ }
+
+ T get() const { return value; }
+ const T* address() { return &value; }
+};
+
+template <typename T>
+struct MovableCellHasher<PreBarriered<T>>
+{
+ using Key = PreBarriered<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
+ static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
+ static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
+ static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+template <typename T>
+struct MovableCellHasher<HeapPtr<T>>
+{
+ using Key = HeapPtr<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
+ static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
+ static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
+ static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+template <typename T>
+struct MovableCellHasher<ReadBarriered<T>>
+{
+ using Key = ReadBarriered<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
+ static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
+ static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
+ static bool match(const Key& k, const Lookup& l) {
+ return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
+ }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+/* Useful for hashtables with a GCPtr as key. */
+template <class T>
+struct GCPtrHasher
+{
+ typedef GCPtr<T> Key;
+ typedef T Lookup;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+/* Specialized hashing policy for GCPtrs. */
+template <class T>
+struct DefaultHasher<GCPtr<T>> : GCPtrHasher<T> {};
+
+template <class T>
+struct PreBarrieredHasher
+{
+ typedef PreBarriered<T> Key;
+ typedef T Lookup;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.get() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+template <class T>
+struct DefaultHasher<PreBarriered<T>> : PreBarrieredHasher<T> { };
+
+/* Useful for hashtables with a ReadBarriered as key. */
+template <class T>
+struct ReadBarrieredHasher
+{
+ typedef ReadBarriered<T> Key;
+ typedef T Lookup;
+
+ static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
+ static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
+ static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); }
+};
+
+/* Specialized hashing policy for ReadBarriereds. */
+template <class T>
+struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> { };
+
+class ArrayObject;
+class ArrayBufferObject;
+class GlobalObject;
+class Scope;
+class ScriptSourceObject;
+class Shape;
+class BaseShape;
+class UnownedBaseShape;
+class WasmInstanceObject;
+class WasmTableObject;
+namespace jit {
+class JitCode;
+} // namespace jit
+
+typedef PreBarriered<JSObject*> PreBarrieredObject;
+typedef PreBarriered<JSScript*> PreBarrieredScript;
+typedef PreBarriered<jit::JitCode*> PreBarrieredJitCode;
+typedef PreBarriered<JSString*> PreBarrieredString;
+typedef PreBarriered<JSAtom*> PreBarrieredAtom;
+
+typedef GCPtr<NativeObject*> GCPtrNativeObject;
+typedef GCPtr<ArrayObject*> GCPtrArrayObject;
+typedef GCPtr<ArrayBufferObjectMaybeShared*> GCPtrArrayBufferObjectMaybeShared;
+typedef GCPtr<ArrayBufferObject*> GCPtrArrayBufferObject;
+typedef GCPtr<BaseShape*> GCPtrBaseShape;
+typedef GCPtr<JSAtom*> GCPtrAtom;
+typedef GCPtr<JSFlatString*> GCPtrFlatString;
+typedef GCPtr<JSFunction*> GCPtrFunction;
+typedef GCPtr<JSLinearString*> GCPtrLinearString;
+typedef GCPtr<JSObject*> GCPtrObject;
+typedef GCPtr<JSScript*> GCPtrScript;
+typedef GCPtr<JSString*> GCPtrString;
+typedef GCPtr<ModuleObject*> GCPtrModuleObject;
+typedef GCPtr<ModuleEnvironmentObject*> GCPtrModuleEnvironmentObject;
+typedef GCPtr<ModuleNamespaceObject*> GCPtrModuleNamespaceObject;
+typedef GCPtr<PlainObject*> GCPtrPlainObject;
+typedef GCPtr<PropertyName*> GCPtrPropertyName;
+typedef GCPtr<Shape*> GCPtrShape;
+typedef GCPtr<UnownedBaseShape*> GCPtrUnownedBaseShape;
+typedef GCPtr<jit::JitCode*> GCPtrJitCode;
+typedef GCPtr<ObjectGroup*> GCPtrObjectGroup;
+typedef GCPtr<Scope*> GCPtrScope;
+
+typedef PreBarriered<Value> PreBarrieredValue;
+typedef GCPtr<Value> GCPtrValue;
+
+typedef PreBarriered<jsid> PreBarrieredId;
+typedef GCPtr<jsid> GCPtrId;
+
+typedef ImmutableTenuredPtr<PropertyName*> ImmutablePropertyNamePtr;
+typedef ImmutableTenuredPtr<JS::Symbol*> ImmutableSymbolPtr;
+
+typedef ReadBarriered<DebugEnvironmentProxy*> ReadBarrieredDebugEnvironmentProxy;
+typedef ReadBarriered<GlobalObject*> ReadBarrieredGlobalObject;
+typedef ReadBarriered<JSObject*> ReadBarrieredObject;
+typedef ReadBarriered<JSFunction*> ReadBarrieredFunction;
+typedef ReadBarriered<JSScript*> ReadBarrieredScript;
+typedef ReadBarriered<ScriptSourceObject*> ReadBarrieredScriptSourceObject;
+typedef ReadBarriered<Shape*> ReadBarrieredShape;
+typedef ReadBarriered<jit::JitCode*> ReadBarrieredJitCode;
+typedef ReadBarriered<ObjectGroup*> ReadBarrieredObjectGroup;
+typedef ReadBarriered<JS::Symbol*> ReadBarrieredSymbol;
+typedef ReadBarriered<WasmInstanceObject*> ReadBarrieredWasmInstanceObject;
+typedef ReadBarriered<WasmTableObject*> ReadBarrieredWasmTableObject;
+
+typedef ReadBarriered<Value> ReadBarrieredValue;
+
+} /* namespace js */
+
+#endif /* gc_Barrier_h */
diff --git a/js/src/gc/FindSCCs.h b/js/src/gc/FindSCCs.h
new file mode 100644
index 000000000..037557e3e
--- /dev/null
+++ b/js/src/gc/FindSCCs.h
@@ -0,0 +1,214 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_FindSCCs_h
+#define gc_FindSCCs_h
+
+#include "mozilla/Move.h"
+
+#include "jsfriendapi.h"
+#include "jsutil.h"
+
+namespace js {
+namespace gc {
+
+// Base class for nodes in a graph processed by ComponentFinder. The fields
+// below are intrusive bookkeeping owned by the component-finding algorithm
+// and must not be touched while a ComponentFinder is running.
+template<class Node>
+struct GraphNodeBase
+{
+ Node* gcNextGraphNode;
+ Node* gcNextGraphComponent;
+ unsigned gcDiscoveryTime;
+ unsigned gcLowLink;
+
+ GraphNodeBase()
+ : gcNextGraphNode(nullptr),
+ gcNextGraphComponent(nullptr),
+ gcDiscoveryTime(0),
+ gcLowLink(0) {}
+
+ ~GraphNodeBase() {}
+
+ // Return the next node in the same component, or nullptr if this node is
+ // the last one in its component.
+ Node* nextNodeInGroup() const {
+ if (gcNextGraphNode && gcNextGraphNode->gcNextGraphComponent == gcNextGraphComponent)
+ return gcNextGraphNode;
+ return nullptr;
+ }
+
+ // Return the first node of the next component in the results list.
+ Node* nextGroup() const {
+ return gcNextGraphComponent;
+ }
+};
+
+/*
+ * Find the strongly connected components of a graph using Tarjan's algorithm,
+ * and return them in topological order.
+ *
+ * Nodes derive from GraphNodeBase and implement findGraphEdges, which calls
+ * finder.addEdgeTo to describe the outgoing edges from that node:
+ *
+ * struct MyComponentFinder;
+ *
+ * struct MyGraphNode : public GraphNodeBase
+ * {
+ * void findOutgoingEdges(MyComponentFinder& finder)
+ * {
+ * for edge in my_outgoing_edges:
+ * if is_relevant(edge):
+ * finder.addEdgeTo(edge.destination)
+ * }
+ * }
+ *
+ * struct MyComponentFinder : public ComponentFinder<MyGraphNode, MyComponentFinder>
+ * {
+ * ...
+ * };
+ *
+ * MyComponentFinder finder;
+ * finder.addNode(v);
+ */
+
+template <typename Node, typename Derived>
+class ComponentFinder
+{
+ public:
+ // |sl| is the native stack limit used to bound recursion in processNode.
+ explicit ComponentFinder(uintptr_t sl)
+ : clock(1),
+ stack(nullptr),
+ firstComponent(nullptr),
+ cur(nullptr),
+ stackLimit(sl),
+ stackFull(false)
+ {}
+
+ ~ComponentFinder() {
+ MOZ_ASSERT(!stack);
+ MOZ_ASSERT(!firstComponent);
+ }
+
+ /* Forces all nodes to be added to a single component. */
+ void useOneComponent() { stackFull = true; }
+
+ // Add a node to the graph; nodes not yet visited are processed
+ // (recursively, via their outgoing edges) immediately.
+ void addNode(Node* v) {
+ if (v->gcDiscoveryTime == Undefined) {
+ MOZ_ASSERT(v->gcLowLink == Undefined);
+ processNode(v);
+ }
+ }
+
+ // Return the components found, linked through gcNextGraphNode /
+ // gcNextGraphComponent, in topological order, and reset all per-node
+ // state so the finder can be reused.
+ Node* getResultsList() {
+ if (stackFull) {
+ /*
+ * All nodes after the stack overflow are in |stack|. Put them all in
+ * one big component of their own.
+ */
+ Node* firstGoodComponent = firstComponent;
+ for (Node* v = stack; v; v = stack) {
+ stack = v->gcNextGraphNode;
+ v->gcNextGraphComponent = firstGoodComponent;
+ v->gcNextGraphNode = firstComponent;
+ firstComponent = v;
+ }
+ stackFull = false;
+ }
+
+ MOZ_ASSERT(!stack);
+
+ Node* result = firstComponent;
+ firstComponent = nullptr;
+
+ // Clear the discovery state so the same nodes can be added again.
+ for (Node* v = result; v; v = v->gcNextGraphNode) {
+ v->gcDiscoveryTime = Undefined;
+ v->gcLowLink = Undefined;
+ }
+
+ return result;
+ }
+
+ // Collapse a results list into a single component.
+ static void mergeGroups(Node* first) {
+ for (Node* v = first; v; v = v->gcNextGraphNode)
+ v->gcNextGraphComponent = nullptr;
+ }
+
+ public:
+ /* Call from implementation of GraphNodeBase::findOutgoingEdges(). */
+ void addEdgeTo(Node* w) {
+ if (w->gcDiscoveryTime == Undefined) {
+ processNode(w);
+ cur->gcLowLink = Min(cur->gcLowLink, w->gcLowLink);
+ } else if (w->gcDiscoveryTime != Finished) {
+ cur->gcLowLink = Min(cur->gcLowLink, w->gcDiscoveryTime);
+ }
+ }
+
+ private:
+ /* Constant used to indicate an unprocessed vertex. */
+ static const unsigned Undefined = 0;
+
+ /* Constant used to indicate a processed vertex that is no longer on the stack. */
+ static const unsigned Finished = (unsigned)-1;
+
+ // Visit |v|: stamp its discovery time, push it on the stack, walk its
+ // outgoing edges, and pop a complete component when |v| turns out to be
+ // a component root. Recursion is bounded by |stackLimit|; once exceeded,
+ // all remaining nodes are merged into one component (see getResultsList).
+ void processNode(Node* v) {
+ v->gcDiscoveryTime = clock;
+ v->gcLowLink = clock;
+ ++clock;
+
+ v->gcNextGraphNode = stack;
+ stack = v;
+
+ int stackDummy;
+ if (stackFull || !JS_CHECK_STACK_SIZE(stackLimit, &stackDummy)) {
+ stackFull = true;
+ return;
+ }
+
+ Node* old = cur;
+ cur = v;
+ cur->findOutgoingEdges(*static_cast<Derived*>(this));
+ cur = old;
+
+ if (stackFull)
+ return;
+
+ // |v| is a component root iff no edge reached back above it.
+ if (v->gcLowLink == v->gcDiscoveryTime) {
+ Node* nextComponent = firstComponent;
+ Node* w;
+ do {
+ MOZ_ASSERT(stack);
+ w = stack;
+ stack = w->gcNextGraphNode;
+
+ /*
+ * Record that the element is no longer on the stack by setting the
+ * discovery time to a special value that's not Undefined.
+ */
+ w->gcDiscoveryTime = Finished;
+
+ /* Figure out which group we're in. */
+ w->gcNextGraphComponent = nextComponent;
+
+ /*
+ * Prepend the component to the beginning of the output list to
+ * reverse the list and achieve the desired order.
+ */
+ w->gcNextGraphNode = firstComponent;
+ firstComponent = w;
+ } while (w != v);
+ }
+ }
+
+ private:
+ unsigned clock;
+ Node* stack;
+ Node* firstComponent;
+ Node* cur;
+ uintptr_t stackLimit;
+ bool stackFull;
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_FindSCCs_h */
diff --git a/js/src/gc/GCInternals.h b/js/src/gc/GCInternals.h
new file mode 100644
index 000000000..722539e1c
--- /dev/null
+++ b/js/src/gc/GCInternals.h
@@ -0,0 +1,175 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCInternals_h
+#define gc_GCInternals_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/PodOperations.h"
+
+#include "jscntxt.h"
+
+#include "gc/Zone.h"
+#include "vm/HelperThreads.h"
+#include "vm/Runtime.h"
+
+namespace js {
+namespace gc {
+
+void FinishGC(JSContext* cx);
+
+/*
+ * This class should be used by any code that needs exclusive access to the
+ * heap in order to trace through it.
+ */
+class MOZ_RAII AutoTraceSession
+{
+ public:
+ explicit AutoTraceSession(JSRuntime* rt, JS::HeapState state = JS::HeapState::Tracing);
+ ~AutoTraceSession();
+
+ // Threads with an exclusive context can hit refillFreeList while holding
+ // the exclusive access lock. To avoid deadlocking when we try to acquire
+ // this lock during GC and the other thread is waiting, make sure we hold
+ // the exclusive access lock during GC sessions.
+ AutoLockForExclusiveAccess lock;
+
+ protected:
+ JSRuntime* runtime;
+
+ private:
+ AutoTraceSession(const AutoTraceSession&) = delete;
+ void operator=(const AutoTraceSession&) = delete;
+
+ // Heap state in effect before the session, restored on destruction.
+ JS::HeapState prevState;
+ AutoSPSEntry pseudoFrame;
+};
+
+// Establishes an AutoTraceSession for the selected zones; the session is
+// accessible via session() for the lifetime of this object. (Construction is
+// implemented out of line.)
+class MOZ_RAII AutoPrepareForTracing
+{
+ mozilla::Maybe<AutoTraceSession> session_;
+
+ public:
+ AutoPrepareForTracing(JSContext* cx, ZoneSelector selector);
+ AutoTraceSession& session() { return session_.ref(); }
+};
+
+AbortReason
+IsIncrementalGCUnsafe(JSRuntime* rt);
+
+#ifdef JS_GC_ZEAL
+
+// RAII: pauses pre-barrier verification for its lifetime, restarting it on
+// destruction unless the runtime was shutting down when it was constructed.
+class MOZ_RAII AutoStopVerifyingBarriers
+{
+ GCRuntime* gc;
+ bool restartPreVerifier;
+
+ public:
+ AutoStopVerifyingBarriers(JSRuntime* rt, bool isShutdown)
+ : gc(&rt->gc)
+ {
+ if (gc->isVerifyPreBarriersEnabled()) {
+ gc->endVerifyPreBarriers();
+ restartPreVerifier = !isShutdown;
+ } else {
+ restartPreVerifier = false;
+ }
+ }
+
+ ~AutoStopVerifyingBarriers() {
+ // Nasty special case: verification runs a minor GC, which *may* nest
+ // inside of an outer minor GC. This is not allowed by the
+ // gc::Statistics phase tree. So we pause the "real" GC, if in fact one
+ // is in progress.
+ gcstats::Phase outer = gc->stats.currentPhase();
+ if (outer != gcstats::PHASE_NONE)
+ gc->stats.endPhase(outer);
+ MOZ_ASSERT((gc->stats.currentPhase() == gcstats::PHASE_NONE) ||
+ (gc->stats.currentPhase() == gcstats::PHASE_GC_BEGIN) ||
+ (gc->stats.currentPhase() == gcstats::PHASE_GC_END));
+
+ if (restartPreVerifier)
+ gc->startVerifyPreBarriers();
+
+ if (outer != gcstats::PHASE_NONE)
+ gc->stats.beginPhase(outer);
+ }
+};
+#else
+// No-op stand-in used when barrier verification is compiled out.
+struct MOZ_RAII AutoStopVerifyingBarriers
+{
+ AutoStopVerifyingBarriers(JSRuntime*, bool) {}
+};
+#endif /* JS_GC_ZEAL */
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void CheckHashTablesAfterMovingGC(JSRuntime* rt);
+void CheckHeapAfterGC(JSRuntime* rt);
+#endif
+
+// Tracer used after a moving GC; the per-kind edge hooks are implemented out
+// of line. onChild asserts that any remaining edge does not point at a cell
+// that has been forwarded (i.e. relocated).
+struct MovingTracer : JS::CallbackTracer
+{
+ explicit MovingTracer(JSRuntime* rt) : CallbackTracer(rt, TraceWeakMapKeysValues) {}
+
+ void onObjectEdge(JSObject** objp) override;
+ void onShapeEdge(Shape** shapep) override;
+ void onStringEdge(JSString** stringp) override;
+ void onScriptEdge(JSScript** scriptp) override;
+ void onLazyScriptEdge(LazyScript** lazyp) override;
+ void onBaseShapeEdge(BaseShape** basep) override;
+ void onScopeEdge(Scope** basep) override;
+ void onChild(const JS::GCCellPtr& thing) override {
+ MOZ_ASSERT(!RelocationOverlay::isCellForwarded(thing.asCell()));
+ }
+
+#ifdef DEBUG
+ TracerKind getTracerKind() const override { return TracerKind::Moving; }
+#endif
+};
+
+// Structure for counting how many times objects in a particular group have
+// been tenured during a minor collection.
+struct TenureCount
+{
+ ObjectGroup* group; // Group whose tenures this entry counts.
+ int count; // Number of tenured objects observed for |group|.
+};
+
+// Keep rough track of how many times we tenure objects in particular groups
+// during minor collections, using a fixed size hash for efficiency at the cost
+// of potential collisions.
+struct TenureCountCache
+{
+ static const size_t EntryShift = 4;
+ static const size_t EntryCount = 1 << EntryShift;
+
+ TenureCount entries[EntryCount];
+
+ TenureCountCache() { mozilla::PodZero(this); }
+
+ // Hash an ObjectGroup pointer after discarding its low bits, which are
+ // asserted to always be zero.
+ HashNumber hash(ObjectGroup* group) {
+#if JS_BITS_PER_WORD == 32
+ static const size_t ZeroBits = 3;
+#else
+ static const size_t ZeroBits = 4;
+#endif
+
+ uintptr_t word = uintptr_t(group);
+ MOZ_ASSERT((word & ((1 << ZeroBits) - 1)) == 0);
+ word >>= ZeroBits;
+ return HashNumber((word >> EntryShift) ^ word);
+ }
+
+ // Map a group to its fixed slot; distinct groups may collide by design.
+ TenureCount& findEntry(ObjectGroup* group) {
+ return entries[hash(group) % EntryCount];
+ }
+};
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_GCInternals_h */
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
new file mode 100644
index 000000000..8c9322849
--- /dev/null
+++ b/js/src/gc/GCRuntime.h
@@ -0,0 +1,1467 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCRuntime_h
+#define gc_GCRuntime_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/EnumSet.h"
+
+#include "jsfriendapi.h"
+#include "jsgc.h"
+
+#include "gc/Heap.h"
+#include "gc/Nursery.h"
+#include "gc/Statistics.h"
+#include "gc/StoreBuffer.h"
+#include "gc/Tracer.h"
+#include "js/GCAnnotations.h"
+
+namespace js {
+
+class AutoLockGC;
+class AutoLockHelperThreadState;
+class VerifyPreTracer;
+
+namespace gc {
+
+typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;
+using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
+
+class AutoMaybeStartBackgroundAllocation;
+class MarkingValidator;
+class AutoTraceSession;
+struct MovingTracer;
+
+// An intrusive singly-linked list of Chunks with a cached element count.
+class ChunkPool
+{
+ Chunk* head_;
+ size_t count_;
+
+ public:
+ ChunkPool() : head_(nullptr), count_(0) {}
+
+ size_t count() const { return count_; }
+
+ // head() asserts the pool is non-empty.
+ Chunk* head() { MOZ_ASSERT(head_); return head_; }
+ Chunk* pop();
+ void push(Chunk* chunk);
+ Chunk* remove(Chunk* chunk);
+
+#ifdef DEBUG
+ bool contains(Chunk* chunk) const;
+ bool verify() const;
+#endif
+
+ // Pool mutation does not invalidate an Iter unless the mutation
+ // is of the Chunk currently being visited by the Iter.
+ class Iter {
+ public:
+ explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
+ bool done() const { return !current_; }
+ void next();
+ Chunk* get() const { return current_; }
+ operator Chunk*() const { return get(); }
+ Chunk* operator->() const { return get(); }
+ private:
+ Chunk* current_;
+ };
+};
+
+// Performs extra allocation off the main thread so that when memory is
+// required on the main thread it will already be available and waiting.
+class BackgroundAllocTask : public GCParallelTask
+{
+ // Guarded by the GC lock.
+ JSRuntime* runtime;
+ ChunkPool& chunkPool_;
+
+ // Whether background allocation is enabled; fixed at construction.
+ const bool enabled_;
+
+ public:
+ BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
+ bool enabled() const { return enabled_; }
+
+ protected:
+ void run() override;
+};
+
+// Search the provided Chunks for free arenas and decommit them.
+class BackgroundDecommitTask : public GCParallelTask
+{
+ public:
+ using ChunkVector = mozilla::Vector<Chunk*>;
+
+ explicit BackgroundDecommitTask(JSRuntime *rt) : runtime(rt) {}
+ // NOTE(review): presumably transfers |chunks| into |toDecommit| — confirm
+ // against the out-of-line implementation.
+ void setChunksToScan(ChunkVector &chunks);
+
+ protected:
+ void run() override;
+
+ private:
+ JSRuntime* runtime;
+ // Chunks queued by setChunksToScan() for processing in run().
+ ChunkVector toDecommit;
+};
+
+/*
+ * Encapsulates all of the GC tunables. These are effectively constant and
+ * should only be modified by setParameter.
+ */
+class GCSchedulingTunables
+{
+ /*
+ * Soft limit on the number of bytes we are allowed to allocate in the GC
+ * heap. Attempts to allocate gcthings over this limit will return null and
+ * subsequently invoke the standard OOM machinery, independent of available
+ * physical memory.
+ */
+ size_t gcMaxBytes_;
+
+ /*
+ * The base value used to compute zone->trigger.gcBytes(). When
+ * usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
+ * be scheduled for a GC, depending on the exact circumstances.
+ */
+ size_t gcZoneAllocThresholdBase_;
+
+ /* Fraction of threshold.gcBytes() which triggers an incremental GC. */
+ double zoneAllocThresholdFactor_;
+
+ /*
+ * Number of bytes to allocate between incremental slices in GCs triggered
+ * by the zone allocation threshold.
+ */
+ size_t zoneAllocDelayBytes_;
+
+ /*
+ * Totally disables |highFrequencyGC|, the HeapGrowthFactor, and other
+ * tunables that make GC non-deterministic.
+ */
+ bool dynamicHeapGrowthEnabled_;
+
+ /*
+ * We enter high-frequency mode if we GC twice within this many
+ * microseconds. This value is stored directly in microseconds.
+ */
+ uint64_t highFrequencyThresholdUsec_;
+
+ /*
+ * When in the |highFrequencyGC| mode, these parameterize the per-zone
+ * "HeapGrowthFactor" computation.
+ */
+ uint64_t highFrequencyLowLimitBytes_;
+ uint64_t highFrequencyHighLimitBytes_;
+ double highFrequencyHeapGrowthMax_;
+ double highFrequencyHeapGrowthMin_;
+
+ /*
+ * When not in |highFrequencyGC| mode, this is the global (stored per-zone)
+ * "HeapGrowthFactor".
+ */
+ double lowFrequencyHeapGrowth_;
+
+ /*
+ * Doubles the length of IGC slices when in the |highFrequencyGC| mode.
+ */
+ bool dynamicMarkSliceEnabled_;
+
+ /*
+ * Controls whether painting can trigger IGC slices.
+ */
+ bool refreshFrameSlicesEnabled_;
+
+ /*
+ * Controls the number of empty chunks reserved for future allocation.
+ */
+ uint32_t minEmptyChunkCount_;
+ uint32_t maxEmptyChunkCount_;
+
+ public:
+ GCSchedulingTunables()
+ : gcMaxBytes_(0),
+ gcZoneAllocThresholdBase_(30 * 1024 * 1024),
+ zoneAllocThresholdFactor_(0.9),
+ zoneAllocDelayBytes_(1024 * 1024),
+ dynamicHeapGrowthEnabled_(false),
+ highFrequencyThresholdUsec_(1000 * 1000),
+ highFrequencyLowLimitBytes_(100 * 1024 * 1024),
+ highFrequencyHighLimitBytes_(500 * 1024 * 1024),
+ highFrequencyHeapGrowthMax_(3.0),
+ highFrequencyHeapGrowthMin_(1.5),
+ lowFrequencyHeapGrowth_(1.5),
+ dynamicMarkSliceEnabled_(false),
+ refreshFrameSlicesEnabled_(true),
+ minEmptyChunkCount_(1),
+ maxEmptyChunkCount_(30)
+ {}
+
+ size_t gcMaxBytes() const { return gcMaxBytes_; }
+ size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
+ double zoneAllocThresholdFactor() const { return zoneAllocThresholdFactor_; }
+ size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
+ bool isDynamicHeapGrowthEnabled() const { return dynamicHeapGrowthEnabled_; }
+ uint64_t highFrequencyThresholdUsec() const { return highFrequencyThresholdUsec_; }
+ uint64_t highFrequencyLowLimitBytes() const { return highFrequencyLowLimitBytes_; }
+ uint64_t highFrequencyHighLimitBytes() const { return highFrequencyHighLimitBytes_; }
+ double highFrequencyHeapGrowthMax() const { return highFrequencyHeapGrowthMax_; }
+ double highFrequencyHeapGrowthMin() const { return highFrequencyHeapGrowthMin_; }
+ double lowFrequencyHeapGrowth() const { return lowFrequencyHeapGrowth_; }
+ bool isDynamicMarkSliceEnabled() const { return dynamicMarkSliceEnabled_; }
+ bool areRefreshFrameSlicesEnabled() const { return refreshFrameSlicesEnabled_; }
+ unsigned minEmptyChunkCount(const AutoLockGC&) const { return minEmptyChunkCount_; }
+ unsigned maxEmptyChunkCount() const { return maxEmptyChunkCount_; }
+
+ MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, const AutoLockGC& lock);
+};
+
+/*
+ * GC Scheduling Overview
+ * ======================
+ *
+ * Scheduling GC's in SpiderMonkey/Firefox is tremendously complicated because
+ * of the large number of subtle, cross-cutting, and widely dispersed factors
+ * that must be taken into account. A summary of some of the more important
+ * factors follows.
+ *
+ * Cost factors:
+ *
+ * * GC too soon and we'll revisit an object graph almost identical to the
+ * one we just visited; since we are unlikely to find new garbage, the
+ * traversal will be largely overhead. We rely heavily on external factors
+ * to signal us that we are likely to find lots of garbage: e.g. "a tab
+ * just got closed".
+ *
+ * * GC too late and we'll run out of memory to allocate (e.g. Out-Of-Memory,
+ * hereafter simply abbreviated to OOM). If this happens inside
+ * SpiderMonkey we may be able to recover, but most embedder allocations
+ * will simply crash on OOM, even if the GC has plenty of free memory it
+ * could surrender.
+ *
+ * * Memory fragmentation: if we fill the process with GC allocations, a
+ * request for a large block of contiguous memory may fail because no
+ * contiguous block is free, despite having enough memory available to
+ * service the request.
+ *
+ * * Management overhead: if our GC heap becomes large, we create extra
+ * overhead when managing the GC's structures, even if the allocations are
+ * mostly unused.
+ *
+ * Heap Management Factors:
+ *
+ * * GC memory: The GC has its own allocator that it uses to make fixed size
+ * allocations for GC managed things. In cases where the GC thing requires
+ * larger or variable sized memory to implement itself, it is responsible
+ * for using the system heap.
+ *
+ * * C Heap Memory: Rather than allowing for large or variable allocations,
+ * the SpiderMonkey GC allows GC things to hold pointers to C heap memory.
+ * It is the responsibility of the thing to free this memory with a custom
+ * finalizer (with the sole exception of NativeObject, which knows about
+ * slots and elements for performance reasons). C heap memory has different
+ * performance and overhead tradeoffs than GC internal memory, which need
+ * to be considered with scheduling a GC.
+ *
+ * Application Factors:
+ *
+ * * Most applications allocate heavily at startup, then enter a processing
+ * stage where memory utilization remains roughly fixed with a slower
+ * allocation rate. This is not always the case, however, so while we may
+ * optimize for this pattern, we must be able to handle arbitrary
+ * allocation patterns.
+ *
+ * Other factors:
+ *
+ * * Other memory: This is memory allocated outside the purview of the GC.
+ * Data mapped by the system for code libraries, data allocated by those
+ * libraries, data in the JSRuntime that is used to manage the engine,
+ * memory used by the embedding that is not attached to a GC thing, memory
+ * used by unrelated processes running on the hardware that use space we
+ * could otherwise use for allocation, etc. While we don't have to manage
+ * it, we do have to take it into account when scheduling since it affects
+ * when we will OOM.
+ *
+ * * Physical Reality: All real machines have limits on the number of bits
+ * that they are physically able to store. While modern operating systems
+ * can generally make additional space available with swapping, at some
+ * point there are simply no more bits to allocate. There is also the
+ * factor of address space limitations, particularly on 32bit machines.
+ *
+ * * Platform Factors: Each OS makes use of wildly different memory
+ * management techniques. These differences result in different performance
+ * tradeoffs, different fragmentation patterns, and different hard limits
+ * on the amount of physical and/or virtual memory that we can use before
+ * OOMing.
+ *
+ *
+ * Reasons for scheduling GC
+ * -------------------------
+ *
+ * While code generally takes the above factors into account in only an ad-hoc
+ * fashion, the API forces the user to pick a "reason" for the GC. We have a
+ * bunch of JS::gcreason reasons in GCAPI.h. These fall into a few categories
+ * that generally coincide with one or more of the above factors.
+ *
+ * Embedding reasons:
+ *
+ * 1) Do a GC now because the embedding knows something useful about the
+ * zone's memory retention state. These are gcreasons like LOAD_END,
+ * PAGE_HIDE, SET_NEW_DOCUMENT, DOM_UTILS. Mostly, Gecko uses these to
+ * indicate that a significant fraction of the scheduled zone's memory is
+ * probably reclaimable.
+ *
+ * 2) Do some known amount of GC work now because the embedding knows now is
+ * a good time to do a long, unblockable operation of a known duration.
+ * These are INTER_SLICE_GC and REFRESH_FRAME.
+ *
+ * Correctness reasons:
+ *
+ * 3) Do a GC now because correctness depends on some GC property. For
+ * example, CC_WAITING is where the embedding requires the mark bits
+ * to be set correctly. Also, EVICT_NURSERY is where we need to work on
+ * the tenured heap.
+ *
+ * 4) Do a GC because we are shutting down: e.g. SHUTDOWN_CC or DESTROY_*.
+ *
+ * 5) Do a GC because a compartment was accessed between GC slices when we
+ * would have otherwise discarded it. We have to do a second GC to clean
+ * it up: e.g. COMPARTMENT_REVIVED.
+ *
+ * Emergency Reasons:
+ *
+ * 6) Do an all-zones, non-incremental GC now because the embedding knows it
+ * cannot wait: e.g. MEM_PRESSURE.
+ *
+ * 7) OOM when fetching a new Chunk results in a LAST_DITCH GC.
+ *
+ * Heap Size Limitation Reasons:
+ *
+ * 8) Do an incremental, zonal GC with reason MAYBEGC when we discover that
+ * the gc's allocated size is approaching the current trigger. This is
+ * called MAYBEGC because we make this check in the MaybeGC function.
+ * MaybeGC gets called at the top of the main event loop. Normally, it is
+ * expected that this callback will keep the heap size limited. It is
+ * relatively inexpensive, because it is invoked with no JS running and
+ * thus few stack roots to scan. For this reason, the GC's "trigger" bytes
+ * is less than the GC's "max" bytes as used by the trigger below.
+ *
+ * 9) Do an incremental, zonal GC with reason MAYBEGC when we go to allocate
+ * a new GC thing and find that the GC heap size has grown beyond the
+ * configured maximum (JSGC_MAX_BYTES). We trigger this GC by returning
+ * nullptr and then calling maybeGC at the top level of the allocator.
+ * This is then guaranteed to fail the "size greater than trigger" check
+ * above, since trigger is always less than max. After performing the GC,
+ * the allocator unconditionally returns nullptr to force an OOM exception
+ * to be raised by the script.
+ *
+ * Note that this differs from a LAST_DITCH GC where we actually run out
+ * of memory (i.e., a call to a system allocator fails) when trying to
+ * allocate. Unlike above, LAST_DITCH GC only happens when we are really
+ * out of memory, not just when we cross an arbitrary trigger; despite
+ * this, it may still return an allocation at the end and allow the script
+ * to continue, if the LAST_DITCH GC was able to free up enough memory.
+ *
+ * 10) Do a GC under reason ALLOC_TRIGGER when we are over the GC heap trigger
+ * limit, but in the allocator rather than in a random call to maybeGC.
+ * This occurs if we allocate too much before returning to the event loop
+ * and calling maybeGC; this is extremely common in benchmarks and
+ * long-running Worker computations. Note that this uses a wildly
+ * different mechanism from the above in that it sets the interrupt flag
+ * and does the GC at the next loop head, before the next alloc, or
+ * maybeGC. The reason for this is that this check is made after the
+ * allocation and we cannot GC with an uninitialized thing in the heap.
+ *
+ * 11) Do an incremental, zonal GC with reason TOO_MUCH_MALLOC when we have
+ * malloced more than JSGC_MAX_MALLOC_BYTES in a zone since the last GC.
+ *
+ *
+ * Size Limitation Triggers Explanation
+ * ------------------------------------
+ *
+ * The GC internally is entirely unaware of the context of the execution of
+ * the mutator. It sees only:
+ *
+ * A) Allocated size: this is the amount of memory currently requested by the
+ * mutator. This quantity is monotonically increasing: i.e. the allocation
+ * rate is always >= 0. It is also easy for the system to track.
+ *
+ * B) Retained size: this is the amount of memory that the mutator can
+ * currently reach. Said another way, it is the size of the heap
+ * immediately after a GC (modulo background sweeping). This size is very
+ * costly to know exactly and also extremely hard to estimate with any
+ * fidelity.
+ *
+ * For reference, a common allocated vs. retained graph might look like:
+ *
+ * | ** **
+ * | ** ** * **
+ * | ** * ** * **
+ * | * ** * ** * **
+ * | ** ** * ** * **
+ * s| * * ** ** + + **
+ * i| * * * + + + + +
+ * z| * * * + + + + +
+ * e| * **+
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * | * +
+ * |*+
+ * +--------------------------------------------------
+ * time
+ * *** = allocated
+ * +++ = retained
+ *
+ * Note that this is a bit of a simplification
+ * because in reality we track malloc and GC heap
+ * sizes separately and have a different level of
+ * granularity and accuracy on each heap.
+ *
+ * This presents some obvious implications for Mark-and-Sweep collectors.
+ * Namely:
+ * -> t[marking] ~= size[retained]
+ * -> t[sweeping] ~= size[allocated] - size[retained]
+ *
+ * In a non-incremental collector, maintaining low latency and high
+ * responsiveness requires that total GC times be as low as possible. Thus,
+ * in order to stay responsive when we did not have a fully incremental
+ * collector, our GC triggers were focused on minimizing collection time.
+ * Furthermore, since size[retained] is not under control of the GC, all the
+ * GC could do to control collection times was reduce sweep times by
+ * minimizing size[allocated], per the equation above.
+ *
+ * The result of the above is GC triggers that focus on size[allocated] to
+ * the exclusion of other important factors and default heuristics that are
+ * not optimal for a fully incremental collector. On the other hand, this is
+ * not all bad: minimizing size[allocated] also minimizes the chance of OOM
+ * and sweeping remains one of the hardest areas to further incrementalize.
+ *
+ * EAGER_ALLOC_TRIGGER
+ * -------------------
+ * Occurs when we return to the event loop and find our heap is getting
+ * largish, but before t[marking] OR t[sweeping] is too large for a
+ * responsive non-incremental GC. This is intended to be the common case
+ * in normal web applications: e.g. we just finished an event handler and
+ * the few objects we allocated when computing the new whatzitz have
+ * pushed us slightly over the limit. After this GC we rescale the new
+ * EAGER_ALLOC_TRIGGER trigger to 150% of size[retained] so that our
+ * non-incremental GC times will always be proportional to this size
+ * rather than being dominated by sweeping.
+ *
+ * As a concession to mutators that allocate heavily during their startup
+ * phase, we have a highFrequencyGCMode that ups the growth rate to 300%
+ * of the current size[retained] so that we'll do fewer longer GCs at the
+ * end of the mutator startup rather than more, smaller GCs.
+ *
+ * Assumptions:
+ * -> Responsiveness is proportional to t[marking] + t[sweeping].
+ * -> size[retained] is proportional only to GC allocations.
+ *
+ * ALLOC_TRIGGER (non-incremental)
+ * -------------------------------
+ * If we do not return to the event loop before getting all the way to our
+ * gc trigger bytes then MAYBEGC will never fire. To avoid OOMing, we
+ * succeed the current allocation and set the script interrupt so that we
+ * will (hopefully) do a GC before we overflow our max and have to raise
+ * an OOM exception for the script.
+ *
+ * Assumptions:
+ * -> Common web scripts will return to the event loop before using
+ * 10% of the current gcTriggerBytes worth of GC memory.
+ *
+ * ALLOC_TRIGGER (incremental)
+ * ---------------------------
+ * In practice the above trigger is rough: if a website is just on the
+ * cusp, sometimes it will trigger a non-incremental GC moments before
+ * returning to the event loop, where it could have done an incremental
+ * GC. Thus, we recently added an incremental version of the above with a
+ * substantially lower threshold, so that we have a soft limit here. If
+ * IGC can collect faster than the allocator generates garbage, even if
+ * the allocator does not return to the event loop frequently, we should
+ * not have to fall back to a non-incremental GC.
+ *
+ * INCREMENTAL_TOO_SLOW
+ * --------------------
+ * Do a full, non-incremental GC if we overflow ALLOC_TRIGGER during an
+ * incremental GC. When in the middle of an incremental GC, we suppress
+ * our other triggers, so we need a way to backstop the IGC if the
+ * mutator allocates faster than the IGC can clean things up.
+ *
+ * TOO_MUCH_MALLOC
+ * ---------------
+ * Performs a GC before size[allocated] - size[retained] gets too large
+ * for non-incremental sweeping to be fast in the case that we have
+ * significantly more malloc allocation than GC allocation. This is meant
+ * to complement MAYBEGC triggers. We track this by counting malloced
+ * bytes; the counter gets reset at every GC since we do not always have a
+ * size at the time we call free. Because of this, the malloc heuristic
+ * is, unfortunately, not usefully able to augment our other GC heap
+ * triggers and is limited to this singular heuristic.
+ *
+ * Assumptions:
+ * -> EITHER size[allocated_by_malloc] ~= size[allocated_by_GC]
+ * OR time[sweeping] ~= size[allocated_by_malloc]
+ * -> size[retained] @ t0 ~= size[retained] @ t1
+ * i.e. That the mutator is in steady-state operation.
+ *
+ * LAST_DITCH_GC
+ * -------------
+ * Does a GC because we are out of memory.
+ *
+ * Assumptions:
+ * -> size[retained] < size[available_memory]
+ */
+class GCSchedulingState
+{
+ /*
+ * Influences how we schedule and run GC's in several subtle ways. The most
+ * important factor is in how it controls the "HeapGrowthFactor". The
+ * growth factor is a measure of how large (as a percentage of the last GC)
+ * the heap is allowed to grow before we try to schedule another GC.
+ */
+ bool inHighFrequencyGCMode_;
+
+ public:
+ GCSchedulingState()
+ : inHighFrequencyGCMode_(false)
+ {}
+
+ bool inHighFrequencyGCMode() const { return inHighFrequencyGCMode_; }
+
+ // We are in high-frequency mode when dynamic heap growth is enabled and
+ // the previous GC finished within the high-frequency threshold of now.
+ void updateHighFrequencyMode(uint64_t lastGCTime, uint64_t currentTime,
+ const GCSchedulingTunables& tunables) {
+ inHighFrequencyGCMode_ =
+ tunables.isDynamicHeapGrowthEnabled() && lastGCTime &&
+ lastGCTime + tunables.highFrequencyThresholdUsec() > currentTime;
+ }
+};
+
+// A callback function of type F paired with its opaque |data| argument.
+template<typename F>
+struct Callback {
+ F op;
+ void* data;
+
+ Callback()
+ : op(nullptr), data(nullptr)
+ {}
+ Callback(F op, void* data)
+ : op(op), data(data)
+ {}
+};
+
+template<typename F>
+using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
+
+// Presents two iterators as one: yields all elements of |Iter0|, then all
+// elements of |Iter1|, converting each to T via get().
+template <typename T, typename Iter0, typename Iter1>
+class ChainedIter
+{
+ Iter0 iter0_;
+ Iter1 iter1_;
+
+ public:
+ ChainedIter(const Iter0& iter0, const Iter1& iter1)
+ : iter0_(iter0), iter1_(iter1)
+ {}
+
+ bool done() const { return iter0_.done() && iter1_.done(); }
+ void next() {
+ MOZ_ASSERT(!done());
+ if (!iter0_.done()) {
+ iter0_.next();
+ } else {
+ MOZ_ASSERT(!iter1_.done());
+ iter1_.next();
+ }
+ }
+ T get() const {
+ MOZ_ASSERT(!done());
+ if (!iter0_.done())
+ return iter0_.get();
+ MOZ_ASSERT(!iter1_.done());
+ return iter1_.get();
+ }
+
+ operator T() const { return get(); }
+ T operator->() const { return get(); }
+};
+
// Maps each root registered via GCRuntime::addRoot to the name supplied at
// registration time.
typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy> RootedValueMap;

// A set of allocation kinds, e.g. for selecting which kinds need their cell
// pointers updated after compacting.
using AllocKinds = mozilla::EnumSet<AllocKind>;
+
/*
 * The per-runtime GC state machine. Owns the heap structure (chunk pools,
 * nursery, store buffer), the scheduling state and tunables, and every entry
 * point for triggering and running minor and major (possibly incremental)
 * collections.
 */
class GCRuntime
{
  public:
    explicit GCRuntime(JSRuntime* rt);
    MOZ_MUST_USE bool init(uint32_t maxbytes, uint32_t maxNurseryBytes);
    void finishRoots();
    void finish();

    inline bool hasZealMode(ZealMode mode);
    inline void clearZealMode(ZealMode mode);
    inline bool upcomingZealousGC();
    inline bool needZealousGC();

    MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
    void removeRoot(Value* vp);
    void setMarkStackLimit(size_t limit, AutoLockGC& lock);

    MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
    uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);

    MOZ_MUST_USE bool triggerGC(JS::gcreason::Reason reason);
    void maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock);
    // The return value indicates if we were able to do the GC.
    bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason);
    void maybeGC(Zone* zone);
    void minorGC(JS::gcreason::Reason reason,
                 gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
    // A nursery eviction is a minor GC accounted under a distinct stats phase.
    void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
        minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
    }
    // The return value indicates whether a major GC was performed.
    bool gcIfRequested();
    void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
    void startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis = 0);
    void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
    void finishGC(JS::gcreason::Reason reason);
    void abortGC();
    void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
    void debugGCSlice(SliceBudget& budget);

    // Consume a pending full-GC-for-atoms request and trigger the GC; must
    // only be called while such a request is outstanding.
    void triggerFullGCForAtoms() {
        MOZ_ASSERT(fullGCForAtomsRequested_);
        fullGCForAtomsRequested_ = false;
        MOZ_RELEASE_ASSERT(triggerGC(JS::gcreason::ALLOC_TRIGGER));
    }

    void runDebugGC();
    inline void poke();

    enum TraceOrMarkRuntime {
        TraceRuntime,
        MarkRuntime
    };
    void traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    void traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);

    void notifyDidPaint();
    void shrinkBuffers();
    void onOutOfMallocMemory();
    void onOutOfMallocMemory(const AutoLockGC& lock);

#ifdef JS_GC_ZEAL
    const void* addressOfZealModeBits() { return &zealModeBits; }
    void getZealBits(uint32_t* zealBits, uint32_t* frequency, uint32_t* nextScheduled);
    void setZeal(uint8_t zeal, uint32_t frequency);
    bool parseAndSetZeal(const char* str);
    void setNextScheduled(uint32_t count);
    void verifyPreBarriers();
    void maybeVerifyPreBarriers(bool always);
    bool selectForMarking(JSObject* object);
    void clearSelectedForMarking();
    void setDeterministic(bool enable);
#endif

    size_t maxMallocBytesAllocated() { return maxMallocBytes; }

    // Hand out the next cell unique id; ids start at 1 (the counter is
    // pre-incremented) and are never reused.
    uint64_t nextCellUniqueId() {
        MOZ_ASSERT(nextCellUniqueId_ > 0);
        uint64_t uid = ++nextCellUniqueId_;
        return uid;
    }

#ifdef DEBUG
    bool shutdownCollectedEverything() const {
        return arenasEmptyAtShutdown;
    }
#endif

  public:
    // Internal public interface
    State state() const { return incrementalState; }
    bool isHeapCompacting() const { return state() == State::Compact; }
    bool isForegroundSweeping() const { return state() == State::Sweep; }
    bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
    void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
    void waitBackgroundSweepOrAllocEnd() {
        helperState.waitBackgroundSweepEnd();
        allocTask.cancel(GCParallelTask::CancelAndWait);
    }

    void requestMinorGC(JS::gcreason::Reason reason);

#ifdef DEBUG
    bool onBackgroundThread() { return helperState.onBackgroundThread(); }
#endif // DEBUG

    void lockGC() {
        lock.lock();
    }

    void unlockGC() {
        lock.unlock();
    }

#ifdef DEBUG
    bool isAllocAllowed() { return noGCOrAllocationCheck == 0; }
    void disallowAlloc() { ++noGCOrAllocationCheck; }
    void allowAlloc() {
        MOZ_ASSERT(!isAllocAllowed());
        --noGCOrAllocationCheck;
    }

    bool isNurseryAllocAllowed() { return noNurseryAllocationCheck == 0; }
    void disallowNurseryAlloc() { ++noNurseryAllocationCheck; }
    void allowNurseryAlloc() {
        MOZ_ASSERT(!isNurseryAllocAllowed());
        --noNurseryAllocationCheck;
    }

    bool isStrictProxyCheckingEnabled() { return disableStrictProxyCheckingCount == 0; }
    void disableStrictProxyChecking() { ++disableStrictProxyCheckingCount; }
    void enableStrictProxyChecking() {
        MOZ_ASSERT(disableStrictProxyCheckingCount > 0);
        --disableStrictProxyCheckingCount;
    }
#endif // DEBUG

    bool isInsideUnsafeRegion() { return inUnsafeRegion != 0; }
    void enterUnsafeRegion() { ++inUnsafeRegion; }
    void leaveUnsafeRegion() {
        MOZ_ASSERT(inUnsafeRegion > 0);
        --inUnsafeRegion;
    }

    void verifyIsSafeToGC() {
        MOZ_DIAGNOSTIC_ASSERT(!isInsideUnsafeRegion(),
                              "[AutoAssertNoGC] possible GC in GC-unsafe region");
    }

    void setAlwaysPreserveCode() { alwaysPreserveCode = true; }

    bool isIncrementalGCAllowed() const { return incrementalAllowed; }
    void disallowIncrementalGC() { incrementalAllowed = false; }

    bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
    bool isIncrementalGCInProgress() const { return state() != State::NotActive; }

    bool isGenerationalGCEnabled() const { return generationalDisabled == 0; }
    void disableGenerationalGC();
    void enableGenerationalGC();

    void disableCompactingGC();
    void enableCompactingGC();
    bool isCompactingGCEnabled() const;

    void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
    MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
    void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);

    void setMaxMallocBytes(size_t value);
    int32_t getMallocBytes() const { return mallocBytesUntilGC; }
    void resetMallocBytes();
    bool isTooMuchMalloc() const { return mallocBytesUntilGC <= 0; }
    void updateMallocCounter(JS::Zone* zone, size_t nbytes);
    void onTooMuchMalloc();

    void setGCCallback(JSGCCallback callback, void* data);
    void callGCCallback(JSGCStatus status) const;
    void setObjectsTenuredCallback(JSObjectsTenuredCallback callback,
                                   void* data);
    void callObjectsTenuredCallback();
    MOZ_MUST_USE bool addFinalizeCallback(JSFinalizeCallback callback, void* data);
    void removeFinalizeCallback(JSFinalizeCallback func);
    MOZ_MUST_USE bool addWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback,
                                                      void* data);
    void removeWeakPointerZoneGroupCallback(JSWeakPointerZoneGroupCallback callback);
    MOZ_MUST_USE bool addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback,
                                                        void* data);
    void removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback);
    JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
    JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
        JS::GCNurseryCollectionCallback callback);
    JS::DoCycleCollectionCallback setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback);
    void callDoCycleCollectionCallback(JSContext* cx);

    void setFullCompartmentChecks(bool enable);

    bool isManipulatingDeadZones() { return manipulatingDeadZones; }
    void setManipulatingDeadZones(bool value) { manipulatingDeadZones = value; }
    unsigned objectsMarkedInDeadZonesCount() { return objectsMarkedInDeadZones; }
    void incObjectsMarkedInDeadZone() {
        MOZ_ASSERT(manipulatingDeadZones);
        ++objectsMarkedInDeadZones;
    }

    JS::Zone* getCurrentZoneGroup() { return currentZoneGroup; }
    void setFoundBlackGrayEdges(TenuredCell& target) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!foundBlackGrayEdges.append(&target))
            oomUnsafe.crash("OOM|small: failed to insert into foundBlackGrayEdges");
    }

    uint64_t gcNumber() const { return number; }

    uint64_t minorGCCount() const { return minorGCNumber; }
    void incMinorGcNumber() { ++minorGCNumber; ++number; }

    uint64_t majorGCCount() const { return majorGCNumber; }
    void incMajorGcNumber() { ++majorGCNumber; ++number; }

    int64_t defaultSliceBudget() const { return defaultTimeBudget_; }

    bool isIncrementalGc() const { return isIncremental; }
    bool isFullGc() const { return isFull; }
    bool isCompactingGc() const { return isCompacting; }

    bool minorGCRequested() const { return minorGCTriggerReason != JS::gcreason::NO_REASON; }
    bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }
    bool isGcNeeded() { return minorGCRequested() || majorGCRequested(); }

    bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }

    double computeHeapGrowthFactor(size_t lastBytes);
    size_t computeTriggerBytes(double growthFactor, size_t lastBytes);

    JSGCMode gcMode() const { return mode; }
    // Changing the mode is also propagated to the marker.
    void setGCMode(JSGCMode m) {
        mode = m;
        marker.setGCMode(mode);
    }

    inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
    inline void updateOnArenaFree(const ChunkInfo& info);

    // Accessors for the three chunk pools; callers must hold the GC lock
    // (witnessed by the AutoLockGC argument).
    ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_; }
    ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_; }
    ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_; }
    const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_; }
    const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_; }
    const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_; }
    typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
    NonEmptyChunksIter allNonEmptyChunks() {
        return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
    }

    Chunk* getOrAllocChunk(const AutoLockGC& lock,
                           AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
    void recycleChunk(Chunk* chunk, const AutoLockGC& lock);

#ifdef JS_GC_ZEAL
    void startVerifyPreBarriers();
    void endVerifyPreBarriers();
    void finishVerifier();
    bool isVerifyPreBarriersEnabled() const { return !!verifyPreData; }
#else
    bool isVerifyPreBarriersEnabled() const { return false; }
#endif

    // Free certain LifoAlloc blocks when it is safe to do so.
    void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
    void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
    void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);

    // Queue a thunk to run after the next minor GC.
    void callAfterMinorGC(void (*thunk)(void* data), void* data) {
        nursery.queueSweepAction(thunk, data);
    }

    // Public here for ReleaseArenaLists and FinalizeTypedArenas.
    void releaseArena(Arena* arena, const AutoLockGC& lock);

    void releaseHeldRelocatedArenas();
    void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);

    // Allocator
    template <AllowGC allowGC>
    MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
    template <AllowGC allowGC>
    JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
                                  const Class* clasp);
    template <AllowGC allowGC>
    static JSObject* tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
                                         size_t nDynamicSlots);
    template <typename T, AllowGC allowGC>
    static T* tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize);
    static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);

  private:
    enum IncrementalProgress
    {
        NotFinished = 0,
        Finished
    };

    // For ArenaLists::allocateFromArena()
    friend class ArenaLists;
    Chunk* pickChunk(const AutoLockGC& lock,
                     AutoMaybeStartBackgroundAllocation& maybeStartBGAlloc);
    Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind,
                         ShouldCheckThresholds checkThresholds, const AutoLockGC& lock);
    void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);

    // Allocator internals
    MOZ_MUST_USE bool gcIfNeededPerAllocation(JSContext* cx);
    template <typename T>
    static void checkIncrementalZoneState(ExclusiveContext* cx, T* t);
    static TenuredCell* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
                                                    size_t thingSize);
    static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
                                                     size_t thingSize);
    static TenuredCell* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);

    /*
     * Return the list of chunks that can be released outside the GC lock.
     * Must be called either during the GC or with the GC lock taken.
     */
    friend class BackgroundDecommitTask;
    ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
    void freeEmptyChunks(JSRuntime* rt, const AutoLockGC& lock);
    void prepareToFreeChunk(ChunkInfo& info);

    friend class BackgroundAllocTask;
    friend class AutoMaybeStartBackgroundAllocation;
    bool wantBackgroundAllocation(const AutoLockGC& lock) const;
    void startBackgroundAllocTaskIfIdle();

    void requestMajorGC(JS::gcreason::Reason reason);
    SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
    void budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock);
    void resetIncrementalGC(AbortReason reason, AutoLockForExclusiveAccess& lock);

    // Assert if the system state is such that we should never
    // receive a request to do GC work.
    void checkCanCallAPI();

    // Check if the system state is such that GC has been suppressed
    // or otherwise delayed.
    MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason);

    gcstats::ZoneGCStats scanZonesBeforeGC();
    void collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
    MOZ_MUST_USE bool gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
                              JS::gcreason::Reason reason);
    void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
                                 AutoLockForExclusiveAccess& lock);

    void pushZealSelectedObjects();
    void purgeRuntime(AutoLockForExclusiveAccess& lock);
    MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock);
    bool shouldPreserveJITCode(JSCompartment* comp, int64_t currentTime,
                               JS::gcreason::Reason reason, bool canAllocateMoreCode);
    void traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                            AutoLockForExclusiveAccess& lock);
    void bufferGrayRoots();
    void maybeDoCycleCollection();
    void markCompartments();
    IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::Phase phase);
    template <class CompartmentIterT> void markWeakReferences(gcstats::Phase phase);
    void markWeakReferencesInCurrentGroup(gcstats::Phase phase);
    template <class ZoneIterT, class CompartmentIterT> void markGrayReferences(gcstats::Phase phase);
    void markBufferedGrayRoots(JS::Zone* zone);
    void markGrayReferencesInCurrentGroup(gcstats::Phase phase);
    void markAllWeakReferences(gcstats::Phase phase);
    void markAllGrayReferences(gcstats::Phase phase);

    void beginSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
    void findZoneGroups(AutoLockForExclusiveAccess& lock);
    MOZ_MUST_USE bool findInterZoneEdges();
    void getNextZoneGroup();
    void endMarkingZoneGroup();
    void beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock);
    bool shouldReleaseObservedTypes();
    void endSweepingZoneGroup();
    IncrementalProgress sweepPhase(SliceBudget& sliceBudget, AutoLockForExclusiveAccess& lock);
    void endSweepPhase(bool lastGC, AutoLockForExclusiveAccess& lock);
    void sweepZones(FreeOp* fop, bool lastGC);
    void decommitAllWithoutUnlocking(const AutoLockGC& lock);
    void startDecommit();
    void queueZonesForBackgroundSweep(ZoneList& zones);
    void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks);
    void assertBackgroundSweepingFinished();
    bool shouldCompact();
    void beginCompactPhase();
    IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
                                     AutoLockForExclusiveAccess& lock);
    void endCompactPhase(JS::gcreason::Reason reason);
    void sweepTypesAfterCompacting(Zone* zone);
    void sweepZoneAfterCompacting(Zone* zone);
    MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::gcreason::Reason reason,
                                     Arena*& relocatedListOut, SliceBudget& sliceBudget);
    void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
    void updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, size_t bgTaskCount);
    void updateAllCellPointers(MovingTracer* trc, Zone* zone);
    void updatePointersToRelocatedCells(Zone* zone, AutoLockForExclusiveAccess& lock);
    void protectAndHoldArenas(Arena* arenaList);
    void unprotectHeldRelocatedArenas();
    void releaseRelocatedArenas(Arena* arenaList);
    void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock);
    void finishCollection(JS::gcreason::Reason reason);

    void computeNonIncrementalMarkingForValidation(AutoLockForExclusiveAccess& lock);
    void validateIncrementalMarking();
    void finishMarkingValidation();

#ifdef DEBUG
    void checkForCompartmentMismatches();
#endif

    void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
    void callWeakPointerZoneGroupCallbacks() const;
    void callWeakPointerCompartmentCallbacks(JSCompartment* comp) const;

  public:
    JSRuntime* rt;

    /* Embedders can use this zone however they wish. */
    JS::Zone* systemZone;

    /* List of compartments and zones (protected by the GC lock). */
    ZoneVector zones;

    Nursery nursery;
    StoreBuffer storeBuffer;

    gcstats::Statistics stats;

    GCMarker marker;

    /* Track heap usage for this runtime. */
    HeapUsage usage;

    /* GC scheduling state and parameters. */
    GCSchedulingTunables tunables;
    GCSchedulingState schedulingState;

    MemProfiler mMemProfiler;

  private:
    // When empty, chunks reside in the emptyChunks pool and are re-used as
    // needed or eventually expired if not re-used. The emptyChunks pool gets
    // refilled from the background allocation task heuristically so that empty
    // chunks should always be available for immediate allocation without
    // syscalls.
    ChunkPool emptyChunks_;

    // Chunks which have had some, but not all, of their arenas allocated live
    // in the available chunk lists. When all available arenas in a chunk have
    // been allocated, the chunk is removed from the available list and moved
    // to the fullChunks pool. During a GC, if all arenas are free, the chunk
    // is moved back to the emptyChunks pool and scheduled for eventual
    // release.
    ChunkPool availableChunks_;

    // When all arenas in a chunk are used, it is moved to the fullChunks pool
    // so as to reduce the cost of operations on the available lists.
    ChunkPool fullChunks_;

    /* Roots registered via addRoot, keyed by address, valued by name. */
    RootedValueMap rootsHash;

    size_t maxMallocBytes;

    // An incrementing id used to assign unique ids to cells that require one.
    mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;

    /*
     * Number of the committed arenas in all GC chunks including empty chunks.
     */
    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
    // Non-null while the pre-barrier verifier is active
    // (see isVerifyPreBarriersEnabled).
    VerifyPreTracer* verifyPreData;

  private:
    bool chunkAllocationSinceLastGC;
    int64_t lastGCTime;

    /* Current GC mode; see gcMode()/setGCMode(). */
    JSGCMode mode;

    // Count of active zone iterations; see AutoEnterIteration below, which
    // bumps this to keep zones alive while iterating.
    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;

    /* During shutdown, the GC needs to clean up every possible object. */
    bool cleanUpEverything;

    // Gray marking must be done after all black marking is complete. However,
    // we do not have write barriers on XPConnect roots. Therefore, XPConnect
    // roots must be accumulated in the first slice of incremental GC. We
    // accumulate these roots in each zone's gcGrayRoots vector and then mark
    // them later, after black marking is complete for each compartment. This
    // accumulation can fail, but in that case we switch to non-incremental GC.
    enum class GrayBufferState {
        Unused,
        Okay,
        Failed
    };
    GrayBufferState grayBufferState;
    bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }

    // Clear each zone's gray buffers, but do not change the current state.
    void resetBufferedGrayRoots() const;

    // Reset the gray buffering state to Unused.
    void clearBufferedGrayRoots() {
        grayBufferState = GrayBufferState::Unused;
        resetBufferedGrayRoots();
    }

    mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;

    JS::gcreason::Reason minorGCTriggerReason;

    /* Perform full GC if rt->keepAtoms() becomes false. */
    bool fullGCForAtomsRequested_;

    /* Incremented at the start of every minor GC. */
    uint64_t minorGCNumber;

    /* Incremented at the start of every major GC. */
    uint64_t majorGCNumber;

    /* The major GC number at which to release observed type information. */
    uint64_t jitReleaseNumber;

    /* Incremented on every GC slice. */
    uint64_t number;

    /* The number at the time of the most recent GC's first slice. */
    uint64_t startNumber;

    /* Whether the currently running GC can finish in multiple slices. */
    bool isIncremental;

    /* Whether all zones are being collected in first GC slice. */
    bool isFull;

    /* Whether the heap will be compacted at the end of GC. */
    bool isCompacting;

    /* The invocation kind of the current GC, taken from the first slice. */
    JSGCInvocationKind invocationKind;

    /* The initial GC reason, taken from the first slice. */
    JS::gcreason::Reason initialReason;

#ifdef DEBUG
    /*
     * If this is 0, all cross-compartment proxies must be registered in the
     * wrapper map. This checking must be disabled temporarily while creating
     * new wrappers. When non-zero, this records the recursion depth of wrapper
     * creation.
     */
    uintptr_t disableStrictProxyCheckingCount;
#endif

    /*
     * The current incremental GC phase. This is also used internally in
     * non-incremental GC.
     */
    State incrementalState;

    /* Indicates that the last incremental slice exhausted the mark stack. */
    bool lastMarkSlice;

    /* Whether any sweeping will take place in the separate GC helper thread. */
    bool sweepOnBackgroundThread;

    /* Whether observed type information is being released in the current GC. */
    bool releaseObservedTypes;

    /* Whether any black->gray edges were found during marking. */
    BlackGrayEdgeVector foundBlackGrayEdges;

    /* Singly linked list of zones to be swept in the background. */
    ZoneList backgroundSweepZones;

    /*
     * Free LIFO blocks are transferred to this allocator before being freed on
     * the background GC thread after sweeping.
     */
    LifoAlloc blocksToFreeAfterSweeping;

    /*
     * Free LIFO blocks are transferred to this allocator before being freed
     * after minor GC.
     */
    LifoAlloc blocksToFreeAfterMinorGC;

    /* Index of current zone group (for stats). */
    unsigned zoneGroupIndex;

    /*
     * Incremental sweep state.
     */
    JS::Zone* zoneGroups;
    JS::Zone* currentZoneGroup;
    bool sweepingTypes;
    unsigned finalizePhase;
    JS::Zone* sweepZone;
    AllocKind sweepKind;
    bool abortSweepAfterCurrentGroup;

    /*
     * Concurrent sweep infrastructure.
     */
    void startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);
    void joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperThreadState& locked);

    /*
     * List head of arenas allocated during the sweep phase.
     */
    Arena* arenasAllocatedDuringSweep;

    /*
     * Incremental compacting state.
     */
    bool startedCompacting;
    ZoneList zonesToMaybeCompact;
    Arena* relocatedArenasToRelease;

#ifdef JS_GC_ZEAL
    MarkingValidator* markingValidator;
#endif

    /*
     * Indicates that a GC slice has taken place in the middle of an animation
     * frame, rather than at the beginning. In this case, the next slice will be
     * delayed so that we don't get back-to-back slices.
     */
    bool interFrameGC;

    /* Default budget for incremental GC slice. See js/SliceBudget.h. */
    int64_t defaultTimeBudget_;

    /*
     * We disable incremental GC if we encounter a Class with a trace hook
     * that does not implement write barriers.
     */
    bool incrementalAllowed;

    /*
     * GGC can be enabled from the command line while testing.
     */
    unsigned generationalDisabled;

    /*
     * Whether compacting GC is enabled globally.
     */
    bool compactingEnabled;

    /*
     * Some code cannot tolerate compacting GC so it can be disabled temporarily
     * with AutoDisableCompactingGC which uses this counter.
     */
    unsigned compactingDisabledCount;

    /*
     * This is true if we are in the middle of a brain transplant (e.g.,
     * JS_TransplantObject) or some other operation that can manipulate
     * dead zones.
     */
    bool manipulatingDeadZones;

    /*
     * This field is incremented each time we mark an object inside a
     * zone with no incoming cross-compartment pointers. Typically if
     * this happens it signals that an incremental GC is marking too much
     * stuff. At various times we check this counter and, if it has changed, we
     * run an immediate, non-incremental GC to clean up the dead
     * zones. This should happen very rarely.
     */
    unsigned objectsMarkedInDeadZones;

    bool poked;

    /*
     * These options control the zealousness of the GC. At every allocation,
     * nextScheduled is decremented. When it reaches zero we do a full GC.
     *
     * At this point, if zeal_ is one of the types that trigger periodic
     * collection, then nextScheduled is reset to the value of zealFrequency.
     * Otherwise, no additional GCs take place.
     *
     * You can control these values in several ways:
     *   - Set the JS_GC_ZEAL environment variable
     *   - Call gczeal() or schedulegc() from inside shell-executed JS code
     *     (see the help for details)
     *
     * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
     * whenever a GC poke happens). This option is mainly useful to embedders.
     *
     * We use zeal_ == 4 to enable write barrier verification. See the comment
     * in jsgc.cpp for more information about this.
     *
     * zeal_ values from 8 to 10 periodically run different types of
     * incremental GC.
     *
     * zeal_ value 14 performs periodic shrinking collections.
     */
#ifdef JS_GC_ZEAL
    uint32_t zealModeBits;
    int zealFrequency;
    int nextScheduled;
    bool deterministicOnly;
    int incrementalLimit;

    Vector<JSObject*, 0, SystemAllocPolicy> selectedForMarking;
#endif

    bool fullCompartmentChecks;

    Callback<JSGCCallback> gcCallback;
    Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
    Callback<JSObjectsTenuredCallback> tenuredCallback;
    CallbackVector<JSFinalizeCallback> finalizeCallbacks;
    CallbackVector<JSWeakPointerZoneGroupCallback> updateWeakPointerZoneGroupCallbacks;
    CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;

    /*
     * Malloc counter to measure memory pressure for GC scheduling. It runs
     * from maxMallocBytes down to zero.
     */
    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> mallocBytesUntilGC;

    /*
     * Whether a GC has been triggered as a result of mallocBytesUntilGC
     * falling below zero.
     */
    mozilla::Atomic<bool, mozilla::ReleaseAcquire> mallocGCTriggered;

    /*
     * The trace operations to trace embedding-specific GC roots. One is for
     * tracing through black roots and the other is for tracing through gray
     * roots. The black/gray distinction is only relevant to the cycle
     * collector.
     */
    CallbackVector<JSTraceDataOp> blackRootTracers;
    Callback<JSTraceDataOp> grayRootTracer;

    /* Always preserve JIT code during GCs, for testing. */
    bool alwaysPreserveCode;

    /*
     * Some regions of code are hard for the static rooting hazard analysis to
     * understand. In those cases, we trade the static analysis for a dynamic
     * analysis. When this is non-zero, we should assert if we trigger, or
     * might trigger, a GC.
     */
    int inUnsafeRegion;

#ifdef DEBUG
    size_t noGCOrAllocationCheck;
    size_t noNurseryAllocationCheck;

    bool arenasEmptyAtShutdown;
#endif

    /* Synchronize GC heap access between main thread and GCHelperState. */
    friend class js::AutoLockGC;
    js::Mutex lock;

    BackgroundAllocTask allocTask;
    BackgroundDecommitTask decommitTask;
    GCHelperState helperState;

    /*
     * During incremental sweeping, this field temporarily holds the arenas of
     * the current AllocKind being swept in order of increasing free space.
     */
    SortedArenaList incrementalSweepList;

    friend class js::GCHelperState;
    friend class MarkingValidator;
    friend class AutoTraceSession;
    friend class AutoEnterIteration;
};
+
+/* Prevent compartments and zones from being collected during iteration. */
+class MOZ_RAII AutoEnterIteration {
+ GCRuntime* gc;
+
+ public:
+ explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
+ ++gc->numActiveZoneIters;
+ }
+
+ ~AutoEnterIteration() {
+ MOZ_ASSERT(gc->numActiveZoneIters);
+ --gc->numActiveZoneIters;
+ }
+};
+
+// After pulling a Chunk out of the empty chunks pool, we want to run the
+// background allocator to refill it. The code that takes Chunks does so under
+// the GC lock. We need to start the background allocation under the helper
+// threads lock. To avoid lock inversion we have to delay the start until after
+// we are outside the GC lock. This class handles that delay automatically.
+class MOZ_RAII AutoMaybeStartBackgroundAllocation
+{
+ GCRuntime* gc;
+
+ public:
+ AutoMaybeStartBackgroundAllocation()
+ : gc(nullptr)
+ {}
+
+ void tryToStartBackgroundAllocation(GCRuntime& gc) {
+ this->gc = &gc;
+ }
+
+ ~AutoMaybeStartBackgroundAllocation() {
+ if (gc)
+ gc->startBackgroundAllocTaskIfIdle();
+ }
+};
+
#ifdef JS_GC_ZEAL

// Test whether the given zeal mode's bit is set. The static_assert ensures
// every mode fits in the zealModeBits bit field.
inline bool
GCRuntime::hasZealMode(ZealMode mode)
{
    static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
                  "Zeal modes must fit in zealModeBits");
    return zealModeBits & (1 << uint32_t(mode));
}

inline void
GCRuntime::clearZealMode(ZealMode mode)
{
    zealModeBits &= ~(1 << uint32_t(mode));
    MOZ_ASSERT(!hasZealMode(mode));
}

inline bool
GCRuntime::upcomingZealousGC() {
    return nextScheduled == 1;
}

// Decrement the zeal allocation countdown and report whether a zealous GC is
// now due. For the periodic zeal modes the countdown is re-armed with
// zealFrequency so collections keep recurring.
inline bool
GCRuntime::needZealousGC() {
    if (nextScheduled > 0 && --nextScheduled == 0) {
        if (hasZealMode(ZealMode::Alloc) ||
            hasZealMode(ZealMode::GenerationalGC) ||
            hasZealMode(ZealMode::IncrementalRootsThenFinish) ||
            hasZealMode(ZealMode::IncrementalMarkAllThenFinish) ||
            hasZealMode(ZealMode::IncrementalMultipleSlices) ||
            hasZealMode(ZealMode::Compact))
        {
            nextScheduled = zealFrequency;
        }
        return true;
    }
    return false;
}
#else
// Without JS_GC_ZEAL the zeal machinery compiles away to no-ops.
inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
inline void GCRuntime::clearZealMode(ZealMode mode) { }
inline bool GCRuntime::upcomingZealousGC() { return false; }
inline bool GCRuntime::needZealousGC() { return false; }
#endif
+
+} /* namespace gc */
+
+} /* namespace js */
+
+#endif
diff --git a/js/src/gc/GCTrace.cpp b/js/src/gc/GCTrace.cpp
new file mode 100644
index 000000000..5a336b093
--- /dev/null
+++ b/js/src/gc/GCTrace.cpp
@@ -0,0 +1,243 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef JS_GC_TRACE
+
+#include "gc/GCTrace.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#include "gc/GCTraceFormat.h"
+
+#include "js/HashTable.h"
+
+using namespace js;
+using namespace js::gc;
+
+JS_STATIC_ASSERT(AllocKinds == unsigned(AllocKind::LIMIT));
+JS_STATIC_ASSERT(LastObjectAllocKind == unsigned(AllocKind::OBJECT_LAST));
+
+// Output stream for the binary trace log; null while tracing is disabled.
+static FILE* gcTraceFile = nullptr;
+
+// Classes and object groups whose metadata has already been written, so each
+// is described in the log at most once.
+static HashSet<const Class*, DefaultHasher<const Class*>, SystemAllocPolicy> tracedClasses;
+static HashSet<const ObjectGroup*, DefaultHasher<const ObjectGroup*>, SystemAllocPolicy> tracedGroups;
+
+// Append one raw 64-bit word to the trace log; no-op when tracing is off.
+static inline void
+WriteWord(uint64_t data)
+{
+    if (!gcTraceFile)
+        return;
+    fwrite(&data, sizeof(data), 1, gcTraceFile);
+}
+
+// Pack an event code, an extra byte and a payload into one trace word and
+// emit it. See GCTraceFormat.h for the bit layout.
+static inline void
+TraceEvent(GCTraceEvent event, uint64_t payload = 0, uint8_t extra = 0)
+{
+    MOZ_ASSERT(event < GCTraceEventCount);
+    MOZ_ASSERT((payload >> TracePayloadBits) == 0);
+    uint64_t word = uint64_t(event) << TraceEventShift;
+    word |= uint64_t(extra) << TraceExtraShift;
+    word |= payload;
+    WriteWord(word);
+}
+
+// Emit a pointer as a data word that follows the preceding event.
+static inline void
+TraceAddress(const void* p)
+{
+    uint64_t payload = uint64_t(p);
+    TraceEvent(TraceDataAddress, payload);
+}
+
+// Emit a 32-bit integer as a data word that follows the preceding event.
+static inline void
+TraceInt(uint32_t data)
+{
+    TraceEvent(TraceDataInt, uint64_t(data));
+}
+
+// Emit a string as a length event followed by its characters packed into
+// 64-bit words; the tail of the final word is zero-padded (matching the
+// zero-fill strncpy would perform).
+static void
+TraceString(const char* string)
+{
+    JS_STATIC_ASSERT(sizeof(char) == 1);
+
+    const unsigned charsPerWord = sizeof(uint64_t);
+    size_t remaining = strlen(string);
+    TraceEvent(TraceDataString, remaining);
+
+    const char* cursor = string;
+    while (remaining) {
+        char chars[charsPerWord] = {};  // zero so the last word is padded
+        size_t n = remaining < charsPerWord ? remaining : charsPerWord;
+        memcpy(chars, cursor, n);
+        uint64_t word;
+        memcpy(&word, chars, sizeof(word));
+        WriteWord(word);
+        cursor += n;
+        remaining -= n;
+    }
+}
+
+// Open the trace log named by the JS_GC_TRACE environment variable, emit the
+// format-version header and the per-kind thing sizes. Returns true on
+// success and also when the variable is unset (tracing simply stays off);
+// returns false (after cleaning up) on initialization failure.
+bool
+js::gc::InitTrace(GCRuntime& gc)
+{
+    /* This currently does not support multiple runtimes. */
+    MOZ_ALWAYS_TRUE(!gcTraceFile);
+
+    char* filename = getenv("JS_GC_TRACE");
+    if (!filename)
+        return true;
+
+    // Fix: the de-duplication sets are named tracedClasses and tracedGroups;
+    // this previously referenced an undeclared 'tracedTypes'.
+    if (!tracedClasses.init() || !tracedGroups.init()) {
+        FinishTrace();
+        return false;
+    }
+
+    gcTraceFile = fopen(filename, "w");
+    if (!gcTraceFile) {
+        FinishTrace();
+        return false;
+    }
+
+    TraceEvent(TraceEventInit, 0, TraceFormatVersion);
+
+    /* Trace information about thing sizes. */
+    for (auto kind : AllAllocKinds())
+        TraceEvent(TraceEventThingSize, Arena::thingSize(kind));
+
+    return true;
+}
+
+// Close the trace log (if open) and release the metadata de-duplication
+// sets. Safe to call when tracing never started; also used as the error
+// path of InitTrace.
+void
+js::gc::FinishTrace()
+{
+    if (gcTraceFile) {
+        fclose(gcTraceFile);
+        gcTraceFile = nullptr;
+    }
+    tracedClasses.finish();
+    // Fix: was 'tracedTypes', which is not declared anywhere in this file;
+    // the group set is named tracedGroups.
+    tracedGroups.finish();
+}
+
+// True while a trace log is currently open.
+bool
+js::gc::TraceEnabled()
+{
+    return !!gcTraceFile;
+}
+
+// Record a nursery allocation of |size| bytes at |thing|. Null |thing|
+// (failed allocation) is ignored.
+void
+js::gc::TraceNurseryAlloc(Cell* thing, size_t size)
+{
+    if (thing) {
+        /* We don't have AllocKind here, but we can work it out from size. */
+        unsigned slots = (size - sizeof(JSObject)) / sizeof(JS::Value);
+        AllocKind kind = GetBackgroundAllocKind(GetGCObjectKind(slots));
+        // AllocKind is a scoped enum, so it needs an explicit conversion to
+        // fit TraceEvent's uint8_t 'extra' parameter.
+        TraceEvent(TraceEventNurseryAlloc, uint64_t(thing), uint8_t(kind));
+    }
+}
+
+// Record a tenured allocation of the given kind at |thing|. Null |thing|
+// (failed allocation) is ignored.
+void
+js::gc::TraceTenuredAlloc(Cell* thing, AllocKind kind)
+{
+    if (thing) {
+        // Explicit cast: scoped enums do not convert implicitly to uint8_t.
+        TraceEvent(TraceEventTenuredAlloc, uint64_t(thing), uint8_t(kind));
+    }
+}
+
+// Emit metadata for |clasp| the first time it is seen: its name, flags, and
+// whether it defines a finalizer.
+static void
+MaybeTraceClass(const Class* clasp)
+{
+    if (tracedClasses.has(clasp))
+        return;
+
+    TraceEvent(TraceEventClassInfo, uint64_t(clasp));
+    TraceString(clasp->name);
+    TraceInt(clasp->flags);
+    bool hasFinalizer = clasp->finalize != nullptr;
+    TraceInt(hasFinalizer);
+
+    MOZ_ALWAYS_TRUE(tracedClasses.put(clasp));
+}
+
+// Emit metadata for |group| the first time it is seen, making sure its class
+// has been described first.
+static void
+MaybeTraceGroup(ObjectGroup* group)
+{
+    if (tracedGroups.has(group))
+        return;
+
+    const Class* clasp = group->clasp();
+    MaybeTraceClass(clasp);
+    TraceEvent(TraceEventGroupInfo, uint64_t(group));
+    TraceAddress(clasp);
+    TraceInt(group->flags());
+
+    MOZ_ALWAYS_TRUE(tracedGroups.put(group));
+}
+
+/*
+ * Record the association of a TypeNewScript with |group|, logging the name
+ * of the constructor function.
+ *
+ * NOTE(review): |buffer| is a function-local static, so this is not safe to
+ * call concurrently from multiple threads -- confirm all callers run on the
+ * main thread.
+ */
+void
+js::gc::TraceTypeNewScript(ObjectGroup* group)
+{
+    const size_t bufLength = 128;
+    static char buffer[bufLength];
+    MOZ_ASSERT(group->hasNewScript());
+    JSAtom* funName = group->newScript()->fun->displayAtom();
+    if (!funName)
+        return;
+
+    // Names that do not fit are a hard failure rather than being truncated.
+    size_t length = funName->length();
+    MOZ_ALWAYS_TRUE(length < bufLength);
+    CopyChars(reinterpret_cast<Latin1Char*>(buffer), *funName);
+    buffer[length] = 0;
+
+    TraceEvent(TraceEventTypeNewScript, uint64_t(group));
+    TraceString(buffer);
+}
+
+/*
+ * Record the creation of |object|. Metadata for the object's group (and the
+ * group's class) is emitted first if it has not been traced before.
+ */
+void
+js::gc::TraceCreateObject(JSObject* object)
+{
+    if (!gcTraceFile)
+        return;
+
+    ObjectGroup* group = object->group();
+    MaybeTraceGroup(group);
+    TraceEvent(TraceEventCreateObject, uint64_t(object));
+    TraceAddress(group);
+}
+
+/* Mark the beginning of a minor (nursery) collection in the log. */
+void
+js::gc::TraceMinorGCStart()
+{
+    TraceEvent(TraceEventMinorGCStart);
+}
+
+/*
+ * Record that nursery cell |src| was moved to tenured cell |dst| during a
+ * minor GC; the destination follows as a separate data word.
+ */
+void
+js::gc::TracePromoteToTenured(Cell* src, Cell* dst)
+{
+    TraceEvent(TraceEventPromoteToTenured, uint64_t(src));
+    TraceAddress(dst);
+}
+
+/* Mark the end of a minor (nursery) collection in the log. */
+void
+js::gc::TraceMinorGCEnd()
+{
+    TraceEvent(TraceEventMinorGCEnd);
+}
+
+/* Mark the beginning of a major (tenured-heap) collection in the log. */
+void
+js::gc::TraceMajorGCStart()
+{
+    TraceEvent(TraceEventMajorGCStart);
+}
+
+/* Record the finalization of the tenured cell |thing|. */
+void
+js::gc::TraceTenuredFinalize(Cell* thing)
+{
+    if (!gcTraceFile)
+        return;
+    // A dying group's address may be recycled, so drop it from the
+    // de-duplication set; a new group at this address will be re-traced.
+    if (thing->tenuredGetAllocKind() == AllocKind::OBJECT_GROUP)
+        tracedGroups.remove(static_cast<const ObjectGroup*>(thing));
+    TraceEvent(TraceEventTenuredFinalize, uint64_t(thing));
+}
+
+/* Mark the end of a major (tenured-heap) collection in the log. */
+void
+js::gc::TraceMajorGCEnd()
+{
+    TraceEvent(TraceEventMajorGCEnd);
+}
+
+#endif
diff --git a/js/src/gc/GCTrace.h b/js/src/gc/GCTrace.h
new file mode 100644
index 000000000..dc3586dd9
--- /dev/null
+++ b/js/src/gc/GCTrace.h
@@ -0,0 +1,55 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCTrace_h
+#define gc_GCTrace_h
+
+#include "gc/Heap.h"
+
+namespace js {
+
+class ObjectGroup;
+
+namespace gc {
+
+#ifdef JS_GC_TRACE
+
+// Tracing entry points, implemented in GCTrace.cpp when JS_GC_TRACE is
+// enabled.
+extern MOZ_MUST_USE bool InitTrace(GCRuntime& gc);
+extern void FinishTrace();
+extern bool TraceEnabled();
+extern void TraceNurseryAlloc(Cell* thing, size_t size);
+extern void TraceTenuredAlloc(Cell* thing, AllocKind kind);
+extern void TraceCreateObject(JSObject* object);
+extern void TraceMinorGCStart();
+extern void TracePromoteToTenured(Cell* src, Cell* dst);
+extern void TraceMinorGCEnd();
+extern void TraceMajorGCStart();
+extern void TraceTenuredFinalize(Cell* thing);
+extern void TraceMajorGCEnd();
+extern void TraceTypeNewScript(js::ObjectGroup* group);
+
+#else
+
+// Tracing compiled out: inline no-op stubs so call sites need no #ifdefs.
+inline MOZ_MUST_USE bool InitTrace(GCRuntime& gc) { return true; }
+inline void FinishTrace() {}
+inline bool TraceEnabled() { return false; }
+inline void TraceNurseryAlloc(Cell* thing, size_t size) {}
+inline void TraceTenuredAlloc(Cell* thing, AllocKind kind) {}
+inline void TraceCreateObject(JSObject* object) {}
+inline void TraceMinorGCStart() {}
+inline void TracePromoteToTenured(Cell* src, Cell* dst) {}
+inline void TraceMinorGCEnd() {}
+inline void TraceMajorGCStart() {}
+inline void TraceTenuredFinalize(Cell* thing) {}
+inline void TraceMajorGCEnd() {}
+inline void TraceTypeNewScript(js::ObjectGroup* group) {}
+
+#endif
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif
diff --git a/js/src/gc/GCTraceFormat.h b/js/src/gc/GCTraceFormat.h
new file mode 100644
index 000000000..82998b37d
--- /dev/null
+++ b/js/src/gc/GCTraceFormat.h
@@ -0,0 +1,56 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_GCTraceFormat_h
+#define gc_GCTraceFormat_h
+
+/*
+ * Each trace is stored as a 64-bit word with the following format:
+ *
+ * 56 48 0
+ * +----------------+----------------+-----------------------------------------+
+ * | Event type | Optional extra | Optional payload |
+ * +----------------+----------------+-----------------------------------------+
+ */
+
+enum GCTraceEvent {
+    // Events
+    TraceEventInit,
+    TraceEventThingSize,
+    TraceEventNurseryAlloc,
+    TraceEventTenuredAlloc,
+    TraceEventClassInfo,
+    TraceEventTypeInfo,
+    TraceEventTypeNewScript,
+    TraceEventCreateObject,
+    TraceEventMinorGCStart,
+    TraceEventPromoteToTenured,
+    TraceEventMinorGCEnd,
+    TraceEventMajorGCStart,
+    TraceEventTenuredFinalize,
+    TraceEventMajorGCEnd,
+
+    // Data words that follow certain events above.
+    TraceDataAddress,  // following TraceEventPromoteToTenured
+    TraceDataInt,      // following TraceEventClassInfo
+    TraceDataString,   // following TraceEventClassInfo
+
+    GCTraceEventCount
+};
+
+const unsigned TraceFormatVersion = 1;
+
+// Field widths/positions of a trace word (see the diagram above).
+const unsigned TracePayloadBits = 48;
+
+const unsigned TraceExtraShift = 48;
+const unsigned TraceExtraBits = 8;
+
+const unsigned TraceEventShift = 56;
+const unsigned TraceEventBits = 8;
+
+// These mirror AllocKind::LIMIT and AllocKind::OBJECT_LAST from gc/Heap.h
+// and are verified by JS_STATIC_ASSERTs in GCTrace.cpp. Fix: the previous
+// values (22 and 11) were stale -- Heap.h defines 28 alloc kinds, with
+// object kinds occupying indexes 0..13.
+const unsigned AllocKinds = 28;
+const unsigned LastObjectAllocKind = 13;
+
+#endif
diff --git a/js/src/gc/Heap-inl.h b/js/src/gc/Heap-inl.h
new file mode 100644
index 000000000..0126e74ae
--- /dev/null
+++ b/js/src/gc/Heap-inl.h
@@ -0,0 +1,29 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_inl_h
+#define gc_Heap_inl_h
+
+#include "gc/StoreBuffer.h"
+
+// Initialize a freshly obtained arena to hold things of |kind| for |zoneArg|.
+// The arena must currently be in the not-allocated state established by
+// setAsNotAllocated().
+inline void
+js::gc::Arena::init(JS::Zone* zoneArg, AllocKind kind)
+{
+    MOZ_ASSERT(firstFreeSpan.isEmpty());
+    MOZ_ASSERT(!zone);
+    MOZ_ASSERT(!allocated());
+    MOZ_ASSERT(!hasDelayedMarking);
+    MOZ_ASSERT(!allocatedDuringIncremental);
+    MOZ_ASSERT(!markOverflow);
+    MOZ_ASSERT(!auxNextLink);
+
+    zone = zoneArg;
+    allocKind = size_t(kind);
+    // setAsFullyUnused() reads allocKind, so it must be assigned first.
+    setAsFullyUnused();
+    bufferedCells = &ArenaCellSet::Empty;
+}
+
+#endif
diff --git a/js/src/gc/Heap.h b/js/src/gc/Heap.h
new file mode 100644
index 000000000..2a9390e91
--- /dev/null
+++ b/js/src/gc/Heap.h
@@ -0,0 +1,1385 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Heap_h
+#define gc_Heap_h
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/EnumeratedRange.h"
+#include "mozilla/PodOperations.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "jsfriendapi.h"
+#include "jspubtd.h"
+#include "jstypes.h"
+#include "jsutil.h"
+
+#include "ds/BitArray.h"
+#include "gc/Memory.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+
+struct JSRuntime;
+
+namespace JS {
+namespace shadow {
+struct Runtime;
+} // namespace shadow
+} // namespace JS
+
+namespace js {
+
+class AutoLockGC;
+class FreeOp;
+
+extern bool
+RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone);
+
+#ifdef DEBUG
+
+// Barriers can't be triggered during backend Ion compilation, which may run on
+// a helper thread.
+extern bool
+CurrentThreadIsIonCompiling();
+#endif
+
+// The return value indicates if anything was unmarked.
+extern bool
+UnmarkGrayCellRecursively(gc::Cell* cell, JS::TraceKind kind);
+
+extern void
+TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp, const char* name);
+
+namespace gc {
+
+class Arena;
+class ArenaCellSet;
+class ArenaList;
+class SortedArenaList;
+struct Chunk;
+
+/*
+ * This flag allows an allocation site to request a specific heap based upon the
+ * estimated lifetime or lifetime requirements of objects allocated from that
+ * site.
+ */
+enum InitialHeap {
+    DefaultHeap,  // no particular lifetime requirement from the site
+    TenuredHeap   // request allocation directly in the tenured heap
+};
+
+/* The GC allocation kinds. */
+// FIXME: uint8_t would make more sense for the underlying type, but causes
+// miscompilations in GCC (fixed in 4.8.5 and 4.9.3). See also bug 1143966.
+// Note: OBJECT_LIMIT is a sentinel that shares its numeric value with
+// SCRIPT, the first non-object kind; real object kinds run from
+// OBJECT_FIRST to OBJECT_LAST. FIRST, OBJECT_FIRST and FUNCTION are all
+// aliases for value 0.
+enum class AllocKind {
+    FIRST,
+    OBJECT_FIRST = FIRST,
+    FUNCTION = FIRST,
+    FUNCTION_EXTENDED,
+    OBJECT0,
+    OBJECT0_BACKGROUND,
+    OBJECT2,
+    OBJECT2_BACKGROUND,
+    OBJECT4,
+    OBJECT4_BACKGROUND,
+    OBJECT8,
+    OBJECT8_BACKGROUND,
+    OBJECT12,
+    OBJECT12_BACKGROUND,
+    OBJECT16,
+    OBJECT16_BACKGROUND,
+    OBJECT_LIMIT,
+    OBJECT_LAST = OBJECT_LIMIT - 1,
+    SCRIPT,
+    LAZY_SCRIPT,
+    SHAPE,
+    ACCESSOR_SHAPE,
+    BASE_SHAPE,
+    OBJECT_GROUP,
+    FAT_INLINE_STRING,
+    STRING,
+    EXTERNAL_STRING,
+    FAT_INLINE_ATOM,
+    ATOM,
+    SYMBOL,
+    JITCODE,
+    SCOPE,
+    LIMIT,
+    LAST = LIMIT - 1
+};
+
+// Macro to enumerate the different allocation kinds supplying information about
+// the trace kind, C++ type and allocation size.
+#define FOR_EACH_OBJECT_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType */ \
+ D(FUNCTION, Object, JSObject, JSFunction) \
+ D(FUNCTION_EXTENDED, Object, JSObject, FunctionExtended) \
+ D(OBJECT0, Object, JSObject, JSObject_Slots0) \
+ D(OBJECT0_BACKGROUND, Object, JSObject, JSObject_Slots0) \
+ D(OBJECT2, Object, JSObject, JSObject_Slots2) \
+ D(OBJECT2_BACKGROUND, Object, JSObject, JSObject_Slots2) \
+ D(OBJECT4, Object, JSObject, JSObject_Slots4) \
+ D(OBJECT4_BACKGROUND, Object, JSObject, JSObject_Slots4) \
+ D(OBJECT8, Object, JSObject, JSObject_Slots8) \
+ D(OBJECT8_BACKGROUND, Object, JSObject, JSObject_Slots8) \
+ D(OBJECT12, Object, JSObject, JSObject_Slots12) \
+ D(OBJECT12_BACKGROUND, Object, JSObject, JSObject_Slots12) \
+ D(OBJECT16, Object, JSObject, JSObject_Slots16) \
+ D(OBJECT16_BACKGROUND, Object, JSObject, JSObject_Slots16)
+
+#define FOR_EACH_NONOBJECT_ALLOCKIND(D) \
+ /* AllocKind TraceKind TypeName SizedType */ \
+ D(SCRIPT, Script, JSScript, JSScript) \
+ D(LAZY_SCRIPT, LazyScript, js::LazyScript, js::LazyScript) \
+ D(SHAPE, Shape, js::Shape, js::Shape) \
+ D(ACCESSOR_SHAPE, Shape, js::AccessorShape, js::AccessorShape) \
+ D(BASE_SHAPE, BaseShape, js::BaseShape, js::BaseShape) \
+ D(OBJECT_GROUP, ObjectGroup, js::ObjectGroup, js::ObjectGroup) \
+ D(FAT_INLINE_STRING, String, JSFatInlineString, JSFatInlineString) \
+ D(STRING, String, JSString, JSString) \
+ D(EXTERNAL_STRING, String, JSExternalString, JSExternalString) \
+ D(FAT_INLINE_ATOM, String, js::FatInlineAtom, js::FatInlineAtom) \
+ D(ATOM, String, js::NormalAtom, js::NormalAtom) \
+ D(SYMBOL, Symbol, JS::Symbol, JS::Symbol) \
+ D(JITCODE, JitCode, js::jit::JitCode, js::jit::JitCode) \
+ D(SCOPE, Scope, js::Scope, js::Scope)
+
+#define FOR_EACH_ALLOCKIND(D) \
+ FOR_EACH_OBJECT_ALLOCKIND(D) \
+ FOR_EACH_NONOBJECT_ALLOCKIND(D)
+
+static_assert(int(AllocKind::FIRST) == 0, "Various places depend on AllocKind starting at 0, "
+ "please audit them carefully!");
+static_assert(int(AllocKind::OBJECT_FIRST) == 0, "Various places depend on AllocKind::OBJECT_FIRST "
+ "being 0, please audit them carefully!");
+
+// True for any alloc kind, including the AllocKind::LIMIT sentinel, which
+// marks arenas that hold no GC things (see Arena::allocKind and
+// Arena::allocated()).
+inline bool
+IsAllocKind(AllocKind kind)
+{
+    return kind >= AllocKind::FIRST && kind <= AllocKind::LIMIT;
+}
+
+// True only for kinds that denote real GC things; the LIMIT sentinel is
+// rejected.
+inline bool
+IsValidAllocKind(AllocKind kind)
+{
+    return kind >= AllocKind::FIRST && kind < AllocKind::LIMIT;
+}
+
+// True for kinds that allocate JSObjects (FUNCTION through
+// OBJECT16_BACKGROUND).
+inline bool
+IsObjectAllocKind(AllocKind kind)
+{
+    return kind >= AllocKind::OBJECT_FIRST && kind <= AllocKind::OBJECT_LAST;
+}
+
+// True for the two shape kinds.
+inline bool
+IsShapeAllocKind(AllocKind kind)
+{
+    switch (kind) {
+      case AllocKind::SHAPE:
+      case AllocKind::ACCESSOR_SHAPE:
+        return true;
+      default:
+        return false;
+    }
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all alloc kinds (FIRST inclusive to LIMIT exclusive).
+inline decltype(mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT))
+AllAllocKinds()
+{
+    return mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over all object alloc kinds (OBJECT_FIRST inclusive to
+// OBJECT_LIMIT exclusive).
+inline decltype(mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST, AllocKind::OBJECT_LIMIT))
+ObjectAllocKinds()
+{
+    return mozilla::MakeEnumeratedRange(AllocKind::OBJECT_FIRST, AllocKind::OBJECT_LIMIT);
+}
+
+// Returns a sequence for use in a range-based for loop,
+// to iterate over alloc kinds from |first| to |limit|, exclusive.
+// IsAllocKind accepts the LIMIT sentinel, so |limit| may be
+// AllocKind::LIMIT (the default).
+inline decltype(mozilla::MakeEnumeratedRange(AllocKind::FIRST, AllocKind::LIMIT))
+SomeAllocKinds(AllocKind first = AllocKind::FIRST, AllocKind limit = AllocKind::LIMIT)
+{
+    MOZ_ASSERT(IsAllocKind(first), "|first| is not a valid AllocKind!");
+    MOZ_ASSERT(IsAllocKind(limit), "|limit| is not a valid AllocKind!");
+    return mozilla::MakeEnumeratedRange(first, limit);
+}
+
+// AllAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular alloc kind. The array is
+// indexable directly with AllocKind values.
+template<typename ValueType> using AllAllocKindArray =
+    mozilla::EnumeratedArray<AllocKind, AllocKind::LIMIT, ValueType>;
+
+// ObjectAllocKindArray<ValueType> gives an enumerated array of ValueTypes,
+// with each index corresponding to a particular object alloc kind.
+template<typename ValueType> using ObjectAllocKindArray =
+    mozilla::EnumeratedArray<AllocKind, AllocKind::OBJECT_LIMIT, ValueType>;
+
+/*
+ * Map an AllocKind to the JS::TraceKind of the things it allocates, via a
+ * lookup table generated from the FOR_EACH_ALLOCKIND table above.
+ */
+static inline JS::TraceKind
+MapAllocToTraceKind(AllocKind kind)
+{
+    static const JS::TraceKind map[] = {
+#define EXPAND_ELEMENT(allocKind, traceKind, type, sizedType) \
+        JS::TraceKind::traceKind,
+FOR_EACH_ALLOCKIND(EXPAND_ELEMENT)
+#undef EXPAND_ELEMENT
+    };
+
+    static_assert(MOZ_ARRAY_LENGTH(map) == size_t(AllocKind::LIMIT),
+                  "AllocKind-to-TraceKind mapping must be in sync");
+    return map[size_t(kind)];
+}
+
+/*
+ * This must be an upper bound, but we do not need the least upper bound, so
+ * we just exclude non-background objects.
+ *
+ * Note the operator precedence is intentional: every other object kind is a
+ * *_BACKGROUND variant, so the OBJECT_LIMIT / 2 non-background object kinds
+ * are subtracted from the total, i.e. LIMIT - (OBJECT_LIMIT / 2).
+ */
+static const size_t MAX_BACKGROUND_FINALIZE_KINDS =
+    size_t(AllocKind::LIMIT) - size_t(AllocKind::OBJECT_LIMIT) / 2;
+
+class TenuredCell;
+
+// A GC cell is the base class for all GC things.
+struct Cell
+{
+  public:
+    // A cell is tenured unless it lies inside the nursery.
+    MOZ_ALWAYS_INLINE bool isTenured() const { return !IsInsideNursery(this); }
+    MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
+    MOZ_ALWAYS_INLINE TenuredCell& asTenured();
+
+    inline JSRuntime* runtimeFromMainThread() const;
+    inline JS::shadow::Runtime* shadowRuntimeFromMainThread() const;
+
+    // Note: Unrestricted access to the runtime of a GC thing from an arbitrary
+    // thread can easily lead to races. Use this method very carefully.
+    inline JSRuntime* runtimeFromAnyThread() const;
+    inline JS::shadow::Runtime* shadowRuntimeFromAnyThread() const;
+
+    // May be overridden by GC thing kinds that have a compartment pointer.
+    inline JSCompartment* maybeCompartment() const { return nullptr; }
+
+    inline StoreBuffer* storeBuffer() const;
+
+    inline JS::TraceKind getTraceKind() const;
+
+    static MOZ_ALWAYS_INLINE bool needWriteBarrierPre(JS::Zone* zone);
+
+#ifdef DEBUG
+    inline bool isAligned() const;
+    void dump(FILE* fp) const;
+    void dump() const;
+#endif
+
+  protected:
+    // Raw address of this cell, and the chunk that contains it.
+    inline uintptr_t address() const;
+    inline Chunk* chunk() const;
+} JS_HAZ_GC_THING;
+
+// A GC TenuredCell gets behaviors that are valid for things in the Tenured
+// heap, such as access to the arena and mark bits.
+class TenuredCell : public Cell
+{
+  public:
+    // Construct a TenuredCell from a void*, making various sanity assertions.
+    static MOZ_ALWAYS_INLINE TenuredCell* fromPointer(void* ptr);
+    static MOZ_ALWAYS_INLINE const TenuredCell* fromPointer(const void* ptr);
+
+    // Mark bit management.
+    MOZ_ALWAYS_INLINE bool isMarked(uint32_t color = BLACK) const;
+    // The return value indicates if the cell went from unmarked to marked.
+    MOZ_ALWAYS_INLINE bool markIfUnmarked(uint32_t color = BLACK) const;
+    MOZ_ALWAYS_INLINE void unmark(uint32_t color) const;
+    MOZ_ALWAYS_INLINE void copyMarkBitsFrom(const TenuredCell* src);
+
+    // Note: this is in TenuredCell because JSObject subclasses are sometimes
+    // used tagged.
+    static MOZ_ALWAYS_INLINE bool isNullLike(const Cell* thing) { return !thing; }
+
+    // Access to the arena.
+    inline Arena* arena() const;
+    inline AllocKind getAllocKind() const;
+    inline JS::TraceKind getTraceKind() const;
+    inline JS::Zone* zone() const;
+    inline JS::Zone* zoneFromAnyThread() const;
+    inline bool isInsideZone(JS::Zone* zone) const;
+
+    MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZone() const {
+        return JS::shadow::Zone::asShadowZone(zone());
+    }
+    MOZ_ALWAYS_INLINE JS::shadow::Zone* shadowZoneFromAnyThread() const {
+        return JS::shadow::Zone::asShadowZone(zoneFromAnyThread());
+    }
+
+    // Read and write barrier entry points for tenured cells.
+    static MOZ_ALWAYS_INLINE void readBarrier(TenuredCell* thing);
+    static MOZ_ALWAYS_INLINE void writeBarrierPre(TenuredCell* thing);
+
+    static MOZ_ALWAYS_INLINE void writeBarrierPost(void* cellp, TenuredCell* prior,
+                                                   TenuredCell* next);
+
+    // Default implementation for kinds that don't require fixup.
+    void fixupAfterMovingGC() {}
+
+#ifdef DEBUG
+    inline bool isAligned() const;
+#endif
+};
+
+/* Cells are aligned to CellShift, so the largest tagged null pointer is: */
+const uintptr_t LargestTaggedNullCellPointer = (1 << CellShift) - 1;
+
+/*
+ * Divide |numerator| by |divisor|, rounding the result up to the next
+ * integer. Computed as quotient + (remainder != 0) rather than
+ * (numerator + divisor - 1) / divisor so the intermediate sum cannot
+ * overflow size_t for numerators near SIZE_MAX.
+ */
+constexpr size_t
+DivideAndRoundUp(size_t numerator, size_t divisor) {
+    return numerator / divisor + (numerator % divisor != 0 ? 1 : 0);
+}
+
+const size_t ArenaCellCount = ArenaSize / CellSize;
+static_assert(ArenaSize % CellSize == 0, "Arena size must be a multiple of cell size");
+
+/*
+ * The mark bitmap has one bit per GC cell. For multi-cell GC things this
+ * wastes space but avoids expensive divisions by the thing's size when
+ * accessing the bitmap. It also leaves spare bits available for colored
+ * marking during the cycle GC.
+ */
+const size_t ArenaBitmapBits = ArenaCellCount;
+const size_t ArenaBitmapBytes = DivideAndRoundUp(ArenaBitmapBits, 8);
+const size_t ArenaBitmapWords = DivideAndRoundUp(ArenaBitmapBits, JS_BITS_PER_WORD);
+
+/*
+ * A FreeSpan represents a contiguous sequence of free cells in an Arena. It
+ * can take two forms.
+ *
+ * - In an empty span, |first| and |last| are both zero.
+ *
+ * - In a non-empty span, |first| is the address of the first free thing in the
+ * span, and |last| is the address of the last free thing in the span.
+ * Furthermore, the memory pointed to by |last| holds a FreeSpan structure
+ * that points to the next span (which may be empty); this works because
+ * sizeof(FreeSpan) is less than the smallest thingSize.
+ */
+class FreeSpan
+{
+    friend class Arena;
+    friend class ArenaCellIterImpl;
+
+    // Byte offsets, relative to the arena start, of the first and last free
+    // things in this span; both are zero when the span is empty.
+    uint16_t first;
+    uint16_t last;
+
+  public:
+    // This inits just |first| and |last|; if the span is non-empty it doesn't
+    // do anything with the next span stored at |last|.
+    void initBounds(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
+        checkRange(firstArg, lastArg, arena);
+        first = firstArg;
+        last = lastArg;
+    }
+
+    void initAsEmpty() {
+        first = 0;
+        last = 0;
+    }
+
+    // This sets |first| and |last|, and also sets the next span stored at
+    // |last| as empty. (As a result, |firstArg| and |lastArg| cannot represent
+    // an empty span.)
+    void initFinal(uintptr_t firstArg, uintptr_t lastArg, const Arena* arena) {
+        initBounds(firstArg, lastArg, arena);
+        FreeSpan* last = nextSpanUnchecked(arena);
+        last->initAsEmpty();
+        checkSpan(arena);
+    }
+
+    bool isEmpty() const {
+        return !first;
+    }
+
+    // The allocation-time FreeSpan lives at the start of its Arena (see
+    // Arena::firstFreeSpan), so its own address is the arena address.
+    Arena* getArenaUnchecked() { return reinterpret_cast<Arena*>(this); }
+    inline Arena* getArena();
+
+    static size_t offsetOfFirst() {
+        return offsetof(FreeSpan, first);
+    }
+
+    static size_t offsetOfLast() {
+        return offsetof(FreeSpan, last);
+    }
+
+    // Like nextSpan(), but no checking of the following span is done.
+    FreeSpan* nextSpanUnchecked(const Arena* arena) const {
+        MOZ_ASSERT(arena && !isEmpty());
+        return reinterpret_cast<FreeSpan*>(uintptr_t(arena) + last);
+    }
+
+    const FreeSpan* nextSpan(const Arena* arena) const {
+        checkSpan(arena);
+        return nextSpanUnchecked(arena);
+    }
+
+    // Carve one thing of |thingSize| bytes off the front of the span,
+    // returning null when the span is empty.
+    MOZ_ALWAYS_INLINE TenuredCell* allocate(size_t thingSize) {
+        // Eschew the usual checks, because this might be the placeholder span.
+        // If this is somehow an invalid, non-empty span, checkSpan() will catch it.
+        Arena* arena = getArenaUnchecked();
+        checkSpan(arena);
+        uintptr_t thing = uintptr_t(arena) + first;
+        if (first < last) {
+            // We have space for at least two more things, so do a simple bump-allocate.
+            first += thingSize;
+        } else if (MOZ_LIKELY(first)) {
+            // The last space points to the next free span (which may be empty).
+            const FreeSpan* next = nextSpan(arena);
+            first = next->first;
+            last = next->last;
+        } else {
+            return nullptr; // The span is empty.
+        }
+        checkSpan(arena);
+        JS_EXTRA_POISON(reinterpret_cast<void*>(thing), JS_ALLOCATED_TENURED_PATTERN, thingSize);
+        MemProfiler::SampleTenured(reinterpret_cast<void*>(thing), thingSize);
+        return reinterpret_cast<TenuredCell*>(thing);
+    }
+
+    inline void checkSpan(const Arena* arena) const;
+    inline void checkRange(uintptr_t first, uintptr_t last, const Arena* arena) const;
+};
+
+/*
+ * Arenas are the allocation units of the tenured heap in the GC. An arena
+ * is 4kiB in size and 4kiB-aligned. It starts with several header fields
+ * followed by some bytes of padding. The remainder of the arena is filled
+ * with GC things of a particular AllocKind. The padding ensures that the
+ * GC thing array ends exactly at the end of the arena:
+ *
+ * <----------------------------------------------> = ArenaSize bytes
+ * +---------------+---------+----+----+-----+----+
+ * | header fields | padding | T0 | T1 | ... | Tn |
+ * +---------------+---------+----+----+-----+----+
+ * <-------------------------> = first thing offset
+ */
+class Arena
+{
+ static JS_FRIEND_DATA(const uint32_t) ThingSizes[];
+ static JS_FRIEND_DATA(const uint32_t) FirstThingOffsets[];
+ static JS_FRIEND_DATA(const uint32_t) ThingsPerArena[];
+
+ /*
+ * The first span of free things in the arena. Most of these spans are
+ * stored as offsets in free regions of the data array, and most operations
+ * on FreeSpans take an Arena pointer for safety. However, the FreeSpans
+ * used for allocation are stored here, at the start of an Arena, and use
+ * their own address to grab the next span within the same Arena.
+ */
+ FreeSpan firstFreeSpan;
+
+ public:
+ /*
+ * The zone that this Arena is contained within, when allocated. The offset
+ * of this field must match the ArenaZoneOffset stored in js/HeapAPI.h,
+ * as is statically asserted below.
+ */
+ JS::Zone* zone;
+
+ /*
+ * Arena::next has two purposes: when unallocated, it points to the next
+ * available Arena. When allocated, it points to the next Arena in the same
+ * zone and with the same alloc kind.
+ */
+ Arena* next;
+
+ private:
+ /*
+ * One of the AllocKind constants or AllocKind::LIMIT when the arena does
+ * not contain any GC things and is on the list of empty arenas in the GC
+ * chunk.
+ *
+ * We use 8 bits for the alloc kind so the compiler can use byte-level
+ * memory instructions to access it.
+ */
+ size_t allocKind : 8;
+
+ public:
+ /*
+ * When collecting we sometimes need to keep an auxillary list of arenas,
+ * for which we use the following fields. This happens for several reasons:
+ *
+ * When recursive marking uses too much stack, the marking is delayed and
+ * the corresponding arenas are put into a stack. To distinguish the bottom
+ * of the stack from the arenas not present in the stack we use the
+ * markOverflow flag to tag arenas on the stack.
+ *
+ * Delayed marking is also used for arenas that we allocate into during an
+ * incremental GC. In this case, we intend to mark all the objects in the
+ * arena, and it's faster to do this marking in bulk.
+ *
+ * When sweeping we keep track of which arenas have been allocated since
+ * the end of the mark phase. This allows us to tell whether a pointer to
+ * an unmarked object is yet to be finalized or has already been
+ * reallocated. We set the allocatedDuringIncremental flag for this and
+ * clear it at the end of the sweep phase.
+ *
+ * To minimize the size of the header fields we record the next linkage as
+ * address() >> ArenaShift and pack it with the allocKind and the flags.
+ */
+ size_t hasDelayedMarking : 1;
+ size_t allocatedDuringIncremental : 1;
+ size_t markOverflow : 1;
+ size_t auxNextLink : JS_BITS_PER_WORD - 8 - 1 - 1 - 1;
+ static_assert(ArenaShift >= 8 + 1 + 1 + 1,
+ "Arena::auxNextLink packing assumes that ArenaShift has "
+ "enough bits to cover allocKind and hasDelayedMarking.");
+
+ /*
+ * If non-null, points to an ArenaCellSet that represents the set of cells
+ * in this arena that are in the nursery's store buffer.
+ */
+ ArenaCellSet* bufferedCells;
+
+ /*
+ * The size of data should be |ArenaSize - offsetof(data)|, but the offset
+ * is not yet known to the compiler, so we do it by hand. |firstFreeSpan|
+ * takes up 8 bytes on 64-bit due to alignment requirements; the rest are
+ * obvious. This constant is stored in js/HeapAPI.h.
+ */
+ uint8_t data[ArenaSize - ArenaHeaderSize];
+
+ void init(JS::Zone* zoneArg, AllocKind kind);
+
+ // Sets |firstFreeSpan| to the Arena's entire valid range, and
+ // also sets the next span stored at |firstFreeSpan.last| as empty.
+ void setAsFullyUnused() {
+ AllocKind kind = getAllocKind();
+ firstFreeSpan.first = firstThingOffset(kind);
+ firstFreeSpan.last = lastThingOffset(kind);
+ FreeSpan* last = firstFreeSpan.nextSpanUnchecked(this);
+ last->initAsEmpty();
+ }
+
+ void setAsNotAllocated() {
+ firstFreeSpan.initAsEmpty();
+ zone = nullptr;
+ allocKind = size_t(AllocKind::LIMIT);
+ hasDelayedMarking = 0;
+ allocatedDuringIncremental = 0;
+ markOverflow = 0;
+ auxNextLink = 0;
+ bufferedCells = nullptr;
+ }
+
+ uintptr_t address() const {
+ checkAddress();
+ return uintptr_t(this);
+ }
+
+ inline void checkAddress() const;
+
+ inline Chunk* chunk() const;
+
+ bool allocated() const {
+ MOZ_ASSERT(IsAllocKind(AllocKind(allocKind)));
+ return IsValidAllocKind(AllocKind(allocKind));
+ }
+
+ AllocKind getAllocKind() const {
+ MOZ_ASSERT(allocated());
+ return AllocKind(allocKind);
+ }
+
+ FreeSpan* getFirstFreeSpan() { return &firstFreeSpan; }
+
+ static size_t thingSize(AllocKind kind) { return ThingSizes[size_t(kind)]; }
+ static size_t thingsPerArena(AllocKind kind) { return ThingsPerArena[size_t(kind)]; }
+ static size_t thingsSpan(AllocKind kind) { return thingsPerArena(kind) * thingSize(kind); }
+
+ static size_t firstThingOffset(AllocKind kind) { return FirstThingOffsets[size_t(kind)]; }
+ static size_t lastThingOffset(AllocKind kind) { return ArenaSize - thingSize(kind); }
+
+ size_t getThingSize() const { return thingSize(getAllocKind()); }
+ size_t getThingsPerArena() const { return thingsPerArena(getAllocKind()); }
+ size_t getThingsSpan() const { return getThingsPerArena() * getThingSize(); }
+
+ uintptr_t thingsStart() const { return address() + firstThingOffset(getAllocKind()); }
+ uintptr_t thingsEnd() const { return address() + ArenaSize; }
+
+ bool isEmpty() const {
+ // Arena is empty if its first span covers the whole arena.
+ firstFreeSpan.checkSpan(this);
+ AllocKind kind = getAllocKind();
+ return firstFreeSpan.first == firstThingOffset(kind) &&
+ firstFreeSpan.last == lastThingOffset(kind);
+ }
+
+ bool hasFreeThings() const { return !firstFreeSpan.isEmpty(); }
+
+ size_t numFreeThings(size_t thingSize) const {
+ firstFreeSpan.checkSpan(this);
+ size_t numFree = 0;
+ const FreeSpan* span = &firstFreeSpan;
+ for (; !span->isEmpty(); span = span->nextSpan(this))
+ numFree += (span->last - span->first) / thingSize + 1;
+ return numFree;
+ }
+
+ size_t countFreeCells() { return numFreeThings(getThingSize()); }
+ size_t countUsedCells() { return getThingsPerArena() - countFreeCells(); }
+
+ // Whether |thing| (an address inside this arena) is currently on the
+ // free list. Free spans are ordered by address, so the walk can stop at
+ // the first span starting past |thing|.
+ bool inFreeList(uintptr_t thing) {
+ uintptr_t base = address();
+ const FreeSpan* span = &firstFreeSpan;
+ for (; !span->isEmpty(); span = span->nextSpan(this)) {
+ /* If the thing comes before the current span, it's not free. */
+ if (thing < base + span->first)
+ return false;
+
+ /* If we find it before the end of the span, it's free. */
+ if (thing <= base + span->last)
+ return true;
+ }
+ return false;
+ }
+
+ // Check that |thing| lies on a valid cell boundary for |thingSize|.
+ static bool isAligned(uintptr_t thing, size_t thingSize) {
+ /* Things end at the arena end, so measure alignment from the tail. */
+ uintptr_t tailOffset = ArenaSize - (thing & ArenaMask);
+ return tailOffset % thingSize == 0;
+ }
+
+ // auxNextLink stores the next arena's address shifted right by
+ // ArenaShift, and is shared between two mutually-exclusive singly-linked
+ // lists: the delayed-marking list (hasDelayedMarking set) and the
+ // allocated-during-incremental-sweep list (allocatedDuringIncremental
+ // set). The asserts below enforce that an arena is on at most one list.
+ Arena* getNextDelayedMarking() const {
+ MOZ_ASSERT(hasDelayedMarking);
+ return reinterpret_cast<Arena*>(auxNextLink << ArenaShift);
+ }
+
+ void setNextDelayedMarking(Arena* arena) {
+ MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
+ MOZ_ASSERT(!auxNextLink && !hasDelayedMarking);
+ hasDelayedMarking = 1;
+ if (arena)
+ auxNextLink = arena->address() >> ArenaShift;
+ }
+
+ void unsetDelayedMarking() {
+ MOZ_ASSERT(hasDelayedMarking);
+ hasDelayedMarking = 0;
+ auxNextLink = 0;
+ }
+
+ Arena* getNextAllocDuringSweep() const {
+ MOZ_ASSERT(allocatedDuringIncremental);
+ return reinterpret_cast<Arena*>(auxNextLink << ArenaShift);
+ }
+
+ void setNextAllocDuringSweep(Arena* arena) {
+ MOZ_ASSERT(!(uintptr_t(arena) & ArenaMask));
+ MOZ_ASSERT(!auxNextLink && !allocatedDuringIncremental);
+ allocatedDuringIncremental = 1;
+ if (arena)
+ auxNextLink = arena->address() >> ArenaShift;
+ }
+
+ void unsetAllocDuringSweep() {
+ MOZ_ASSERT(allocatedDuringIncremental);
+ allocatedDuringIncremental = 0;
+ auxNextLink = 0;
+ }
+
+ // Sweep dead cells of type T in this arena and rebuild its free list
+ // (defined out of line).
+ template <typename T>
+ size_t finalize(FreeOp* fop, AllocKind thingKind, size_t thingSize);
+
+ static void staticAsserts();
+
+ // Clear the mark bits for every cell in this arena.
+ void unmarkAll();
+
+ // Offset of the bufferedCells field, for code that accesses it by raw
+ // offset rather than through this struct.
+ static size_t offsetOfBufferedCells() {
+ return offsetof(Arena, bufferedCells);
+ }
+};
+
+// Compile-time checks that Arena's layout matches the constants hardcoded
+// in the public API (and relied on by JIT-generated code).
+static_assert(ArenaZoneOffset == offsetof(Arena, zone),
+ "The hardcoded API zone offset must match the actual offset.");
+
+static_assert(sizeof(Arena) == ArenaSize,
+ "ArenaSize must match the actual size of the Arena structure.");
+
+static_assert(offsetof(Arena, data) == ArenaHeaderSize,
+ "ArenaHeaderSize must match the actual size of the header fields.");
+
+// Return the arena containing this span, validating the arena address in
+// debug builds.
+inline Arena*
+FreeSpan::getArena()
+{
+ Arena* arena = getArenaUnchecked();
+ arena->checkAddress();
+ return arena;
+}
+
+// Debug-only validation of this span and its successor. An empty span is
+// encoded as first == last == 0, so a zero |first| means nothing to check.
+inline void
+FreeSpan::checkSpan(const Arena* arena) const
+{
+#ifdef DEBUG
+ if (!first) {
+ MOZ_ASSERT(!first && !last);
+ return;
+ }
+
+ arena->checkAddress();
+ checkRange(first, last, arena);
+
+ // If there's a following span, it must have a higher address,
+ // and the gap must be at least 2 * thingSize.
+ const FreeSpan* next = nextSpanUnchecked(arena);
+ if (next->first) {
+ checkRange(next->first, next->last, arena);
+ size_t thingSize = arena->getThingSize();
+ MOZ_ASSERT(last + 2 * thingSize <= next->first);
+ }
+#endif
+}
+
+// Debug-only check that [first, last] is a valid, thing-aligned offset
+// range for the arena's alloc kind.
+inline void
+FreeSpan::checkRange(uintptr_t first, uintptr_t last, const Arena* arena) const
+{
+#ifdef DEBUG
+ MOZ_ASSERT(arena);
+ MOZ_ASSERT(first <= last);
+ AllocKind thingKind = arena->getAllocKind();
+ MOZ_ASSERT(first >= Arena::firstThingOffset(thingKind));
+ MOZ_ASSERT(last <= Arena::lastThingOffset(thingKind));
+ MOZ_ASSERT((last - first) % Arena::thingSize(thingKind) == 0);
+#endif
+}
+
+/*
+ * The tail of the chunk info is shared between all chunks in the system, both
+ * nursery and tenured. This structure is locatable from any GC pointer by
+ * aligning to 1MiB.
+ */
+struct ChunkTrailer
+{
+ /* Construct a Nursery ChunkTrailer. */
+ ChunkTrailer(JSRuntime* rt, StoreBuffer* sb)
+ : location(ChunkLocation::Nursery), storeBuffer(sb), runtime(rt)
+ {}
+
+ /* Construct a Tenured heap ChunkTrailer. */
+ explicit ChunkTrailer(JSRuntime* rt)
+ : location(ChunkLocation::TenuredHeap), storeBuffer(nullptr), runtime(rt)
+ {}
+
+ public:
+ /* Whether the chunk belongs to the nursery or the tenured heap. */
+ ChunkLocation location;
+ /* Explicit padding; the overall size is pinned by the assert below. */
+ uint32_t padding;
+
+ /* The store buffer for writes to things in this chunk or nullptr. */
+ StoreBuffer* storeBuffer;
+
+ /* This provides quick access to the runtime from absolutely anywhere. */
+ JSRuntime* runtime;
+};
+
+static_assert(sizeof(ChunkTrailer) == ChunkTrailerSize,
+ "ChunkTrailer size must match the API defined size.");
+
+/* The chunk header (located at the end of the chunk to preserve arena alignment). */
+struct ChunkInfo
+{
+ void init() {
+ next = prev = nullptr;
+ }
+
+ private:
+ // Doubly-linked list membership; maintained only by ChunkPool.
+ friend class ChunkPool;
+ Chunk* next;
+ Chunk* prev;
+
+ public:
+ /* Free arenas are linked together with arena.next. */
+ Arena* freeArenasHead;
+
+#if JS_BITS_PER_WORD == 32
+ /*
+ * Calculating sizes and offsets is simpler if sizeof(ChunkInfo) is
+ * architecture-independent.
+ */
+ char padding[24];
+#endif
+
+ /*
+ * Decommitted arenas are tracked by a bitmap in the chunk header. We use
+ * this offset to start our search iteration close to a decommitted arena
+ * that we can allocate.
+ */
+ uint32_t lastDecommittedArenaOffset;
+
+ /* Number of free arenas, either committed or decommitted. */
+ uint32_t numArenasFree;
+
+ /* Number of free, committed arenas. */
+ uint32_t numArenasFreeCommitted;
+};
+
+/*
+ * Calculating ArenasPerChunk:
+ *
+ * In order to figure out how many Arenas will fit in a chunk, we need to know
+ * how much extra space is available after we allocate the header data. This
+ * is a problem because the header size depends on the number of arenas in the
+ * chunk. The two dependent fields are bitmap and decommittedArenas.
+ *
+ * For the mark bitmap, we know that each arena will use a fixed number of full
+ * bytes: ArenaBitmapBytes. The full size of the header data is this number
+ * multiplied by the eventual number of arenas we have in the header. We,
+ * conceptually, distribute this header data among the individual arenas and do
+ * not include it in the header. This way we do not have to worry about its
+ * variable size: it gets attached to the variable number we are computing.
+ *
+ * For the decommitted arena bitmap, we only have 1 bit per arena, so this
+ * technique will not work. Instead, we observe that we do not have enough
+ * header info to fill 8 full arenas: it is currently 4 on 64bit, less on
+ * 32bit. Thus, with current numbers, we need 64 bytes for decommittedArenas.
+ * This will not become 63 bytes unless we double the data required in the
+ * header. Therefore, we just compute the number of bytes required to track
+ * every possible arena and do not worry about slop bits, since there are too
+ * few to usefully allocate.
+ *
+ * To actually compute the number of arenas we can allocate in a chunk, we
+ * divide the amount of available space less the header info (not including
+ * the mark bitmap which is distributed into the arena size) by the size of
+ * the arena (with the mark bitmap bytes it uses).
+ */
+// Derivation of these constants is explained in the comment above.
+const size_t BytesPerArenaWithHeader = ArenaSize + ArenaBitmapBytes;
+const size_t ChunkDecommitBitmapBytes = ChunkSize / ArenaSize / JS_BITS_PER_BYTE;
+const size_t ChunkBytesAvailable = ChunkSize - sizeof(ChunkTrailer) - sizeof(ChunkInfo) - ChunkDecommitBitmapBytes;
+const size_t ArenasPerChunk = ChunkBytesAvailable / BytesPerArenaWithHeader;
+
+// Guard against layout changes silently altering how densely the heap packs.
+#ifdef JS_GC_SMALL_CHUNK_SIZE
+static_assert(ArenasPerChunk == 62, "Do not accidentally change our heap's density.");
+#else
+static_assert(ArenasPerChunk == 252, "Do not accidentally change our heap's density.");
+#endif
+
+/* A chunk bitmap contains enough mark bits for all the cells in a chunk. */
+struct ChunkBitmap
+{
+ volatile uintptr_t bitmap[ArenaBitmapWords * ArenasPerChunk];
+
+ public:
+ ChunkBitmap() { }
+
+ // Locate the bitmap word and bit mask covering |cell| at |color|.
+ MOZ_ALWAYS_INLINE void getMarkWordAndMask(const Cell* cell, uint32_t color,
+ uintptr_t** wordp, uintptr_t* maskp)
+ {
+ detail::GetGCThingMarkWordAndMask(uintptr_t(cell), color, wordp, maskp);
+ }
+
+ MOZ_ALWAYS_INLINE MOZ_TSAN_BLACKLIST bool isMarked(const Cell* cell, uint32_t color) {
+ uintptr_t* word, mask;
+ getMarkWordAndMask(cell, color, &word, &mask);
+ return *word & mask;
+ }
+
+ // The return value indicates if the cell went from unmarked to marked.
+ // Marking any color always sets the BLACK bit first; a non-BLACK color
+ // additionally sets the bit at that color's offset.
+ MOZ_ALWAYS_INLINE bool markIfUnmarked(const Cell* cell, uint32_t color) {
+ uintptr_t* word, mask;
+ getMarkWordAndMask(cell, BLACK, &word, &mask);
+ if (*word & mask)
+ return false;
+ *word |= mask;
+ if (color != BLACK) {
+ /*
+ * We use getMarkWordAndMask to recalculate both mask and word as
+ * doing just mask << color may overflow the mask.
+ */
+ getMarkWordAndMask(cell, color, &word, &mask);
+ if (*word & mask)
+ return false;
+ *word |= mask;
+ }
+ return true;
+ }
+
+ MOZ_ALWAYS_INLINE void unmark(const Cell* cell, uint32_t color) {
+ uintptr_t* word, mask;
+ getMarkWordAndMask(cell, color, &word, &mask);
+ *word &= ~mask;
+ }
+
+ // Copy the |color| mark bit for |src| onto |dst|'s bit in this bitmap.
+ MOZ_ALWAYS_INLINE void copyMarkBit(Cell* dst, const TenuredCell* src, uint32_t color) {
+ uintptr_t* word, mask;
+ getMarkWordAndMask(dst, color, &word, &mask);
+ *word = (*word & ~mask) | (src->isMarked(color) ? mask : 0);
+ }
+
+ void clear() {
+ memset((void*)bitmap, 0, sizeof(bitmap));
+ }
+
+ // Pointer to the first bitmap word covering |arena|'s cells.
+ uintptr_t* arenaBits(Arena* arena) {
+ static_assert(ArenaBitmapBits == ArenaBitmapWords * JS_BITS_PER_WORD,
+ "We assume that the part of the bitmap corresponding to the arena "
+ "has the exact number of words so we do not need to deal with a word "
+ "that covers bits from two arenas.");
+
+ uintptr_t* word, unused;
+ getMarkWordAndMask(reinterpret_cast<Cell*>(arena->address()), BLACK, &word, &unused);
+ return word;
+ }
+};
+
+static_assert(ArenaBitmapBytes * ArenasPerChunk == sizeof(ChunkBitmap),
+ "Ensure our ChunkBitmap actually covers all arenas.");
+static_assert(js::gc::ChunkMarkBitmapBits == ArenaBitmapBits * ArenasPerChunk,
+ "Ensure that the mark bitmap has the right number of bits.");
+
+// One bit per arena, used to track decommitted arenas in a chunk.
+typedef BitArray<ArenasPerChunk> PerArenaBitmap;
+
+// Whatever bytes remain in a chunk after the arenas and all header/trailer
+// structures are accounted for.
+const size_t ChunkPadSize = ChunkSize
+ - (sizeof(Arena) * ArenasPerChunk)
+ - sizeof(ChunkBitmap)
+ - sizeof(PerArenaBitmap)
+ - sizeof(ChunkInfo)
+ - sizeof(ChunkTrailer);
+static_assert(ChunkPadSize < BytesPerArenaWithHeader,
+ "If the chunk padding is larger than an arena, we should have one more arena.");
+
+/*
+ * Chunks contain arenas and associated data structures (mark bitmap, delayed
+ * marking state).
+ */
+struct Chunk
+{
+ Arena arenas[ArenasPerChunk];
+
+ /* Pad to full size to ensure cache alignment of ChunkInfo. */
+ uint8_t padding[ChunkPadSize];
+
+ ChunkBitmap bitmap;
+ PerArenaBitmap decommittedArenas;
+ ChunkInfo info;
+ ChunkTrailer trailer;
+
+ // Chunks are ChunkSize-aligned, so masking recovers the owning chunk.
+ static Chunk* fromAddress(uintptr_t addr) {
+ addr &= ~ChunkMask;
+ return reinterpret_cast<Chunk*>(addr);
+ }
+
+ // Whether |addr| can point at a cell: nursery chunks use everything up
+ // to the trailer, tenured chunks only the arena region.
+ static bool withinValidRange(uintptr_t addr) {
+ uintptr_t offset = addr & ChunkMask;
+ return Chunk::fromAddress(addr)->isNurseryChunk()
+ ? offset < ChunkSize - sizeof(ChunkTrailer)
+ : offset < ArenasPerChunk * ArenaSize;
+ }
+
+ static size_t arenaIndex(uintptr_t addr) {
+ MOZ_ASSERT(!Chunk::fromAddress(addr)->isNurseryChunk());
+ MOZ_ASSERT(withinValidRange(addr));
+ return (addr & ChunkMask) >> ArenaShift;
+ }
+
+ uintptr_t address() const {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+ MOZ_ASSERT(!(addr & ChunkMask));
+ return addr;
+ }
+
+ bool unused() const {
+ return info.numArenasFree == ArenasPerChunk;
+ }
+
+ bool hasAvailableArenas() const {
+ return info.numArenasFree != 0;
+ }
+
+ // A chunk is a nursery chunk exactly when its trailer has a store buffer.
+ bool isNurseryChunk() const {
+ return trailer.storeBuffer;
+ }
+
+ Arena* allocateArena(JSRuntime* rt, JS::Zone* zone, AllocKind kind, const AutoLockGC& lock);
+
+ void releaseArena(JSRuntime* rt, Arena* arena, const AutoLockGC& lock);
+ void recycleArena(Arena* arena, SortedArenaList& dest, size_t thingsPerArena);
+
+ MOZ_MUST_USE bool decommitOneFreeArena(JSRuntime* rt, AutoLockGC& lock);
+ void decommitAllArenasWithoutUnlocking(const AutoLockGC& lock);
+
+ static Chunk* allocate(JSRuntime* rt);
+ void init(JSRuntime* rt);
+
+ private:
+ void decommitAllArenas(JSRuntime* rt);
+
+ /* Search for a decommitted arena to allocate. */
+ unsigned findDecommittedArenaOffset();
+ Arena* fetchNextDecommittedArena();
+
+ void addArenaToFreeList(JSRuntime* rt, Arena* arena);
+ void addArenaToDecommittedList(JSRuntime* rt, const Arena* arena);
+
+ void updateChunkListAfterAlloc(JSRuntime* rt, const AutoLockGC& lock);
+ void updateChunkListAfterFree(JSRuntime* rt, const AutoLockGC& lock);
+
+ public:
+ /* Unlink and return the freeArenasHead. */
+ Arena* fetchNextFreeArena(JSRuntime* rt);
+};
+
+// Pin the chunk layout to the offsets hardcoded in the public API.
+static_assert(sizeof(Chunk) == ChunkSize,
+ "Ensure the hardcoded chunk size definition actually matches the struct.");
+static_assert(js::gc::ChunkMarkBitmapOffset == offsetof(Chunk, bitmap),
+ "The hardcoded API bitmap offset must match the actual offset.");
+static_assert(js::gc::ChunkRuntimeOffset == offsetof(Chunk, trailer) +
+ offsetof(ChunkTrailer, runtime),
+ "The hardcoded API runtime offset must match the actual offset.");
+static_assert(js::gc::ChunkLocationOffset == offsetof(Chunk, trailer) +
+ offsetof(ChunkTrailer, location),
+ "The hardcoded API location offset must match the actual offset.");
+
+/*
+ * Tracks the used sizes for owned heap data and automatically maintains the
+ * memory usage relationship between GCRuntime and Zones.
+ */
+class HeapUsage
+{
+ /*
+ * A heap usage that contains our parent's heap usage, or null if this is
+ * the top-level usage container.
+ */
+ HeapUsage* parent_;
+
+ /*
+ * The approximate number of bytes in use on the GC heap, to the nearest
+ * ArenaSize. This does not include any malloc data. It also does not
+ * include not-actively-used addresses that are still reserved at the OS
+ * level for GC usage. It is atomic because it is updated by both the main
+ * and GC helper threads.
+ */
+ mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes_;
+
+ public:
+ explicit HeapUsage(HeapUsage* parent)
+ : parent_(parent),
+ gcBytes_(0)
+ {}
+
+ size_t gcBytes() const { return gcBytes_; }
+
+ // Additions and removals propagate up the parent chain so the top-level
+ // usage always reflects the sum of its children.
+ void addGCArena() {
+ gcBytes_ += ArenaSize;
+ if (parent_)
+ parent_->addGCArena();
+ }
+ void removeGCArena() {
+ MOZ_ASSERT(gcBytes_ >= ArenaSize);
+ gcBytes_ -= ArenaSize;
+ if (parent_)
+ parent_->removeGCArena();
+ }
+
+ /* Pair to adoptArenas. Adopts the attendant usage statistics. */
+ void adopt(HeapUsage& other) {
+ gcBytes_ += other.gcBytes_;
+ other.gcBytes_ = 0;
+ }
+};
+
+// Debug-only validation that this arena pointer is non-null, arena-aligned,
+// and inside its chunk's valid cell range.
+inline void
+Arena::checkAddress() const
+{
+ mozilla::DebugOnly<uintptr_t> addr = uintptr_t(this);
+ MOZ_ASSERT(addr);
+ MOZ_ASSERT(!(addr & ArenaMask));
+ MOZ_ASSERT(Chunk::withinValidRange(addr));
+}
+
+// The chunk owning this arena, recovered by chunk-aligning our address.
+inline Chunk*
+Arena::chunk() const
+{
+ return Chunk::fromAddress(address());
+}
+
+// Debug-only check that |color| indexes a mark bit within this thing's
+// footprint (colors are offsets measured in CellSize units).
+static void
+AssertValidColor(const TenuredCell* thing, uint32_t color)
+{
+#ifdef DEBUG
+ Arena* arena = thing->arena();
+ MOZ_ASSERT(color < arena->getThingSize() / CellSize);
+#endif
+}
+
+// Downcast to TenuredCell; asserts the cell is not in the nursery.
+MOZ_ALWAYS_INLINE const TenuredCell&
+Cell::asTenured() const
+{
+ MOZ_ASSERT(isTenured());
+ return *static_cast<const TenuredCell*>(this);
+}
+
+MOZ_ALWAYS_INLINE TenuredCell&
+Cell::asTenured()
+{
+ MOZ_ASSERT(isTenured());
+ return *static_cast<TenuredCell*>(this);
+}
+
+// The owning runtime, read from the chunk trailer. The MainThread variant
+// additionally asserts the calling thread may access the runtime.
+inline JSRuntime*
+Cell::runtimeFromMainThread() const
+{
+ JSRuntime* rt = chunk()->trailer.runtime;
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+ return rt;
+}
+
+inline JS::shadow::Runtime*
+Cell::shadowRuntimeFromMainThread() const
+{
+ return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread());
+}
+
+inline JSRuntime*
+Cell::runtimeFromAnyThread() const
+{
+ return chunk()->trailer.runtime;
+}
+
+inline JS::shadow::Runtime*
+Cell::shadowRuntimeFromAnyThread() const
+{
+ return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
+}
+
+// This cell's address, validated for alignment and chunk range.
+inline uintptr_t
+Cell::address() const
+{
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellSize == 0);
+ MOZ_ASSERT(Chunk::withinValidRange(addr));
+ return addr;
+}
+
+// The chunk containing this cell (works for nursery and tenured cells).
+Chunk*
+Cell::chunk() const
+{
+ uintptr_t addr = uintptr_t(this);
+ MOZ_ASSERT(addr % CellSize == 0);
+ addr &= ~ChunkMask;
+ return reinterpret_cast<Chunk*>(addr);
+}
+
+// Non-null only for nursery cells; see ChunkTrailer.
+inline StoreBuffer*
+Cell::storeBuffer() const
+{
+ return chunk()->trailer.storeBuffer;
+}
+
+// Nursery cells are always objects, so only tenured cells need a lookup.
+inline JS::TraceKind
+Cell::getTraceKind() const
+{
+ return isTenured() ? asTenured().getTraceKind() : JS::TraceKind::Object;
+}
+
+// Whether |thing| is on |arena|'s free list; asserts it is cell-aligned.
+inline bool
+InFreeList(Arena* arena, void* thing)
+{
+ uintptr_t addr = reinterpret_cast<uintptr_t>(thing);
+ MOZ_ASSERT(Arena::isAligned(addr, arena->getThingSize()));
+ return arena->inFreeList(addr);
+}
+
+// Pre-barriers are needed only while the zone's incremental barrier is on.
+/* static */ MOZ_ALWAYS_INLINE bool
+Cell::needWriteBarrierPre(JS::Zone* zone) {
+ return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
+}
+
+// Checked casts from an untyped pointer; assert the cell is tenured.
+/* static */ MOZ_ALWAYS_INLINE TenuredCell*
+TenuredCell::fromPointer(void* ptr)
+{
+ MOZ_ASSERT(static_cast<TenuredCell*>(ptr)->isTenured());
+ return static_cast<TenuredCell*>(ptr);
+}
+
+/* static */ MOZ_ALWAYS_INLINE const TenuredCell*
+TenuredCell::fromPointer(const void* ptr)
+{
+ MOZ_ASSERT(static_cast<const TenuredCell*>(ptr)->isTenured());
+ return static_cast<const TenuredCell*>(ptr);
+}
+
+// Mark-bit queries and updates; all delegate to the chunk's mark bitmap.
+bool
+TenuredCell::isMarked(uint32_t color /* = BLACK */) const
+{
+ MOZ_ASSERT(arena()->allocated());
+ AssertValidColor(this, color);
+ return chunk()->bitmap.isMarked(this, color);
+}
+
+// Returns true if the cell went from unmarked to marked.
+bool
+TenuredCell::markIfUnmarked(uint32_t color /* = BLACK */) const
+{
+ AssertValidColor(this, color);
+ return chunk()->bitmap.markIfUnmarked(this, color);
+}
+
+// Only non-BLACK bits may be cleared through this entry point.
+void
+TenuredCell::unmark(uint32_t color) const
+{
+ MOZ_ASSERT(color != BLACK);
+ AssertValidColor(this, color);
+ chunk()->bitmap.unmark(this, color);
+}
+
+// Copy both the black and gray mark bits from |src| onto this cell.
+void
+TenuredCell::copyMarkBitsFrom(const TenuredCell* src)
+{
+ ChunkBitmap& bitmap = chunk()->bitmap;
+ bitmap.copyMarkBit(this, src, BLACK);
+ bitmap.copyMarkBit(this, src, GRAY);
+}
+
+// The arena holding this cell, recovered by arena-aligning our address.
+inline Arena*
+TenuredCell::arena() const
+{
+ MOZ_ASSERT(isTenured());
+ uintptr_t addr = address();
+ addr &= ~ArenaMask;
+ return reinterpret_cast<Arena*>(addr);
+}
+
+// Kind/zone accessors; all derive from the owning arena's metadata.
+AllocKind
+TenuredCell::getAllocKind() const
+{
+ return arena()->getAllocKind();
+}
+
+JS::TraceKind
+TenuredCell::getTraceKind() const
+{
+ return MapAllocToTraceKind(getAllocKind());
+}
+
+// The zone, with a debug check that this thread may access it; use
+// zoneFromAnyThread to skip the check.
+JS::Zone*
+TenuredCell::zone() const
+{
+ JS::Zone* zone = arena()->zone;
+ MOZ_ASSERT(CurrentThreadCanAccessZone(zone));
+ return zone;
+}
+
+JS::Zone*
+TenuredCell::zoneFromAnyThread() const
+{
+ return arena()->zone;
+}
+
+bool
+TenuredCell::isInsideZone(JS::Zone* zone) const
+{
+ return zone == arena()->zone;
+}
+
+// Read barrier: while an incremental barrier is active, reading |thing|
+// traces it so the collector sees it; additionally, reading a gray thing
+// outside of collection unmarks gray transitively.
+/* static */ MOZ_ALWAYS_INLINE void
+TenuredCell::readBarrier(TenuredCell* thing)
+{
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT(!isNullLike(thing));
+
+ // It would be good if barriers were never triggered during collection, but
+ // at the moment this can happen e.g. when rekeying tables containing
+ // read-barriered GC things after a moving GC.
+ //
+ // TODO: Fix this and assert we're not collecting if we're on the main
+ // thread.
+
+ JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
+ if (shadowZone->needsIncrementalBarrier()) {
+ // Barriers are only enabled on the main thread and are disabled while collecting.
+ MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
+ Cell* tmp = thing;
+ TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
+ MOZ_ASSERT(tmp == thing);
+ }
+
+ if (thing->isMarked(GRAY)) {
+ // There shouldn't be anything marked grey unless we're on the main thread.
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
+ if (!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone))
+ UnmarkGrayCellRecursively(thing, thing->getTraceKind());
+ }
+}
+
+// Debug helper used by writeBarrierPre (defined out of line).
+void
+AssertSafeToSkipBarrier(TenuredCell* thing);
+
+// Pre-write barrier: before overwriting a reference to |thing|, trace it so
+// an in-progress incremental mark cannot lose it. No-op when |thing| is null
+// or the zone's incremental barrier is off.
+/* static */ MOZ_ALWAYS_INLINE void
+TenuredCell::writeBarrierPre(TenuredCell* thing)
+{
+ MOZ_ASSERT(!CurrentThreadIsIonCompiling());
+ MOZ_ASSERT_IF(thing, !isNullLike(thing));
+ if (!thing)
+ return;
+
+#ifdef JS_GC_ZEAL
+ // When verifying pre barriers we need to switch on all barriers, even
+ // those on the Atoms Zone. Normally, we never enter a parse task when
+ // collecting in the atoms zone, so will filter out atoms below.
+ // Unfortunately, if we try that when verifying pre-barriers, we'd never be
+ // able to handle OMT parse tasks at all as we switch on the verifier any
+ // time we're not doing GC. This would cause us to deadlock, as OMT parsing
+ // is meant to resume after GC work completes. Instead we filter out any
+ // OMT barriers that reach us and assert that they would normally not be
+ // possible.
+ if (!CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread())) {
+ AssertSafeToSkipBarrier(thing);
+ return;
+ }
+#endif
+
+ JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
+ if (shadowZone->needsIncrementalBarrier()) {
+ MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
+ Cell* tmp = thing;
+ TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "pre barrier");
+ MOZ_ASSERT(tmp == thing);
+ }
+}
+
+// Debug check that a tenured cell genuinely needs no post-barrier: it is not
+// in the nursery and is not an object (only objects can move out of it).
+static MOZ_ALWAYS_INLINE void
+AssertValidToSkipBarrier(TenuredCell* thing)
+{
+ MOZ_ASSERT(!IsInsideNursery(thing));
+ MOZ_ASSERT_IF(thing, MapAllocToTraceKind(thing->getAllocKind()) != JS::TraceKind::Object);
+}
+
+// Tenured cells never need a post-write barrier; assert-only in debug builds.
+/* static */ MOZ_ALWAYS_INLINE void
+TenuredCell::writeBarrierPost(void* cellp, TenuredCell* prior, TenuredCell* next)
+{
+ AssertValidToSkipBarrier(next);
+}
+
+#ifdef DEBUG
+// Nursery cells have no per-kind alignment to check; tenured cells must sit
+// on a cell boundary for their arena's thing size.
+bool
+Cell::isAligned() const
+{
+ if (!isTenured())
+ return true;
+ return asTenured().isAligned();
+}
+
+bool
+TenuredCell::isAligned() const
+{
+ return Arena::isAligned(address(), arena()->getThingSize());
+}
+#endif
+
+// Signed offset from a chunk's last byte to its location field, used by code
+// that only has a pointer into the chunk.
+static const int32_t ChunkLocationOffsetFromLastByte =
+ int32_t(gc::ChunkLocationOffset) - int32_t(gc::ChunkMask);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_Heap_h */
diff --git a/js/src/gc/Iteration.cpp b/js/src/gc/Iteration.cpp
new file mode 100644
index 000000000..3ebd5d290
--- /dev/null
+++ b/js/src/gc/Iteration.cpp
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/DebugOnly.h"
+
+#include "jscompartment.h"
+#include "jsgc.h"
+
+#include "gc/GCInternals.h"
+#include "js/HashTable.h"
+#include "vm/Runtime.h"
+
+#include "jscntxtinlines.h"
+#include "jsgcinlines.h"
+
+using namespace js;
+using namespace js::gc;
+
+// Invoke the compartment callback for each compartment in |zone|, then the
+// arena and cell callbacks for every arena and cell of every alloc kind.
+// Assumes the caller has already prepared for tracing (see callers below).
+static void
+IterateCompartmentsArenasCells(JSContext* cx, Zone* zone, void* data,
+ JSIterateCompartmentCallback compartmentCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback)
+{
+ for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next())
+ (*compartmentCallback)(cx, data, comp);
+
+ for (auto thingKind : AllAllocKinds()) {
+ JS::TraceKind traceKind = MapAllocToTraceKind(thingKind);
+ size_t thingSize = Arena::thingSize(thingKind);
+
+ for (ArenaIter aiter(zone, thingKind); !aiter.done(); aiter.next()) {
+ Arena* arena = aiter.get();
+ (*arenaCallback)(cx, data, arena, traceKind, thingSize);
+ for (ArenaCellIter iter(arena); !iter.done(); iter.next())
+ (*cellCallback)(cx, data, iter.getCell(), traceKind, thingSize);
+ }
+ }
+}
+
+// Iterate every zone (including the atoms zone), invoking the zone callback
+// and then walking its compartments, arenas, and cells.
+void
+js::IterateZonesCompartmentsArenasCells(JSContext* cx, void* data,
+ IterateZoneCallback zoneCallback,
+ JSIterateCompartmentCallback compartmentCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback)
+{
+ AutoPrepareForTracing prop(cx, WithAtoms);
+
+ for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next()) {
+ (*zoneCallback)(cx, data, zone);
+ IterateCompartmentsArenasCells(cx, zone, data,
+ compartmentCallback, arenaCallback, cellCallback);
+ }
+}
+
+// Single-zone variant of the above.
+void
+js::IterateZoneCompartmentsArenasCells(JSContext* cx, Zone* zone, void* data,
+ IterateZoneCallback zoneCallback,
+ JSIterateCompartmentCallback compartmentCallback,
+ IterateArenaCallback arenaCallback,
+ IterateCellCallback cellCallback)
+{
+ AutoPrepareForTracing prop(cx, WithAtoms);
+
+ (*zoneCallback)(cx, data, zone);
+ IterateCompartmentsArenasCells(cx, zone, data,
+ compartmentCallback, arenaCallback, cellCallback);
+}
+
+// Invoke |chunkCallback| on every non-empty GC chunk.
+void
+js::IterateChunks(JSContext* cx, void* data, IterateChunkCallback chunkCallback)
+{
+ AutoPrepareForTracing prep(cx, SkipAtoms);
+
+ for (auto chunk = cx->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
+ chunkCallback(cx, data, chunk);
+}
+
+// Invoke |scriptCallback| on every script in |compartment|, or in all
+// zones when |compartment| is null. Empties the nursery first so cell
+// iteration is valid.
+void
+js::IterateScripts(JSContext* cx, JSCompartment* compartment,
+ void* data, IterateScriptCallback scriptCallback)
+{
+ MOZ_ASSERT(!cx->mainThread().suppressGC);
+ AutoEmptyNursery empty(cx);
+ AutoPrepareForTracing prep(cx, SkipAtoms);
+
+ if (compartment) {
+ Zone* zone = compartment->zone();
+ for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next()) {
+ if (script->compartment() == compartment)
+ scriptCallback(cx, data, script);
+ }
+ } else {
+ for (ZonesIter zone(cx, SkipAtoms); !zone.done(); zone.next()) {
+ for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next())
+ scriptCallback(cx, data, script);
+ }
+ }
+}
+
+// Shared worker: invoke |cellCallback| on every gray-marked object in |zone|.
+static void
+IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
+{
+ for (auto kind : ObjectAllocKinds()) {
+ for (GrayObjectIter obj(zone, kind); !obj.done(); obj.next()) {
+ if (obj->asTenured().isMarked(GRAY))
+ cellCallback(data, JS::GCCellPtr(obj.get()));
+ }
+ }
+}
+
+// Public entry point; prepares for tracing first and requires the heap to
+// not be busy.
+void
+js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
+{
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ MOZ_ASSERT(!rt->isHeapBusy());
+ AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
+ ::IterateGrayObjects(zone, cellCallback, data);
+}
+
+// Variant callable during cycle collection: skips tracing preparation but
+// asserts no incremental GC is in progress.
+void
+js::IterateGrayObjectsUnderCC(Zone* zone, GCThingCallback cellCallback, void* data)
+{
+ mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
+ MOZ_ASSERT(rt->isCycleCollecting());
+ MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
+ ::IterateGrayObjects(zone, cellCallback, data);
+}
+
+// Public API: invoke |compartmentCallback| on every compartment (including
+// the atoms compartment) under a trace session.
+JS_PUBLIC_API(void)
+JS_IterateCompartments(JSContext* cx, void* data,
+ JSIterateCompartmentCallback compartmentCallback)
+{
+ AutoTraceSession session(cx);
+
+ for (CompartmentsIter c(cx, WithAtoms); !c.done(); c.next())
+ (*compartmentCallback)(cx, data, c);
+}
diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp
new file mode 100644
index 000000000..d9235f9ac
--- /dev/null
+++ b/js/src/gc/Marking.cpp
@@ -0,0 +1,3019 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Marking.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jsgc.h"
+#include "jsprf.h"
+
+#include "builtin/ModuleObject.h"
+#include "gc/GCInternals.h"
+#include "gc/Policy.h"
+#include "jit/IonCode.h"
+#include "js/SliceBudget.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/ArrayObject.h"
+#include "vm/Debugger.h"
+#include "vm/EnvironmentObject.h"
+#include "vm/Scope.h"
+#include "vm/Shape.h"
+#include "vm/Symbol.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/UnboxedObject.h"
+#include "wasm/WasmJS.h"
+
+#include "jscompartmentinlines.h"
+#include "jsgcinlines.h"
+#include "jsobjinlines.h"
+
+#include "gc/Nursery-inl.h"
+#include "vm/String-inl.h"
+#include "vm/UnboxedObject-inl.h"
+
+using namespace js;
+using namespace js::gc;
+
+using JS::MapTypeToTraceKind;
+
+using mozilla::ArrayLength;
+using mozilla::DebugOnly;
+using mozilla::IsBaseOf;
+using mozilla::IsSame;
+using mozilla::MakeRange;
+using mozilla::PodCopy;
+
+// Tracing Overview
+// ================
+//
+// Tracing, in this context, refers to an abstract visitation of some or all of
+// the GC-controlled heap. The effect of tracing an edge of the graph depends
+// on the subclass of the JSTracer on whose behalf we are tracing.
+//
+// Marking
+// -------
+//
+// The primary JSTracer is the GCMarker. The marking tracer causes the target
+// of each traversed edge to be marked black and the target edge's children to
+// be marked either gray (in the gc algorithm sense) or immediately black.
+//
+// Callback
+// --------
+//
+// The secondary JSTracer is the CallbackTracer. This simply invokes a callback
+// on each edge in a child.
+//
+// The following is a rough outline of the general structure of the tracing
+// internals.
+//
+// //
+// .---------. .---------. .--------------------------. .----------. //
+// |TraceEdge| |TraceRoot| |TraceManuallyBarrieredEdge| ... |TraceRange| ... etc. //
+// '---------' '---------' '--------------------------' '----------' //
+// \ \ / / //
+// \ \ .----------------. / / //
+// o------------->o-|DispatchToTracer|-o<-----------------------o //
+// '----------------' //
+// / \ //
+// / \ //
+// .---------. .----------. .-----------------. //
+// |DoMarking| |DoCallback|-------> |<JSTraceCallback>|-----------> //
+// '---------' '----------' '-----------------' //
+// | //
+// | //
+// .--------. //
+// o---------------->|traverse| . //
+// /_\ '--------' ' . //
+// | . . ' . //
+// | . . ' . //
+// | . . ' . //
+// | .-----------. .-----------. ' . .--------------------. //
+// | |markAndScan| |markAndPush| ' - |markAndTraceChildren|----> //
+// | '-----------' '-----------' '--------------------' //
+// | | \ //
+// | | \ //
+// | .----------------------. .----------------. //
+// | |T::eagerlyMarkChildren| |pushMarkStackTop|<===Oo //
+// | '----------------------' '----------------' || //
+// | | || || //
+// | | || || //
+// | | || || //
+// o<-----------------o<========================OO============Oo //
+// //
+// //
+// Legend: //
+// ------ Direct calls //
+// . . . Static dispatch //
+// ====== Dispatch through a manual stack. //
+// //
+
+
+/*** Tracing Invariants **************************************************************************/
+
+#if defined(DEBUG)
+// Return whether the memory at |thing| matches one of the known GC poison
+// patterns, i.e. the cell looks freed/uninitialized rather than live. Reads
+// the first word past the FreeSpan header that overlays free cells.
+template<typename T>
+static inline bool
+IsThingPoisoned(T* thing)
+{
+    const uint8_t poisonBytes[] = {
+        JS_FRESH_NURSERY_PATTERN,
+        JS_SWEPT_NURSERY_PATTERN,
+        JS_ALLOCATED_NURSERY_PATTERN,
+        JS_FRESH_TENURED_PATTERN,
+        JS_MOVED_TENURED_PATTERN,
+        JS_SWEPT_TENURED_PATTERN,
+        JS_ALLOCATED_TENURED_PATTERN,
+        JS_SWEPT_CODE_PATTERN
+    };
+    const int numPoisonBytes = sizeof(poisonBytes) / sizeof(poisonBytes[0]);
+    uint32_t* p = reinterpret_cast<uint32_t*>(reinterpret_cast<FreeSpan*>(thing) + 1);
+    // Note: all free patterns are odd to make the common, not-poisoned case a single test.
+    if ((*p & 1) == 0)
+        return false;
+    for (int i = 0; i < numPoisonBytes; ++i) {
+        const uint8_t pb = poisonBytes[i];
+        // Expand the poison byte into a full 32-bit word for comparison.
+        const uint32_t pw = pb | (pb << 8) | (pb << 16) | (pb << 24);
+        if (*p == pw)
+            return true;
+    }
+    return false;
+}
+
+// True when |trc| is the callback tracer with TracerKind::Moving; several
+// assertions in CheckTracedThing are relaxed for this tracer.
+static bool
+IsMovingTracer(JSTracer *trc)
+{
+    return trc->isCallbackTracer() &&
+           trc->asCallbackTracer()->getTracerKind() == JS::CallbackTracer::TracerKind::Moving;
+}
+#endif
+
+// Whether |thing| is a permanent atom or well-known symbol: such things are
+// shared across runtimes and are never collected, so marking skips them.
+// The generic case is false; string/symbol types consult their own flags.
+template <typename T> bool ThingIsPermanentAtomOrWellKnownSymbol(T* thing) { return false; }
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<JSString>(JSString* str) {
+    return str->isPermanentAtom();
+}
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<JSFlatString>(JSFlatString* str) {
+    return str->isPermanentAtom();
+}
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<JSLinearString>(JSLinearString* str) {
+    return str->isPermanentAtom();
+}
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<JSAtom>(JSAtom* atom) {
+    return atom->isPermanent();
+}
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<PropertyName>(PropertyName* name) {
+    return name->isPermanent();
+}
+template <> bool ThingIsPermanentAtomOrWellKnownSymbol<JS::Symbol>(JS::Symbol* sym) {
+    return sym->isWellKnownSymbol();
+}
+
+// Whether |thing| belongs to a runtime other than |rt|. Asserts that such
+// cross-runtime things are only permanent atoms/well-known symbols or
+// members of the self-hosting zone.
+template <typename T>
+static inline bool
+IsOwnedByOtherRuntime(JSRuntime* rt, T thing)
+{
+    bool other = thing->runtimeFromAnyThread() != rt;
+    MOZ_ASSERT_IF(other,
+                  ThingIsPermanentAtomOrWellKnownSymbol(thing) ||
+                  thing->zoneFromAnyThread()->isSelfHostingZone());
+    return other;
+}
+
+// Debug-only sanity checks applied to every traced thing: alignment, trace
+// kind, zone/runtime consistency, marking-state invariants, and a best-effort
+// check that the thing is not a freed (poisoned) cell. Compiles to nothing in
+// release builds.
+template<typename T>
+void
+js::CheckTracedThing(JSTracer* trc, T* thing)
+{
+#ifdef DEBUG
+    MOZ_ASSERT(trc);
+    MOZ_ASSERT(thing);
+
+    // Some tracers (e.g. ones that may see dying things) opt out of checks.
+    if (!trc->checkEdges())
+        return;
+
+    // Follow any forwarding pointer so we check the relocated cell.
+    if (IsForwarded(thing))
+        thing = Forwarded(thing);
+
+    /* This function uses data that's not available in the nursery. */
+    if (IsInsideNursery(thing))
+        return;
+
+    // Only the moving and tenuring tracers may legitimately see forwarded things.
+    MOZ_ASSERT_IF(!IsMovingTracer(trc) && !trc->isTenuringTracer(), !IsForwarded(thing));
+
+    /*
+     * Permanent atoms and things in the self-hosting zone are not associated
+     * with this runtime, but will be ignored during marking.
+     */
+    if (IsOwnedByOtherRuntime(trc->runtime(), thing))
+        return;
+
+    Zone* zone = thing->zoneFromAnyThread();
+    JSRuntime* rt = trc->runtime();
+
+    MOZ_ASSERT_IF(!IsMovingTracer(trc), CurrentThreadCanAccessZone(zone));
+    MOZ_ASSERT_IF(!IsMovingTracer(trc), CurrentThreadCanAccessRuntime(rt));
+
+    MOZ_ASSERT(zone->runtimeFromAnyThread() == trc->runtime());
+
+    MOZ_ASSERT(thing->isAligned());
+    // The static trace kind of T must agree with the cell's dynamic kind.
+    MOZ_ASSERT(MapTypeToTraceKind<typename mozilla::RemovePointer<T>::Type>::kind ==
+               thing->getTraceKind());
+
+    /*
+     * Do not check IsMarkingTracer directly -- it should only be used in paths
+     * where we cannot be the gray buffering tracer.
+     */
+    bool isGcMarkingTracer = trc->isMarkingTracer();
+
+    MOZ_ASSERT_IF(zone->requireGCTracer(), isGcMarkingTracer || IsBufferGrayRootsTracer(trc));
+
+    if (isGcMarkingTracer) {
+        GCMarker* gcMarker = static_cast<GCMarker*>(trc);
+        MOZ_ASSERT_IF(gcMarker->shouldCheckCompartments(),
+                      zone->isCollecting() || zone->isAtomsZone());
+
+        // Gray marking must not visit zones still being marked black.
+        MOZ_ASSERT_IF(gcMarker->markColor() == GRAY,
+                      !zone->isGCMarkingBlack() || zone->isAtomsZone());
+
+        MOZ_ASSERT(!(zone->isGCSweeping() || zone->isGCFinished() || zone->isGCCompacting()));
+    }
+
+    /*
+     * Try to assert that the thing is allocated.
+     *
+     * We would like to assert that the thing is not in the free list, but this
+     * check is very slow. Instead we check whether the thing has been poisoned:
+     * if it has not then we assume it is allocated, but if it has then it is
+     * either free or uninitialized in which case we check the free list.
+     *
+     * Further complications are that background sweeping may be running and
+     * concurrently modifying the free list and that tracing is done off main
+     * thread during compacting GC and reading the contents of the thing by
+     * IsThingPoisoned would be racy in this case.
+     */
+    MOZ_ASSERT_IF(rt->isHeapBusy() && !zone->isGCCompacting() && !rt->gc.isBackgroundSweeping(),
+                  !IsThingPoisoned(thing) || !InFreeList(thing->asTenured().arena(), thing));
+#endif
+}
+
+// Adaptor so CheckTracedThing can be applied to tagged types (Value, jsid,
+// TaggedProto) via DispatchTyped: unbox and check each contained GC thing.
+template <typename S>
+struct CheckTracedFunctor : public VoidDefaultAdaptor<S> {
+    template <typename T> void operator()(T* t, JSTracer* trc) { CheckTracedThing(trc, t); }
+};
+
+// Tagged-value overload of CheckTracedThing.
+template<typename T>
+void
+js::CheckTracedThing(JSTracer* trc, T thing)
+{
+    DispatchTyped(CheckTracedFunctor<T>(), thing, trc);
+}
+
+// Explicitly instantiate CheckTracedThing for every base trace-kind type.
+namespace js {
+#define IMPL_CHECK_TRACED_THING(_, type, __) \
+    template void CheckTracedThing<type>(JSTracer*, type*);
+JS_FOR_EACH_TRACEKIND(IMPL_CHECK_TRACED_THING);
+#undef IMPL_CHECK_TRACED_THING
+} // namespace js
+
+// Decide whether a cross-compartment edge from |src| to |cell| should be
+// traversed. Non-marking tracers always traverse. For the marking tracer the
+// answer depends on the current mark color and the target zone's GC state;
+// this is also where black->gray edges are detected and recorded.
+static bool
+ShouldMarkCrossCompartment(JSTracer* trc, JSObject* src, Cell* cell)
+{
+    if (!trc->isMarkingTracer())
+        return true;
+
+    uint32_t color = static_cast<GCMarker*>(trc)->markColor();
+    MOZ_ASSERT(color == BLACK || color == GRAY);
+
+    if (!cell->isTenured()) {
+        // Nursery things can only be reached during black marking.
+        MOZ_ASSERT(color == BLACK);
+        return false;
+    }
+    TenuredCell& tenured = cell->asTenured();
+
+    JS::Zone* zone = tenured.zone();
+    if (color == BLACK) {
+        /*
+         * Having black->gray edges violates our promise to the cycle
+         * collector. This can happen if we're collecting a compartment and it
+         * has an edge to an uncollected compartment: it's possible that the
+         * source and destination of the cross-compartment edge should be gray,
+         * but the source was marked black by the conservative scanner.
+         */
+        if (tenured.isMarked(GRAY)) {
+            MOZ_ASSERT(!zone->isCollecting());
+            trc->runtime()->gc.setFoundBlackGrayEdges(tenured);
+        }
+        return zone->isGCMarking();
+    } else {
+        if (zone->isGCMarkingBlack()) {
+            /*
+             * The destination compartment is not being marked gray now,
+             * but it will be later, so record the cell so it can be marked gray
+             * at the appropriate time.
+             */
+            if (!tenured.isMarked())
+                DelayCrossCompartmentGrayMarking(src);
+            return false;
+        }
+        return zone->isGCMarkingGray();
+    }
+}
+
+// Value overload: only markable (GC-thing) values can need marking at all.
+static bool
+ShouldMarkCrossCompartment(JSTracer* trc, JSObject* src, const Value& val)
+{
+    return val.isMarkable() && ShouldMarkCrossCompartment(trc, src, (Cell*)val.toGCThing());
+}
+
+// Assert that the zone owning |thing| is currently being marked. Strings and
+// symbols may also live in the atoms zone, which is allowed regardless.
+static void
+AssertZoneIsMarking(Cell* thing)
+{
+    MOZ_ASSERT(TenuredCell::fromPointer(thing)->zone()->isGCMarking());
+}
+
+static void
+AssertZoneIsMarking(JSString* str)
+{
+#ifdef DEBUG
+    Zone* zone = TenuredCell::fromPointer(str)->zone();
+    MOZ_ASSERT(zone->isGCMarking() || zone->isAtomsZone());
+#endif
+}
+
+static void
+AssertZoneIsMarking(JS::Symbol* sym)
+{
+#ifdef DEBUG
+    Zone* zone = TenuredCell::fromPointer(sym)->zone();
+    MOZ_ASSERT(zone->isGCMarking() || zone->isAtomsZone());
+#endif
+}
+
+// Root tracing by the marking tracer may only happen before marking proper
+// has started (NotActive or MarkRoots state).
+static void
+AssertRootMarkingPhase(JSTracer* trc)
+{
+    MOZ_ASSERT_IF(trc->isMarkingTracer(),
+                  trc->runtime()->gc.state() == State::NotActive ||
+                  trc->runtime()->gc.state() == State::MarkRoots);
+}
+
+
+/*** Tracing Interface ***************************************************************************/
+
+// The second parameter to BaseGCType is derived automatically based on T. The
+// relation here is that for any T, the TraceKind will automatically,
+// statically select the correct Cell layout for marking. Below, we instantiate
+// each override with a declaration of the most derived layout type.
+//
+// The use of TraceKind::Null for the case where the type is not matched
+// generates a compile error as no template instantiated for that kind.
+//
+// Usage:
+// BaseGCType<T>::type
+//
+// Examples:
+// BaseGCType<JSFunction>::type => JSObject
+// BaseGCType<UnownedBaseShape>::type => BaseShape
+// etc.
+// See the comment block above: the default template argument computes the
+// TraceKind for T via a chain of conditional expressions expanded from
+// JS_FOR_EACH_TRACEKIND, and the specializations map that kind back to the
+// most-derived base cell layout type.
+template <typename T, JS::TraceKind =
+#define EXPAND_MATCH_TYPE(name, type, _) \
+    IsBaseOf<type, T>::value ? JS::TraceKind::name :
+JS_FOR_EACH_TRACEKIND(EXPAND_MATCH_TYPE)
+#undef EXPAND_MATCH_TYPE
+    JS::TraceKind::Null>
+
+struct BaseGCType;
+#define IMPL_BASE_GC_TYPE(name, type_, _) \
+    template <typename T> struct BaseGCType<T, JS::TraceKind:: name> { typedef type_ type; };
+JS_FOR_EACH_TRACEKIND(IMPL_BASE_GC_TYPE);
+#undef IMPL_BASE_GC_TYPE
+
+// Our barrier templates are parameterized on the pointer types so that we can
+// share the definitions with Value and jsid. Thus, we need to strip the
+// pointer before sending the type to BaseGCType and re-add it on the other
+// side. As such:
+template <typename T> struct PtrBaseGCType { typedef T type; };
+template <typename T> struct PtrBaseGCType<T*> { typedef typename BaseGCType<T>::type* type; };
+
+// Reinterpret a location holding a derived type (e.g. JSFunction*) as a
+// location holding its base cell layout type (e.g. JSObject*).
+template <typename T>
+typename PtrBaseGCType<T>::type*
+ConvertToBase(T* thingp)
+{
+    return reinterpret_cast<typename PtrBaseGCType<T>::type*>(thingp);
+}
+
+// Forward declarations of the internal dispatch/marking helpers defined below.
+template <typename T> void DispatchToTracer(JSTracer* trc, T* thingp, const char* name);
+template <typename T> T DoCallback(JS::CallbackTracer* trc, T* thingp, const char* name);
+template <typename T> void DoMarking(GCMarker* gcmarker, T* thing);
+template <typename T> void DoMarking(GCMarker* gcmarker, const T& thing);
+template <typename T> void NoteWeakEdge(GCMarker* gcmarker, T** thingp);
+template <typename T> void NoteWeakEdge(GCMarker* gcmarker, T* thingp);
+
+// Public edge-tracing entry points. Each strips the barrier wrapper (and the
+// derived pointer type, via ConvertToBase) and forwards to DispatchToTracer.
+
+// Trace a strongly-held, write-barriered edge.
+template <typename T>
+void
+js::TraceEdge(JSTracer* trc, WriteBarrieredBase<T>* thingp, const char* name)
+{
+    DispatchToTracer(trc, ConvertToBase(thingp->unsafeUnbarrieredForTracing()), name);
+}
+
+// Trace a read-barriered edge.
+template <typename T>
+void
+js::TraceEdge(JSTracer* trc, ReadBarriered<T>* thingp, const char* name)
+{
+    DispatchToTracer(trc, ConvertToBase(thingp->unsafeGet()), name);
+}
+
+// As TraceEdge, but tolerates a null/non-markable edge by skipping it.
+template <typename T>
+void
+js::TraceNullableEdge(JSTracer* trc, WriteBarrieredBase<T>* thingp, const char* name)
+{
+    if (InternalBarrierMethods<T>::isMarkable(thingp->get()))
+        DispatchToTracer(trc, ConvertToBase(thingp->unsafeUnbarrieredForTracing()), name);
+}
+
+// Public API: trace a JS::Heap<T> edge if it holds a markable thing.
+template <typename T>
+JS_PUBLIC_API(void)
+JS::TraceEdge(JSTracer* trc, JS::Heap<T>* thingp, const char* name)
+{
+    MOZ_ASSERT(thingp);
+    if (InternalBarrierMethods<T>::isMarkable(*thingp->unsafeGet()))
+        DispatchToTracer(trc, ConvertToBase(thingp->unsafeGet()), name);
+}
+
+// Public API: trace a TenuredHeap edge; the pointer is copied out, traced
+// (and possibly updated by a moving tracer), then written back.
+JS_PUBLIC_API(void)
+JS::TraceEdge(JSTracer* trc, JS::TenuredHeap<JSObject*>* thingp, const char* name)
+{
+    MOZ_ASSERT(thingp);
+    if (JSObject* ptr = thingp->unbarrieredGetPtr()) {
+        DispatchToTracer(trc, &ptr, name);
+        thingp->setPtr(ptr);
+    }
+}
+
+// Trace an edge stored without a barrier wrapper; the caller takes
+// responsibility for any required write barriers.
+template <typename T>
+void
+js::TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp, const char* name)
+{
+    DispatchToTracer(trc, ConvertToBase(thingp), name);
+}
+
+template <typename T>
+JS_PUBLIC_API(void)
+js::UnsafeTraceManuallyBarrieredEdge(JSTracer* trc, T* thingp, const char* name)
+{
+    DispatchToTracer(trc, ConvertToBase(thingp), name);
+}
+
+// Trace a weak edge: the marking tracer merely records it (to be swept if the
+// target dies); every other tracer treats it as a strong edge.
+template <typename T>
+void
+js::TraceWeakEdge(JSTracer* trc, WeakRef<T>* thingp, const char* name)
+{
+    // Non-marking tracers treat the edge strongly.
+    if (!trc->isMarkingTracer())
+        return DispatchToTracer(trc, ConvertToBase(thingp->unsafeUnbarrieredForTracing()), name);
+
+    NoteWeakEdge(static_cast<GCMarker*>(trc),
+                 ConvertToBase(thingp->unsafeUnbarrieredForTracing()));
+}
+
+// Root-tracing entry points. These additionally assert that the marking
+// tracer is still in the root-marking phase.
+
+template <typename T>
+void
+js::TraceRoot(JSTracer* trc, T* thingp, const char* name)
+{
+    AssertRootMarkingPhase(trc);
+    DispatchToTracer(trc, ConvertToBase(thingp), name);
+}
+
+template <typename T>
+void
+js::TraceRoot(JSTracer* trc, ReadBarriered<T>* thingp, const char* name)
+{
+    TraceRoot(trc, thingp->unsafeGet(), name);
+}
+
+// As TraceRoot, but tolerates a null/non-markable root by skipping it.
+template <typename T>
+void
+js::TraceNullableRoot(JSTracer* trc, T* thingp, const char* name)
+{
+    AssertRootMarkingPhase(trc);
+    if (InternalBarrierMethods<T>::isMarkableTaggedPointer(*thingp))
+        DispatchToTracer(trc, ConvertToBase(thingp), name);
+}
+
+template <typename T>
+void
+js::TraceNullableRoot(JSTracer* trc, ReadBarriered<T>* thingp, const char* name)
+{
+    TraceNullableRoot(trc, thingp->unsafeGet(), name);
+}
+
+// Public API wrapper around TraceNullableRoot.
+template <typename T>
+JS_PUBLIC_API(void)
+JS::UnsafeTraceRoot(JSTracer* trc, T* thingp, const char* name)
+{
+    MOZ_ASSERT(thingp);
+    js::TraceNullableRoot(trc, thingp, name);
+}
+
+// Trace |len| consecutive barriered edges, advancing the tracer's index so
+// callbacks can report which element of the range they are visiting.
+template <typename T>
+void
+js::TraceRange(JSTracer* trc, size_t len, WriteBarrieredBase<T>* vec, const char* name)
+{
+    JS::AutoTracingIndex index(trc);
+    for (auto i : MakeRange(len)) {
+        if (InternalBarrierMethods<T>::isMarkable(vec[i].get()))
+            DispatchToTracer(trc, ConvertToBase(vec[i].unsafeUnbarrieredForTracing()), name);
+        ++index;
+    }
+}
+
+// As TraceRange, but for unbarriered root arrays.
+template <typename T>
+void
+js::TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name)
+{
+    AssertRootMarkingPhase(trc);
+    JS::AutoTracingIndex index(trc);
+    for (auto i : MakeRange(len)) {
+        if (InternalBarrierMethods<T>::isMarkable(vec[i]))
+            DispatchToTracer(trc, ConvertToBase(&vec[i]), name);
+        ++index;
+    }
+}
+
+// Instantiate a copy of the Tracing templates for each derived type.
+#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(type) \
+    template void js::TraceEdge<type>(JSTracer*, WriteBarrieredBase<type>*, const char*); \
+    template void js::TraceEdge<type>(JSTracer*, ReadBarriered<type>*, const char*); \
+    template void js::TraceNullableEdge<type>(JSTracer*, WriteBarrieredBase<type>*, const char*); \
+    template void js::TraceManuallyBarrieredEdge<type>(JSTracer*, type*, const char*); \
+    template void js::TraceWeakEdge<type>(JSTracer*, WeakRef<type>*, const char*); \
+    template void js::TraceRoot<type>(JSTracer*, type*, const char*); \
+    template void js::TraceRoot<type>(JSTracer*, ReadBarriered<type>*, const char*); \
+    template void js::TraceNullableRoot<type>(JSTracer*, type*, const char*); \
+    template void js::TraceNullableRoot<type>(JSTracer*, ReadBarriered<type>*, const char*); \
+    template void js::TraceRange<type>(JSTracer*, size_t, WriteBarrieredBase<type>*, const char*); \
+    template void js::TraceRootRange<type>(JSTracer*, size_t, type*, const char*);
+FOR_EACH_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS)
+#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS
+
+// The public (JS_PUBLIC_API) tracing entry points are instantiated only for
+// the publicly exposed pointer types.
+#define INSTANTIATE_PUBLIC_TRACE_FUNCTIONS(type) \
+    template JS_PUBLIC_API(void) JS::TraceEdge<type>(JSTracer*, JS::Heap<type>*, const char*); \
+    template JS_PUBLIC_API(void) JS::UnsafeTraceRoot<type>(JSTracer*, type*, const char*); \
+    template JS_PUBLIC_API(void) js::UnsafeTraceManuallyBarrieredEdge<type>(JSTracer*, type*, \
+                                                                            const char*);
+FOR_EACH_PUBLIC_GC_POINTER_TYPE(INSTANTIATE_PUBLIC_TRACE_FUNCTIONS)
+FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(INSTANTIATE_PUBLIC_TRACE_FUNCTIONS)
+#undef INSTANTIATE_PUBLIC_TRACE_FUNCTIONS
+
+// Trace an edge that crosses compartments, consulting
+// ShouldMarkCrossCompartment to respect per-zone GC and gray-marking rules.
+template <typename T>
+void
+js::TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src, T* dst,
+                                               const char* name)
+{
+    if (ShouldMarkCrossCompartment(trc, src, *dst))
+        DispatchToTracer(trc, dst, name);
+}
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<JSObject*>(JSTracer*, JSObject*,
+                                                                        JSObject**, const char*);
+template void js::TraceManuallyBarrieredCrossCompartmentEdge<JSScript*>(JSTracer*, JSObject*,
+                                                                        JSScript**, const char*);
+
+// Barriered variant of the above.
+template <typename T>
+void
+js::TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src, WriteBarrieredBase<T>* dst,
+                              const char* name)
+{
+    if (ShouldMarkCrossCompartment(trc, src, dst->get()))
+        DispatchToTracer(trc, dst->unsafeUnbarrieredForTracing(), name);
+}
+template void js::TraceCrossCompartmentEdge<Value>(JSTracer*, JSObject*,
+                                                   WriteBarrieredBase<Value>*, const char*);
+
+// Trace a process-wide root (permanent atom or well-known symbol), which the
+// normal marking path deliberately skips.
+template <typename T>
+void
+js::TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name)
+{
+    AssertRootMarkingPhase(trc);
+    MOZ_ASSERT(ThingIsPermanentAtomOrWellKnownSymbol(thing));
+
+    // We have to mark permanent atoms and well-known symbols through a special
+    // method because the default DoMarking implementation automatically skips
+    // them. Fortunately, atoms (permanent and non) cannot refer to other GC
+    // things so they do not need to go through the mark stack and may simply
+    // be marked directly. Moreover, well-known symbols can refer only to
+    // permanent atoms, so likewise require no subsequent marking.
+    CheckTracedThing(trc, *ConvertToBase(&thing));
+    if (trc->isMarkingTracer())
+        thing->markIfUnmarked(gc::BLACK);
+    else
+        DoCallback(trc->asCallbackTracer(), ConvertToBase(&thing), name);
+}
+template void js::TraceProcessGlobalRoot<JSAtom>(JSTracer*, JSAtom*, const char*);
+template void js::TraceProcessGlobalRoot<JS::Symbol>(JSTracer*, JS::Symbol*, const char*);
+
+// A typed functor adaptor for TraceRoot.
+struct TraceRootFunctor {
+    template <typename T>
+    void operator()(JSTracer* trc, Cell** thingp, const char* name) {
+        TraceRoot(trc, reinterpret_cast<T**>(thingp), name);
+    }
+};
+
+// Trace a root held as an untyped Cell*: recover the trace kind dynamically
+// and dispatch to the typed TraceRoot. Null roots are skipped.
+void
+js::TraceGenericPointerRoot(JSTracer* trc, Cell** thingp, const char* name)
+{
+    MOZ_ASSERT(thingp);
+    if (!*thingp)
+        return;
+    TraceRootFunctor f;
+    DispatchTraceKindTyped(f, (*thingp)->getTraceKind(), trc, thingp, name);
+}
+
+// A typed functor adaptor for TraceManuallyBarrieredEdge.
+struct TraceManuallyBarrieredEdgeFunctor {
+    template <typename T>
+    void operator()(JSTracer* trc, Cell** thingp, const char* name) {
+        TraceManuallyBarrieredEdge(trc, reinterpret_cast<T**>(thingp), name);
+    }
+};
+
+// As TraceGenericPointerRoot, but for a non-root, manually barriered edge.
+void
+js::TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, Cell** thingp, const char* name)
+{
+    MOZ_ASSERT(thingp);
+    if (!*thingp)
+        return;
+    TraceManuallyBarrieredEdgeFunctor f;
+    DispatchTraceKindTyped(f, (*thingp)->getTraceKind(), trc, thingp, name);
+}
+
+// This method is responsible for dynamic dispatch to the real tracer
+// implementation. Consider replacing this choke point with virtual dispatch:
+// a sufficiently smart C++ compiler may be able to devirtualize some paths.
+template <typename T>
+void
+DispatchToTracer(JSTracer* trc, T* thingp, const char* name)
+{
+    // Statically reject any T that is not a base cell layout type or one of
+    // the tagged types (Value, jsid, TaggedProto).
+#define IS_SAME_TYPE_OR(name, type, _) mozilla::IsSame<type*, T>::value ||
+    static_assert(
+            JS_FOR_EACH_TRACEKIND(IS_SAME_TYPE_OR)
+            mozilla::IsSame<T, JS::Value>::value ||
+            mozilla::IsSame<T, jsid>::value ||
+            mozilla::IsSame<T, TaggedProto>::value,
+            "Only the base cell layout types are allowed into marking/tracing internals");
+#undef IS_SAME_TYPE_OR
+    // Route to the marking tracer, the tenuring (nursery-promotion) tracer,
+    // or the generic callback tracer, in that order.
+    if (trc->isMarkingTracer())
+        return DoMarking(static_cast<GCMarker*>(trc), *thingp);
+    if (trc->isTenuringTracer())
+        return static_cast<TenuringTracer*>(trc)->traverse(thingp);
+    MOZ_ASSERT(trc->isCallbackTracer());
+    DoCallback(trc->asCallbackTracer(), thingp, name);
+}
+
+
+/*** GC Marking Interface *************************************************************************/
+
+namespace js {
+
+// Implicit (ephemeron) edges: when a weakmap key is marked, the corresponding
+// values become reachable. ImplicitEdgeHolderType selects which types can act
+// as such keys; everything else maps to HasNoImplicitEdgesType and is a no-op.
+typedef bool HasNoImplicitEdgesType;
+
+template <typename T>
+struct ImplicitEdgeHolderType {
+    typedef HasNoImplicitEdgesType Type;
+};
+
+// For now, we only handle JSObject* and JSScript* keys, but the linear time
+// algorithm can be easily extended by adding in more types here, then making
+// GCMarker::traverse<T> call markPotentialEphemeronKey.
+template <>
+struct ImplicitEdgeHolderType<JSObject*> {
+    typedef JSObject* Type;
+};
+
+template <>
+struct ImplicitEdgeHolderType<JSScript*> {
+    typedef JSScript* Type;
+};
+
+// Mark every weakmap value whose entry is keyed by the just-marked
+// |markedCell|, by delegating to each weakmap's traceEntry.
+void
+GCMarker::markEphemeronValues(gc::Cell* markedCell, WeakEntryVector& values)
+{
+    size_t initialLen = values.length();
+    for (size_t i = 0; i < initialLen; i++)
+        values[i].weakmap->traceEntry(this, markedCell, values[i].key);
+
+    // The vector should not be appended to during iteration because the key is
+    // already marked, and even in cases where we have a multipart key, we
+    // should only be inserting entries for the unmarked portions.
+    MOZ_ASSERT(values.length() == initialLen);
+}
+
+// If weak marking is active, look up |markedThing| in its zone's gcWeakKeys
+// table and mark any ephemeron values it keys.
+template <typename T>
+void
+GCMarker::markImplicitEdgesHelper(T markedThing)
+{
+    if (!isWeakMarkingTracer())
+        return;
+
+    Zone* zone = gc::TenuredCell::fromPointer(markedThing)->zone();
+    MOZ_ASSERT(zone->isGCMarking());
+    MOZ_ASSERT(!zone->isGCSweeping());
+
+    auto p = zone->gcWeakKeys.get(JS::GCCellPtr(markedThing));
+    if (!p)
+        return;
+    WeakEntryVector& markables = p->value;
+
+    markEphemeronValues(markedThing, markables);
+    markables.clear(); // If key address is reused, it should do nothing
+}
+
+// Types that cannot be weakmap keys have no implicit edges to mark.
+template <>
+void
+GCMarker::markImplicitEdgesHelper(HasNoImplicitEdgesType)
+{
+}
+
+template <typename T>
+void
+GCMarker::markImplicitEdges(T* thing)
+{
+    markImplicitEdgesHelper<typename ImplicitEdgeHolderType<T*>::Type>(thing);
+}
+
+} // namespace js
+
+// Per-type preconditions deciding whether a thing should not be marked at
+// all: foreign-runtime things and things in zones not being collected.
+template <typename T>
+static inline bool
+MustSkipMarking(GCMarker* gcmarker, T thing)
+{
+    // Don't trace things that are owned by another runtime.
+    if (IsOwnedByOtherRuntime(gcmarker->runtime(), thing))
+        return true;
+
+    // Don't mark things outside a zone if we are in a per-zone GC.
+    return !thing->zone()->isGCMarking();
+}
+
+// JSObject* additionally skips nursery objects (see comment below).
+template <>
+bool
+MustSkipMarking<JSObject*>(GCMarker* gcmarker, JSObject* obj)
+{
+    // Don't trace things that are owned by another runtime.
+    if (IsOwnedByOtherRuntime(gcmarker->runtime(), obj))
+        return true;
+
+    // We may mark a Nursery thing outside the context of the
+    // MinorCollectionTracer because of a pre-barrier. The pre-barrier is not
+    // needed in this case because we perform a minor collection before each
+    // incremental slice.
+    if (IsInsideNursery(obj))
+        return true;
+
+    // Don't mark things outside a zone if we are in a per-zone GC. It is
+    // faster to check our own arena, which we can do since we know that
+    // the object is tenured.
+    return !TenuredCell::fromPointer(obj)->zone()->isGCMarking();
+}
+
+// Core marking step for a pointer: filter via MustSkipMarking, sanity-check,
+// traverse (mark + visit children), and keep the compartment alive.
+template <typename T>
+void
+DoMarking(GCMarker* gcmarker, T* thing)
+{
+    // Do per-type marking precondition checks.
+    if (MustSkipMarking(gcmarker, thing))
+        return;
+
+    CheckTracedThing(gcmarker, thing);
+    gcmarker->traverse(thing);
+
+    // Mark the compartment as live.
+    SetMaybeAliveFlag(thing);
+}
+
+// Adaptor so tagged types (Value, jsid, TaggedProto) can be unboxed and
+// routed to the pointer overload above.
+template <typename S>
+struct DoMarkingFunctor : public VoidDefaultAdaptor<S> {
+    template <typename T> void operator()(T* t, GCMarker* gcmarker) { DoMarking(gcmarker, t); }
+};
+
+template <typename T>
+void
+DoMarking(GCMarker* gcmarker, const T& thing)
+{
+    DispatchTyped(DoMarkingFunctor<T>(), thing, gcmarker);
+}
+
+// Record a weak edge with the marker instead of marking through it; edges to
+// already-marked targets need no record, since they cannot be swept.
+template <typename T>
+void
+NoteWeakEdge(GCMarker* gcmarker, T** thingp)
+{
+    // Do per-type marking precondition checks.
+    if (MustSkipMarking(gcmarker, *thingp))
+        return;
+
+    CheckTracedThing(gcmarker, *thingp);
+
+    // If the target is already marked, there's no need to store the edge.
+    if (IsMarkedUnbarriered(gcmarker->runtime(), thingp))
+        return;
+
+    gcmarker->noteWeakEdge(thingp);
+}
+
+// Tagged-pointer overload: deliberately unsupported.
+template <typename T>
+void
+NoteWeakEdge(GCMarker* gcmarker, T* thingp)
+{
+    MOZ_CRASH("the gc does not support tagged pointers as weak edges");
+}
+
+// Append the location of a weak edge to the target zone's gcWeakRefs list so
+// sweeping can null it out if the target dies.
+template <typename T>
+void
+js::GCMarker::noteWeakEdge(T* edge)
+{
+    static_assert(IsBaseOf<Cell, typename mozilla::RemovePointer<T>::Type>::value,
+                  "edge must point to a GC pointer");
+    MOZ_ASSERT((*edge)->isTenured());
+
+    // Note: we really want the *source* Zone here. The edge may start in a
+    // non-gc heap location, however, so we use the fact that cross-zone weak
+    // references are not allowed and use the *target's* zone.
+    JS::Zone::WeakEdges &weakRefs = (*edge)->asTenured().zone()->gcWeakRefs;
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    if (!weakRefs.append(reinterpret_cast<TenuredCell**>(edge)))
+        oomUnsafe.crash("Failed to record a weak edge for sweeping.");
+}
+
+// The simplest traversal calls out to the fully generic traceChildren function
+// to visit the child edges. In the absence of other traversal mechanisms, this
+// function will rapidly grow the stack past its bounds and crash the process.
+// Thus, this generic tracing should only be used in cases where subsequent
+// tracing will not recurse.
+template <typename T>
+void
+js::GCMarker::markAndTraceChildren(T* thing)
+{
+    // Permanent atoms/well-known symbols are never collected; skip them.
+    if (ThingIsPermanentAtomOrWellKnownSymbol(thing))
+        return;
+    // Only visit children the first time the thing is marked.
+    if (mark(thing))
+        thing->traceChildren(this);
+}
+namespace js {
+template <> void GCMarker::traverse(BaseShape* thing) { markAndTraceChildren(thing); }
+template <> void GCMarker::traverse(JS::Symbol* thing) { markAndTraceChildren(thing); }
+} // namespace js
+
+// Strings, LazyScripts, Shapes, and Scopes are extremely common, but have
+// simple patterns of recursion. We traverse trees of these edges immediately,
+// with aggressive, manual inlining, implemented by eagerlyTraceChildren.
+template <typename T>
+void
+js::GCMarker::markAndScan(T* thing)
+{
+    // Permanent atoms/well-known symbols are never collected; skip them.
+    if (ThingIsPermanentAtomOrWellKnownSymbol(thing))
+        return;
+    // Only scan children the first time the thing is marked.
+    if (mark(thing))
+        eagerlyMarkChildren(thing);
+}
+namespace js {
+template <> void GCMarker::traverse(JSString* thing) { markAndScan(thing); }
+template <> void GCMarker::traverse(LazyScript* thing) { markAndScan(thing); }
+template <> void GCMarker::traverse(Shape* thing) { markAndScan(thing); }
+template <> void GCMarker::traverse(js::Scope* thing) { markAndScan(thing); }
+} // namespace js
+
+// Object and ObjectGroup are extremely common and can contain arbitrarily
+// nested graphs, so are not trivially inlined. In this case we use a mark
+// stack to control recursion. JitCode shares none of these properties, but is
+// included for historical reasons. JSScript normally cannot recurse, but may
+// be used as a weakmap key and thereby recurse into weakmapped values.
+template <typename T>
+void
+js::GCMarker::markAndPush(StackTag tag, T* thing)
+{
+    // Push for later processing only on the first marking; also mark any
+    // ephemeron values keyed by this thing.
+    if (!mark(thing))
+        return;
+    pushTaggedPtr(tag, thing);
+    markImplicitEdges(thing);
+}
+namespace js {
+template <> void GCMarker::traverse(JSObject* thing) { markAndPush(ObjectTag, thing); }
+template <> void GCMarker::traverse(ObjectGroup* thing) { markAndPush(GroupTag, thing); }
+template <> void GCMarker::traverse(jit::JitCode* thing) { markAndPush(JitCodeTag, thing); }
+template <> void GCMarker::traverse(JSScript* thing) { markAndPush(ScriptTag, thing); }
+} // namespace js
+
+namespace js {
+// AccessorShape edges must be converted to Shape before reaching traverse;
+// hitting this specialization indicates a caller bug.
+template <>
+void
+GCMarker::traverse(AccessorShape* thing) {
+    MOZ_CRASH("AccessorShape must be marked as a Shape");
+}
+} // namespace js
+
+// Debug checks on a source->target edge: zones must agree (unless the target
+// is an atom) and compartments must agree when both sides expose one.
+template <typename S, typename T>
+static void
+CheckTraversedEdge(S source, T* target)
+{
+    // Atoms and Symbols do not have or mark their internal pointers, respectively.
+    MOZ_ASSERT(!ThingIsPermanentAtomOrWellKnownSymbol(source));
+
+    // The Zones must match, unless the target is an atom.
+    MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(target),
+                  target->zone()->isAtomsZone() || target->zone() == source->zone());
+
+    // Atoms and Symbols do not have access to a compartment pointer, or we'd need
+    // to adjust the subsequent check to catch that case.
+    MOZ_ASSERT_IF(ThingIsPermanentAtomOrWellKnownSymbol(target), !target->maybeCompartment());
+    MOZ_ASSERT_IF(target->zoneFromAnyThread()->isAtomsZone(), !target->maybeCompartment());
+    // If we have access to a compartment pointer for both things, they must match.
+    MOZ_ASSERT_IF(source->maybeCompartment() && target->maybeCompartment(),
+                  source->maybeCompartment() == target->maybeCompartment());
+}
+
+// Check the edge, then traverse (mark) the target.
+template <typename S, typename T>
+void
+js::GCMarker::traverseEdge(S source, T* target)
+{
+    CheckTraversedEdge(source, target);
+    traverse(target);
+}
+
+// Adaptor so tagged edges (Value, jsid, TaggedProto) can be unboxed and
+// routed to the pointer overload of traverseEdge.
+template <typename V, typename S> struct TraverseEdgeFunctor : public VoidDefaultAdaptor<V> {
+    template <typename T> void operator()(T t, GCMarker* gcmarker, S s) {
+        return gcmarker->traverseEdge(s, t);
+    }
+};
+
+template <typename S, typename T>
+void
+js::GCMarker::traverseEdge(S source, const T& thing)
+{
+    DispatchTyped(TraverseEdgeFunctor<T, S>(), thing, this, source);
+}
+
+// Set the mark bit on |thing|, returning true if it was previously unmarked.
+// Types that do not participate in cycle collection are always marked black,
+// regardless of the current mark color.
+template <typename T>
+bool
+js::GCMarker::mark(T* thing)
+{
+    AssertZoneIsMarking(thing);
+    MOZ_ASSERT(!IsInsideNursery(gc::TenuredCell::fromPointer(thing)));
+    return gc::ParticipatesInCC<T>::value
+           ? gc::TenuredCell::fromPointer(thing)->markIfUnmarked(markColor())
+           : gc::TenuredCell::fromPointer(thing)->markIfUnmarked(gc::BLACK);
+}
+
+
+/*** Inline, Eager GC Marking *********************************************************************/
+
+// Each of the eager, inline marking paths is directly preceded by the
+// out-of-line, generic tracing code for comparison. Both paths must end up
+// traversing equivalent subgraphs.
+
+// Generic (out-of-line) tracing of all edges owned by a LazyScript. Keep in
+// sync with GCMarker::eagerlyMarkChildren(LazyScript*) below: both must visit
+// exactly the same set of edges.
+void
+LazyScript::traceChildren(JSTracer* trc)
+{
+    if (script_)
+        TraceWeakEdge(trc, &script_, "script");
+
+    if (function_)
+        TraceEdge(trc, &function_, "function");
+
+    if (sourceObject_)
+        TraceEdge(trc, &sourceObject_, "sourceObject");
+
+    if (enclosingScope_)
+        TraceEdge(trc, &enclosingScope_, "enclosingScope");
+
+    // We rely on the fact that atoms are always tenured.
+    JSAtom** bindings = this->closedOverBindings();
+    for (size_t i = 0; i < numClosedOverBindings(); i++) {
+        if (bindings[i])
+            TraceManuallyBarrieredEdge(trc, &bindings[i], "closedOverBinding");
+    }
+
+    GCPtrFunction* funs = this->innerFunctions();
+    for (size_t i = 0; i < numInnerFunctions(); i++)
+        TraceEdge(trc, &funs[i], "lazyScriptInnerFunction");
+}
+// Eager, inline counterpart of LazyScript::traceChildren above; must traverse
+// an equivalent subgraph. The script_ edge is weak and is only noted, not
+// marked.
+inline void
+js::GCMarker::eagerlyMarkChildren(LazyScript *thing)
+{
+    if (thing->script_)
+        noteWeakEdge(thing->script_.unsafeUnbarrieredForTracing());
+
+    if (thing->function_)
+        traverseEdge(thing, static_cast<JSObject*>(thing->function_));
+
+    if (thing->sourceObject_)
+        traverseEdge(thing, static_cast<JSObject*>(thing->sourceObject_));
+
+    if (thing->enclosingScope_)
+        traverseEdge(thing, static_cast<Scope*>(thing->enclosingScope_));
+
+    // We rely on the fact that atoms are always tenured.
+    JSAtom** closedOverBindings = thing->closedOverBindings();
+    for (auto i : MakeRange(thing->numClosedOverBindings())) {
+        if (closedOverBindings[i])
+            traverseEdge(thing, static_cast<JSString*>(closedOverBindings[i]));
+    }
+
+    GCPtrFunction* innerFunctions = thing->innerFunctions();
+    for (auto i : MakeRange(thing->numInnerFunctions()))
+        traverseEdge(thing, static_cast<JSObject*>(innerFunctions[i]));
+}
+
+// Generic tracing of a Shape's edges: base shape, property id, parent link,
+// and any accessor objects. Keep in sync with the eager path below.
+void
+Shape::traceChildren(JSTracer* trc)
+{
+    TraceEdge(trc, &base_, "base");
+    TraceEdge(trc, &propidRef(), "propid");
+    if (parent)
+        TraceEdge(trc, &parent, "parent");
+
+    if (hasGetterObject())
+        TraceManuallyBarrieredEdge(trc, &asAccessorShape().getterObj, "getter");
+    if (hasSetterObject())
+        TraceManuallyBarrieredEdge(trc, &asAccessorShape().setterObj, "setter");
+}
+// Eager counterpart of Shape::traceChildren: walks the whole parent chain
+// iteratively (instead of via the parent edge) until it reaches an
+// already-marked shape.
+inline void
+js::GCMarker::eagerlyMarkChildren(Shape* shape)
+{
+    MOZ_ASSERT(shape->isMarked(this->markColor()));
+    do {
+        // Special case: if a base shape has a shape table then all its pointers
+        // must point to this shape or an ancestor. Since these pointers will
+        // be traced by this loop they do not need to be traced here as well.
+        BaseShape* base = shape->base();
+        CheckTraversedEdge(shape, base);
+        if (mark(base)) {
+            MOZ_ASSERT(base->canSkipMarkingShapeTable(shape));
+            base->traceChildrenSkipShapeTable(this);
+        }
+
+        traverseEdge(shape, shape->propidRef().get());
+
+        // When triggered between slices on behalf of a barrier, these
+        // objects may reside in the nursery, so require an extra check.
+        // FIXME: Bug 1157967 - remove the isTenured checks.
+        if (shape->hasGetterObject() && shape->getterObject()->isTenured())
+            traverseEdge(shape, shape->getterObject());
+        if (shape->hasSetterObject() && shape->setterObject()->isTenured())
+            traverseEdge(shape, shape->setterObject());
+
+        shape = shape->previous();
+    } while (shape && mark(shape));
+}
+
+// Trace the children of a string: a dependent string references its base,
+// and a rope references its two child halves; other strings have no edges.
+void
+JSString::traceChildren(JSTracer* trc)
+{
+    if (hasBase()) {
+        traceBase(trc);
+        return;
+    }
+    if (isRope())
+        asRope().traceChildren(trc);
+}
+// Eager marking entry point for strings: dispatch on the concrete
+// representation to the linear-string or rope scanner.
+inline void
+GCMarker::eagerlyMarkChildren(JSString* str)
+{
+    if (!str->isLinear()) {
+        eagerlyMarkChildren(&str->asRope());
+        return;
+    }
+    eagerlyMarkChildren(&str->asLinear());
+}
+
+// Trace the base-string edge of a dependent string.
+void
+JSString::traceBase(JSTracer* trc)
+{
+    MOZ_ASSERT(hasBase());
+    TraceManuallyBarrieredEdge(trc, &d.s.u3.base, "base");
+}
+// Eagerly mark the chain of base strings hanging off a linear string.
+inline void
+js::GCMarker::eagerlyMarkChildren(JSLinearString* linearStr)
+{
+    AssertZoneIsMarking(linearStr);
+    MOZ_ASSERT(linearStr->isMarked());
+    MOZ_ASSERT(linearStr->JSString::isLinear());
+
+    // Use iterative marking to avoid blowing out the stack.
+    while (linearStr->hasBase()) {
+        linearStr = linearStr->base();
+        MOZ_ASSERT(linearStr->JSString::isLinear());
+        // Permanent atoms are never marked; stop at one.
+        if (linearStr->isPermanentAtom())
+            break;
+        AssertZoneIsMarking(linearStr);
+        // Stop as soon as we reach an already-marked base.
+        if (!mark(static_cast<JSString*>(linearStr)))
+            break;
+    }
+}
+
+// Trace both halves of a rope. Keep in sync with the eager rope scanner below.
+void
+JSRope::traceChildren(JSTracer* trc) {
+    js::TraceManuallyBarrieredEdge(trc, &d.s.u2.left, "left child");
+    js::TraceManuallyBarrieredEdge(trc, &d.s.u3.right, "right child");
+}
+inline void
+js::GCMarker::eagerlyMarkChildren(JSRope* rope)
+{
+    // This function tries to scan the whole rope tree using the marking stack
+    // as temporary storage. If that becomes full, the unscanned ropes are
+    // added to the delayed marking list. When the function returns, the
+    // marking stack is at the same depth as it was on entry. This way we avoid
+    // using tags when pushing ropes to the stack as ropes never leak to other
+    // users of the stack. This also assumes that a rope can only point to
+    // other ropes or linear strings, it cannot refer to GC things of other
+    // types.
+    ptrdiff_t savedPos = stack.position();
+    JS_DIAGNOSTICS_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+#ifdef JS_DEBUG
+    static const size_t DEEP_ROPE_THRESHOLD = 100000;
+    static const size_t ROPE_CYCLE_HISTORY = 100;
+    DebugOnly<size_t> ropeDepth = 0;
+    JSRope* history[ROPE_CYCLE_HISTORY];
+#endif
+    while (true) {
+#ifdef JS_DEBUG
+        if (++ropeDepth >= DEEP_ROPE_THRESHOLD) {
+            // Bug 1011786 comment 294 - detect cyclic ropes. There are some
+            // legitimate deep ropes, at least in tests. So if we hit a deep
+            // rope, start recording the nodes we visit and check whether we
+            // repeat. But do it on a finite window size W so that we're not
+            // scanning the full history for every node. And only check every
+            // Wth push, to add only constant overhead per node. This will only
+            // catch cycles of size up to W (but it seems most likely that any
+            // cycles will be size 1 or maybe 2.)
+            if ((ropeDepth > DEEP_ROPE_THRESHOLD + ROPE_CYCLE_HISTORY) &&
+                (ropeDepth % ROPE_CYCLE_HISTORY) == 0)
+            {
+                for (size_t i = 0; i < ROPE_CYCLE_HISTORY; i++)
+                    MOZ_ASSERT(history[i] != rope, "cycle detected in rope");
+            }
+            history[ropeDepth % ROPE_CYCLE_HISTORY] = rope;
+        }
+#endif
+
+        JS_DIAGNOSTICS_ASSERT(rope->getTraceKind() == JS::TraceKind::String);
+        JS_DIAGNOSTICS_ASSERT(rope->JSString::isRope());
+        AssertZoneIsMarking(rope);
+        MOZ_ASSERT(rope->isMarked());
+        JSRope* next = nullptr;
+
+        // Mark the right child; linear children are scanned inline, rope
+        // children are deferred to |next| (and possibly the stack).
+        JSString* right = rope->rightChild();
+        if (!right->isPermanentAtom() &&
+            mark(right))
+        {
+            if (right->isLinear())
+                eagerlyMarkChildren(&right->asLinear());
+            else
+                next = &right->asRope();
+        }
+
+        JSString* left = rope->leftChild();
+        if (!left->isPermanentAtom() &&
+            mark(left))
+        {
+            if (left->isLinear()) {
+                eagerlyMarkChildren(&left->asLinear());
+            } else {
+                // When both children are ropes, set aside the right one to
+                // scan it later.
+                if (next && !stack.push(reinterpret_cast<uintptr_t>(next)))
+                    delayMarkingChildren(next);
+                next = &left->asRope();
+            }
+        }
+        // Continue with a pending child rope, then drain what we pushed on
+        // the stack, and finish once the stack is back to its entry depth.
+        if (next) {
+            rope = next;
+        } else if (savedPos != stack.position()) {
+            MOZ_ASSERT(savedPos < stack.position());
+            rope = reinterpret_cast<JSRope*>(stack.pop());
+        } else {
+            break;
+        }
+    }
+    MOZ_ASSERT(savedPos == stack.position());
+}
+
+// Trace an array of |length| binding names, all of which must be non-null.
+// NOTE(review): the edge traced is the local copy |name|, not the slot in
+// |names| itself, so a moving trace would not be written back — presumably
+// safe because atoms are not moved here; confirm against BindingName layout.
+static inline void
+TraceBindingNames(JSTracer* trc, BindingName* names, uint32_t length)
+{
+    for (uint32_t i = 0; i < length; i++) {
+        JSAtom* name = names[i].name();
+        MOZ_ASSERT(name);
+        TraceManuallyBarrieredEdge(trc, &name, "scope name");
+    }
+}
+// Trace an array of |length| binding names, skipping null entries.
+// NOTE(review): as in TraceBindingNames, the traced edge is a local copy and
+// is not written back to |names| — confirm this is intentional.
+static inline void
+TraceNullableBindingNames(JSTracer* trc, BindingName* names, uint32_t length)
+{
+    for (uint32_t i = 0; i < length; i++) {
+        if (JSAtom* name = names[i].name())
+            TraceManuallyBarrieredEdge(trc, &name, "scope name");
+    }
+}
+// Trace this binding's atom, if any.
+void
+BindingName::trace(JSTracer* trc)
+{
+    if (JSAtom* atom = name())
+        TraceManuallyBarrieredEdge(trc, &atom, "binding name");
+}
+// Trace all (possibly null) names covered by this iterator.
+void
+BindingIter::trace(JSTracer* trc)
+{
+    TraceNullableBindingNames(trc, names_, length_);
+}
+// Lexical scope data: names only, all non-null.
+void
+LexicalScope::Data::trace(JSTracer* trc)
+{
+    TraceBindingNames(trc, names, length);
+}
+// Function scope data: canonical function plus names (which may be null,
+// e.g. destructuring formals).
+void
+FunctionScope::Data::trace(JSTracer* trc)
+{
+    TraceNullableEdge(trc, &canonicalFunction, "scope canonical function");
+    TraceNullableBindingNames(trc, names, length);
+}
+// Var scope data: names only, all non-null.
+void
+VarScope::Data::trace(JSTracer* trc)
+{
+    TraceBindingNames(trc, names, length);
+}
+// Global scope data: names only, all non-null.
+void
+GlobalScope::Data::trace(JSTracer* trc)
+{
+    TraceBindingNames(trc, names, length);
+}
+// Eval scope data: names only, all non-null.
+void
+EvalScope::Data::trace(JSTracer* trc)
+{
+    TraceBindingNames(trc, names, length);
+}
+// Module scope data: module object plus names (traced as nullable here).
+void
+ModuleScope::Data::trace(JSTracer* trc)
+{
+    TraceNullableEdge(trc, &module, "scope module");
+    TraceNullableBindingNames(trc, names, length);
+}
+// Generic tracing of a Scope: enclosing scope, environment shape, and the
+// kind-specific Data payload. Keep the switch in sync with the eager path in
+// GCMarker::eagerlyMarkChildren(Scope*) below.
+void
+Scope::traceChildren(JSTracer* trc)
+{
+    TraceNullableEdge(trc, &enclosing_, "scope enclosing");
+    TraceNullableEdge(trc, &environmentShape_, "scope env shape");
+    switch (kind_) {
+      case ScopeKind::Function:
+        reinterpret_cast<FunctionScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::FunctionBodyVar:
+      case ScopeKind::ParameterExpressionVar:
+        reinterpret_cast<VarScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::Lexical:
+      case ScopeKind::SimpleCatch:
+      case ScopeKind::Catch:
+      case ScopeKind::NamedLambda:
+      case ScopeKind::StrictNamedLambda:
+        reinterpret_cast<LexicalScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::Global:
+      case ScopeKind::NonSyntactic:
+        reinterpret_cast<GlobalScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::Eval:
+      case ScopeKind::StrictEval:
+        reinterpret_cast<EvalScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::Module:
+        reinterpret_cast<ModuleScope::Data*>(data_)->trace(trc);
+        break;
+      case ScopeKind::With:
+        // With scopes have no Data payload to trace.
+        break;
+    }
+}
+// Eager counterpart of Scope::traceChildren: extracts the name array from the
+// kind-specific Data, traverses any extra object edge inline, then marks the
+// names.
+inline void
+js::GCMarker::eagerlyMarkChildren(Scope* scope)
+{
+    if (scope->enclosing_)
+        traverseEdge(scope, static_cast<Scope*>(scope->enclosing_));
+    if (scope->environmentShape_)
+        traverseEdge(scope, static_cast<Shape*>(scope->environmentShape_));
+    BindingName* names = nullptr;
+    uint32_t length = 0;
+    switch (scope->kind_) {
+      case ScopeKind::Function: {
+        FunctionScope::Data* data = reinterpret_cast<FunctionScope::Data*>(scope->data_);
+        traverseEdge(scope, static_cast<JSObject*>(data->canonicalFunction));
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::FunctionBodyVar:
+      case ScopeKind::ParameterExpressionVar: {
+        VarScope::Data* data = reinterpret_cast<VarScope::Data*>(scope->data_);
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::Lexical:
+      case ScopeKind::SimpleCatch:
+      case ScopeKind::Catch:
+      case ScopeKind::NamedLambda:
+      case ScopeKind::StrictNamedLambda: {
+        LexicalScope::Data* data = reinterpret_cast<LexicalScope::Data*>(scope->data_);
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::Global:
+      case ScopeKind::NonSyntactic: {
+        GlobalScope::Data* data = reinterpret_cast<GlobalScope::Data*>(scope->data_);
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::Eval:
+      case ScopeKind::StrictEval: {
+        EvalScope::Data* data = reinterpret_cast<EvalScope::Data*>(scope->data_);
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::Module: {
+        ModuleScope::Data* data = reinterpret_cast<ModuleScope::Data*>(scope->data_);
+        traverseEdge(scope, static_cast<JSObject*>(data->module));
+        names = data->names;
+        length = data->length;
+        break;
+      }
+
+      case ScopeKind::With:
+        break;
+    }
+    // Only function scopes may contain null names here.
+    // NOTE(review): ModuleScope::Data::trace uses TraceNullableBindingNames,
+    // but module scopes take the non-null branch below — confirm module
+    // binding names can never actually be null.
+    if (scope->kind_ == ScopeKind::Function) {
+        for (uint32_t i = 0; i < length; i++) {
+            if (JSAtom* name = names[i].name())
+                traverseEdge(scope, static_cast<JSString*>(name));
+        }
+    } else {
+        for (uint32_t i = 0; i < length; i++)
+            traverseEdge(scope, static_cast<JSString*>(names[i].name()));
+    }
+}
+
+// Generic tracing of an ObjectGroup. Several edges are traced through a local
+// copy and written back with the corresponding setter, because these fields
+// are stored without barriers. Keep in sync with lazilyMarkChildren below.
+void
+js::ObjectGroup::traceChildren(JSTracer* trc)
+{
+    unsigned count = getPropertyCount();
+    for (unsigned i = 0; i < count; i++) {
+        if (ObjectGroup::Property* prop = getProperty(i))
+            TraceEdge(trc, &prop->id, "group_property");
+    }
+
+    if (proto().isObject())
+        TraceEdge(trc, &proto(), "group_proto");
+
+    // Only mark the compartment when actually marking (not e.g. moving).
+    if (trc->isMarkingTracer())
+        compartment()->mark();
+
+    if (JSObject* global = compartment()->unsafeUnbarrieredMaybeGlobal())
+        TraceManuallyBarrieredEdge(trc, &global, "group_global");
+
+
+    if (newScript())
+        newScript()->trace(trc);
+
+    if (maybePreliminaryObjects())
+        maybePreliminaryObjects()->trace(trc);
+
+    if (maybeUnboxedLayout())
+        unboxedLayout().trace(trc);
+
+    if (ObjectGroup* unboxedGroup = maybeOriginalUnboxedGroup()) {
+        TraceManuallyBarrieredEdge(trc, &unboxedGroup, "group_original_unboxed_group");
+        setOriginalUnboxedGroup(unboxedGroup);
+    }
+
+    if (JSObject* descr = maybeTypeDescr()) {
+        TraceManuallyBarrieredEdge(trc, &descr, "group_type_descr");
+        setTypeDescr(&descr->as<TypeDescr>());
+    }
+
+    if (JSObject* fun = maybeInterpretedFunction()) {
+        TraceManuallyBarrieredEdge(trc, &fun, "group_function");
+        setInterpretedFunction(&fun->as<JSFunction>());
+    }
+}
+// Marking-tracer counterpart of ObjectGroup::traceChildren. The GCMarker is
+// always a marking tracer, so the compartment is marked unconditionally and
+// no write-back of moved pointers is needed.
+void
+js::GCMarker::lazilyMarkChildren(ObjectGroup* group)
+{
+    unsigned count = group->getPropertyCount();
+    for (unsigned i = 0; i < count; i++) {
+        if (ObjectGroup::Property* prop = group->getProperty(i))
+            traverseEdge(group, prop->id.get());
+    }
+
+    if (group->proto().isObject())
+        traverseEdge(group, group->proto().toObject());
+
+    group->compartment()->mark();
+
+    if (GlobalObject* global = group->compartment()->unsafeUnbarrieredMaybeGlobal())
+        traverseEdge(group, static_cast<JSObject*>(global));
+
+    if (group->newScript())
+        group->newScript()->trace(this);
+
+    if (group->maybePreliminaryObjects())
+        group->maybePreliminaryObjects()->trace(this);
+
+    if (group->maybeUnboxedLayout())
+        group->unboxedLayout().trace(this);
+
+    if (ObjectGroup* unboxedGroup = group->maybeOriginalUnboxedGroup())
+        traverseEdge(group, unboxedGroup);
+
+    if (TypeDescr* descr = group->maybeTypeDescr())
+        traverseEdge(group, static_cast<JSObject*>(descr));
+
+    if (JSFunction* fun = group->maybeInterpretedFunction())
+        traverseEdge(group, static_cast<JSObject*>(fun));
+}
+
+// Functor passed to CallTraceHook when marking: traverses each edge found by
+// the object's trace hook from |src|.
+struct TraverseObjectFunctor
+{
+    template <typename T>
+    void operator()(T* thing, GCMarker* gcmarker, JSObject* src) {
+        gcmarker->traverseEdge(src, *thing);
+    }
+};
+
+// Call the trace hook set on the object, if present. If further tracing of
+// NativeObject fields is required, this will return the native object.
+enum class CheckGeneration { DoChecks, NoChecks};
+template <typename Functor, typename... Args>
+static inline NativeObject*
+CallTraceHook(Functor f, JSTracer* trc, JSObject* obj, CheckGeneration check, Args&&... args)
+{
+    const Class* clasp = obj->getClass();
+    MOZ_ASSERT(clasp);
+    MOZ_ASSERT(obj->isNative() == clasp->isNative());
+
+    // No hook: all children live in the native object's slots/elements.
+    if (!clasp->hasTrace())
+        return &obj->as<NativeObject>();
+
+    // Inline typed objects are handled specially: visit the shape and the
+    // trace list directly rather than calling the hook.
+    if (clasp->isTrace(InlineTypedObject::obj_trace)) {
+        InlineTypedObject& tobj = obj->as<InlineTypedObject>();
+        Shape** pshape = tobj.addressOfShapeFromGC();
+        f(pshape, mozilla::Forward<Args>(args)...);
+
+        if (tobj.typeDescr().hasTraceList()) {
+            VisitTraceList(f, tobj.typeDescr().traceList(), tobj.inlineTypedMemForGC(),
+                           mozilla::Forward<Args>(args)...);
+        }
+
+        return nullptr;
+    }
+
+    // Unboxed plain objects: visit the expando (if any) and the layout's
+    // trace list directly.
+    if (clasp == &UnboxedPlainObject::class_) {
+        UnboxedPlainObject& unboxed = obj->as<UnboxedPlainObject>();
+        JSObject** pexpando = unboxed.addressOfExpando();
+        if (*pexpando)
+            f(pexpando, mozilla::Forward<Args>(args)...);
+
+        const UnboxedLayout& layout = check == CheckGeneration::DoChecks
+                                      ? unboxed.layout()
+                                      : unboxed.layoutDontCheckGeneration();
+        if (layout.traceList()) {
+            VisitTraceList(f, layout.traceList(), unboxed.data(),
+                           mozilla::Forward<Args>(args)...);
+        }
+
+        return nullptr;
+    }
+
+    clasp->doTrace(trc, obj);
+
+    return clasp->isNative() ? &obj->as<NativeObject>() : nullptr;
+}
+
+// Walk a type-descriptor trace list. The list holds three -1-terminated runs
+// of byte offsets into |memory|: string fields, then object fields (nullable),
+// then Value fields.
+template <typename F, typename... Args>
+static void
+VisitTraceList(F f, const int32_t* traceList, uint8_t* memory, Args&&... args)
+{
+    // Run 1: strings.
+    while (*traceList != -1) {
+        f(reinterpret_cast<JSString**>(memory + *traceList), mozilla::Forward<Args>(args)...);
+        traceList++;
+    }
+    traceList++;
+    // Run 2: objects; null slots are skipped.
+    while (*traceList != -1) {
+        JSObject** objp = reinterpret_cast<JSObject**>(memory + *traceList);
+        if (*objp)
+            f(objp, mozilla::Forward<Args>(args)...);
+        traceList++;
+    }
+    traceList++;
+    // Run 3: Values.
+    while (*traceList != -1) {
+        f(reinterpret_cast<Value*>(memory + *traceList), mozilla::Forward<Args>(args)...);
+        traceList++;
+    }
+}
+
+
+/*** Mark-stack Marking ***************************************************************************/
+
+// Drain the mark stack (and any delayed-marking arenas) within |budget|.
+// Returns false if the budget ran out first, after saving any in-progress
+// value ranges so JS can safely run between slices.
+bool
+GCMarker::drainMarkStack(SliceBudget& budget)
+{
+#ifdef DEBUG
+    MOZ_ASSERT(!strictCompartmentChecking);
+    strictCompartmentChecking = true;
+    auto acc = mozilla::MakeScopeExit([&] {strictCompartmentChecking = false;});
+#endif
+
+    if (budget.isOverBudget())
+        return false;
+
+    for (;;) {
+        while (!stack.isEmpty()) {
+            processMarkStackTop(budget);
+            if (budget.isOverBudget()) {
+                saveValueRanges();
+                return false;
+            }
+        }
+
+        if (!hasDelayedChildren())
+            break;
+
+        /*
+         * Mark children of things that caused too deep recursion during the
+         * above tracing. Don't do this until we're done with everything
+         * else.
+         */
+        if (!markDelayedChildren(budget)) {
+            saveValueRanges();
+            return false;
+        }
+    }
+
+    return true;
+}
+
+// Returns whether the dense elements of |nobj| may contain GC pointers and so
+// need to be traced; a conservative true is always safe.
+inline static bool
+ObjectDenseElementsMayBeMarkable(NativeObject* nobj)
+{
+    /*
+     * For arrays that are large enough it's worth checking the type information
+     * to see if the object's elements contain any GC pointers. If not, we
+     * don't need to trace them.
+     */
+    const unsigned MinElementsLength = 32;
+    if (nobj->getDenseInitializedLength() < MinElementsLength || nobj->isSingleton())
+        return true;
+
+    ObjectGroup* group = nobj->group();
+    if (group->needsSweep() || group->unknownProperties())
+        return true;
+
+    // JSID_VOID tracks the type of the object's indexed properties.
+    HeapTypeSet* typeSet = group->maybeGetProperty(JSID_VOID);
+    if (!typeSet)
+        return true;
+
+    static const uint32_t flagMask =
+        TYPE_FLAG_STRING | TYPE_FLAG_SYMBOL | TYPE_FLAG_LAZYARGS | TYPE_FLAG_ANYOBJECT;
+    bool mayBeMarkable = typeSet->hasAnyFlag(flagMask) || typeSet->getObjectCount() != 0;
+
+#ifdef DEBUG
+    if (!mayBeMarkable) {
+        const Value* elements = nobj->getDenseElementsAllowCopyOnWrite();
+        for (unsigned i = 0; i < nobj->getDenseInitializedLength(); i++)
+            MOZ_ASSERT(!elements[i].isMarkable());
+    }
+#endif
+
+    return mayBeMarkable;
+}
+
+// Pop one tagged entry off the mark stack and scan it. Value arrays are
+// encoded as three stack words (object, start, end); other tags carry a
+// single pointer.
+inline void
+GCMarker::processMarkStackTop(SliceBudget& budget)
+{
+    /*
+     * The function uses explicit goto and implements the scanning of the
+     * object directly. It allows to eliminate the tail recursion and
+     * significantly improve the marking performance, see bug 641025.
+     */
+    HeapSlot* vp;
+    HeapSlot* end;
+    JSObject* obj;
+
+    // Decode
+    uintptr_t addr = stack.pop();
+    uintptr_t tag = addr & StackTagMask;
+    addr &= ~StackTagMask;
+
+    // Dispatch
+    switch (tag) {
+      case ValueArrayTag: {
+        JS_STATIC_ASSERT(ValueArrayTag == 0);
+        MOZ_ASSERT(!(addr & CellMask));
+        obj = reinterpret_cast<JSObject*>(addr);
+        uintptr_t addr2 = stack.pop();
+        uintptr_t addr3 = stack.pop();
+        MOZ_ASSERT(addr2 <= addr3);
+        MOZ_ASSERT((addr3 - addr2) % sizeof(Value) == 0);
+        vp = reinterpret_cast<HeapSlot*>(addr2);
+        end = reinterpret_cast<HeapSlot*>(addr3);
+        goto scan_value_array;
+      }
+
+      case ObjectTag: {
+        obj = reinterpret_cast<JSObject*>(addr);
+        AssertZoneIsMarking(obj);
+        goto scan_obj;
+      }
+
+      case GroupTag: {
+        return lazilyMarkChildren(reinterpret_cast<ObjectGroup*>(addr));
+      }
+
+      case JitCodeTag: {
+        return reinterpret_cast<jit::JitCode*>(addr)->traceChildren(this);
+      }
+
+      case ScriptTag: {
+        return reinterpret_cast<JSScript*>(addr)->traceChildren(this);
+      }
+
+      case SavedValueArrayTag: {
+        // Re-derive the live slot pointers from the saved indexes (see
+        // saveValueRanges); if the object changed shape, just rescan it.
+        MOZ_ASSERT(!(addr & CellMask));
+        JSObject* obj = reinterpret_cast<JSObject*>(addr);
+        HeapSlot* vp;
+        HeapSlot* end;
+        if (restoreValueArray(obj, (void**)&vp, (void**)&end))
+            pushValueArray(&obj->as<NativeObject>(), vp, end);
+        else
+            repush(obj);
+        return;
+      }
+
+      default: MOZ_CRASH("Invalid tag in mark stack");
+    }
+    return;
+
+    // Scan a [vp, end) range of slots belonging to |obj|.
+  scan_value_array:
+    MOZ_ASSERT(vp <= end);
+    while (vp != end) {
+        budget.step();
+        if (budget.isOverBudget()) {
+            pushValueArray(obj, vp, end);
+            return;
+        }
+
+        const Value& v = *vp++;
+        if (v.isString()) {
+            traverseEdge(obj, v.toString());
+        } else if (v.isObject()) {
+            JSObject* obj2 = &v.toObject();
+            MOZ_ASSERT(obj->compartment() == obj2->compartment());
+            if (mark(obj2)) {
+                // Save the rest of this value array for later and start scanning obj2's children.
+                pushValueArray(obj, vp, end);
+                obj = obj2;
+                goto scan_obj;
+            }
+        } else if (v.isSymbol()) {
+            traverseEdge(obj, v.toSymbol());
+        } else if (v.isPrivateGCThing()) {
+            traverseEdge(obj, v.toGCCellPtr());
+        }
+    }
+    return;
+
+    // Scan |obj| itself: group, shape, then elements and slots (pushed as
+    // value arrays where possible).
+  scan_obj:
+    {
+        AssertZoneIsMarking(obj);
+
+        budget.step();
+        if (budget.isOverBudget()) {
+            repush(obj);
+            return;
+        }
+
+        markImplicitEdges(obj);
+        ObjectGroup* group = obj->groupFromGC();
+        traverseEdge(obj, group);
+
+        NativeObject *nobj = CallTraceHook(TraverseObjectFunctor(), this, obj,
+                                           CheckGeneration::DoChecks, this, obj);
+        if (!nobj)
+            return;
+
+        Shape* shape = nobj->lastProperty();
+        traverseEdge(obj, shape);
+
+        unsigned nslots = nobj->slotSpan();
+
+        do {
+            if (nobj->hasEmptyElements())
+                break;
+
+            if (nobj->denseElementsAreCopyOnWrite()) {
+                JSObject* owner = nobj->getElementsHeader()->ownerObject();
+                if (owner != nobj) {
+                    // Non-owner COW objects: mark the owner instead; its
+                    // elements will be scanned when the owner is scanned.
+                    traverseEdge(obj, owner);
+                    break;
+                }
+            }
+
+            if (!ObjectDenseElementsMayBeMarkable(nobj))
+                break;
+
+            vp = nobj->getDenseElementsAllowCopyOnWrite();
+            end = vp + nobj->getDenseInitializedLength();
+
+            if (!nslots)
+                goto scan_value_array;
+            pushValueArray(nobj, vp, end);
+        } while (false);
+
+        vp = nobj->fixedSlots();
+        if (nobj->slots_) {
+            unsigned nfixed = nobj->numFixedSlots();
+            if (nslots > nfixed) {
+                // Push the fixed slots for later; scan the dynamic slots now.
+                pushValueArray(nobj, vp, vp + nfixed);
+                vp = nobj->slots_;
+                end = vp + (nslots - nfixed);
+                goto scan_value_array;
+            }
+        }
+        MOZ_ASSERT(nslots <= nobj->numFixedSlots());
+        end = vp + nslots;
+        goto scan_value_array;
+    }
+}
+
+// Overlay describing how a (Saved)ValueArray occupies three mark-stack words;
+// the unions reflect the live-pointer vs. saved-index encodings.
+struct SlotArrayLayout
+{
+    union {
+        HeapSlot* end;
+        uintptr_t kind;
+    };
+    union {
+        HeapSlot* start;
+        uintptr_t index;
+    };
+    NativeObject* obj;
+
+    static void staticAsserts() {
+        /* This should have the same layout as three mark stack items. */
+        JS_STATIC_ASSERT(sizeof(SlotArrayLayout) == 3 * sizeof(uintptr_t));
+    }
+};
+
+/*
+ * During incremental GC, we return from drainMarkStack without having processed
+ * the entire stack. At that point, JS code can run and reallocate slot arrays
+ * that are stored on the stack. To prevent this from happening, we replace all
+ * ValueArrayTag stack items with SavedValueArrayTag. In the latter, slots
+ * pointers are replaced with slot indexes, and slot array end pointers are
+ * replaced with the kind of index (properties vs. elements).
+ */
+// Convert every in-flight ValueArray on the mark stack from raw slot pointers
+// to (kind, index) form so the ranges survive slot reallocation between
+// incremental slices. Inverse of restoreValueArray.
+void
+GCMarker::saveValueRanges()
+{
+    // Walk the stack top-down; each value-array entry occupies three words.
+    for (uintptr_t* p = stack.tos_; p > stack.stack_; ) {
+        uintptr_t tag = *--p & StackTagMask;
+        if (tag == ValueArrayTag) {
+            *p &= ~StackTagMask;
+            p -= 2;
+            SlotArrayLayout* arr = reinterpret_cast<SlotArrayLayout*>(p);
+            NativeObject* obj = arr->obj;
+            MOZ_ASSERT(obj->isNative());
+
+            HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite();
+            if (arr->end == vp + obj->getDenseInitializedLength()) {
+                // The range covers dense elements.
+                MOZ_ASSERT(arr->start >= vp);
+                arr->index = arr->start - vp;
+                arr->kind = HeapSlot::Element;
+            } else {
+                // The range covers fixed or dynamic slots.
+                HeapSlot* vp = obj->fixedSlots();
+                unsigned nfixed = obj->numFixedSlots();
+                if (arr->start == arr->end) {
+                    arr->index = obj->slotSpan();
+                } else if (arr->start >= vp && arr->start < vp + nfixed) {
+                    MOZ_ASSERT(arr->end == vp + Min(nfixed, obj->slotSpan()));
+                    arr->index = arr->start - vp;
+                } else {
+                    MOZ_ASSERT(arr->start >= obj->slots_ &&
+                               arr->end == obj->slots_ + obj->slotSpan() - nfixed);
+                    arr->index = (arr->start - obj->slots_) + nfixed;
+                }
+                arr->kind = HeapSlot::Slot;
+            }
+            p[2] |= SavedValueArrayTag;
+        } else if (tag == SavedValueArrayTag) {
+            p -= 2;
+        }
+    }
+}
+
+// Rebuild a live [*vpp, *endp) slot range from a SavedValueArray entry
+// (start index + kind popped off the stack). Returns false if the object can
+// no longer be interpreted this way and must be rescanned from scratch.
+bool
+GCMarker::restoreValueArray(JSObject* objArg, void** vpp, void** endp)
+{
+    uintptr_t start = stack.pop();
+    HeapSlot::Kind kind = (HeapSlot::Kind) stack.pop();
+
+    if (!objArg->isNative())
+        return false;
+    NativeObject* obj = &objArg->as<NativeObject>();
+
+    if (kind == HeapSlot::Element) {
+        if (!obj->is<ArrayObject>())
+            return false;
+
+        uint32_t initlen = obj->getDenseInitializedLength();
+        HeapSlot* vp = obj->getDenseElementsAllowCopyOnWrite();
+        if (start < initlen) {
+            *vpp = vp + start;
+            *endp = vp + initlen;
+        } else {
+            /* The object shrunk, in which case no scanning is needed. */
+            *vpp = *endp = vp;
+        }
+    } else {
+        MOZ_ASSERT(kind == HeapSlot::Slot);
+        HeapSlot* vp = obj->fixedSlots();
+        unsigned nfixed = obj->numFixedSlots();
+        unsigned nslots = obj->slotSpan();
+        if (start < nslots) {
+            if (start < nfixed) {
+                *vpp = vp + start;
+                *endp = vp + Min(nfixed, nslots);
+            } else {
+                *vpp = obj->slots_ + start - nfixed;
+                *endp = obj->slots_ + nslots - nfixed;
+            }
+        } else {
+            /* The object shrunk, in which case no scanning is needed. */
+            *vpp = *endp = vp;
+        }
+    }
+
+    MOZ_ASSERT(*vpp <= *endp);
+    return true;
+}
+
+
+/*** Mark Stack ***********************************************************************************/
+
+// Allocate the initial stack buffer, sized according to |gcMode|.
+// Returns false on OOM.
+bool
+MarkStack::init(JSGCMode gcMode)
+{
+    setBaseCapacity(gcMode);
+
+    MOZ_ASSERT(!stack_);
+    uintptr_t* buffer = js_pod_malloc<uintptr_t>(baseCapacity_);
+    if (!buffer)
+        return false;
+    setStack(buffer, 0, baseCapacity_);
+    return true;
+}
+
+// Pick the base (post-reset) capacity for the given GC mode, clamped to the
+// configured maximum. Incremental GC uses a larger base to reduce the chance
+// of overflow mid-slice.
+void
+MarkStack::setBaseCapacity(JSGCMode mode)
+{
+    if (mode == JSGC_MODE_INCREMENTAL)
+        baseCapacity_ = INCREMENTAL_MARK_STACK_BASE_CAPACITY;
+    else if (mode == JSGC_MODE_GLOBAL || mode == JSGC_MODE_ZONE)
+        baseCapacity_ = NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY;
+    else
+        MOZ_CRASH("bad gc mode");
+
+    if (baseCapacity_ > maxCapacity_)
+        baseCapacity_ = maxCapacity_;
+}
+
+// Set the hard capacity limit; the stack must be empty. Re-clamps the base
+// capacity and reallocates via reset().
+void
+MarkStack::setMaxCapacity(size_t maxCapacity)
+{
+    MOZ_ASSERT(maxCapacity != 0);
+    MOZ_ASSERT(isEmpty());
+    maxCapacity_ = maxCapacity;
+    if (baseCapacity_ > maxCapacity_)
+        baseCapacity_ = maxCapacity_;
+
+    reset();
+}
+
+// Empty the stack and shrink (or grow) it back to the base capacity.
+void
+MarkStack::reset()
+{
+    if (capacity() == baseCapacity_) {
+        // No size change; keep the current stack.
+        setStack(stack_, 0, baseCapacity_);
+        return;
+    }
+
+    MOZ_ASSERT(baseCapacity_ != 0);
+    uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * baseCapacity_);
+    if (!newStack) {
+        // If the realloc fails, just keep using the existing stack; it's
+        // not ideal but better than failing.
+        newStack = stack_;
+        baseCapacity_ = capacity();
+    }
+    setStack(newStack, 0, baseCapacity_);
+}
+
+// Double the stack capacity (up to maxCapacity_) to make room for at least
+// |count| more entries. Returns false if the limit or OOM prevents growth.
+bool
+MarkStack::enlarge(unsigned count)
+{
+    size_t newCapacity = Min(maxCapacity_, capacity() * 2);
+    if (newCapacity < capacity() + count)
+        return false;
+
+    // Preserve the current top-of-stack position across the realloc.
+    size_t tosIndex = position();
+
+    MOZ_ASSERT(newCapacity != 0);
+    uintptr_t* newStack = (uintptr_t*)js_realloc(stack_, sizeof(uintptr_t) * newCapacity);
+    if (!newStack)
+        return false;
+
+    setStack(newStack, tosIndex, newCapacity);
+    return true;
+}
+
+// Record the new GC mode's base capacity; takes effect at the next reset().
+void
+MarkStack::setGCMode(JSGCMode gcMode)
+{
+    // The mark stack won't be resized until the next call to reset(), but
+    // that will happen at the end of the next GC.
+    setBaseCapacity(gcMode);
+}
+
+// Memory reporter hook: size of the heap-allocated stack buffer.
+size_t
+MarkStack::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+{
+    return mallocSizeOf(stack_);
+}
+
+
+/*** GCMarker *************************************************************************************/
+
+/*
+ * ExpandWeakMaps: the GC is recomputing the liveness of WeakMap entries by
+ * expanding each live WeakMap into its constituent key->value edges, a table
+ * of which will be consulted in a later phase whenever marking a potential
+ * key.
+ */
+// Construct a marker with an (initially) unlimited mark stack; call init()
+// before use to allocate the stack for the current GC mode.
+GCMarker::GCMarker(JSRuntime* rt)
+  : JSTracer(rt, JSTracer::TracerKindTag::Marking, ExpandWeakMaps),
+    stack(size_t(-1)),
+    color(BLACK),
+    unmarkedArenaStackTop(nullptr)
+#ifdef DEBUG
+  , markLaterArenas(0)
+  , started(false)
+  , strictCompartmentChecking(false)
+#endif
+{
+}
+
+// Allocate the mark stack; returns false on OOM.
+bool
+GCMarker::init(JSGCMode gcMode)
+{
+    return stack.init(gcMode);
+}
+
+// Begin a marking session: reset color and weak-marking state. Must be
+// balanced by stop().
+void
+GCMarker::start()
+{
+#ifdef DEBUG
+    MOZ_ASSERT(!started);
+    started = true;
+#endif
+    color = BLACK;
+    linearWeakMarkingDisabled_ = false;
+
+    MOZ_ASSERT(!unmarkedArenaStackTop);
+    MOZ_ASSERT(markLaterArenas == 0);
+}
+
+// End a marking session: the stack must be drained. Releases stack memory and
+// clears per-zone weak-key tables.
+void
+GCMarker::stop()
+{
+#ifdef DEBUG
+    MOZ_ASSERT(isDrained());
+
+    MOZ_ASSERT(started);
+    started = false;
+
+    MOZ_ASSERT(!unmarkedArenaStackTop);
+    MOZ_ASSERT(markLaterArenas == 0);
+#endif
+
+    /* Free non-ballast stack memory. */
+    stack.reset();
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
+        if (!zone->gcWeakKeys.clear())
+            oomUnsafe.crash("clearing weak keys in GCMarker::stop()");
+    }
+}
+
+// Abandon marking: discard the mark stack and unwind the delayed-marking
+// arena list, clearing the per-arena flags as we go.
+void
+GCMarker::reset()
+{
+    color = BLACK;
+
+    stack.reset();
+    MOZ_ASSERT(isMarkStackEmpty());
+
+    while (unmarkedArenaStackTop) {
+        Arena* arena = unmarkedArenaStackTop;
+        MOZ_ASSERT(arena->hasDelayedMarking);
+        MOZ_ASSERT(markLaterArenas);
+        unmarkedArenaStackTop = arena->getNextDelayedMarking();
+        arena->unsetDelayedMarking();
+        arena->markOverflow = 0;
+        arena->allocatedDuringIncremental = 0;
+#ifdef DEBUG
+        markLaterArenas--;
+#endif
+    }
+    MOZ_ASSERT(isDrained());
+    MOZ_ASSERT(!markLaterArenas);
+}
+
+// Switch the marker into weak-marking mode (unless disabled), seeding the
+// weak-key tables from entries of already-marked weakmaps.
+void
+GCMarker::enterWeakMarkingMode()
+{
+    MOZ_ASSERT(tag_ == TracerKindTag::Marking);
+    if (linearWeakMarkingDisabled_)
+        return;
+
+    // During weak marking mode, we maintain a table mapping weak keys to
+    // entries in known-live weakmaps. Initialize it with the keys of marked
+    // weakmaps -- or more precisely, the keys of marked weakmaps that are
+    // mapped to not yet live values. (Once bug 1167452 implements incremental
+    // weakmap marking, this initialization step will become unnecessary, as
+    // the table will already hold all such keys.)
+    if (weakMapAction() == ExpandWeakMaps) {
+        tag_ = TracerKindTag::WeakMarking;
+
+        for (GCZoneGroupIter zone(runtime()); !zone.done(); zone.next()) {
+            for (WeakMapBase* m : zone->gcWeakMapList) {
+                if (m->marked)
+                    (void) m->traceEntries(this);
+            }
+        }
+    }
+}
+
+// Leave weak-marking mode and drop the weak-key tables (they would go stale;
+// they are rebuilt on the next enterWeakMarkingMode()).
+void
+GCMarker::leaveWeakMarkingMode()
+{
+    MOZ_ASSERT_IF(weakMapAction() == ExpandWeakMaps && !linearWeakMarkingDisabled_,
+                  tag_ == TracerKindTag::WeakMarking);
+    tag_ = TracerKindTag::Marking;
+
+    // Table is expensive to maintain when not in weak marking mode, so we'll
+    // rebuild it upon entry rather than allow it to contain stale data.
+    AutoEnterOOMUnsafeRegion oomUnsafe;
+    for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
+        if (!zone->gcWeakKeys.clear())
+            oomUnsafe.crash("clearing weak keys in GCMarker::leaveWeakMarkingMode()");
+    }
+}
+
+// Re-mark the children of things in a single delayed-marking arena. Arenas
+// get here either because the mark stack overflowed (markOverflow) or because
+// they were allocated during an incremental slice.
+void
+GCMarker::markDelayedChildren(Arena* arena)
+{
+    if (arena->markOverflow) {
+        bool always = arena->allocatedDuringIncremental;
+        arena->markOverflow = 0;
+
+        for (ArenaCellIterUnderGC i(arena); !i.done(); i.next()) {
+            TenuredCell* t = i.getCell();
+            if (always || t->isMarked()) {
+                t->markIfUnmarked();
+                js::TraceChildren(this, t, MapAllocToTraceKind(arena->getAllocKind()));
+            }
+        }
+    } else {
+        MOZ_ASSERT(arena->allocatedDuringIncremental);
+        PushArena(this, arena);
+    }
+    arena->allocatedDuringIncremental = 0;
+    /*
+     * Note that during an incremental GC we may still be allocating into
+     * the arena. However, prepareForIncrementalGC sets the
+     * allocatedDuringIncremental flag if we continue marking.
+     */
+}
+
+// Drain the delayed-marking arena list within |budget|. Returns false if the
+// budget ran out with arenas still pending.
+bool
+GCMarker::markDelayedChildren(SliceBudget& budget)
+{
+    GCRuntime& gc = runtime()->gc;
+    gcstats::AutoPhase ap(gc.stats, gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);
+
+    MOZ_ASSERT(unmarkedArenaStackTop);
+    do {
+        /*
+         * If marking gets delayed at the same arena again, we must repeat
+         * marking of its things. For that we pop arena from the stack and
+         * clear its hasDelayedMarking flag before we begin the marking.
+         */
+        Arena* arena = unmarkedArenaStackTop;
+        MOZ_ASSERT(arena->hasDelayedMarking);
+        MOZ_ASSERT(markLaterArenas);
+        unmarkedArenaStackTop = arena->getNextDelayedMarking();
+        arena->unsetDelayedMarking();
+#ifdef DEBUG
+        markLaterArenas--;
+#endif
+        markDelayedChildren(arena);
+
+        // Charge a flat per-arena cost against the slice budget.
+        budget.step(150);
+        if (budget.isOverBudget())
+            return false;
+    } while (unmarkedArenaStackTop);
+    MOZ_ASSERT(!markLaterArenas);
+
+    return true;
+}
+
+// Push every cell in |arena| onto the marker's stack, visiting each cell
+// as a T (the concrete GC thing type stored in this arena).
+template<typename T>
+static void
+PushArenaTyped(GCMarker* marker, Arena* arena)
+{
+    for (ArenaCellIterUnderGC iter(arena); !iter.done(); iter.next()) {
+        marker->traverse(iter.get<T>());
+    }
+}
+
+// Dispatch functor mapping a trace kind back to the statically-typed
+// PushArenaTyped<T> instantiation.
+struct PushArenaFunctor {
+    template <typename T>
+    void operator()(GCMarker* marker, Arena* arena) {
+        PushArenaTyped<T>(marker, arena);
+    }
+};
+
+// Push all cells of |arena| for marking, dispatching on the arena's
+// trace kind to select the right typed traversal.
+void
+gc::PushArena(GCMarker* gcmarker, Arena* arena)
+{
+    JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+    DispatchTraceKindTyped(PushArenaFunctor(), kind, gcmarker, arena);
+}
+
+#ifdef DEBUG
+// Debug-only sanity check: a tenured cell reached while marking must belong
+// to a zone that is currently being collected.
+void
+GCMarker::checkZone(void* p)
+{
+    MOZ_ASSERT(started);
+    DebugOnly<Cell*> cell = static_cast<Cell*>(p);
+    MOZ_ASSERT_IF(cell->isTenured(), cell->asTenured().zone()->isCollecting());
+}
+#endif
+
+// Memory-reporter accounting: bytes used by the mark stack plus each
+// zone's gray-roots vector (excluding the GCMarker object itself).
+size_t
+GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+{
+    size_t total = stack.sizeOfExcludingThis(mallocSizeOf);
+    for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next()) {
+        total += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf);
+    }
+    return total;
+}
+
+
+/*** Tenuring Tracer *****************************************************************************/
+
+namespace js {
+// Generic pointer traversal is deliberately a no-op: only the kinds with a
+// specialization below can require action during tenuring. NOTE(review):
+// presumably only JSObject is nursery-allocated here — confirm.
+template <typename T>
+void
+TenuringTracer::traverse(T** tp)
+{
+}
+
+// Object edges: if the target still lives in the nursery, either follow its
+// forwarding pointer (already moved) or move it to the tenured heap now,
+// updating the edge in place.
+template <>
+void
+TenuringTracer::traverse(JSObject** objp)
+{
+    // We only ever visit the internals of objects after moving them to tenured.
+    MOZ_ASSERT(!nursery().isInside(objp));
+
+    if (IsInsideNursery(*objp) && !nursery().getForwardedPointer(objp))
+        *objp = moveToTenured(*objp);
+}
+
+// Unwraps a tagged value (Value/jsid/TaggedProto), traverses the contained
+// pointer, and rewraps the (possibly updated) pointer.
+template <typename S>
+struct TenuringTraversalFunctor : public IdentityDefaultAdaptor<S> {
+    template <typename T> S operator()(T* t, TenuringTracer* trc) {
+        trc->traverse(&t);
+        return js::gc::RewrapTaggedPointer<S, T>::wrap(t);
+    }
+};
+
+// Tagged-pointer overload: dispatch on the dynamic type of *thingp and
+// store back the updated tagged value.
+template <typename T>
+void
+TenuringTracer::traverse(T* thingp)
+{
+    *thingp = DispatchTyped(TenuringTraversalFunctor<T>(), *thingp, this);
+}
+} // namespace js
+
+// Trace every edge recorded in this mono-typed store buffer during a minor
+// GC: the most-recently-added edge cached in |last_| plus all edges in the
+// deduplicating hash set.
+template <typename T>
+void
+js::gc::StoreBuffer::MonoTypeBuffer<T>::trace(StoreBuffer* owner, TenuringTracer& mover)
+{
+    // Guard against reentrant use of the store buffer while tracing it.
+    mozilla::ReentrancyGuard g(*owner);
+    MOZ_ASSERT(owner->isEnabled());
+    MOZ_ASSERT(stores_.initialized());
+    if (last_)
+        last_.trace(mover);
+    for (typename StoreSet::Range r = stores_.all(); !r.empty(); r.popFront())
+        r.front().trace(mover);
+}
+
+namespace js {
+namespace gc {
+// Explicit instantiations of MonoTypeBuffer<T>::trace for every edge type
+// the store buffer records, so the definitions above are emitted here.
+template void
+StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>::trace(StoreBuffer*, TenuringTracer&);
+template void
+StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>::trace(StoreBuffer*, TenuringTracer&);
+template void
+StoreBuffer::MonoTypeBuffer<StoreBuffer::CellPtrEdge>::trace(StoreBuffer*, TenuringTracer&);
+} // namespace gc
+} // namespace js
+
+// Trace a buffered slots/elements edge: forward any nursery pointers stored
+// in the recorded range of |object()|'s slots or dense elements. Ranges are
+// clamped because the object may have shrunk since the edge was recorded.
+void
+js::gc::StoreBuffer::SlotsEdge::trace(TenuringTracer& mover) const
+{
+    NativeObject* obj = object();
+
+    // Beware JSObject::swap exchanging a native object for a non-native one.
+    if (!obj->isNative())
+        return;
+
+    // Nursery objects are traced wholesale during the minor GC; no need to
+    // process a recorded edge for them.
+    if (IsInsideNursery(obj))
+        return;
+
+    if (kind() == ElementKind) {
+        // Clamp [start_, start_ + count_) to the initialized length.
+        int32_t initLen = obj->getDenseInitializedLength();
+        int32_t clampedStart = Min(start_, initLen);
+        int32_t clampedEnd = Min(start_ + count_, initLen);
+        mover.traceSlots(static_cast<HeapSlot*>(obj->getDenseElements() + clampedStart)
+                            ->unsafeUnbarrieredForTracing(), clampedEnd - clampedStart);
+    } else {
+        // Clamp the slot range to the current slot span.
+        int32_t start = Min(uint32_t(start_), obj->slotSpan());
+        int32_t end = Min(uint32_t(start_) + count_, obj->slotSpan());
+        MOZ_ASSERT(end >= start);
+        mover.traceObjectSlots(obj, start, end - start);
+    }
+}
+
+// Trace all edges of a whole-cell-buffered object, including the expando of
+// unboxed plain objects (see comment below for why).
+static inline void
+TraceWholeCell(TenuringTracer& mover, JSObject* object)
+{
+    mover.traceObject(object);
+
+    // Additionally trace the expando object attached to any unboxed plain
+    // objects. Baseline and Ion can write properties to the expando while
+    // only adding a post barrier to the owning unboxed object. Note that
+    // it isn't possible for a nursery unboxed object to have a tenured
+    // expando, so that adding a post barrier on the original object will
+    // capture any tenured->nursery edges in the expando as well.
+
+    if (object->is<UnboxedPlainObject>()) {
+        if (UnboxedExpandoObject* expando = object->as<UnboxedPlainObject>().maybeExpando())
+            expando->traceChildren(&mover);
+    }
+}
+
+// Trace all edges of a whole-cell-buffered script.
+static inline void
+TraceWholeCell(TenuringTracer& mover, JSScript* script)
+{
+    script->traceChildren(&mover);
+}
+
+// Trace all edges of a whole-cell-buffered JIT code object.
+static inline void
+TraceWholeCell(TenuringTracer& mover, jit::JitCode* jitcode)
+{
+    jitcode->traceChildren(&mover);
+}
+
+// Invoke TraceWholeCell on each cell of |arena| whose bit is set in
+// |cells|, interpreting the cells as type T.
+template <typename T>
+static void
+TraceBufferedCells(TenuringTracer& mover, Arena* arena, ArenaCellSet* cells)
+{
+    for (size_t index = 0; index < ArenaCellCount; index++) {
+        if (!cells->hasCell(index))
+            continue;
+        auto thing = reinterpret_cast<T*>(uintptr_t(arena) + CellSize * index);
+        TraceWholeCell(mover, thing);
+    }
+}
+
+// Trace every whole-cell edge recorded during the last mutator run. Each
+// ArenaCellSet is detached from its arena (reset to the shared Empty set)
+// before tracing, and the whole list is dropped at the end.
+void
+js::gc::StoreBuffer::traceWholeCells(TenuringTracer& mover)
+{
+    for (ArenaCellSet* cells = bufferWholeCell; cells; cells = cells->next) {
+        Arena* arena = cells->arena;
+
+        MOZ_ASSERT(arena->bufferedCells == cells);
+        arena->bufferedCells = &ArenaCellSet::Empty;
+
+        JS::TraceKind kind = MapAllocToTraceKind(arena->getAllocKind());
+        switch (kind) {
+          case JS::TraceKind::Object:
+            TraceBufferedCells<JSObject>(mover, arena, cells);
+            break;
+          case JS::TraceKind::Script:
+            TraceBufferedCells<JSScript>(mover, arena, cells);
+            break;
+          case JS::TraceKind::JitCode:
+            TraceBufferedCells<jit::JitCode>(mover, arena, cells);
+            break;
+          default:
+            MOZ_CRASH("Unexpected trace kind");
+        }
+    }
+
+    bufferWholeCell = nullptr;
+}
+
+// Trace a buffered cell-pointer edge. Only object pointers are recorded in
+// this buffer, so the edge is traversed as a JSObject**.
+void
+js::gc::StoreBuffer::CellPtrEdge::trace(TenuringTracer& mover) const
+{
+    if (!*edge)
+        return;
+
+    MOZ_ASSERT((*edge)->getTraceKind() == JS::TraceKind::Object);
+    mover.traverse(reinterpret_cast<JSObject**>(edge));
+}
+
+// Trace a buffered Value edge; non-GC-thing values need no forwarding.
+void
+js::gc::StoreBuffer::ValueEdge::trace(TenuringTracer& mover) const
+{
+    if (!deref())
+        return;
+    mover.traverse(edge);
+}
+
+/* Insert the given relocation entry into the list of things to visit. */
+// Appends at the tail of the singly-linked fixup list; |tail| always points
+// at the current end-of-list link, which is kept null-terminated.
+void
+js::TenuringTracer::insertIntoFixupList(RelocationOverlay* entry) {
+    *tail = entry;
+    tail = &entry->nextRef();
+    *tail = nullptr;
+}
+
+// Move a nursery object to the tenured heap: allocate a tenured cell of the
+// appropriate kind, copy the object into it, leave a forwarding pointer in
+// the nursery cell, and queue the new object for later child tracing.
+// Returns the tenured copy.
+JSObject*
+js::TenuringTracer::moveToTenured(JSObject* src)
+{
+    MOZ_ASSERT(IsInsideNursery(src));
+    MOZ_ASSERT(!src->zone()->usedByExclusiveThread);
+
+    AllocKind dstKind = src->allocKindForTenure(nursery());
+    Zone* zone = src->zone();
+
+    // Fast path: allocate straight from the zone's free lists; fall back to
+    // the GC's refill path, which must not fail (we cannot recover here).
+    TenuredCell* t = zone->arenas.allocateFromFreeList(dstKind, Arena::thingSize(dstKind));
+    if (!t) {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        t = runtime()->gc.refillFreeListInGC(zone, dstKind);
+        if (!t)
+            oomUnsafe.crash(ChunkSize, "Failed to allocate object while tenuring.");
+    }
+    JSObject* dst = reinterpret_cast<JSObject*>(t);
+    tenuredSize += moveObjectToTenured(dst, src, dstKind);
+
+    // Overwrite the nursery cell with a forwarding pointer so later edges to
+    // |src| can be redirected, and remember it for the fixup pass.
+    RelocationOverlay* overlay = RelocationOverlay::fromCell(src);
+    overlay->forwardTo(dst);
+    insertIntoFixupList(overlay);
+
+    TracePromoteToTenured(src, dst);
+    MemProfiler::MoveNurseryToTenured(src, dst);
+    return dst;
+}
+
+// Trace the children of every object moved so far. Tracing may move more
+// objects, which are appended to the same list, so this loop runs until no
+// new entries appear (a fixed point). Also tallies per-group tenure counts
+// used for pretenuring decisions.
+void
+js::Nursery::collectToFixedPoint(TenuringTracer& mover, TenureCountCache& tenureCounts)
+{
+    for (RelocationOverlay* p = mover.head; p; p = p->next()) {
+        JSObject* obj = static_cast<JSObject*>(p->forwardingAddress());
+        mover.traceObject(obj);
+
+        // The cache is lossy: only bump the count when the entry already
+        // belongs to this group or is unused.
+        TenureCount& entry = tenureCounts.findEntry(obj->groupRaw());
+        if (entry.group == obj->groupRaw()) {
+            entry.count++;
+        } else if (!entry.group) {
+            entry.group = obj->groupRaw();
+            entry.count = 1;
+        }
+    }
+}
+
+// Functor handed to CallTraceHook: forwards each reported edge to the
+// TenuringTracer for possible forwarding/moving.
+struct TenuringFunctor
+{
+    template <typename T>
+    void operator()(T* edge, TenuringTracer& mover) {
+        mover.traverse(edge);
+    }
+};
+
+// Visit all object children of the object and trace them.
+// For native objects this also covers markable dense elements and all
+// fixed/dynamic slots.
+void
+js::TenuringTracer::traceObject(JSObject* obj)
+{
+    NativeObject *nobj = CallTraceHook(TenuringFunctor(), this, obj,
+                                       CheckGeneration::NoChecks, *this);
+    // A null result means the trace hook handled everything (non-native
+    // object); nothing further to do.
+    if (!nobj)
+        return;
+
+    // Note: the contents of copy on write elements pointers are filled in
+    // during parsing and cannot contain nursery pointers.
+    if (!nobj->hasEmptyElements() &&
+        !nobj->denseElementsAreCopyOnWrite() &&
+        ObjectDenseElementsMayBeMarkable(nobj))
+    {
+        Value* elems = static_cast<HeapSlot*>(nobj->getDenseElements())->unsafeUnbarrieredForTracing();
+        traceSlots(elems, elems + nobj->getDenseInitializedLength());
+    }
+
+    traceObjectSlots(nobj, 0, nobj->slotSpan());
+}
+
+// Trace |length| slots of |nobj| starting at |start|, splitting the range
+// into its fixed-slot and dynamic-slot portions.
+void
+js::TenuringTracer::traceObjectSlots(NativeObject* nobj, uint32_t start, uint32_t length)
+{
+    HeapSlot* fixedStart;
+    HeapSlot* fixedEnd;
+    HeapSlot* dynStart;
+    HeapSlot* dynEnd;
+    nobj->getSlotRange(start, length, &fixedStart, &fixedEnd, &dynStart, &dynEnd);
+    if (fixedStart)
+        traceSlots(fixedStart->unsafeUnbarrieredForTracing(), fixedEnd->unsafeUnbarrieredForTracing());
+    if (dynStart)
+        traceSlots(dynStart->unsafeUnbarrieredForTracing(), dynEnd->unsafeUnbarrieredForTracing());
+}
+
+// Trace each value in the half-open range [vp, end).
+void
+js::TenuringTracer::traceSlots(Value* vp, Value* end)
+{
+    while (vp != end)
+        traverse(vp++);
+}
+
+#ifdef DEBUG
+// Distance in bytes from |ptr| to the trailing metadata of its chunk; used
+// to assert that copying an object cannot run past the chunk boundary.
+static inline ptrdiff_t
+OffsetToChunkEnd(void* ptr)
+{
+    uintptr_t offsetInChunk = uintptr_t(ptr) & gc::ChunkMask;
+    return ChunkLocationOffset - offsetInChunk;
+}
+#endif
+
+// Copy the cell contents of |src| into the already-allocated tenured cell
+// |dst| and move any out-of-line data (dynamic slots, elements, and
+// class-specific buffers). Returns the number of tenured-heap bytes now
+// attributed to the object, which can differ from its nursery footprint.
+size_t
+js::TenuringTracer::moveObjectToTenured(JSObject* dst, JSObject* src, AllocKind dstKind)
+{
+    size_t srcSize = Arena::thingSize(dstKind);
+    size_t tenuredSize = srcSize;
+
+    /*
+     * Arrays do not necessarily have the same AllocKind between src and dst.
+     * We deal with this by copying elements manually, possibly re-inlining
+     * them if there is adequate room inline in dst.
+     *
+     * For Arrays we're reducing tenuredSize to the smaller srcSize
+     * because moveElementsToTenured() accounts for all Array elements,
+     * even if they are inlined.
+     */
+    if (src->is<ArrayObject>()) {
+        tenuredSize = srcSize = sizeof(NativeObject);
+    } else if (src->is<TypedArrayObject>()) {
+        TypedArrayObject* tarray = &src->as<TypedArrayObject>();
+        // Typed arrays with inline data do not necessarily have the same
+        // AllocKind between src and dst. The nursery does not allocate an
+        // inline data buffer that has the same size as the slow path will do.
+        // In the slow path, the Typed Array Object stores the inline data
+        // in the allocated space that fits the AllocKind. In the fast path,
+        // the nursery will allocate another buffer that is directly behind the
+        // minimal JSObject. That buffer size plus the JSObject size is not
+        // necessarily as large as the slow path's AllocKind size.
+        if (tarray->hasInlineElements()) {
+            AllocKind srcKind = GetGCObjectKind(TypedArrayObject::FIXED_DATA_START);
+            size_t headerSize = Arena::thingSize(srcKind);
+            srcSize = headerSize + tarray->byteLength();
+        }
+    }
+
+    // Copy the Cell contents.
+    MOZ_ASSERT(OffsetToChunkEnd(src) >= ptrdiff_t(srcSize));
+    js_memcpy(dst, src, srcSize);
+
+    // Move any hash code attached to the object.
+    src->zone()->transferUniqueId(dst, src);
+
+    // Move the slots and elements, if we need to.
+    if (src->isNative()) {
+        NativeObject* ndst = &dst->as<NativeObject>();
+        NativeObject* nsrc = &src->as<NativeObject>();
+        tenuredSize += moveSlotsToTenured(ndst, nsrc, dstKind);
+        tenuredSize += moveElementsToTenured(ndst, nsrc, dstKind);
+
+        // The shape's list head may point into the old object. This can only
+        // happen for dictionaries, which are native objects.
+        if (&nsrc->shape_ == ndst->shape_->listp) {
+            MOZ_ASSERT(nsrc->shape_->inDictionary());
+            ndst->shape_->listp = &ndst->shape_;
+        }
+    }
+
+    // Classes with out-of-line data that the generic copy above cannot
+    // handle get a chance to fix themselves up here.
+    if (src->is<InlineTypedObject>()) {
+        InlineTypedObject::objectMovedDuringMinorGC(this, dst, src);
+    } else if (src->is<TypedArrayObject>()) {
+        tenuredSize += TypedArrayObject::objectMovedDuringMinorGC(this, dst, src, dstKind);
+    } else if (src->is<UnboxedArrayObject>()) {
+        tenuredSize += UnboxedArrayObject::objectMovedDuringMinorGC(this, dst, src, dstKind);
+    } else if (src->is<ArgumentsObject>()) {
+        tenuredSize += ArgumentsObject::objectMovedDuringMinorGC(this, dst, src);
+    } else if (src->is<ProxyObject>()) {
+        tenuredSize += ProxyObject::objectMovedDuringMinorGC(this, dst, src);
+    } else if (JSObjectMovedOp op = dst->getClass()->extObjectMovedOp()) {
+        op(dst, src);
+    } else if (src->getClass()->hasFinalize()) {
+        // Such objects need to be handled specially above to ensure any
+        // additional nursery buffers they hold are moved.
+        MOZ_RELEASE_ASSERT(CanNurseryAllocateFinalizedClass(src->getClass()));
+        MOZ_CRASH("Unhandled JSCLASS_SKIP_NURSERY_FINALIZE Class");
+    }
+
+    return tenuredSize;
+}
+
+// Move |src|'s dynamic slots out of the nursery into a malloc'd buffer
+// owned by |dst|. Returns the number of bytes of tenured (malloc) memory
+// this adds; zero when the slots were already malloc'd or are fixed.
+size_t
+js::TenuringTracer::moveSlotsToTenured(NativeObject* dst, NativeObject* src, AllocKind dstKind)
+{
+    /* Fixed slots have already been copied over. */
+    if (!src->hasDynamicSlots())
+        return 0;
+
+    // Slots already live outside the nursery: just stop tracking the buffer;
+    // dst inherits it via the memcpy done by the caller.
+    if (!nursery().isInside(src->slots_)) {
+        nursery().removeMallocedBuffer(src->slots_);
+        return 0;
+    }
+
+    Zone* zone = src->zone();
+    size_t count = src->numDynamicSlots();
+
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        dst->slots_ = zone->pod_malloc<HeapSlot>(count);
+        if (!dst->slots_)
+            oomUnsafe.crash(sizeof(HeapSlot) * count, "Failed to allocate slots while tenuring.");
+    }
+
+    PodCopy(dst->slots_, src->slots_, count);
+    // Leave a forwarding pointer so other edges into the old slots buffer
+    // can be redirected.
+    nursery().setSlotsForwardingPointer(src->slots_, dst->slots_, count);
+    return count * sizeof(HeapSlot);
+}
+
+// Move |src|'s elements out of the nursery, either inline into |dst| (for
+// arrays that fit) or into a malloc'd buffer. Returns the bytes of tenured
+// memory added; zero when there was nothing to move.
+size_t
+js::TenuringTracer::moveElementsToTenured(NativeObject* dst, NativeObject* src, AllocKind dstKind)
+{
+    if (src->hasEmptyElements() || src->denseElementsAreCopyOnWrite())
+        return 0;
+
+    Zone* zone = src->zone();
+    ObjectElements* srcHeader = src->getElementsHeader();
+    ObjectElements* dstHeader;
+
+    /* TODO Bug 874151: Prefer to put element data inline if we have space. */
+    if (!nursery().isInside(srcHeader)) {
+        // Elements already malloc'd: dst shares the buffer (copied by the
+        // caller's memcpy); just stop tracking it in the nursery.
+        MOZ_ASSERT(src->elements_ == dst->elements_);
+        nursery().removeMallocedBuffer(srcHeader);
+        return 0;
+    }
+
+    size_t nslots = ObjectElements::VALUES_PER_HEADER + srcHeader->capacity;
+
+    /* Unlike other objects, Arrays can have fixed elements. */
+    if (src->is<ArrayObject>() && nslots <= GetGCKindSlots(dstKind)) {
+        dst->as<ArrayObject>().setFixedElements();
+        dstHeader = dst->as<ArrayObject>().getElementsHeader();
+        js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
+        nursery().setElementsForwardingPointer(srcHeader, dstHeader, nslots);
+        return nslots * sizeof(HeapSlot);
+    }
+
+    MOZ_ASSERT(nslots >= 2);
+
+    {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        dstHeader = reinterpret_cast<ObjectElements*>(zone->pod_malloc<HeapSlot>(nslots));
+        if (!dstHeader) {
+            oomUnsafe.crash(sizeof(HeapSlot) * nslots,
+                            "Failed to allocate elements while tenuring.");
+        }
+    }
+
+    js_memcpy(dstHeader, srcHeader, nslots * sizeof(HeapSlot));
+    nursery().setElementsForwardingPointer(srcHeader, dstHeader, nslots);
+    dst->elements_ = dstHeader->elements();
+    return nslots * sizeof(HeapSlot);
+}
+
+
+/*** IsMarked / IsAboutToBeFinalized **************************************************************/
+
+// Compile- and debug-time validation for the IsMarked/IsAboutToBeFinalized
+// family: T must be one of the base cell layout types, the pointer must be
+// non-null, and the calling thread must be allowed to touch the runtime.
+template <typename T>
+static inline void
+CheckIsMarkedThing(T* thingp)
+{
+// Expands to one "IsSame<type*, T>::value ||" clause per trace kind.
+#define IS_SAME_TYPE_OR(name, type, _) mozilla::IsSame<type*, T>::value ||
+    static_assert(
+            JS_FOR_EACH_TRACEKIND(IS_SAME_TYPE_OR)
+            false, "Only the base cell layout types are allowed into marking/tracing internals");
+#undef IS_SAME_TYPE_OR
+
+#ifdef DEBUG
+    MOZ_ASSERT(thingp);
+    MOZ_ASSERT(*thingp);
+    JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
+    // Off-main-thread access is only legal for permanent atoms/symbols or
+    // while the runtime is sweeping.
+    MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp),
+                  CurrentThreadCanAccessRuntime(rt) ||
+                  (rt->isHeapCollecting() && rt->gc.state() == State::Sweep));
+#endif
+}
+
+// Shared mark test for tenured things. May update *thingp: if the zone is
+// being compacted and the thing has been relocated, the edge is forwarded
+// to the new location before the mark bit is consulted.
+template <typename T>
+static bool
+IsMarkedInternalCommon(T* thingp)
+{
+    CheckIsMarkedThing(thingp);
+    MOZ_ASSERT(!IsInsideNursery(*thingp));
+
+    // Things in zones that aren't collecting (or have finished) are
+    // trivially "marked" (alive).
+    Zone* zone = (*thingp)->asTenured().zoneFromAnyThread();
+    if (!zone->isCollectingFromAnyThread() || zone->isGCFinished())
+        return true;
+    if (zone->isGCCompacting() && IsForwarded(*thingp))
+        *thingp = Forwarded(*thingp);
+    return (*thingp)->asTenured().isMarked();
+}
+
+// Things owned by another runtime are never collected by this runtime's GC
+// and therefore always count as marked.
+template <typename T>
+static bool
+IsMarkedInternal(JSRuntime* rt, T** thingp)
+{
+    return IsOwnedByOtherRuntime(rt, *thingp) || IsMarkedInternalCommon(thingp);
+}
+
+// Object specialization: objects may live in the nursery, where "marked"
+// means "has been moved" — getForwardedPointer also updates the edge.
+template <>
+/* static */ bool
+IsMarkedInternal(JSRuntime* rt, JSObject** thingp)
+{
+    if (IsOwnedByOtherRuntime(rt, *thingp))
+        return true;
+
+    if (IsInsideNursery(*thingp)) {
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
+        return rt->gc.nursery.getForwardedPointer(thingp);
+    }
+    return IsMarkedInternalCommon(thingp);
+}
+
+// Unwraps a tagged value, runs the pointer-level mark test (storing the
+// result through |rv|), and rewraps the possibly-updated pointer.
+template <typename S>
+struct IsMarkedFunctor : public IdentityDefaultAdaptor<S> {
+    template <typename T> S operator()(T* thing, JSRuntime* rt, bool* rv) {
+        *rv = IsMarkedInternal(rt, &thing);
+        return js::gc::RewrapTaggedPointer<S, T>::wrap(thing);
+    }
+};
+
+// Tagged-value overload: dispatch on the dynamic type, write back the
+// (possibly forwarded) value, and report the mark state.
+template <typename T>
+static bool
+IsMarkedInternal(JSRuntime* rt, T* thingp)
+{
+    bool marked = true;
+    *thingp = DispatchTyped(IsMarkedFunctor<T>(), *thingp, rt, &marked);
+    return marked;
+}
+
+// A tenured cell in a sweeping zone dies iff it is unmarked, except that
+// cells allocated during the incremental GC survive this cycle.
+bool
+js::gc::IsAboutToBeFinalizedDuringSweep(TenuredCell& tenured)
+{
+    MOZ_ASSERT(!IsInsideNursery(&tenured));
+    MOZ_ASSERT(tenured.zoneFromAnyThread()->isGCSweeping());
+    return !tenured.arena()->allocatedDuringIncremental && !tenured.isMarked();
+}
+
+// Core "will this thing be finalized?" test. May update *thingp when the
+// thing has been relocated by a compacting GC. Returns false for anything
+// that is (or will be) kept alive.
+template <typename T>
+static bool
+IsAboutToBeFinalizedInternal(T** thingp)
+{
+    CheckIsMarkedThing(thingp);
+    T* thing = *thingp;
+    JSRuntime* rt = thing->runtimeFromAnyThread();
+
+    /* Permanent atoms are never finalized by non-owning runtimes. */
+    if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && !TlsPerThreadData.get()->associatedWith(rt))
+        return false;
+
+    // During a minor GC, a nursery thing dies iff it has no forwarding
+    // pointer (it was not moved to the tenured heap).
+    Nursery& nursery = rt->gc.nursery;
+    if (IsInsideNursery(thing)) {
+        MOZ_ASSERT(rt->isHeapMinorCollecting());
+        return !nursery.getForwardedPointer(reinterpret_cast<JSObject**>(thingp));
+    }
+
+    Zone* zone = thing->asTenured().zoneFromAnyThread();
+    if (zone->isGCSweeping()) {
+        return IsAboutToBeFinalizedDuringSweep(thing->asTenured());
+    } else if (zone->isGCCompacting() && IsForwarded(thing)) {
+        // Forward the edge to the relocated cell; the thing survives.
+        *thingp = Forwarded(thing);
+        return false;
+    }
+
+    return false;
+}
+
+// Unwraps a tagged value, runs the pointer-level finalization test (result
+// written through |rv|), and rewraps the possibly-updated pointer.
+template <typename S>
+struct IsAboutToBeFinalizedFunctor : public IdentityDefaultAdaptor<S> {
+    template <typename T> S operator()(T* thing, bool* rv) {
+        *rv = IsAboutToBeFinalizedInternal(&thing);
+        return js::gc::RewrapTaggedPointer<S, T>::wrap(thing);
+    }
+};
+
+// Tagged-value overload: dispatch on the dynamic type, write back the
+// (possibly forwarded) value, and report whether the thing is dying.
+template <typename T>
+static bool
+IsAboutToBeFinalizedInternal(T* thingp)
+{
+    bool dying = false;
+    *thingp = DispatchTyped(IsAboutToBeFinalizedFunctor<T>(), *thingp, &dying);
+    return dying;
+}
+
+namespace js {
+namespace gc {
+
+// Public wrappers over the Internal helpers above, one per barrier flavor.
+// Each strips the barrier wrapper and forwards to the common implementation.
+
+template <typename T>
+bool
+IsMarkedUnbarriered(JSRuntime* rt, T* thingp)
+{
+    return IsMarkedInternal(rt, ConvertToBase(thingp));
+}
+
+template <typename T>
+bool
+IsMarked(JSRuntime* rt, WriteBarrieredBase<T>* thingp)
+{
+    return IsMarkedInternal(rt, ConvertToBase(thingp->unsafeUnbarrieredForTracing()));
+}
+
+template <typename T>
+bool
+IsAboutToBeFinalizedUnbarriered(T* thingp)
+{
+    return IsAboutToBeFinalizedInternal(ConvertToBase(thingp));
+}
+
+template <typename T>
+bool
+IsAboutToBeFinalized(WriteBarrieredBase<T>* thingp)
+{
+    return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeUnbarrieredForTracing()));
+}
+
+template <typename T>
+bool
+IsAboutToBeFinalized(ReadBarrieredBase<T>* thingp)
+{
+    return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeUnbarrieredForTracing()));
+}
+
+// Public-API variant used for JS::Heap<T> edges held by embedders.
+template <typename T>
+JS_PUBLIC_API(bool)
+EdgeNeedsSweep(JS::Heap<T>* thingp)
+{
+    return IsAboutToBeFinalizedInternal(ConvertToBase(thingp->unsafeGet()));
+}
+
+// Instantiate a copy of the Tracing templates for each derived type.
+#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(type) \
+    template bool IsMarkedUnbarriered<type>(JSRuntime*, type*); \
+    template bool IsMarked<type>(JSRuntime*, WriteBarrieredBase<type>*); \
+    template bool IsAboutToBeFinalizedUnbarriered<type>(type*); \
+    template bool IsAboutToBeFinalized<type>(WriteBarrieredBase<type>*); \
+    template bool IsAboutToBeFinalized<type>(ReadBarrieredBase<type>*);
+#define INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS(type) \
+    template JS_PUBLIC_API(bool) EdgeNeedsSweep<type>(JS::Heap<type>*);
+FOR_EACH_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS)
+FOR_EACH_PUBLIC_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(INSTANTIATE_ALL_VALID_HEAP_TRACE_FUNCTIONS)
+#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS
+
+} /* namespace gc */
+} /* namespace js */
+
+
+/*** Cycle Collector Barrier Implementation *******************************************************/
+
+#ifdef DEBUG
+// Debug tracer asserting that no reachable tenured child is marked gray;
+// used below to validate that nursery cells point only at black things.
+struct AssertNonGrayTracer : public JS::CallbackTracer {
+    explicit AssertNonGrayTracer(JSRuntime* rt) : JS::CallbackTracer(rt) {}
+    void onChild(const JS::GCCellPtr& thing) override {
+        MOZ_ASSERT_IF(thing.asCell()->isTenured(),
+                      !thing.asCell()->asTenured().isMarked(js::gc::GRAY));
+    }
+};
+#endif
+
+// Tracer that recursively clears the gray mark bit; see the long comment
+// below for why gray cells must be unmarked when exposed to running JS.
+struct UnmarkGrayTracer : public JS::CallbackTracer
+{
+    /*
+     * We set weakMapAction to DoNotTraceWeakMaps because the cycle collector
+     * will fix up any color mismatches involving weakmaps when it runs.
+     */
+    explicit UnmarkGrayTracer(JSRuntime *rt, bool tracingShape = false)
+      : JS::CallbackTracer(rt, DoNotTraceWeakMaps)
+      , tracingShape(tracingShape)
+      , previousShape(nullptr)
+      , unmarkedAny(false)
+    {}
+
+    void onChild(const JS::GCCellPtr& thing) override;
+
+    /* True iff we are tracing the immediate children of a shape. */
+    bool tracingShape;
+
+    /* If tracingShape, shape child or nullptr. Otherwise, nullptr. */
+    Shape* previousShape;
+
+    /* Whether we unmarked anything. */
+    bool unmarkedAny;
+};
+
+/*
+ * The GC and CC are run independently. Consequently, the following sequence of
+ * events can occur:
+ * 1. GC runs and marks an object gray.
+ * 2. The mutator runs (specifically, some C++ code with access to gray
+ * objects) and creates a pointer from a JS root or other black object to
+ * the gray object. If we re-ran a GC at this point, the object would now be
+ * black.
+ * 3. Now we run the CC. It may think it can collect the gray object, even
+ * though it's reachable from the JS heap.
+ *
+ * To prevent this badness, we unmark the gray bit of an object when it is
+ * accessed by callers outside XPConnect. This would cause the object to go
+ * black in step 2 above. This must be done on everything reachable from the
+ * object being returned. The following code takes care of the recursive
+ * re-coloring.
+ *
+ * There is an additional complication for certain kinds of edges that are not
+ * contained explicitly in the source object itself, such as from a weakmap key
+ * to its value, and from an object being watched by a watchpoint to the
+ * watchpoint's closure. These "implicit edges" are represented in some other
+ * container object, such as the weakmap or the watchpoint itself. In these
+ * cases, calling unmark gray on an object won't find all of its children.
+ *
+ * Handling these implicit edges has two parts:
+ * - A special pass enumerating all of the containers that know about the
+ * implicit edges to fix any black-gray edges that have been created. This
+ * is implemented in nsXPConnect::FixWeakMappingGrayBits.
+ * - To prevent any incorrectly gray objects from escaping to live JS outside
+ * of the containers, we must add unmark-graying read barriers to these
+ * containers.
+ */
+// Unmark |thing| (and recursively its children) if it is gray. Shape chains
+// are handled iteratively via |previousShape| to bound C-stack depth; see
+// the comment above for the overall gray-unmarking rationale.
+void
+UnmarkGrayTracer::onChild(const JS::GCCellPtr& thing)
+{
+    int stackDummy;
+    JSContext* cx = runtime()->contextFromMainThread();
+    if (!JS_CHECK_STACK_SIZE(cx->nativeStackLimit[StackForSystemCode], &stackDummy)) {
+        /*
+         * If we run out of stack, we take a more drastic measure: require that
+         * we GC again before the next CC.
+         */
+        runtime()->setGCGrayBitsValid(false);
+        return;
+    }
+
+    Cell* cell = thing.asCell();
+
+    // Cells in the nursery cannot be gray, and therefore must necessarily point
+    // to only black edges.
+    if (!cell->isTenured()) {
+#ifdef DEBUG
+        AssertNonGrayTracer nongray(runtime());
+        TraceChildren(&nongray, cell, thing.kind());
+#endif
+        return;
+    }
+
+    TenuredCell& tenured = cell->asTenured();
+    if (!tenured.isMarked(js::gc::GRAY))
+        return;
+    tenured.unmark(js::gc::GRAY);
+
+    unmarkedAny = true;
+
+    // Trace children of |tenured|. If |tenured| and its parent are both
+    // shapes, |tenured| will get saved to mPreviousShape without being traced.
+    // The parent will later trace |tenured|. This is done to avoid increasing
+    // the stack depth during shape tracing. It is safe to do because a shape
+    // can only have one child that is a shape.
+    UnmarkGrayTracer childTracer(runtime(), thing.kind() == JS::TraceKind::Shape);
+
+    if (thing.kind() != JS::TraceKind::Shape) {
+        TraceChildren(&childTracer, &tenured, thing.kind());
+        MOZ_ASSERT(!childTracer.previousShape);
+        unmarkedAny |= childTracer.unmarkedAny;
+        return;
+    }
+
+    MOZ_ASSERT(thing.kind() == JS::TraceKind::Shape);
+    Shape* shape = static_cast<Shape*>(&tenured);
+    if (tracingShape) {
+        // Defer to the parent invocation's loop below instead of recursing.
+        MOZ_ASSERT(!previousShape);
+        previousShape = shape;
+        return;
+    }
+
+    // Walk the shape chain iteratively; each traceChildren may hand back at
+    // most one follow-on shape via previousShape.
+    do {
+        MOZ_ASSERT(!shape->isMarked(js::gc::GRAY));
+        shape->traceChildren(&childTracer);
+        shape = childTracer.previousShape;
+        childTracer.previousShape = nullptr;
+    } while (shape);
+    unmarkedAny |= childTracer.unmarkedAny;
+}
+
+// Unmark |t| itself (if tenured and gray) and then everything reachable
+// from it. Returns true if any gray bit was cleared.
+template <typename T>
+static bool
+TypedUnmarkGrayCellRecursively(T* t)
+{
+    MOZ_ASSERT(t);
+
+    JSRuntime* rt = t->runtimeFromMainThread();
+    // Gray unmarking must not race with an active GC or cycle collection.
+    MOZ_ASSERT(!rt->isHeapCollecting());
+    MOZ_ASSERT(!rt->isCycleCollecting());
+
+    bool unmarkedArg = false;
+    if (t->isTenured()) {
+        if (!t->asTenured().isMarked(GRAY))
+            return false;
+
+        t->asTenured().unmark(GRAY);
+        unmarkedArg = true;
+    }
+
+    UnmarkGrayTracer trc(rt);
+    gcstats::AutoPhase outerPhase(rt->gc.stats, gcstats::PHASE_BARRIER);
+    gcstats::AutoPhase innerPhase(rt->gc.stats, gcstats::PHASE_UNMARK_GRAY);
+    t->traceChildren(&trc);
+
+    return unmarkedArg || trc.unmarkedAny;
+}
+
+// Trace-kind dispatch shim for TypedUnmarkGrayCellRecursively.
+struct UnmarkGrayCellRecursivelyFunctor {
+    template <typename T> bool operator()(T* thing) { return TypedUnmarkGrayCellRecursively(thing); }
+};
+
+// Dynamically-typed entry point: dispatch on |kind| to the typed helper.
+bool
+js::UnmarkGrayCellRecursively(Cell* cell, JS::TraceKind kind)
+{
+    return DispatchTraceKindTyped(UnmarkGrayCellRecursivelyFunctor(), cell, kind);
+}
+
+// Shape-typed convenience wrapper (avoids the trace-kind dispatch).
+bool
+js::UnmarkGrayShapeRecursively(Shape* shape)
+{
+    return TypedUnmarkGrayCellRecursively(shape);
+}
+
+// Friend-API entry point for embedders (e.g. the cycle collector).
+JS_FRIEND_API(bool)
+JS::UnmarkGrayGCThingRecursively(JS::GCCellPtr thing)
+{
+    return js::UnmarkGrayCellRecursively(thing.asCell(), thing.kind());
+}
diff --git a/js/src/gc/Marking.h b/js/src/gc/Marking.h
new file mode 100644
index 000000000..ec4c69a2f
--- /dev/null
+++ b/js/src/gc/Marking.h
@@ -0,0 +1,477 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Marking_h
+#define gc_Marking_h
+
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Move.h"
+
+#include "jsfriendapi.h"
+
+#include "ds/OrderedHashTable.h"
+#include "gc/Heap.h"
+#include "gc/Tracer.h"
+#include "js/GCAPI.h"
+#include "js/HeapAPI.h"
+#include "js/SliceBudget.h"
+#include "js/TracingAPI.h"
+#include "vm/TaggedProto.h"
+
+class JSLinearString;
+class JSRope;
+namespace js {
+class BaseShape;
+class GCMarker;
+class LazyScript;
+class NativeObject;
+class ObjectGroup;
+class WeakMapBase;
+namespace gc {
+class Arena;
+} // namespace gc
+namespace jit {
+class JitCode;
+} // namespace jit
+
+static const size_t NON_INCREMENTAL_MARK_STACK_BASE_CAPACITY = 4096;
+static const size_t INCREMENTAL_MARK_STACK_BASE_CAPACITY = 32768;
+
+/*
+ * When the native stack is low, the GC does not call js::TraceChildren to mark
+ * the reachable "children" of the thing. Rather the thing is put aside and
+ * js::TraceChildren is called later with more space on the C stack.
+ *
+ * To implement such delayed marking of the children with minimal overhead for
+ * the normal case of sufficient native stack, the code adds a field per arena.
+ * The field markingDelay->link links all arenas with delayed things into a
+ * stack list with the pointer to stack top in GCMarker::unmarkedArenaStackTop.
+ * GCMarker::delayMarkingChildren adds arenas to the stack as necessary while
+ * markDelayedChildren pops the arenas from the stack until it empties.
+ */
+// A growable stack of tagged uintptr_t entries used by GCMarker to record
+// things whose children still need tracing. Starts at baseCapacity_ and can
+// grow on demand up to maxCapacity_; push failure is handled by the caller
+// (GCMarker falls back to delayed marking).
+class MarkStack
+{
+    friend class GCMarker;
+
+    uintptr_t* stack_;  // Start of the allocated buffer.
+    uintptr_t* tos_;    // One past the last pushed entry (top of stack).
+    uintptr_t* end_;    // One past the end of the allocated buffer.
+
+    // The capacity we start with and reset() to.
+    size_t baseCapacity_;
+    size_t maxCapacity_;
+
+  public:
+    explicit MarkStack(size_t maxCapacity)
+      : stack_(nullptr),
+        tos_(nullptr),
+        end_(nullptr),
+        baseCapacity_(0),
+        maxCapacity_(maxCapacity)
+    {}
+
+    ~MarkStack() {
+        js_free(stack_);
+    }
+
+    size_t capacity() { return end_ - stack_; }
+
+    ptrdiff_t position() const { return tos_ - stack_; }
+
+    // Adopt |stack| as the backing buffer, with the top of stack at index
+    // |tosIndex|. Does not free the previous buffer.
+    void setStack(uintptr_t* stack, size_t tosIndex, size_t capacity) {
+        stack_ = stack;
+        tos_ = stack + tosIndex;
+        end_ = stack + capacity;
+    }
+
+    MOZ_MUST_USE bool init(JSGCMode gcMode);
+
+    void setBaseCapacity(JSGCMode mode);
+    size_t maxCapacity() const { return maxCapacity_; }
+    void setMaxCapacity(size_t maxCapacity);
+
+    // Push a single entry, growing the stack if needed. Returns false if
+    // the stack is full and cannot be enlarged.
+    MOZ_MUST_USE bool push(uintptr_t item) {
+        if (tos_ == end_) {
+            if (!enlarge(1))
+                return false;
+        }
+        MOZ_ASSERT(tos_ < end_);
+        *tos_++ = item;
+        return true;
+    }
+
+    // Push three entries as a unit: on failure nothing is pushed.
+    MOZ_MUST_USE bool push(uintptr_t item1, uintptr_t item2, uintptr_t item3) {
+        uintptr_t* nextTos = tos_ + 3;
+        if (nextTos > end_) {
+            if (!enlarge(3))
+                return false;
+            nextTos = tos_ + 3;
+        }
+        MOZ_ASSERT(nextTos <= end_);
+        tos_[0] = item1;
+        tos_[1] = item2;
+        tos_[2] = item3;
+        tos_ = nextTos;
+        return true;
+    }
+
+    bool isEmpty() const {
+        return tos_ == stack_;
+    }
+
+    uintptr_t pop() {
+        MOZ_ASSERT(!isEmpty());
+        return *--tos_;
+    }
+
+    void reset();
+
+    /* Grow the stack, ensuring there is space for at least count elements. */
+    MOZ_MUST_USE bool enlarge(unsigned count);
+
+    void setGCMode(JSGCMode gcMode);
+
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+namespace gc {
+
+// Hash policy for the weak-keys table: keys are arbitrary GC cells, hashed
+// by cell address. The HashCodeScrambler parameter is intentionally unused.
+struct WeakKeyTableHashPolicy {
+    typedef JS::GCCellPtr Lookup;
+    static HashNumber hash(const Lookup& v, const mozilla::HashCodeScrambler&) {
+        return mozilla::HashGeneric(v.asCell());
+    }
+    static bool match(const JS::GCCellPtr& k, const Lookup& l) { return k == l; }
+    static bool isEmpty(const JS::GCCellPtr& v) { return !v; }
+    static void makeEmpty(JS::GCCellPtr* vp) { *vp = nullptr; }
+};
+
+// A (weakmap, key) pair recorded for later ephemeron marking: when |key|
+// becomes marked, the corresponding entry in |weakmap| must be processed.
+struct WeakMarkable {
+    WeakMapBase* weakmap;
+    JS::GCCellPtr key;
+
+    WeakMarkable(WeakMapBase* weakmapArg, JS::GCCellPtr keyArg)
+      : weakmap(weakmapArg), key(keyArg) {}
+};
+
+// All weakmap entries keyed by a given cell.
+using WeakEntryVector = Vector<WeakMarkable, 2, js::SystemAllocPolicy>;
+
+// Maps a cell to the weakmap entries that use it as a key.
+using WeakKeyTable = OrderedHashMap<JS::GCCellPtr,
+                                    WeakEntryVector,
+                                    WeakKeyTableHashPolicy,
+                                    js::SystemAllocPolicy>;
+
+} /* namespace gc */
+
+// The tracer that implements marking for the GC. Things to be traced are
+// pushed onto an internal MarkStack as tagged pointers; when a push fails
+// (stack full and cannot grow) the thing's arena is queued for delayed
+// marking instead. drainMarkStack processes the stack until empty or the
+// slice budget expires.
+class GCMarker : public JSTracer
+{
+  public:
+    explicit GCMarker(JSRuntime* rt);
+    MOZ_MUST_USE bool init(JSGCMode gcMode);
+
+    void setMaxCapacity(size_t maxCap) { stack.setMaxCapacity(maxCap); }
+    size_t maxCapacity() const { return stack.maxCapacity(); }
+
+    void start();
+    void stop();
+    void reset();
+
+    // Mark the given GC thing and traverse its children at some point.
+    template <typename T> void traverse(T thing);
+
+    // Calls traverse on target after making additional assertions.
+    template <typename S, typename T> void traverseEdge(S source, T* target);
+    template <typename S, typename T> void traverseEdge(S source, const T& target);
+
+    // Notes a weak graph edge for later sweeping.
+    template <typename T> void noteWeakEdge(T* edge);
+
+    /*
+     * Care must be taken changing the mark color from gray to black. The cycle
+     * collector depends on the invariant that there are no black to gray edges
+     * in the GC heap. This invariant lets the CC not trace through black
+     * objects. If this invariant is violated, the cycle collector may free
+     * objects that are still reachable.
+     */
+    void setMarkColorGray() {
+        MOZ_ASSERT(isDrained());
+        MOZ_ASSERT(color == gc::BLACK);
+        color = gc::GRAY;
+    }
+    void setMarkColorBlack() {
+        MOZ_ASSERT(isDrained());
+        MOZ_ASSERT(color == gc::GRAY);
+        color = gc::BLACK;
+    }
+    uint32_t markColor() const { return color; }
+
+    void enterWeakMarkingMode();
+    void leaveWeakMarkingMode();
+    // Leave weak marking mode and prevent re-entering it for the rest of
+    // this cycle (linearWeakMarkingDisabled_ stays set).
+    void abortLinearWeakMarking() {
+        leaveWeakMarkingMode();
+        linearWeakMarkingDisabled_ = true;
+    }
+
+    void delayMarkingArena(gc::Arena* arena);
+    void delayMarkingChildren(const void* thing);
+    void markDelayedChildren(gc::Arena* arena);
+    MOZ_MUST_USE bool markDelayedChildren(SliceBudget& budget);
+    bool hasDelayedChildren() const {
+        return !!unmarkedArenaStackTop;
+    }
+
+    // True when both the mark stack and the delayed-marking arena list are
+    // empty, i.e. there is no outstanding marking work.
+    bool isDrained() {
+        return isMarkStackEmpty() && !unmarkedArenaStackTop;
+    }
+
+    MOZ_MUST_USE bool drainMarkStack(SliceBudget& budget);
+
+    void setGCMode(JSGCMode mode) { stack.setGCMode(mode); }
+
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+#ifdef DEBUG
+    bool shouldCheckCompartments() { return strictCompartmentChecking; }
+#endif
+
+    void markEphemeronValues(gc::Cell* markedCell, gc::WeakEntryVector& entry);
+
+  private:
+#ifdef DEBUG
+    void checkZone(void* p);
+#else
+    void checkZone(void* p) {}
+#endif
+
+    /*
+     * We use a common mark stack to mark GC things of different types and use
+     * the explicit tags to distinguish them when it cannot be deduced from
+     * the context of push or pop operation.
+     */
+    enum StackTag {
+        ValueArrayTag,
+        ObjectTag,
+        GroupTag,
+        SavedValueArrayTag,
+        JitCodeTag,
+        ScriptTag,
+        LastTag = JitCodeTag
+    };
+
+    // Tags live in the low bits of an aligned pointer; see the asserts.
+    static const uintptr_t StackTagMask = 7;
+    static_assert(StackTagMask >= uintptr_t(LastTag), "The tag mask must subsume the tags.");
+    static_assert(StackTagMask <= gc::CellMask, "The tag mask must be embeddable in a Cell*.");
+
+    // Push an object onto the stack for later tracing and assert that it has
+    // already been marked.
+    void repush(JSObject* obj) {
+        MOZ_ASSERT(gc::TenuredCell::fromPointer(obj)->isMarked(markColor()));
+        pushTaggedPtr(ObjectTag, obj);
+    }
+
+    template <typename T> void markAndTraceChildren(T* thing);
+    template <typename T> void markAndPush(StackTag tag, T* thing);
+    template <typename T> void markAndScan(T* thing);
+    template <typename T> void markImplicitEdgesHelper(T oldThing);
+    template <typename T> void markImplicitEdges(T* oldThing);
+    void eagerlyMarkChildren(JSLinearString* str);
+    void eagerlyMarkChildren(JSRope* rope);
+    void eagerlyMarkChildren(JSString* str);
+    void eagerlyMarkChildren(LazyScript *thing);
+    void eagerlyMarkChildren(Shape* shape);
+    void eagerlyMarkChildren(Scope* scope);
+    void lazilyMarkChildren(ObjectGroup* group);
+
+    // We may not have concrete types yet, so this has to be outside the header.
+    template <typename T>
+    void dispatchToTraceChildren(T* thing);
+
+    // Mark the given GC thing, but do not trace its children. Return true
+    // if the thing became marked.
+    template <typename T>
+    MOZ_MUST_USE bool mark(T* thing);
+
+    // Tag |ptr| and push it; on push failure, fall back to delayed marking.
+    void pushTaggedPtr(StackTag tag, void* ptr) {
+        checkZone(ptr);
+        uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
+        MOZ_ASSERT(!(addr & StackTagMask));
+        if (!stack.push(addr | uintptr_t(tag)))
+            delayMarkingChildren(ptr);
+    }
+
+    void pushValueArray(JSObject* obj, HeapSlot* start, HeapSlot* end) {
+        checkZone(obj);
+
+        MOZ_ASSERT(start <= end);
+        uintptr_t tagged = reinterpret_cast<uintptr_t>(obj) | GCMarker::ValueArrayTag;
+        uintptr_t startAddr = reinterpret_cast<uintptr_t>(start);
+        uintptr_t endAddr = reinterpret_cast<uintptr_t>(end);
+
+        /*
+         * Push in the reverse order so obj will be on top. If we cannot push
+         * the array, we trigger delay marking for the whole object.
+         */
+        if (!stack.push(endAddr, startAddr, tagged))
+            delayMarkingChildren(obj);
+    }
+
+    bool isMarkStackEmpty() {
+        return stack.isEmpty();
+    }
+
+    MOZ_MUST_USE bool restoreValueArray(JSObject* obj, void** vpp, void** endp);
+    void saveValueRanges();
+    inline void processMarkStackTop(SliceBudget& budget);
+
+    /* The mark stack. Pointers in this stack are "gray" in the GC sense. */
+    MarkStack stack;
+
+    /* The color is only applied to objects and functions. */
+    uint32_t color;
+
+    /* Pointer to the top of the stack of arenas we are delaying marking on. */
+    js::gc::Arena* unmarkedArenaStackTop;
+
+    /*
+     * If the weakKeys table OOMs, disable the linear algorithm and fall back
+     * to iterating until the next GC.
+     */
+    bool linearWeakMarkingDisabled_;
+
+#ifdef DEBUG
+    /* Count of arenas that are currently in the stack. */
+    size_t markLaterArenas;
+
+    /* Assert that start and stop are called with correct ordering. */
+    bool started;
+
+    /*
+     * If this is true, all marked objects must belong to a compartment being
+     * GCed. This is used to look for compartment bugs.
+     */
+    bool strictCompartmentChecking;
+#endif // DEBUG
+};
+
+#ifdef DEBUG
+// Return true if this trace is happening on behalf of gray buffering during
+// the marking phase of incremental GC.
+bool
+IsBufferGrayRootsTracer(JSTracer* trc);
+#endif
+
+namespace gc {
+
+/*** Special Cases ***/
+
+void
+PushArena(GCMarker* gcmarker, Arena* arena);
+
+/*** Liveness ***/
+
+// Report whether a thing has been marked. Things which are in zones that are
+// not currently being collected or are owned by another runtime are always
+// reported as being marked.
+template <typename T>
+bool
+IsMarkedUnbarriered(JSRuntime* rt, T* thingp);
+
+// Report whether a thing has been marked. Things which are in zones that are
+// not currently being collected or are owned by another runtime are always
+// reported as being marked.
+template <typename T>
+bool
+IsMarked(JSRuntime* rt, WriteBarrieredBase<T>* thingp);
+
+template <typename T>
+bool
+IsAboutToBeFinalizedUnbarriered(T* thingp);
+
+template <typename T>
+bool
+IsAboutToBeFinalized(WriteBarrieredBase<T>* thingp);
+
+template <typename T>
+bool
+IsAboutToBeFinalized(ReadBarrieredBase<T>* thingp);
+
+bool
+IsAboutToBeFinalizedDuringSweep(TenuredCell& tenured);
+
+// Extract the collectable cell referenced by |v|, or nullptr when the value
+// does not hold a GC thing.
+inline Cell*
+ToMarkable(const Value& v)
+{
+    return v.isMarkable() ? (Cell*)v.toGCThing() : nullptr;
+}
+
+// Identity overload: a Cell* is already in markable form.
+inline Cell*
+ToMarkable(Cell* cell)
+{
+    return cell;
+}
+
+// Return true if the pointer is nullptr, or if it is a tagged pointer to
+// nullptr.
+MOZ_ALWAYS_INLINE bool
+IsNullTaggedPointer(void* p)
+{
+    return uintptr_t(p) <= LargestTaggedNullCellPointer;
+}
+
+// Wrap a GC thing pointer into a new Value or jsid. The type system enforces
+// that the thing pointer is a wrappable type.
+template <typename S, typename T>
+struct RewrapTaggedPointer{};
+// Generate a specialization that wraps a T* into an S via |method|,
+// optionally casting the pointer with |prefix| first.
+#define DECLARE_REWRAP(S, T, method, prefix) \
+    template <> struct RewrapTaggedPointer<S, T> { \
+        static S wrap(T* thing) { return method ( prefix thing ); } \
+    }
+DECLARE_REWRAP(JS::Value, JSObject, JS::ObjectOrNullValue, );
+DECLARE_REWRAP(JS::Value, JSString, JS::StringValue, );
+DECLARE_REWRAP(JS::Value, JS::Symbol, JS::SymbolValue, );
+DECLARE_REWRAP(jsid, JSString, NON_INTEGER_ATOM_TO_JSID, (JSAtom*));
+DECLARE_REWRAP(jsid, JS::Symbol, SYMBOL_TO_JSID, );
+DECLARE_REWRAP(js::TaggedProto, JSObject, js::TaggedProto, );
+#undef DECLARE_REWRAP
+
+// SFINAE helper: resolves to T only for cell types that are stored in a
+// Value as a PrivateGCThing, i.e. not objects, strings, or symbols (those
+// have dedicated Value representations above).
+template <typename T>
+struct IsPrivateGCThingInValue
+  : public mozilla::EnableIf<mozilla::IsBaseOf<Cell, T>::value &&
+                             !mozilla::IsBaseOf<JSObject, T>::value &&
+                             !mozilla::IsBaseOf<JSString, T>::value &&
+                             !mozilla::IsBaseOf<JS::Symbol, T>::value, T>
+{
+    static_assert(!mozilla::IsSame<Cell, T>::value && !mozilla::IsSame<TenuredCell, T>::value,
+                  "T must not be Cell or TenuredCell");
+};
+
+// Partial specialization for all remaining cell types: wrap them as a
+// PrivateGCThingValue.
+template <typename T>
+struct RewrapTaggedPointer<Value, T>
+{
+    static Value wrap(typename IsPrivateGCThingInValue<T>::Type* thing) {
+        return JS::PrivateGCThingValue(thing);
+    }
+};
+
+} /* namespace gc */
+
+// The return value indicates if anything was unmarked.
+bool
+UnmarkGrayShapeRecursively(Shape* shape);
+
+template<typename T>
+void
+CheckTracedThing(JSTracer* trc, T* thing);
+
+template<typename T>
+void
+CheckTracedThing(JSTracer* trc, T thing);
+
+} /* namespace js */
+
+#endif /* gc_Marking_h */
diff --git a/js/src/gc/Memory.cpp b/js/src/gc/Memory.cpp
new file mode 100644
index 000000000..26da75469
--- /dev/null
+++ b/js/src/gc/Memory.cpp
@@ -0,0 +1,901 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Memory.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/TaggedAnonymousMemory.h"
+
+#include "js/HeapAPI.h"
+#include "vm/Runtime.h"
+
+#if defined(XP_WIN)
+
+#include "jswin.h"
+#include <psapi.h>
+
+#elif defined(SOLARIS)
+
+#include <sys/mman.h>
+#include <unistd.h>
+
+#elif defined(XP_UNIX)
+
+#include <algorithm>
+#include <errno.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#endif
+
+namespace js {
+namespace gc {
+
+// The GC can only safely decommit memory when the page size of the
+// running process matches the compiled arena size.
+static size_t pageSize = 0;
+
+// The OS allocation granularity may not match the page size.
+static size_t allocGranularity = 0;
+
+#if defined(XP_UNIX)
+// The addresses handed out by mmap may grow up or down.
+static mozilla::Atomic<int, mozilla::Relaxed> growthDirection(0);
+#endif
+
+// Data from OOM crashes shows there may be up to 24 chunksized but unusable
+// chunks available in low memory situations. These chunks may all need to be
+// used up before we gain access to remaining *alignable* chunksized regions,
+// so we use a generous limit of 32 unusable chunks to ensure we reach them.
+static const int MaxLastDitchAttempts = 32;
+
+static void GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment);
+static void* MapAlignedPagesSlow(size_t size, size_t alignment);
+static void* MapAlignedPagesLastDitch(size_t size, size_t alignment);
+
+// Accessor for the cached OS page size (set by InitMemorySubsystem).
+size_t
+SystemPageSize()
+{
+    return pageSize;
+}
+
+// Decommit is only safe when the OS page size matches the compiled arena
+// size, since arenas are the granularity at which we decommit.
+static bool
+DecommitEnabled()
+{
+    return pageSize == ArenaSize;
+}
+
+/*
+ * This returns the offset of address p from the nearest aligned address at
+ * or below p - or alternatively, the number of unaligned bytes at the end of
+ * the region starting at p (as we assert that allocation size is an integer
+ * multiple of the alignment).
+ */
+static inline size_t
+OffsetFromAligned(void* p, size_t alignment)
+{
+    return uintptr_t(p) % alignment;
+}
+
+// Public wrapper exposing the static last-ditch allocation path
+// (presumably for testing -- see the Test prefix; confirm against callers).
+void*
+TestMapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    return MapAlignedPagesLastDitch(size, alignment);
+}
+
+
+#if defined(XP_WIN)
+
+// Cache the system page size and allocation granularity. Idempotent: only
+// queries the OS on the first call.
+void
+InitMemorySubsystem()
+{
+    if (pageSize == 0) {
+        SYSTEM_INFO sysinfo;
+        GetSystemInfo(&sysinfo);
+        pageSize = sysinfo.dwPageSize;
+        allocGranularity = sysinfo.dwAllocationGranularity;
+    }
+}
+
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+
+// Thin VirtualAlloc wrappers; |flags| is a MEM_* allocation type and |prot|
+// a PAGE_* protection constant. Both return nullptr on failure.
+static inline void*
+MapMemoryAt(void* desired, size_t length, int flags, int prot = PAGE_READWRITE)
+{
+    return VirtualAlloc(desired, length, flags, prot);
+}
+
+static inline void*
+MapMemory(size_t length, int flags, int prot = PAGE_READWRITE)
+{
+    return VirtualAlloc(nullptr, length, flags, prot);
+}
+
+// Map |size| bytes at an address aligned to |alignment|. Strategy: try a
+// plain allocation first (fast path), then one retry via GetNewChunk, then
+// the over-allocating slow path, and finally the last-ditch path. Returns
+// nullptr on failure.
+void*
+MapAlignedPages(size_t size, size_t alignment)
+{
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size >= allocGranularity);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
+    MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
+
+    void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
+
+    /* Special case: If we want allocation alignment, no further work is needed. */
+    if (alignment == allocGranularity)
+        return p;
+
+    // Note: also taken when p is nullptr, returning the failure directly.
+    if (OffsetFromAligned(p, alignment) == 0)
+        return p;
+
+    void* retainedAddr;
+    GetNewChunk(&p, &retainedAddr, size, alignment);
+    if (retainedAddr)
+        UnmapPages(retainedAddr, size);
+    if (p) {
+        if (OffsetFromAligned(p, alignment) == 0)
+            return p;
+        UnmapPages(p, size);
+    }
+
+    p = MapAlignedPagesSlow(size, alignment);
+    if (!p)
+        return MapAlignedPagesLastDitch(size, alignment);
+
+    MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
+    return p;
+}
+
+// Over-reserve a region large enough to contain an aligned sub-range, free
+// it, then immediately re-map exactly the aligned sub-range. Loops because
+// another thread can grab the address between the unmap and the remap.
+static void*
+MapAlignedPagesSlow(size_t size, size_t alignment)
+{
+    /*
+     * Windows requires that there be a 1:1 mapping between VM allocation
+     * and deallocation operations. Therefore, take care here to acquire the
+     * final result via one mapping operation. This means unmapping any
+     * preliminary result that is not correctly aligned.
+     */
+    void* p;
+    do {
+        /*
+         * Over-allocate in order to map a memory region that is definitely
+         * large enough, then deallocate and allocate again the correct size,
+         * within the over-sized mapping.
+         *
+         * Since we're going to unmap the whole thing anyway, the first
+         * mapping doesn't have to commit pages.
+         */
+        size_t reserveSize = size + alignment - pageSize;
+        p = MapMemory(reserveSize, MEM_RESERVE);
+        if (!p)
+            return nullptr;
+        void* chunkStart = (void*)AlignBytes(uintptr_t(p), alignment);
+        UnmapPages(p, reserveSize);
+        p = MapMemoryAt(chunkStart, size, MEM_COMMIT | MEM_RESERVE);
+
+        /* Failure here indicates a race with another thread, so try again. */
+    } while (!p);
+
+    return p;
+}
+
+/*
+ * In a low memory or high fragmentation situation, alignable chunks of the
+ * desired size may still be available, even if there are no more contiguous
+ * free chunks that meet the |size + alignment - pageSize| requirement of
+ * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
+ * by temporarily holding onto the unaligned parts of each chunk until the
+ * allocator gives us a chunk that either is, or can be aligned.
+ */
+static void*
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    // Each attempt parks the unaligned remainder of a chunk in tempMaps so
+    // the allocator is forced to hand out a different address next time.
+    void* tempMaps[MaxLastDitchAttempts];
+    int attempt = 0;
+    void* p = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
+    if (OffsetFromAligned(p, alignment) == 0)
+        return p;
+    for (; attempt < MaxLastDitchAttempts; ++attempt) {
+        GetNewChunk(&p, tempMaps + attempt, size, alignment);
+        if (OffsetFromAligned(p, alignment) == 0) {
+            if (tempMaps[attempt])
+                UnmapPages(tempMaps[attempt], size);
+            break;
+        }
+        if (!tempMaps[attempt])
+            break; /* Bail if GetNewChunk failed. */
+    }
+    // Give up if we still have no aligned chunk.
+    if (OffsetFromAligned(p, alignment)) {
+        UnmapPages(p, size);
+        p = nullptr;
+    }
+    // Release all the temporarily retained regions.
+    while (--attempt >= 0)
+        UnmapPages(tempMaps[attempt], size);
+    return p;
+}
+
+/*
+ * On Windows, map and unmap calls must be matched, so we deallocate the
+ * unaligned chunk, then reallocate the unaligned part to block off the
+ * old address and force the allocator to give us a new one.
+ */
+static void
+GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
+{
+    void* address = *aAddress;
+    void* retainedAddr = nullptr;
+    do {
+        size_t retainedSize;
+        size_t offset = OffsetFromAligned(address, alignment);
+        // Already aligned: nothing to retain.
+        if (!offset)
+            break;
+        UnmapPages(address, size);
+        retainedSize = alignment - offset;
+        retainedAddr = MapMemoryAt(address, retainedSize, MEM_RESERVE);
+        address = MapMemory(size, MEM_COMMIT | MEM_RESERVE);
+        /* If retainedAddr is null here, we raced with another thread. */
+    } while (!retainedAddr);
+    *aAddress = address;
+    *aRetainedAddr = retainedAddr;
+}
+
+void
+UnmapPages(void* p, size_t size)
+{
+    // VirtualFree with MEM_RELEASE requires length 0: the whole original
+    // reservation starting at |p| is released; |size| is unused.
+    MOZ_ALWAYS_TRUE(VirtualFree(p, 0, MEM_RELEASE));
+}
+
+// Tell the OS the page contents are no longer needed, without unmapping the
+// range. MEM_RESET keeps the address range reserved and committed.
+bool
+MarkPagesUnused(void* p, size_t size)
+{
+    if (!DecommitEnabled())
+        return true;
+
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+    LPVOID p2 = MapMemoryAt(p, size, MEM_RESET);
+    return p2 == p;
+}
+
+// Re-commit pages previously passed to MarkPagesUnused. With MEM_RESET
+// semantics there is nothing to do; pages become usable on next touch.
+void
+MarkPagesInUse(void* p, size_t size)
+{
+    if (!DecommitEnabled())
+        return;
+
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+}
+
+// Returns the process page-fault count, or 0 if it cannot be read.
+size_t
+GetPageFaultCount()
+{
+    PROCESS_MEMORY_COUNTERS pmc;
+    if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc)))
+        return 0;
+    return pmc.PageFaultCount;
+}
+
+// Map a copy-on-write (FILE_MAP_COPY) view of |length| bytes of the file
+// |fd| starting at |offset|. Returns nullptr on any failure. The returned
+// pointer is |offset - alignedOffset| bytes into a view that starts on an
+// allocation-granularity boundary.
+void*
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+{
+    MOZ_ASSERT(length && alignment);
+
+    // The allocation granularity and the requested offset
+    // must both be divisible by the requested alignment.
+    // Alignments larger than the allocation granularity are not supported.
+    if (allocGranularity % alignment != 0 || offset % alignment != 0)
+        return nullptr;
+
+    HANDLE hFile = reinterpret_cast<HANDLE>(intptr_t(fd));
+
+    // This call will fail if the file does not exist, which is what we want.
+    HANDLE hMap = CreateFileMapping(hFile, nullptr, PAGE_READONLY, 0, 0, nullptr);
+    if (!hMap)
+        return nullptr;
+
+    // Round the requested offset down to the allocation granularity, as
+    // required by MapViewOfFile, and extend the length to compensate.
+    size_t alignedOffset = offset - (offset % allocGranularity);
+    size_t alignedLength = length + (offset % allocGranularity);
+
+    DWORD offsetH = uint32_t(uint64_t(alignedOffset) >> 32);
+    DWORD offsetL = uint32_t(alignedOffset);
+
+    // If the offset or length are out of bounds, this call will fail.
+    uint8_t* map = static_cast<uint8_t*>(MapViewOfFile(hMap, FILE_MAP_COPY, offsetH,
+                                                       offsetL, alignedLength));
+
+    // This just decreases the file mapping object's internal reference count;
+    // it won't actually be destroyed until we unmap the associated view.
+    CloseHandle(hMap);
+
+    if (!map)
+        return nullptr;
+
+#ifdef DEBUG
+    // Zero out data before and after the desired mapping to catch errors early.
+    if (offset != alignedOffset)
+        memset(map, 0, offset - alignedOffset);
+    if (alignedLength % pageSize)
+        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
+#endif
+
+    return map + (offset - alignedOffset);
+}
+
+void
+DeallocateMappedContent(void* p, size_t /*length*/)
+{
+    if (!p)
+        return;
+
+    // Calculate the address originally returned by MapViewOfFile.
+    // This is needed because AllocateMappedContent returns a pointer
+    // that might be offset from the view, as the beginning of a
+    // view must be aligned with the allocation granularity.
+    uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
+    MOZ_ALWAYS_TRUE(UnmapViewOfFile(reinterpret_cast<void*>(map)));
+}
+
+# else // Various APIs are unavailable.
+
+// Fallback for Windows configurations where the VirtualAlloc family is
+// unavailable: use the CRT aligned heap instead of virtual memory.
+void*
+MapAlignedPages(size_t size, size_t alignment)
+{
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size >= allocGranularity);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
+    MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
+
+    void* p = _aligned_malloc(size, alignment);
+
+    MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
+    return p;
+}
+
+// No last-ditch strategy exists for the aligned-heap fallback.
+static void*
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    return nullptr;
+}
+
+// Must pair with _aligned_malloc above; |size| is unused.
+void
+UnmapPages(void* p, size_t size)
+{
+    _aligned_free(p);
+}
+
+// Decommit is not supported here; report success without doing anything.
+bool
+MarkPagesUnused(void* p, size_t size)
+{
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+    return true;
+}
+
+// Re-commit pages previously passed to MarkPagesUnused. Nothing to do with
+// the aligned-heap fallback beyond checking alignment.
+//
+// Bug fix: this was declared |bool| but fell off the end without returning
+// a value, which is undefined behavior in C++. Every other platform
+// implementation in this file (e.g. the desktop Windows one) declares this
+// function as returning void, so make this one void as well.
+void
+MarkPagesInUse(void* p, size_t size)
+{
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+}
+
+size_t
+GetPageFaultCount()
+{
+    // GetProcessMemoryInfo is unavailable.
+    return 0;
+}
+
+// File mapping is not supported on this configuration.
+void*
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+{
+    // Not implemented.
+    return nullptr;
+}
+
+// Deallocate mapped memory for object.
+void
+DeallocateMappedContent(void* p, size_t length)
+{
+    // Not implemented.
+}
+
+# endif
+
+#elif defined(SOLARIS)
+
+#ifndef MAP_NOSYNC
+# define MAP_NOSYNC 0
+#endif
+
+// Cache the page size; on Solaris the allocation granularity equals the
+// page size. Idempotent.
+void
+InitMemorySubsystem()
+{
+    if (pageSize == 0)
+        pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
+}
+
+// Solaris mmap honors MAP_ALIGN: the address argument is interpreted as the
+// requested alignment, so no realignment retry logic is needed.
+void*
+MapAlignedPages(size_t size, size_t alignment)
+{
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size >= allocGranularity);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
+    MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
+
+    int prot = PROT_READ | PROT_WRITE;
+    int flags = MAP_PRIVATE | MAP_ANON | MAP_ALIGN | MAP_NOSYNC;
+
+    void* p = mmap((caddr_t)alignment, size, prot, flags, -1, 0);
+    if (p == MAP_FAILED)
+        return nullptr;
+    return p;
+}
+
+// MAP_ALIGN makes a last-ditch strategy unnecessary.
+static void*
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    return nullptr;
+}
+
+void
+UnmapPages(void* p, size_t size)
+{
+    MOZ_ALWAYS_TRUE(0 == munmap((caddr_t)p, size));
+}
+
+// Decommit is not supported here; report success without doing anything.
+bool
+MarkPagesUnused(void* p, size_t size)
+{
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+    return true;
+}
+
+// Re-commit pages previously passed to MarkPagesUnused. Nothing to do on
+// Solaris beyond checking alignment.
+//
+// Bug fix: this was declared |bool| but contained a bare |return;|, which
+// is ill-formed in a value-returning C++ function. Every other platform
+// implementation in this file (e.g. the desktop Windows one) declares this
+// function as returning void, so make this one void as well.
+void
+MarkPagesInUse(void* p, size_t size)
+{
+    if (!DecommitEnabled())
+        return;
+
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+}
+
+// Page-fault statistics are not collected on Solaris.
+size_t
+GetPageFaultCount()
+{
+    return 0;
+}
+
+// File mapping is not supported on this configuration.
+void*
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+{
+    // Not implemented.
+    return nullptr;
+}
+
+// Deallocate mapped memory for object.
+void
+DeallocateMappedContent(void* p, size_t length)
+{
+    // Not implemented.
+}
+
+#elif defined(XP_UNIX)
+
+// Cache the page size; on Unix the allocation granularity equals the page
+// size. Idempotent.
+void
+InitMemorySubsystem()
+{
+    if (pageSize == 0)
+        pageSize = allocGranularity = size_t(sysconf(_SC_PAGESIZE));
+}
+
+// mmap |length| bytes at exactly |desired|. Returns nullptr (unmapping any
+// stray mapping) if the kernel placed the region elsewhere.
+static inline void*
+MapMemoryAt(void* desired, size_t length, int prot = PROT_READ | PROT_WRITE,
+            int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
+{
+#if defined(__ia64__) || (defined(__sparc64__) && defined(__NetBSD__)) || defined(__aarch64__)
+    // These targets require the high 17 bits of addresses to be clear.
+    MOZ_ASSERT((0xffff800000000000ULL & (uintptr_t(desired) + length - 1)) == 0);
+#endif
+    void* region = mmap(desired, length, prot, flags, fd, offset);
+    if (region == MAP_FAILED)
+        return nullptr;
+    /*
+     * mmap treats the given address as a hint unless the MAP_FIXED flag is
+     * used (which isn't usually what you want, as this overrides existing
+     * mappings), so check that the address we got is the address we wanted.
+     */
+    if (region != desired) {
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
+        return nullptr;
+    }
+    return region;
+}
+
+// mmap |length| bytes at any address, with platform-specific workarounds to
+// keep the high 17 bits of the returned address clear on ia64, NetBSD
+// sparc64, and aarch64. Returns nullptr on failure.
+static inline void*
+MapMemory(size_t length, int prot = PROT_READ | PROT_WRITE,
+          int flags = MAP_PRIVATE | MAP_ANON, int fd = -1, off_t offset = 0)
+{
+#if defined(__ia64__) || (defined(__sparc64__) && defined(__NetBSD__))
+    /*
+     * The JS engine assumes that all allocated pointers have their high 17 bits clear,
+     * which ia64's mmap doesn't support directly. However, we can emulate it by passing
+     * mmap an "addr" parameter with those bits clear. The mmap will return that address,
+     * or the nearest available memory above that address, providing a near-guarantee
+     * that those bits are clear. If they are not, we return nullptr below to indicate
+     * out-of-memory.
+     *
+     * The addr is chosen as 0x0000070000000000, which still allows about 120TB of virtual
+     * address space.
+     *
+     * See Bug 589735 for more information.
+     */
+    void* region = mmap((void*)0x0000070000000000, length, prot, flags, fd, offset);
+    if (region == MAP_FAILED)
+        return nullptr;
+    /*
+     * If the allocated memory doesn't have its upper 17 bits clear, consider it
+     * as out of memory.
+     */
+    if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
+        if (munmap(region, length))
+            MOZ_ASSERT(errno == ENOMEM);
+        return nullptr;
+    }
+    return region;
+#elif defined(__aarch64__)
+    /*
+     * There might be similar virtual address issue on arm64 which depends on
+     * hardware and kernel configurations. But the work around is slightly
+     * different due to the different mmap behavior.
+     *
+     * TODO: Merge with the above code block if this implementation works for
+     * ia64 and sparc64.
+     */
+    const uintptr_t start = UINT64_C(0x0000070000000000);
+    const uintptr_t end = UINT64_C(0x0000800000000000);
+    const uintptr_t step = ChunkSize;
+    /*
+     * Optimization options if there are too many retries in practice:
+     * 1. Examine /proc/self/maps to find an available address. This file is
+     *    not always available, however. In addition, even if we examine
+     *    /proc/self/maps, we may still need to retry several times due to
+     *    racing with other threads.
+     * 2. Use a global/static variable with lock to track the addresses we have
+     *    allocated or tried.
+     */
+    uintptr_t hint;
+    void* region = MAP_FAILED;
+    // Walk candidate hints in ChunkSize steps until a mapping lands below
+    // the 0x0000800000000000 boundary or the hint range is exhausted.
+    for (hint = start; region == MAP_FAILED && hint + length <= end; hint += step) {
+        region = mmap((void*)hint, length, prot, flags, fd, offset);
+        if (region != MAP_FAILED) {
+            if ((uintptr_t(region) + (length - 1)) & 0xffff800000000000) {
+                if (munmap(region, length)) {
+                    MOZ_ASSERT(errno == ENOMEM);
+                }
+                region = MAP_FAILED;
+            }
+        }
+    }
+    return region == MAP_FAILED ? nullptr : region;
+#else
+    // Common case: tagged anonymous mmap so the region shows up labeled as
+    // "js-gc-heap" in memory reports.
+    void* region = MozTaggedAnonymousMmap(nullptr, length, prot, flags, fd, offset, "js-gc-heap");
+    if (region == MAP_FAILED)
+        return nullptr;
+    return region;
+#endif
+}
+
+// Map |size| bytes at an address aligned to |alignment|. Mirrors the
+// Windows strategy: plain allocation first, one GetNewChunk retry, then the
+// over-allocating slow path, and finally the last-ditch path. Returns
+// nullptr on failure.
+void*
+MapAlignedPages(size_t size, size_t alignment)
+{
+    MOZ_ASSERT(size >= alignment);
+    MOZ_ASSERT(size >= allocGranularity);
+    MOZ_ASSERT(size % alignment == 0);
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_ASSERT_IF(alignment < allocGranularity, allocGranularity % alignment == 0);
+    MOZ_ASSERT_IF(alignment > allocGranularity, alignment % allocGranularity == 0);
+
+    void* p = MapMemory(size);
+
+    /* Special case: If we want page alignment, no further work is needed. */
+    if (alignment == allocGranularity)
+        return p;
+
+    // Note: also taken when p is nullptr, returning the failure directly.
+    if (OffsetFromAligned(p, alignment) == 0)
+        return p;
+
+    void* retainedAddr;
+    GetNewChunk(&p, &retainedAddr, size, alignment);
+    if (retainedAddr)
+        UnmapPages(retainedAddr, size);
+    if (p) {
+        if (OffsetFromAligned(p, alignment) == 0)
+            return p;
+        UnmapPages(p, size);
+    }
+
+    p = MapAlignedPagesSlow(size, alignment);
+    if (!p)
+        return MapAlignedPagesLastDitch(size, alignment);
+
+    MOZ_ASSERT(OffsetFromAligned(p, alignment) == 0);
+    return p;
+}
+
+static void*
+MapAlignedPagesSlow(size_t size, size_t alignment)
+{
+ /* Overallocate and unmap the region's edges. */
+ size_t reqSize = size + alignment - pageSize;
+ void* region = MapMemory(reqSize);
+ if (!region)
+ return nullptr;
+
+ void* regionEnd = (void*)(uintptr_t(region) + reqSize);
+ void* front;
+ void* end;
+ if (growthDirection <= 0) {
+ size_t offset = OffsetFromAligned(regionEnd, alignment);
+ end = (void*)(uintptr_t(regionEnd) - offset);
+ front = (void*)(uintptr_t(end) - size);
+ } else {
+ size_t offset = OffsetFromAligned(region, alignment);
+ front = (void*)(uintptr_t(region) + (offset ? alignment - offset : 0));
+ end = (void*)(uintptr_t(front) + size);
+ }
+
+ if (front != region)
+ UnmapPages(region, uintptr_t(front) - uintptr_t(region));
+ if (end != regionEnd)
+ UnmapPages(end, uintptr_t(regionEnd) - uintptr_t(end));
+
+ return front;
+}
+
+/*
+ * In a low memory or high fragmentation situation, alignable chunks of the
+ * desired size may still be available, even if there are no more contiguous
+ * free chunks that meet the |size + alignment - pageSize| requirement of
+ * MapAlignedPagesSlow. In this case, try harder to find an alignable chunk
+ * by temporarily holding onto the unaligned parts of each chunk until the
+ * allocator gives us a chunk that either is, or can be aligned.
+ */
+static void*
+MapAlignedPagesLastDitch(size_t size, size_t alignment)
+{
+    // Unaligned chunks retained during the search; all released on exit.
+    void* tempMaps[MaxLastDitchAttempts];
+    int attempt = 0;
+    void* p = MapMemory(size);
+    // Note: a nullptr |p| has offset 0 and is returned as the failure value.
+    if (OffsetFromAligned(p, alignment) == 0)
+        return p;
+    for (; attempt < MaxLastDitchAttempts; ++attempt) {
+        GetNewChunk(&p, tempMaps + attempt, size, alignment);
+        if (OffsetFromAligned(p, alignment) == 0) {
+            if (tempMaps[attempt])
+                UnmapPages(tempMaps[attempt], size);
+            break;
+        }
+        if (!tempMaps[attempt])
+            break; /* Bail if GetNewChunk failed. */
+    }
+    if (OffsetFromAligned(p, alignment)) {
+        UnmapPages(p, size);
+        p = nullptr;
+    }
+    /* Release every chunk we held onto during the search. */
+    while (--attempt >= 0)
+        UnmapPages(tempMaps[attempt], size);
+    return p;
+}
+
+/*
+ * mmap calls don't have to be matched with calls to munmap, so we can unmap
+ * just the pages we don't need. However, as we don't know a priori if addresses
+ * are handed out in increasing or decreasing order, we have to try both
+ * directions (depending on the environment, one will always fail).
+ */
+// On success *aAddress holds an (ideally alignable) chunk and *aRetainedAddr
+// is nullptr; otherwise *aRetainedAddr keeps the old unalignable chunk so the
+// caller can hold or release it, and *aAddress is a freshly mapped chunk (or
+// nullptr). growthDirection acts as a saturating confidence counter.
+static void
+GetNewChunk(void** aAddress, void** aRetainedAddr, size_t size, size_t alignment)
+{
+    void* address = *aAddress;
+    void* retainedAddr = nullptr;
+    bool addrsGrowDown = growthDirection <= 0;
+    int i = 0;
+    for (; i < 2; ++i) {
+        /* Try the direction indicated by growthDirection. */
+        if (addrsGrowDown) {
+            /* Extend the mapping downward to the aligned address below it. */
+            size_t offset = OffsetFromAligned(address, alignment);
+            void* head = (void*)((uintptr_t)address - offset);
+            void* tail = (void*)((uintptr_t)head + size);
+            if (MapMemoryAt(head, offset)) {
+                UnmapPages(tail, offset);
+                if (growthDirection >= -8)
+                    --growthDirection;
+                address = head;
+                break;
+            }
+        } else {
+            /* Extend the mapping upward to the aligned address above it. */
+            size_t offset = alignment - OffsetFromAligned(address, alignment);
+            void* head = (void*)((uintptr_t)address + offset);
+            void* tail = (void*)((uintptr_t)address + size);
+            if (MapMemoryAt(tail, offset)) {
+                UnmapPages(address, offset);
+                if (growthDirection <= 8)
+                    ++growthDirection;
+                address = head;
+                break;
+            }
+        }
+        /* If we're confident in the growth direction, don't try the other. */
+        if (growthDirection < -8 || growthDirection > 8)
+            break;
+        /* If that failed, try the opposite direction. */
+        addrsGrowDown = !addrsGrowDown;
+    }
+    /* If our current chunk cannot be aligned, see if the next one is aligned. */
+    if (OffsetFromAligned(address, alignment)) {
+        retainedAddr = address;
+        address = MapMemory(size);
+    }
+    *aAddress = address;
+    *aRetainedAddr = retainedAddr;
+}
+
+// Unmap a region previously mapped by MapMemory/MapAlignedPages.
+// munmap may legitimately fail with ENOMEM (splitting a mapping can exceed
+// the kernel's mapping limit); any other failure indicates a bug.
+void
+UnmapPages(void* p, size_t size)
+{
+    if (munmap(p, size))
+        MOZ_ASSERT(errno == ENOMEM);
+}
+
+// Hint to the OS that these pages need not be preserved (MADV_DONTNEED),
+// allowing it to reclaim the physical memory. Returns false when decommit
+// is disabled or madvise fails.
+bool
+MarkPagesUnused(void* p, size_t size)
+{
+    if (!DecommitEnabled())
+        return false;
+
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+    int result = madvise(p, size, MADV_DONTNEED);
+    return result != -1;
+}
+
+// Undo MarkPagesUnused. On POSIX this is a no-op beyond the alignment
+// check: MADV_DONTNEED pages are refaulted in automatically on first touch.
+void
+MarkPagesInUse(void* p, size_t size)
+{
+    if (!DecommitEnabled())
+        return;
+
+    MOZ_ASSERT(OffsetFromAligned(p, pageSize) == 0);
+}
+
+// Report the number of page faults incurred by this process, or 0 on error.
+// NOTE(review): this returns ru_majflt only, i.e. hard (major) faults; the
+// header comment in Memory.h advertises hard + soft faults.
+size_t
+GetPageFaultCount()
+{
+    struct rusage usage;
+    int err = getrusage(RUSAGE_SELF, &usage);
+    if (err)
+        return 0;
+    return usage.ru_majflt;
+}
+
+// Map |length| bytes of file |fd| starting at |offset|, returning a pointer
+// aligned to |alignment|. Because mmap requires an allocation-granularity-
+// aligned offset, the mapping may start before |offset|; the returned pointer
+// is adjusted into the mapping. Returns nullptr if the alignment cannot be
+// satisfied or the requested range lies outside the file.
+void*
+AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment)
+{
+    MOZ_ASSERT(length && alignment);
+
+    // The allocation granularity and the requested offset
+    // must both be divisible by the requested alignment.
+    // Alignments larger than the allocation granularity are not supported.
+    if (allocGranularity % alignment != 0 || offset % alignment != 0)
+        return nullptr;
+
+    // Sanity check the offset and size, as mmap does not do this for us.
+    struct stat st;
+    if (fstat(fd, &st) || offset >= uint64_t(st.st_size) || length > uint64_t(st.st_size) - offset)
+        return nullptr;
+
+    // Round the offset down to the allocation granularity and grow the
+    // mapping length by the same amount to keep the requested range covered.
+    size_t alignedOffset = offset - (offset % allocGranularity);
+    size_t alignedLength = length + (offset % allocGranularity);
+
+    uint8_t* map = static_cast<uint8_t*>(MapMemory(alignedLength, PROT_READ | PROT_WRITE,
+                                                   MAP_PRIVATE, fd, alignedOffset));
+    if (!map)
+        return nullptr;
+
+#ifdef DEBUG
+    // Zero out data before and after the desired mapping to catch errors early.
+    // The trailing write stays within the mapping because mmap rounds the
+    // mapped extent up to a whole page.
+    if (offset != alignedOffset)
+        memset(map, 0, offset - alignedOffset);
+    if (alignedLength % pageSize)
+        memset(map + alignedLength, 0, pageSize - (alignedLength % pageSize));
+#endif
+
+    return map + (offset - alignedOffset);
+}
+
+// Release a mapping created by AllocateMappedContent. |p| may be offset
+// into the mapping, so recover the true mmap base and length first.
+void
+DeallocateMappedContent(void* p, size_t length)
+{
+    if (!p)
+        return;
+
+    // Calculate the address originally returned by mmap.
+    // This is needed because AllocateMappedContent returns a pointer
+    // that might be offset from the mapping, as the beginning of a
+    // mapping must be aligned with the allocation granularity.
+    uintptr_t map = uintptr_t(p) - (uintptr_t(p) % allocGranularity);
+    size_t alignedLength = length + (uintptr_t(p) % allocGranularity);
+    UnmapPages(reinterpret_cast<void*>(map), alignedLength);
+}
+
+#else
+#error "Memory mapping functions are not defined for your OS."
+#endif
+
+// Make |size| bytes at |p| completely inaccessible (no read/write/exec).
+// Crashes on failure; the pages are expected to be currently read-write.
+void
+ProtectPages(void* p, size_t size)
+{
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_RELEASE_ASSERT(size > 0);
+    MOZ_RELEASE_ASSERT(p);
+#if defined(XP_WIN)
+    DWORD oldProtect;
+    if (!VirtualProtect(p, size, PAGE_NOACCESS, &oldProtect)) {
+        MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_NOACCESS) failed! Error code: %u",
+                                GetLastError());
+    }
+    MOZ_ASSERT(oldProtect == PAGE_READWRITE);
+#else  // assume Unix
+    if (mprotect(p, size, PROT_NONE))
+        MOZ_CRASH("mprotect(PROT_NONE) failed");
+#endif
+}
+
+// Downgrade |size| bytes at |p| from read-write to read-only.
+// Crashes on failure.
+void
+MakePagesReadOnly(void* p, size_t size)
+{
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_RELEASE_ASSERT(size > 0);
+    MOZ_RELEASE_ASSERT(p);
+#if defined(XP_WIN)
+    DWORD oldProtect;
+    if (!VirtualProtect(p, size, PAGE_READONLY, &oldProtect)) {
+        MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_READONLY) failed! Error code: %u",
+                                GetLastError());
+    }
+    MOZ_ASSERT(oldProtect == PAGE_READWRITE);
+#else  // assume Unix
+    if (mprotect(p, size, PROT_READ))
+        MOZ_CRASH("mprotect(PROT_READ) failed");
+#endif
+}
+
+// Restore read-write access to pages previously protected by ProtectPages
+// or MakePagesReadOnly. Crashes on failure.
+void
+UnprotectPages(void* p, size_t size)
+{
+    MOZ_ASSERT(size % pageSize == 0);
+    MOZ_RELEASE_ASSERT(size > 0);
+    MOZ_RELEASE_ASSERT(p);
+#if defined(XP_WIN)
+    DWORD oldProtect;
+    if (!VirtualProtect(p, size, PAGE_READWRITE, &oldProtect)) {
+        MOZ_CRASH_UNSAFE_PRINTF("VirtualProtect(PAGE_READWRITE) failed! Error code: %u",
+                                GetLastError());
+    }
+    MOZ_ASSERT(oldProtect == PAGE_NOACCESS || oldProtect == PAGE_READONLY);
+#else  // assume Unix
+    if (mprotect(p, size, PROT_READ | PROT_WRITE))
+        MOZ_CRASH("mprotect(PROT_READ | PROT_WRITE) failed");
+#endif
+}
+
+} // namespace gc
+} // namespace js
diff --git a/js/src/gc/Memory.h b/js/src/gc/Memory.h
new file mode 100644
index 000000000..4ff7bff4c
--- /dev/null
+++ b/js/src/gc/Memory.h
@@ -0,0 +1,53 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Memory_h
+#define gc_Memory_h
+
+#include <stddef.h>
+
+namespace js {
+namespace gc {
+
+// Sanity check that our compiled configuration matches the currently
+// running instance and initialize any runtime data needed for allocation.
+void InitMemorySubsystem();
+
+size_t SystemPageSize();
+
+// Allocate or deallocate pages from the system with the given alignment.
+void* MapAlignedPages(size_t size, size_t alignment);
+void UnmapPages(void* p, size_t size);
+
+// Tell the OS that the given pages are not in use, so they should not be
+// written to a paging file. This may be a no-op on some platforms.
+bool MarkPagesUnused(void* p, size_t size);
+
+// Undo |MarkPagesUnused|: tell the OS that the given pages are of interest
+// and should be paged in and out normally. This may be a no-op on some
+// platforms.
+void MarkPagesInUse(void* p, size_t size);
+
+// Returns the number of page faults incurred by the process, or 0 on error.
+// NOTE(review): the POSIX implementation counts only hard (major) faults
+// (ru_majflt), not soft faults.
+size_t GetPageFaultCount();
+
+// Allocate memory mapped content.
+// The offset must be aligned according to alignment requirement.
+void* AllocateMappedContent(int fd, size_t offset, size_t length, size_t alignment);
+
+// Deallocate memory mapped content.
+void DeallocateMappedContent(void* p, size_t length);
+
+// Test hook exposing the last-ditch aligned-allocation path.
+void* TestMapAlignedPagesLastDitch(size_t size, size_t alignment);
+
+// Change page protections; all of these crash on failure.
+void ProtectPages(void* p, size_t size);
+void MakePagesReadOnly(void* p, size_t size);
+void UnprotectPages(void* p, size_t size);
+
+} // namespace gc
+} // namespace js
+
+#endif /* gc_Memory_h */
diff --git a/js/src/gc/MemoryProfiler.cpp b/js/src/gc/MemoryProfiler.cpp
new file mode 100644
index 000000000..a061f9303
--- /dev/null
+++ b/js/src/gc/MemoryProfiler.cpp
@@ -0,0 +1,49 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "jscntxt.h"
+#include "jsfriendapi.h"
+
+#include "vm/Runtime.h"
+
+using js::gc::Cell;
+
+mozilla::Atomic<uint32_t, mozilla::Relaxed> MemProfiler::sActiveProfilerCount;
+NativeProfiler* MemProfiler::sNativeProfiler;
+
+// Look up the GC-heap profiler for the runtime owning the cell at |addr|.
+// |addr| must point at a GC Cell so its runtime can be recovered.
+GCHeapProfiler*
+MemProfiler::GetGCHeapProfiler(void* addr)
+{
+    JSRuntime* runtime = reinterpret_cast<Cell*>(addr)->runtimeFromAnyThread();
+    return runtime->gc.mMemProfiler.mGCHeapProfiler;
+}
+
+// Look up the GC-heap profiler registered on |runtime| (may be null).
+GCHeapProfiler*
+MemProfiler::GetGCHeapProfiler(JSRuntime* runtime)
+{
+    return runtime->gc.mMemProfiler.mGCHeapProfiler;
+}
+
+// Friend-API accessor for the context's memory profiler.
+JS_FRIEND_API(MemProfiler*)
+MemProfiler::GetMemProfiler(JSContext* context)
+{
+    return &context->gc.mMemProfiler;
+}
+
+// Activate GC-heap profiling with the given profiler. All JIT code is
+// released first; presumably so subsequent allocations are observable from
+// their start — TODO confirm against callers.
+JS_FRIEND_API(void)
+MemProfiler::start(GCHeapProfiler* aGCHeapProfiler)
+{
+    ReleaseAllJITCode(mRuntime->defaultFreeOp());
+    mGCHeapProfiler = aGCHeapProfiler;
+    sActiveProfilerCount++;
+}
+
+// Deactivate GC-heap profiling and drop the profiler reference.
+JS_FRIEND_API(void)
+MemProfiler::stop()
+{
+    sActiveProfilerCount--;
+    mGCHeapProfiler = nullptr;
+}
diff --git a/js/src/gc/Nursery-inl.h b/js/src/gc/Nursery-inl.h
new file mode 100644
index 000000000..8ebfd8232
--- /dev/null
+++ b/js/src/gc/Nursery-inl.h
@@ -0,0 +1,88 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=4 sw=4 et tw=79 ft=cpp:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_inl_h
+#define gc_Nursery_inl_h
+
+#include "gc/Nursery.h"
+
+#include "jscntxt.h"
+
+#include "gc/Heap.h"
+#include "gc/Zone.h"
+#include "js/TracingAPI.h"
+#include "vm/Runtime.h"
+
+// If the nursery object *ref has already been tenured, update *ref to its
+// new (forwarded) location and return true; otherwise leave it untouched
+// and return false. *ref must point into the nursery.
+MOZ_ALWAYS_INLINE bool
+js::Nursery::getForwardedPointer(JSObject** ref) const
+{
+    MOZ_ASSERT(ref);
+    MOZ_ASSERT(isInside((void*)*ref));
+    const gc::RelocationOverlay* overlay = reinterpret_cast<const gc::RelocationOverlay*>(*ref);
+    if (!overlay->isForwarded())
+        return false;
+    *ref = static_cast<JSObject*>(overlay->forwardingAddress());
+    return true;
+}
+
+namespace js {
+
+// The allocation methods below will not run the garbage collector. If the
+// nursery cannot accommodate the allocation, the malloc heap will be used
+// instead.
+
+// Allocate a buffer of |count| T's for an object, preferring the nursery
+// when running on the main thread; falls back to the zone's malloc heap
+// off-thread. Reports OOM on a null nursery allocation.
+// NOTE(review): |count * sizeof(T)| is not overflow-checked here — callers
+// presumably bound |count|; verify at call sites.
+template <typename T>
+static inline T*
+AllocateObjectBuffer(ExclusiveContext* cx, uint32_t count)
+{
+    if (cx->isJSContext()) {
+        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
+        // Round up to Value size to keep nursery allocations cell-aligned.
+        size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+        T* buffer = static_cast<T*>(nursery.allocateBuffer(cx->zone(), nbytes));
+        if (!buffer)
+            ReportOutOfMemory(cx);
+        return buffer;
+    }
+    return cx->zone()->pod_malloc<T>(count);
+}
+
+// As above, but associates the buffer with |obj|: tenured objects get a
+// malloc buffer, nursery objects get a nursery (or tracked malloc) buffer.
+template <typename T>
+static inline T*
+AllocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, uint32_t count)
+{
+    if (cx->isJSContext()) {
+        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
+        size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
+        T* buffer = static_cast<T*>(nursery.allocateBuffer(obj, nbytes));
+        if (!buffer)
+            ReportOutOfMemory(cx);
+        return buffer;
+    }
+    return obj->zone()->pod_malloc<T>(count);
+}
+
+// If this returns null then the old buffer will be left alone.
+// Grows (or logically shrinks) an object buffer, delegating to the nursery
+// when the object lives there and to pod_realloc otherwise.
+template <typename T>
+static inline T*
+ReallocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, T* oldBuffer,
+                       uint32_t oldCount, uint32_t newCount)
+{
+    if (cx->isJSContext()) {
+        Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
+        T* buffer = static_cast<T*>(nursery.reallocateBuffer(obj, oldBuffer,
+                                                             oldCount * sizeof(T),
+                                                             newCount * sizeof(T)));
+        if (!buffer)
+            ReportOutOfMemory(cx);
+        return buffer;
+    }
+    return obj->zone()->pod_realloc<T>(oldBuffer, oldCount, newCount);
+}
+
+} // namespace js
+
+#endif /* gc_Nursery_inl_h */
diff --git a/js/src/gc/Nursery.cpp b/js/src/gc/Nursery.cpp
new file mode 100644
index 000000000..2c402fe0b
--- /dev/null
+++ b/js/src/gc/Nursery.cpp
@@ -0,0 +1,1025 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Nursery-inl.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Move.h"
+#include "mozilla/Unused.h"
+
+#include "jscompartment.h"
+#include "jsfriendapi.h"
+#include "jsgc.h"
+#include "jsutil.h"
+
+#include "gc/GCInternals.h"
+#include "gc/Memory.h"
+#include "jit/JitFrames.h"
+#include "vm/ArrayObject.h"
+#include "vm/Debugger.h"
+#if defined(DEBUG)
+#include "vm/EnvironmentObject.h"
+#endif
+#include "vm/Time.h"
+#include "vm/TypedArrayObject.h"
+#include "vm/TypeInference.h"
+
+#include "jsobjinlines.h"
+
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace gc;
+
+using mozilla::ArrayLength;
+using mozilla::DebugOnly;
+using mozilla::PodCopy;
+using mozilla::PodZero;
+
+static const uintptr_t CanaryMagicValue = 0xDEADB15D;
+
+// Background task that frees the nursery's malloced buffers off the main
+// thread after a minor GC. The destructor joins to ensure no buffer is
+// freed after the task object dies.
+struct js::Nursery::FreeMallocedBuffersTask : public GCParallelTask
+{
+    explicit FreeMallocedBuffersTask(FreeOp* fop) : fop_(fop) {}
+    bool init() { return buffers_.init(); }
+    // Take ownership of the buffers in |buffersToFree|; requires the helper
+    // thread lock.
+    void transferBuffersToFree(MallocedBuffersSet& buffersToFree,
+                               const AutoLockHelperThreadState& lock);
+    ~FreeMallocedBuffersTask() override { join(); }
+
+  private:
+    FreeOp* fop_;
+    MallocedBuffersSet buffers_;
+
+    virtual void run() override;
+};
+
+// Node in a singly-linked list of callbacks to run when the nursery is
+// swept. The padding keeps the struct the same size on 32- and 64-bit
+// builds — presumably so it fits a fixed allocation slot; confirm at the
+// allocation site.
+struct js::Nursery::SweepAction
+{
+    SweepAction(SweepThunk thunk, void* data, SweepAction* next)
+      : thunk(thunk), data(data), next(next)
+    {}
+
+    SweepThunk thunk;
+    void* data;
+    SweepAction* next;
+
+#if JS_BITS_PER_WORD == 32
+  protected:
+    uint32_t padding;
+#endif
+};
+
+#ifdef JS_GC_ZEAL
+// Debug canary written after each nursery allocation (ZealMode::CheckNursery)
+// and verified at the next minor GC to detect overruns.
+struct js::Nursery::Canary
+{
+    uintptr_t magicValue;
+    Canary* next;
+};
+#endif
+
+// Fill the whole chunk with |poison| (debug builds) and initialize its
+// trailer for use by the nursery.
+inline void
+js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
+{
+    JS_POISON(this, poison, ChunkSize);
+    init(rt);
+}
+
+// Construct the chunk trailer in place, linking the chunk to the runtime's
+// store buffer.
+inline void
+js::Nursery::NurseryChunk::init(JSRuntime* rt)
+{
+    new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
+}
+
+// Reinterpret a tenured-heap Chunk as a NurseryChunk (same layout).
+/* static */ inline js::Nursery::NurseryChunk*
+js::Nursery::NurseryChunk::fromChunk(Chunk* chunk)
+{
+    return reinterpret_cast<NurseryChunk*>(chunk);
+}
+
+// Convert this nursery chunk back into a tenured-heap Chunk, reinitializing
+// the Chunk header for the tenured allocator.
+inline Chunk*
+js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
+{
+    auto chunk = reinterpret_cast<Chunk*>(this);
+    chunk->init(rt);
+    return chunk;
+}
+
+// Construct a disabled nursery; init() must be called before use.
+js::Nursery::Nursery(JSRuntime* rt)
+  : runtime_(rt)
+  , position_(0)
+  , currentStartChunk_(0)
+  , currentStartPosition_(0)
+  , currentEnd_(0)
+  , currentChunk_(0)
+  , maxNurseryChunks_(0)
+  , previousPromotionRate_(0)
+  , profileThreshold_(0)
+  , enableProfiling_(false)
+  , reportTenurings_(0)
+  , minorGcCount_(0)
+  , freeMallocedBuffersTask(nullptr)
+  , sweepActions_(nullptr)
+#ifdef JS_GC_ZEAL
+  , lastCanary_(nullptr)
+#endif
+{}
+
+// One-time nursery setup: size the chunk budget, create side tables and the
+// background free task, map the first chunk, parse profiling environment
+// variables, and enable the store buffer. Returns false on OOM.
+bool
+js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
+{
+    /* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
+    maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;
+
+    /* If no chunks are specified then the nursery is permanently disabled. */
+    if (maxNurseryChunks_ == 0)
+        return true;
+
+    if (!mallocedBuffers.init())
+        return false;
+
+    if (!cellsWithUid_.init())
+        return false;
+
+    freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
+    if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
+        return false;
+
+    AutoMaybeStartBackgroundAllocation maybeBgAlloc;
+    updateNumChunksLocked(1, maybeBgAlloc, lock);
+    if (numChunks() == 0)
+        return false;
+
+    setCurrentChunk(0);
+    setStartPosition();
+
+    // JS_GC_PROFILE_NURSERY=N reports minor GCs taking at least N us.
+    char* env = getenv("JS_GC_PROFILE_NURSERY");
+    if (env) {
+        if (0 == strcmp(env, "help")) {
+            fprintf(stderr, "JS_GC_PROFILE_NURSERY=N\n"
+                    "\tReport minor GC's taking at least N microseconds.\n");
+            exit(0);
+        }
+        enableProfiling_ = true;
+        profileThreshold_ = atoi(env);
+    }
+
+    // JS_GC_REPORT_TENURING=N reports groups with >= N tenured instances.
+    env = getenv("JS_GC_REPORT_TENURING");
+    if (env) {
+        if (0 == strcmp(env, "help")) {
+            fprintf(stderr, "JS_GC_REPORT_TENURING=N\n"
+                    "\tAfter a minor GC, report any ObjectGroups with at least N instances tenured.\n");
+            exit(0);
+        }
+        reportTenurings_ = atoi(env);
+    }
+
+    PodZero(&startTimes_);
+    PodZero(&profileTimes_);
+    PodZero(&totalTimes_);
+
+    if (!runtime()->gc.storeBuffer.enable())
+        return false;
+
+    MOZ_ASSERT(isEnabled());
+    return true;
+}
+
+// Tear down: release chunks via disable() and join/delete the background
+// buffer-freeing task.
+js::Nursery::~Nursery()
+{
+    disable();
+    js_delete(freeMallocedBuffersTask);
+}
+
+// (Re-)enable an empty nursery: map the first chunk, reset the allocation
+// cursor, and turn the store buffer on. No-op if already enabled; silently
+// stays disabled if no chunk could be mapped.
+void
+js::Nursery::enable()
+{
+    MOZ_ASSERT(isEmpty());
+    MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
+    if (isEnabled())
+        return;
+
+    updateNumChunks(1);
+    if (numChunks() == 0)
+        return;
+
+    setCurrentChunk(0);
+    setStartPosition();
+#ifdef JS_GC_ZEAL
+    if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+        enterZealMode();
+#endif
+
+    MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
+    return;
+}
+
+// Disable an empty nursery: release all chunks (zeroing currentEnd_ so
+// allocation fails) and turn off the store buffer.
+void
+js::Nursery::disable()
+{
+    MOZ_ASSERT(isEmpty());
+    if (!isEnabled())
+        return;
+    updateNumChunks(0);
+    currentEnd_ = 0;
+    runtime()->gc.storeBuffer.disable();
+}
+
+// True if nothing has been allocated since the last collection (or the
+// nursery is disabled). Outside the generational-GC zeal mode the start
+// position is always the beginning of chunk 0.
+bool
+js::Nursery::isEmpty() const
+{
+    MOZ_ASSERT(runtime_);
+    if (!isEnabled())
+        return true;
+
+    if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
+        MOZ_ASSERT(currentStartChunk_ == 0);
+        MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
+    }
+    return position() == currentStartPosition_;
+}
+
+#ifdef JS_GC_ZEAL
+// Grow the nursery to its maximum size for generational-GC zeal testing.
+void
+js::Nursery::enterZealMode() {
+    if (isEnabled())
+        updateNumChunks(maxNurseryChunks_);
+}
+
+// Leave zeal mode: reset the allocation cursor to chunk 0; requires the
+// nursery to be empty.
+void
+js::Nursery::leaveZealMode() {
+    if (isEnabled()) {
+        MOZ_ASSERT(isEmpty());
+        setCurrentChunk(0);
+        setStartPosition();
+    }
+}
+#endif // JS_GC_ZEAL
+
+// Allocate a JSObject of |size| bytes in the nursery, plus |numDynamic|
+// out-of-line slots. Returns nullptr if either allocation fails; a
+// partially-allocated object is safe to abandon because the nursery never
+// visits unreached allocations.
+JSObject*
+js::Nursery::allocateObject(JSContext* cx, size_t size, size_t numDynamic, const js::Class* clasp)
+{
+    /* Ensure there's enough space to replace the contents with a RelocationOverlay. */
+    MOZ_ASSERT(size >= sizeof(RelocationOverlay));
+
+    /* Sanity check the finalizer. */
+    MOZ_ASSERT_IF(clasp->hasFinalize(), CanNurseryAllocateFinalizedClass(clasp) ||
+                                        clasp->isProxy());
+
+    /* Make the object allocation. */
+    JSObject* obj = static_cast<JSObject*>(allocate(size));
+    if (!obj)
+        return nullptr;
+
+    /* If we want external slots, add them. */
+    HeapSlot* slots = nullptr;
+    if (numDynamic) {
+        MOZ_ASSERT(clasp->isNative() || clasp->isProxy());
+        slots = static_cast<HeapSlot*>(allocateBuffer(cx->zone(), numDynamic * sizeof(HeapSlot)));
+        if (!slots) {
+            /*
+             * It is safe to leave the allocated object uninitialized, since we
+             * do not visit unallocated things in the nursery.
+             */
+            return nullptr;
+        }
+    }
+
+    /* Always initialize the slots field to match the JIT behavior. */
+    obj->setInitialSlotsMaybeNonNative(slots);
+
+    TraceNurseryAlloc(obj, size);
+    return obj;
+}
+
+// Bump-allocate |size| bytes (cell-aligned) from the current chunk, moving
+// to the next chunk when full. Returns nullptr when the nursery is
+// exhausted — the caller triggers a minor GC or falls back to malloc.
+// Under ZealMode::CheckNursery a canary is appended after each allocation.
+void*
+js::Nursery::allocate(size_t size)
+{
+    MOZ_ASSERT(isEnabled());
+    MOZ_ASSERT(!runtime()->isHeapBusy());
+    MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
+    MOZ_ASSERT(position() % gc::CellSize == 0);
+    MOZ_ASSERT(size % gc::CellSize == 0);
+
+#ifdef JS_GC_ZEAL
+    static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
+    if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
+        size += CanarySize;
+#endif
+
+    /* Advance to the next chunk if this one cannot hold the allocation. */
+    if (currentEnd() < position() + size) {
+        if (currentChunk_ + 1 == numChunks())
+            return nullptr;
+        setCurrentChunk(currentChunk_ + 1);
+    }
+
+    void* thing = (void*)position();
+    position_ = position() + size;
+
+    JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);
+
+#ifdef JS_GC_ZEAL
+    if (runtime()->gc.hasZealMode(ZealMode::CheckNursery)) {
+        /* Thread the new canary onto the list checked at the next minor GC. */
+        auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
+        canary->magicValue = CanaryMagicValue;
+        canary->next = nullptr;
+        if (lastCanary_) {
+            MOZ_ASSERT(!lastCanary_->next);
+            lastCanary_->next = canary;
+        }
+        lastCanary_ = canary;
+    }
+#endif
+
+    MemProfiler::SampleNursery(reinterpret_cast<void*>(thing), size);
+    return thing;
+}
+
+// Allocate a raw buffer for a nursery object. Small buffers come from the
+// nursery itself; large ones (or on nursery exhaustion) come from malloc
+// and are registered in mallocedBuffers so sweep can free them.
+void*
+js::Nursery::allocateBuffer(Zone* zone, size_t nbytes)
+{
+    MOZ_ASSERT(nbytes > 0);
+
+    if (nbytes <= MaxNurseryBufferSize) {
+        void* buffer = allocate(nbytes);
+        if (buffer)
+            return buffer;
+    }
+
+    void* buffer = zone->pod_malloc<uint8_t>(nbytes);
+    if (buffer && !mallocedBuffers.putNew(buffer)) {
+        // Tracking failed: free immediately rather than leak at sweep time.
+        js_free(buffer);
+        return nullptr;
+    }
+    return buffer;
+}
+
+// Allocate a buffer on behalf of |obj|: tenured objects always get a plain
+// malloc buffer; nursery objects go through the tracked path above.
+void*
+js::Nursery::allocateBuffer(JSObject* obj, size_t nbytes)
+{
+    MOZ_ASSERT(obj);
+    MOZ_ASSERT(nbytes > 0);
+
+    if (!IsInsideNursery(obj))
+        return obj->zone()->pod_malloc<uint8_t>(nbytes);
+    return allocateBuffer(obj->zone(), nbytes);
+}
+
+// Resize an object buffer. Malloc-backed buffers are realloc'ed (rekeying
+// the tracking table if the pointer moves). Nursery-backed buffers cannot
+// shrink in place, and growing copies into a fresh allocation.
+void*
+js::Nursery::reallocateBuffer(JSObject* obj, void* oldBuffer,
+                              size_t oldBytes, size_t newBytes)
+{
+    if (!IsInsideNursery(obj))
+        return obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
+
+    if (!isInside(oldBuffer)) {
+        void* newBuffer = obj->zone()->pod_realloc<uint8_t>((uint8_t*)oldBuffer, oldBytes, newBytes);
+        if (newBuffer && oldBuffer != newBuffer)
+            MOZ_ALWAYS_TRUE(mallocedBuffers.rekeyAs(oldBuffer, newBuffer, newBuffer));
+        return newBuffer;
+    }
+
+    /* The nursery cannot make use of the returned slots data. */
+    if (newBytes < oldBytes)
+        return oldBuffer;
+
+    void* newBuffer = allocateBuffer(obj->zone(), newBytes);
+    if (newBuffer)
+        PodCopy((uint8_t*)newBuffer, (uint8_t*)oldBuffer, oldBytes);
+    return newBuffer;
+}
+
+// Free an object buffer. Nursery-internal buffers are reclaimed wholesale
+// at the next minor GC, so only malloc-backed buffers are freed here.
+void
+js::Nursery::freeBuffer(void* buffer)
+{
+    if (!isInside(buffer)) {
+        removeMallocedBuffer(buffer);
+        js_free(buffer);
+    }
+}
+
+// Record that the nursery buffer |oldData| moved to |newData| during
+// tenuring: either write the new pointer over the old data (|direct|, when
+// the buffer is big enough) or record it in the forwardedBuffers side table.
+void
+Nursery::setForwardingPointer(void* oldData, void* newData, bool direct)
+{
+    MOZ_ASSERT(isInside(oldData));
+
+    // Bug 1196210: If a zero-capacity header lands in the last 2 words of a
+    // jemalloc chunk abutting the start of a nursery chunk, the (invalid)
+    // newData pointer will appear to be "inside" the nursery.
+    MOZ_ASSERT(!isInside(newData) || (uintptr_t(newData) & ChunkMask) == 0);
+
+    if (direct) {
+        *reinterpret_cast<void**>(oldData) = newData;
+    } else {
+        AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!forwardedBuffers.initialized() && !forwardedBuffers.init())
+            oomUnsafe.crash("Nursery::setForwardingPointer");
+#ifdef DEBUG
+        if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(oldData))
+            MOZ_ASSERT(p->value() == newData);
+#endif
+        if (!forwardedBuffers.put(oldData, newData))
+            oomUnsafe.crash("Nursery::setForwardingPointer");
+    }
+}
+
+// Forward a moved slots array; always direct, since a slots array holds at
+// least one slot and therefore has room for the forwarding pointer.
+void
+Nursery::setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots, uint32_t nslots)
+{
+    // Slot arrays always have enough space for a forwarding pointer, since the
+    // number of slots is never zero.
+    MOZ_ASSERT(nslots > 0);
+    setForwardingPointer(oldSlots, newSlots, /* direct = */ true);
+}
+
+// Forward a moved elements array; direct only when the element storage is
+// large enough to hold the forwarding pointer, otherwise via the side table.
+void
+Nursery::setElementsForwardingPointer(ObjectElements* oldHeader, ObjectElements* newHeader,
+                                      uint32_t nelems)
+{
+    // Only use a direct forwarding pointer if there is enough space for one.
+    setForwardingPointer(oldHeader->elements(), newHeader->elements(),
+                         nelems > ObjectElements::VALUES_PER_HEADER);
+}
+
+#ifdef DEBUG
+// Debug probe: crashes (rather than returning false) if |ptr| is not
+// writable; used inside assertions to validate forwarded addresses.
+static bool IsWriteableAddress(void* ptr)
+{
+    volatile uint64_t* vPtr = reinterpret_cast<volatile uint64_t*>(ptr);
+    *vPtr = *vPtr;
+    return true;
+}
+#endif
+
+// Update a slots/elements pointer that still points into the nursery to its
+// tenured location, consulting the side table first and falling back to the
+// inline forwarding pointer stored in the old buffer.
+void
+js::Nursery::forwardBufferPointer(HeapSlot** pSlotsElems)
+{
+    HeapSlot* old = *pSlotsElems;
+
+    if (!isInside(old))
+        return;
+
+    // The new location for this buffer is either stored inline with it or in
+    // the forwardedBuffers table.
+    do {
+        if (forwardedBuffers.initialized()) {
+            if (ForwardedBufferMap::Ptr p = forwardedBuffers.lookup(old)) {
+                *pSlotsElems = reinterpret_cast<HeapSlot*>(p->value());
+                break;
+            }
+        }
+
+        *pSlotsElems = *reinterpret_cast<HeapSlot**>(old);
+    } while (false);
+
+    MOZ_ASSERT(!isInside(*pSlotsElems));
+    MOZ_ASSERT(IsWriteableAddress(*pSlotsElems));
+}
+
+// Tracer that moves live nursery things to the tenured heap; |head|/|tail|
+// form the worklist of promoted objects still to be scanned.
+js::TenuringTracer::TenuringTracer(JSRuntime* rt, Nursery* nursery)
+  : JSTracer(rt, JSTracer::TracerKindTag::Tenuring, TraceWeakMapKeysValues)
+  , nursery_(*nursery)
+  , tenuredSize(0)
+  , head(nullptr)
+  , tail(&head)
+{
+}
+
+// Print the column headers for the per-phase minor-GC profile table.
+/* static */ void
+js::Nursery::printProfileHeader()
+{
+#define PRINT_HEADER(name, text)                                              \
+    fprintf(stderr, " %6s", text);
+FOR_EACH_NURSERY_PROFILE_TIME(PRINT_HEADER)
+#undef PRINT_HEADER
+    fprintf(stderr, "\n");
+}
+
+// Print one row of per-phase times (microseconds) matching the header above.
+/* static */ void
+js::Nursery::printProfileTimes(const ProfileTimes& times)
+{
+    for (auto time : times)
+        fprintf(stderr, " %6" PRIi64, time);
+    fprintf(stderr, "\n");
+}
+
+// At shutdown (when profiling is enabled) print cumulative per-phase times
+// across all minor collections.
+void
+js::Nursery::printTotalProfileTimes()
+{
+    if (enableProfiling_) {
+        fprintf(stderr, "MinorGC TOTALS: %7" PRIu64 " collections:             ", minorGcCount_);
+        printProfileTimes(totalTimes_);
+    }
+}
+
+// Record the start time of profile phase |key|.
+inline void
+js::Nursery::startProfile(ProfileKey key)
+{
+    startTimes_[key] = PRMJ_Now();
+}
+
+// Record the elapsed time of phase |key| and add it to the running totals.
+inline void
+js::Nursery::endProfile(ProfileKey key)
+{
+    profileTimes_[key] = PRMJ_Now() - startTimes_[key];
+    totalTimes_[key] += profileTimes_[key];
+}
+
+// startProfile, but only when profiling is enabled.
+inline void
+js::Nursery::maybeStartProfile(ProfileKey key)
+{
+    if (enableProfiling_)
+        startProfile(key);
+}
+
+// endProfile, but only when profiling is enabled.
+inline void
+js::Nursery::maybeEndProfile(ProfileKey key)
+{
+    if (enableProfiling_)
+        endProfile(key);
+}
+
+// Perform a minor (nursery) collection: verify canaries, run doCollection
+// if anything is live, resize the nursery, pretenure hot object groups,
+// record telemetry, and emit profiling output. Main thread only.
+void
+js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
+{
+    MOZ_ASSERT(!rt->mainThread.suppressGC);
+    MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
+
+    if (!isEnabled() || isEmpty()) {
+        // Our barriers are not always exact, and there may be entries in the
+        // storebuffer even when the nursery is disabled or empty. It's not safe
+        // to keep these entries as they may refer to tenured cells which may be
+        // freed after this point.
+        rt->gc.storeBuffer.clear();
+    }
+
+    if (!isEnabled())
+        return;
+
+    rt->gc.incMinorGcNumber();
+
+#ifdef JS_GC_ZEAL
+    // Verify the canaries written after every allocation before they are
+    // discarded by the collection.
+    if (rt->gc.hasZealMode(ZealMode::CheckNursery)) {
+        for (auto canary = lastCanary_; canary; canary = canary->next)
+            MOZ_ASSERT(canary->magicValue == CanaryMagicValue);
+    }
+    lastCanary_ = nullptr;
+#endif
+
+    rt->gc.stats.beginNurseryCollection(reason);
+    TraceMinorGCStart();
+
+    startProfile(ProfileKey::Total);
+
+    // The hazard analysis thinks doCollection can invalidate pointers in
+    // tenureCounts below.
+    JS::AutoSuppressGCAnalysis nogc;
+
+    TenureCountCache tenureCounts;
+    double promotionRate = 0;
+    if (!isEmpty())
+        promotionRate = doCollection(rt, reason, tenureCounts);
+
+    // Resize the nursery.
+    maybeStartProfile(ProfileKey::Resize);
+    maybeResizeNursery(reason, promotionRate);
+    maybeEndProfile(ProfileKey::Resize);
+
+    // If we are promoting the nursery, or exhausted the store buffer with
+    // pointers to nursery things, which will force a collection well before
+    // the nursery is full, look for object groups that are getting promoted
+    // excessively and try to pretenure them.
+    maybeStartProfile(ProfileKey::Pretenure);
+    uint32_t pretenureCount = 0;
+    if (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER) {
+        JSContext* cx = rt->contextFromMainThread();
+        for (auto& entry : tenureCounts.entries) {
+            if (entry.count >= 3000) {
+                ObjectGroup* group = entry.group;
+                if (group->canPreTenure()) {
+                    AutoCompartment ac(cx, group->compartment());
+                    group->setShouldPreTenure(cx);
+                    pretenureCount++;
+                }
+            }
+        }
+    }
+    maybeEndProfile(ProfileKey::Pretenure);
+
+    // We ignore gcMaxBytes when allocating for minor collection. However, if we
+    // overflowed, we disable the nursery. The next time we allocate, we'll fail
+    // because gcBytes >= gcMaxBytes.
+    if (rt->gc.usage.gcBytes() >= rt->gc.tunables.gcMaxBytes())
+        disable();
+
+    endProfile(ProfileKey::Total);
+    minorGcCount_++;
+
+    int64_t totalTime = profileTimes_[ProfileKey::Total];
+    rt->addTelemetry(JS_TELEMETRY_GC_MINOR_US, totalTime);
+    rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON, reason);
+    if (totalTime > 1000)
+        rt->addTelemetry(JS_TELEMETRY_GC_MINOR_REASON_LONG, reason);
+    rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, sizeOfHeapCommitted());
+    rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT, pretenureCount);
+
+    rt->gc.stats.endNurseryCollection(reason);
+    TraceMinorGCEnd();
+
+    if (enableProfiling_ && totalTime >= profileThreshold_) {
+        // Re-print the header every 200 rows so long logs stay readable.
+        static int printedHeader = 0;
+        if ((printedHeader++ % 200) == 0) {
+            fprintf(stderr, "MinorGC:               Reason  PRate Size ");
+            printProfileHeader();
+        }
+
+        fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
+                JS::gcreason::ExplainReason(reason),
+                promotionRate * 100,
+                numChunks());
+        printProfileTimes(profileTimes_);
+
+        if (reportTenurings_) {
+            for (auto& entry : tenureCounts.entries) {
+                if (entry.count >= reportTenurings_) {
+                    fprintf(stderr, "%d x ", entry.count);
+                    entry.group->print();
+                }
+            }
+        }
+    }
+}
+
/*
 * Run one minor collection: trace all roots and store-buffer entries into
 * the nursery, tenure everything reachable, then sweep and reset the
 * nursery. Returns the promotion rate (bytes tenured / bytes initially in
 * use), which the caller uses to drive nursery resizing and pretenuring.
 */
double
js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
                          TenureCountCache& tenureCounts)
{
    AutoTraceSession session(rt, JS::HeapState::MinorCollecting);
    AutoSetThreadIsPerformingGC performingGC;
    AutoStopVerifyingBarriers av(rt, false);
    AutoDisableProxyCheck disableStrictProxyChecking(rt);
    mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;

    // Denominator of the promotion rate returned below; captured before
    // anything moves.
    size_t initialNurserySize = spaceToEnd();

    // Move objects pointed to by roots from the nursery to the major heap.
    TenuringTracer mover(rt, this);

    // Mark the store buffer. This must happen first.
    StoreBuffer& sb = rt->gc.storeBuffer;

    // The MIR graph only contains nursery pointers if cancelIonCompilations()
    // is set on the store buffer, in which case we cancel all compilations.
    maybeStartProfile(ProfileKey::CancelIonCompilations);
    if (sb.cancelIonCompilations())
        js::CancelOffThreadIonCompile(rt);
    maybeEndProfile(ProfileKey::CancelIonCompilations);

    maybeStartProfile(ProfileKey::TraceValues);
    sb.traceValues(mover);
    maybeEndProfile(ProfileKey::TraceValues);

    maybeStartProfile(ProfileKey::TraceCells);
    sb.traceCells(mover);
    maybeEndProfile(ProfileKey::TraceCells);

    maybeStartProfile(ProfileKey::TraceSlots);
    sb.traceSlots(mover);
    maybeEndProfile(ProfileKey::TraceSlots);

    maybeStartProfile(ProfileKey::TraceWholeCells);
    sb.traceWholeCells(mover);
    maybeEndProfile(ProfileKey::TraceWholeCells);

    maybeStartProfile(ProfileKey::TraceGenericEntries);
    sb.traceGenericEntries(&mover);
    maybeEndProfile(ProfileKey::TraceGenericEntries);

    maybeStartProfile(ProfileKey::MarkRuntime);
    rt->gc.traceRuntimeForMinorGC(&mover, session.lock);
    maybeEndProfile(ProfileKey::MarkRuntime);

    maybeStartProfile(ProfileKey::MarkDebugger);
    {
        gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
        Debugger::markAll(&mover);
    }
    maybeEndProfile(ProfileKey::MarkDebugger);

    // The new-object cache may hold pointers into the nursery; drop them
    // rather than trying to relocate them.
    maybeStartProfile(ProfileKey::ClearNewObjectCache);
    rt->contextFromMainThread()->caches.newObjectCache.clearNurseryObjects(rt);
    maybeEndProfile(ProfileKey::ClearNewObjectCache);

    // Most of the work is done here. This loop iterates over objects that have
    // been moved to the major heap. If these objects have any outgoing pointers
    // to the nursery, then those nursery objects get moved as well, until no
    // objects are left to move. That is, we iterate to a fixed point.
    maybeStartProfile(ProfileKey::CollectToFP);
    collectToFixedPoint(mover, tenureCounts);
    maybeEndProfile(ProfileKey::CollectToFP);

    // Sweep compartments to update the array buffer object's view lists.
    maybeStartProfile(ProfileKey::SweepArrayBufferViewList);
    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
        c->sweepAfterMinorGC(&mover);
    maybeEndProfile(ProfileKey::SweepArrayBufferViewList);

    // Update any slot or element pointers whose destination has been tenured.
    maybeStartProfile(ProfileKey::UpdateJitActivations);
    js::jit::UpdateJitActivationsForMinorGC(rt, &mover);
    forwardedBuffers.finish();
    maybeEndProfile(ProfileKey::UpdateJitActivations);

    maybeStartProfile(ProfileKey::ObjectsTenuredCallback);
    rt->gc.callObjectsTenuredCallback();
    maybeEndProfile(ProfileKey::ObjectsTenuredCallback);

    // Sweep.
    maybeStartProfile(ProfileKey::FreeMallocedBuffers);
    freeMallocedBuffers();
    maybeEndProfile(ProfileKey::FreeMallocedBuffers);

    maybeStartProfile(ProfileKey::Sweep);
    sweep();
    maybeEndProfile(ProfileKey::Sweep);

    maybeStartProfile(ProfileKey::ClearStoreBuffer);
    rt->gc.storeBuffer.clear();
    maybeEndProfile(ProfileKey::ClearStoreBuffer);

    // Make sure hashtables have been updated after the collection.
    maybeStartProfile(ProfileKey::CheckHashTables);
#ifdef JS_GC_ZEAL
    if (rt->hasZealMode(ZealMode::CheckHashTablesOnMinorGC))
        CheckHashTablesAfterMovingGC(rt);
#endif
    maybeEndProfile(ProfileKey::CheckHashTables);

    // Calculate and return the promotion rate.
    return mover.tenuredSize / double(initialNurserySize);
}
+
+void
+js::Nursery::FreeMallocedBuffersTask::transferBuffersToFree(MallocedBuffersSet& buffersToFree,
+ const AutoLockHelperThreadState& lock)
+{
+ // Transfer the contents of the source set to the task's buffers_ member by
+ // swapping the sets, which also clears the source.
+ MOZ_ASSERT(!isRunningWithLockHeld(lock));
+ MOZ_ASSERT(buffers_.empty());
+ mozilla::Swap(buffers_, buffersToFree);
+}
+
+void
+js::Nursery::FreeMallocedBuffersTask::run()
+{
+ for (MallocedBuffersSet::Range r = buffers_.all(); !r.empty(); r.popFront())
+ fop_->free_(r.front());
+ buffers_.clear();
+}
+
/*
 * Hand the set of malloced buffers owned by dead nursery things to the
 * background task for freeing. If the helper thread cannot be started we
 * fall back to freeing them synchronously on the main thread.
 */
void
js::Nursery::freeMallocedBuffers()
{
    if (mallocedBuffers.empty())
        return;

    bool started;
    {
        // Join any previous run first so the task's buffer set is empty
        // when we swap the new set into it.
        AutoLockHelperThreadState lock;
        freeMallocedBuffersTask->joinWithLockHeld(lock);
        freeMallocedBuffersTask->transferBuffersToFree(mallocedBuffers, lock);
        started = freeMallocedBuffersTask->startWithLockHeld(lock);
    }

    if (!started)
        freeMallocedBuffersTask->runFromMainThread(runtime());

    // transferBuffersToFree swapped the sets, leaving ours empty.
    MOZ_ASSERT(mallocedBuffers.empty());
}
+
+void
+js::Nursery::waitBackgroundFreeEnd()
+{
+ // We may finishRoots before nursery init if runtime init fails.
+ if (!isEnabled())
+ return;
+
+ MOZ_ASSERT(freeMallocedBuffersTask);
+ freeMallocedBuffersTask->join();
+}
+
/*
 * Free everything that did not survive the minor collection and reset the
 * nursery to an empty state, ready for new allocations.
 */
void
js::Nursery::sweep()
{
    /* Sweep unique id's in all in-use chunks. */
    for (CellsWithUniqueIdSet::Enum e(cellsWithUid_); !e.empty(); e.popFront()) {
        JSObject* obj = static_cast<JSObject*>(e.front());
        // Dead (non-forwarded) objects must release their unique id; for
        // tenured (forwarded) objects the id has already moved with them.
        if (!IsForwarded(obj))
            obj->zone()->removeUniqueId(obj);
        else
            MOZ_ASSERT(Forwarded(obj)->zone()->hasUniqueId(Forwarded(obj)));
    }
    cellsWithUid_.clear();

    runSweepActions();
    sweepDictionaryModeObjects();

#ifdef JS_GC_ZEAL
    /* Poison the nursery contents so touching a freed object will crash. */
    for (unsigned i = 0; i < numChunks(); i++)
        chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);

    if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
        /* Only reset the alloc point when we are close to the end. */
        if (currentChunk_ + 1 == numChunks())
            setCurrentChunk(0);
    } else
#endif
    {
#ifdef JS_CRASH_DIAGNOSTICS
        /* Without zeal, poisoning only happens for crash diagnostics. */
        for (unsigned i = 0; i < numChunks(); ++i)
            chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
#endif
        setCurrentChunk(0);
    }

    /* Set current start position for isEmpty checks. */
    setStartPosition();
    MemProfiler::SweepNursery(runtime());
}
+
+size_t
+js::Nursery::spaceToEnd() const
+{
+ unsigned lastChunk = numChunks() - 1;
+
+ MOZ_ASSERT(lastChunk >= currentStartChunk_);
+ MOZ_ASSERT(currentStartPosition_ - chunk(currentStartChunk_).start() <= NurseryChunkUsableSize);
+
+ size_t bytes = (chunk(currentStartChunk_).end() - currentStartPosition_) +
+ ((lastChunk - currentStartChunk_) * NurseryChunkUsableSize);
+
+ MOZ_ASSERT(bytes <= numChunks() * NurseryChunkUsableSize);
+
+ return bytes;
+}
+
/*
 * Switch bump allocation to the given chunk: reset the allocation position
 * to its start, the limit to its end, and poison its contents so stale
 * reads are caught.
 */
MOZ_ALWAYS_INLINE void
js::Nursery::setCurrentChunk(unsigned chunkno)
{
    MOZ_ASSERT(chunkno < maxChunks());
    MOZ_ASSERT(chunkno < numChunks());
    currentChunk_ = chunkno;
    position_ = chunk(chunkno).start();
    currentEnd_ = chunk(chunkno).end();
    chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
}
+
+MOZ_ALWAYS_INLINE void
+js::Nursery::setStartPosition()
+{
+ currentStartChunk_ = currentChunk_;
+ currentStartPosition_ = position();
+}
+
/*
 * Adjust the nursery size based on the promotion rate of the collection
 * that just finished. High promotion grows the nursery; two consecutive
 * low-promotion collections shrink it.
 */
void
js::Nursery::maybeResizeNursery(JS::gcreason::Reason reason, double promotionRate)
{
    static const double GrowThreshold = 0.05;
    static const double ShrinkThreshold = 0.01;

    // Shrink the nursery to its minimum size if we ran out of memory or
    // received a memory pressure event.
    if (gc::IsOOMReason(reason)) {
        minimizeAllocableSpace();
        return;
    }

    if (promotionRate > GrowThreshold)
        growAllocableSpace();
    else if (promotionRate < ShrinkThreshold && previousPromotionRate_ < ShrinkThreshold)
        shrinkAllocableSpace();

    previousPromotionRate_ = promotionRate;
}
+
+void
+js::Nursery::growAllocableSpace()
+{
+ updateNumChunks(Min(numChunks() * 2, maxNurseryChunks_));
+}
+
+void
+js::Nursery::shrinkAllocableSpace()
+{
+#ifdef JS_GC_ZEAL
+ if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+ return;
+#endif
+ updateNumChunks(Max(numChunks() - 1, 1u));
+}
+
+void
+js::Nursery::minimizeAllocableSpace()
+{
+#ifdef JS_GC_ZEAL
+ if (runtime()->hasZealMode(ZealMode::GenerationalGC))
+ return;
+#endif
+ updateNumChunks(1);
+}
+
+void
+js::Nursery::updateNumChunks(unsigned newCount)
+{
+ if (numChunks() != newCount) {
+ AutoMaybeStartBackgroundAllocation maybeBgAlloc;
+ AutoLockGC lock(runtime());
+ updateNumChunksLocked(newCount, maybeBgAlloc, lock);
+ }
+}
+
/*
 * Resize the nursery with the GC lock held. Freed chunks are returned to
 * the GC's chunk pool; new chunks come from it. Growing is best-effort:
 * on allocation failure we keep however many chunks were obtained.
 */
void
js::Nursery::updateNumChunksLocked(unsigned newCount,
                                   AutoMaybeStartBackgroundAllocation& maybeBgAlloc,
                                   AutoLockGC& lock)
{
    // The GC nursery is an optimization and so if we fail to allocate nursery
    // chunks we do not report an error.

    unsigned priorCount = numChunks();
    MOZ_ASSERT(priorCount != newCount);

    if (newCount < priorCount) {
        // Shrink the nursery and free unused chunks.
        for (unsigned i = newCount; i < priorCount; i++)
            runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
        chunks_.shrinkTo(newCount);
        return;
    }

    // Grow the nursery and allocate new chunks.
    if (!chunks_.resize(newCount))
        return;

    for (unsigned i = priorCount; i < newCount; i++) {
        auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
        if (!newChunk) {
            // Partial growth: drop the vector slots we could not populate.
            chunks_.shrinkTo(i);
            return;
        }

        chunks_[i] = NurseryChunk::fromChunk(newChunk);
        chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
    }
}
+
/*
 * Queue a thunk to run during the sweep phase of the next minor GC. The
 * SweepAction node itself is allocated from the nursery; that is safe
 * because the queue is drained (and the list discarded) during collection,
 * before the nursery is reset.
 */
void
js::Nursery::queueSweepAction(SweepThunk thunk, void* data)
{
    // The nursery allocator hands out memory in cell-size multiples.
    static_assert(sizeof(SweepAction) % CellSize == 0,
                  "SweepAction size must be a multiple of cell size");

    MOZ_ASSERT(isEnabled());

    // A failure here would lose a queued sweep action, so treat OOM as
    // fatal rather than returning an error.
    AutoEnterOOMUnsafeRegion oomUnsafe;
    auto action = reinterpret_cast<SweepAction*>(allocate(sizeof(SweepAction)));
    if (!action)
        oomUnsafe.crash("Nursery::queueSweepAction");

    // Push onto the head of the singly-linked list of pending actions.
    new (action) SweepAction(thunk, data, sweepActions_);
    sweepActions_ = action;
}
+
+void
+js::Nursery::runSweepActions()
+{
+ // The hazard analysis doesn't know whether the thunks can GC.
+ JS::AutoSuppressGCAnalysis nogc;
+
+ AutoSetThreadIsSweeping threadIsSweeping;
+ for (auto action = sweepActions_; action; action = action->next)
+ action->thunk(action->data);
+ sweepActions_ = nullptr;
+}
+
/*
 * Remember a nursery-allocated dictionary-mode object so its shape-list
 * pointer can be swept if it dies in the next minor GC. Returns false on
 * OOM.
 */
bool
js::Nursery::queueDictionaryModeObjectToSweep(NativeObject* obj)
{
    MOZ_ASSERT(IsInsideNursery(obj));
    return dictionaryModeObjects_.append(obj);
}
+
+void
+js::Nursery::sweepDictionaryModeObjects()
+{
+ for (auto obj : dictionaryModeObjects_) {
+ if (!IsForwarded(obj))
+ obj->sweepDictionaryListPointer();
+ }
+ dictionaryModeObjects_.clear();
+}
diff --git a/js/src/gc/Nursery.h b/js/src/gc/Nursery.h
new file mode 100644
index 000000000..69fb66b7a
--- /dev/null
+++ b/js/src/gc/Nursery.h
@@ -0,0 +1,471 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sw=4 et tw=78:
+ *
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Nursery_h
+#define gc_Nursery_h
+
+#include "mozilla/EnumeratedArray.h"
+
+#include "jsalloc.h"
+#include "jspubtd.h"
+
+#include "ds/BitArray.h"
+#include "gc/Heap.h"
+#include "gc/Memory.h"
+#include "js/Class.h"
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "js/Value.h"
+#include "js/Vector.h"
+#include "vm/SharedMem.h"
+
+#define FOR_EACH_NURSERY_PROFILE_TIME(_) \
+ /* Key Header text */ \
+ _(Total, "total") \
+ _(CancelIonCompilations, "canIon") \
+ _(TraceValues, "mkVals") \
+ _(TraceCells, "mkClls") \
+ _(TraceSlots, "mkSlts") \
+ _(TraceWholeCells, "mcWCll") \
+ _(TraceGenericEntries, "mkGnrc") \
+ _(CheckHashTables, "ckTbls") \
+ _(MarkRuntime, "mkRntm") \
+ _(MarkDebugger, "mkDbgr") \
+ _(ClearNewObjectCache, "clrNOC") \
+ _(CollectToFP, "collct") \
+ _(ObjectsTenuredCallback, "tenCB") \
+ _(SweepArrayBufferViewList, "swpABO") \
+ _(UpdateJitActivations, "updtIn") \
+ _(FreeMallocedBuffers, "frSlts") \
+ _(ClearStoreBuffer, "clrSB") \
+ _(Sweep, "sweep") \
+ _(Resize, "resize") \
+ _(Pretenure, "pretnr")
+
+namespace JS {
+struct Zone;
+} // namespace JS
+
+namespace js {
+
+class ObjectElements;
+class NativeObject;
+class Nursery;
+class HeapSlot;
+
+void SetGCZeal(JSRuntime*, uint8_t, uint32_t);
+
+namespace gc {
+class AutoMaybeStartBackgroundAllocation;
+struct Cell;
+class MinorCollectionTracer;
+class RelocationOverlay;
+struct TenureCountCache;
+} /* namespace gc */
+
+namespace jit {
+class MacroAssembler;
+} // namespace jit
+
/*
 * A JSTracer used during minor GC to move ("tenure") live nursery things
 * into the tenured heap. Constructed only by the Nursery; the store
 * buffers and root marking trace through it.
 */
class TenuringTracer : public JSTracer
{
    friend class Nursery;
    Nursery& nursery_;

    // Amount of data moved to the tenured generation during collection.
    size_t tenuredSize;

    // This list is threaded through the Nursery using the space from already
    // moved things. The list is used to fix up the moved things and to find
    // things held live by intra-Nursery pointers.
    gc::RelocationOverlay* head;
    gc::RelocationOverlay** tail;

    // Only the Nursery may construct a TenuringTracer.
    TenuringTracer(JSRuntime* rt, Nursery* nursery);

 public:
    const Nursery& nursery() const { return nursery_; }

    // Traverse an edge, updating *thingp if its target is moved. (The
    // update is in-place; these return void.)
    template <typename T> void traverse(T** thingp);
    template <typename T> void traverse(T* thingp);

    void insertIntoFixupList(gc::RelocationOverlay* entry);

    // The store buffers need to be able to call these directly.
    void traceObject(JSObject* src);
    void traceObjectSlots(NativeObject* nobj, uint32_t start, uint32_t length);
    void traceSlots(JS::Value* vp, uint32_t nslots) { traceSlots(vp, vp + nslots); }

 private:
    Nursery& nursery() { return nursery_; }

    JSObject* moveToTenured(JSObject* src);
    size_t moveObjectToTenured(JSObject* dst, JSObject* src, gc::AllocKind dstKind);
    size_t moveElementsToTenured(NativeObject* dst, NativeObject* src, gc::AllocKind dstKind);
    size_t moveSlotsToTenured(NativeObject* dst, NativeObject* src, gc::AllocKind dstKind);

    void traceSlots(JS::Value* vp, JS::Value* end);
};
+
+/*
+ * Classes with JSCLASS_SKIP_NURSERY_FINALIZE or Wrapper classes with
+ * CROSS_COMPARTMENT flags will not have their finalizer called if they are
+ * nursery allocated and not promoted to the tenured heap. The finalizers for
+ * these classes must do nothing except free data which was allocated via
+ * Nursery::allocateBuffer.
+ */
+inline bool
+CanNurseryAllocateFinalizedClass(const js::Class* const clasp)
+{
+ MOZ_ASSERT(clasp->hasFinalize());
+ return clasp->flags & JSCLASS_SKIP_NURSERY_FINALIZE;
+}
+
/*
 * The generational GC nursery: a bump-allocated region made of chunk-sized
 * blocks where most objects are first allocated. Minor GC (collect())
 * tenures the live subset into the major heap and resets the nursery.
 */
class Nursery
{
  public:
    static const size_t Alignment = gc::ChunkSize;
    static const size_t ChunkShift = gc::ChunkShift;

    explicit Nursery(JSRuntime* rt);
    ~Nursery();

    MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);

    unsigned maxChunks() const { return maxNurseryChunks_; }
    unsigned numChunks() const { return chunks_.length(); }

    bool exists() const { return maxChunks() != 0; }
    size_t nurserySize() const { return maxChunks() << ChunkShift; }

    void enable();
    void disable();
    bool isEnabled() const { return numChunks() != 0; }

    /* Return true if no allocations have been made since the last collection. */
    bool isEmpty() const;

    /*
     * Check whether an arbitrary pointer is within the nursery. This is
     * slower than IsInsideNursery(Cell*), but works on all types of pointers.
     */
    MOZ_ALWAYS_INLINE bool isInside(gc::Cell* cellp) const = delete;
    MOZ_ALWAYS_INLINE bool isInside(const void* p) const {
        for (auto chunk : chunks_) {
            if (uintptr_t(p) - chunk->start() < gc::ChunkSize)
                return true;
        }
        return false;
    }
    template<typename T>
    bool isInside(const SharedMem<T>& p) const {
        return isInside(p.unwrap(/*safe - used for value in comparison above*/));
    }

    /*
     * Allocate and return a pointer to a new GC object with its |slots|
     * pointer pre-filled. Returns nullptr if the Nursery is full.
     */
    JSObject* allocateObject(JSContext* cx, size_t size, size_t numDynamic, const js::Class* clasp);

    /* Allocate a buffer for a given zone, using the nursery if possible. */
    void* allocateBuffer(JS::Zone* zone, size_t nbytes);

    /*
     * Allocate a buffer for a given object, using the nursery if possible and
     * obj is in the nursery.
     */
    void* allocateBuffer(JSObject* obj, size_t nbytes);

    /* Resize an existing object buffer. */
    void* reallocateBuffer(JSObject* obj, void* oldBuffer,
                           size_t oldBytes, size_t newBytes);

    /* Free an object buffer. */
    void freeBuffer(void* buffer);

    /* The maximum number of bytes allowed to reside in nursery buffers. */
    static const size_t MaxNurseryBufferSize = 1024;

    /* Do a minor collection. */
    void collect(JSRuntime* rt, JS::gcreason::Reason reason);

    /*
     * Check if the thing at |*ref| in the Nursery has been forwarded. If so,
     * sets |*ref| to the new location of the object and returns true. Otherwise
     * returns false and leaves |*ref| unset.
     */
    MOZ_ALWAYS_INLINE MOZ_MUST_USE bool getForwardedPointer(JSObject** ref) const;

    /* Forward a slots/elements pointer stored in an Ion frame. */
    void forwardBufferPointer(HeapSlot** pSlotsElems);

    void maybeSetForwardingPointer(JSTracer* trc, void* oldData, void* newData, bool direct) {
        if (trc->isTenuringTracer() && isInside(oldData))
            setForwardingPointer(oldData, newData, direct);
    }

    /* Mark a malloced buffer as no longer needing to be freed. */
    void removeMallocedBuffer(void* buffer) {
        mallocedBuffers.remove(buffer);
    }

    void waitBackgroundFreeEnd();

    /* Record that a nursery cell was given a unique id; see cellsWithUid_. */
    MOZ_MUST_USE bool addedUniqueIdToCell(gc::Cell* cell) {
        if (!IsInsideNursery(cell) || !isEnabled())
            return true;
        MOZ_ASSERT(cellsWithUid_.initialized());
        MOZ_ASSERT(!cellsWithUid_.has(cell));
        return cellsWithUid_.put(cell);
    }

    using SweepThunk = void (*)(void *data);
    void queueSweepAction(SweepThunk thunk, void* data);

    MOZ_MUST_USE bool queueDictionaryModeObjectToSweep(NativeObject* obj);

    size_t sizeOfHeapCommitted() const {
        return numChunks() * gc::ChunkSize;
    }
    size_t sizeOfMallocedBuffers(mozilla::MallocSizeOf mallocSizeOf) const {
        size_t total = 0;
        for (MallocedBuffersSet::Range r = mallocedBuffers.all(); !r.empty(); r.popFront())
            total += mallocSizeOf(r.front());
        total += mallocedBuffers.sizeOfExcludingThis(mallocSizeOf);
        return total;
    }

    // The number of bytes from the start position to the end of the nursery.
    size_t spaceToEnd() const;

    // Free space remaining, not counting chunk trailers.
    MOZ_ALWAYS_INLINE size_t freeSpace() const {
        MOZ_ASSERT(currentEnd_ - position_ <= NurseryChunkUsableSize);
        return (currentEnd_ - position_) +
               (numChunks() - currentChunk_ - 1) * NurseryChunkUsableSize;
    }

#ifdef JS_GC_ZEAL
    void enterZealMode();
    void leaveZealMode();
#endif

    /* Print total profile times on shutdown. */
    void printTotalProfileTimes();

  private:
    /* The amount of space in the mapped nursery available to allocations. */
    static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);

    /*
     * A nursery chunk overlays a gc::Chunk: usable data followed by the
     * trailer, so pointer tests and chunk bookkeeping work unchanged.
     */
    struct NurseryChunk {
        char data[NurseryChunkUsableSize];
        gc::ChunkTrailer trailer;
        static NurseryChunk* fromChunk(gc::Chunk* chunk);
        void init(JSRuntime* rt);
        void poisonAndInit(JSRuntime* rt, uint8_t poison);
        uintptr_t start() const { return uintptr_t(&data); }
        uintptr_t end() const { return uintptr_t(&trailer); }
        gc::Chunk* toChunk(JSRuntime* rt);
    };
    static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
                  "Nursery chunk size must match gc::Chunk size.");

    /*
     * The start and end pointers are stored under the runtime so that we can
     * inline the isInsideNursery check into embedder code. Use the start()
     * and heapEnd() functions to access these values.
     */
    JSRuntime* runtime_;

    /* Vector of allocated chunks to allocate from. */
    Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;

    /* Pointer to the first unallocated byte in the nursery. */
    uintptr_t position_;

    /* Pointer to the logical start of the Nursery. */
    unsigned currentStartChunk_;
    uintptr_t currentStartPosition_;

    /* Pointer to the last byte of space in the current chunk. */
    uintptr_t currentEnd_;

    /* The index of the chunk that is currently being allocated from. */
    unsigned currentChunk_;

    /* Maximum number of chunks to allocate for the nursery. */
    unsigned maxNurseryChunks_;

    /* Promotion rate for the previous minor collection. */
    double previousPromotionRate_;

    /* Report minor collections taking at least this many us, if enabled. */
    int64_t profileThreshold_;
    bool enableProfiling_;

    /* Report ObjectGroups with at least this many instances tenured. */
    int64_t reportTenurings_;

    /* Profiling data. */

    enum class ProfileKey
    {
#define DEFINE_TIME_KEY(name, text)                                           \
        name,
        FOR_EACH_NURSERY_PROFILE_TIME(DEFINE_TIME_KEY)
#undef DEFINE_TIME_KEY
        KeyCount
    };

    using ProfileTimes = mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount, int64_t>;

    ProfileTimes startTimes_;
    ProfileTimes profileTimes_;
    ProfileTimes totalTimes_;
    uint64_t minorGcCount_;

    /*
     * The set of externally malloced buffers potentially kept live by objects
     * stored in the nursery. Any external buffers that do not belong to a
     * tenured thing at the end of a minor GC must be freed.
     */
    typedef HashSet<void*, PointerHasher<void*, 3>, SystemAllocPolicy> MallocedBuffersSet;
    MallocedBuffersSet mallocedBuffers;

    /* A task structure used to free the malloced buffers on a background thread. */
    struct FreeMallocedBuffersTask;
    FreeMallocedBuffersTask* freeMallocedBuffersTask;

    /*
     * During a collection most hoisted slot and element buffers indicate their
     * new location with a forwarding pointer at the base. This does not work
     * for buffers whose length is less than pointer width, or when different
     * buffers might overlap each other. For these, an entry in the following
     * table is used.
     */
    typedef HashMap<void*, void*, PointerHasher<void*, 1>, SystemAllocPolicy> ForwardedBufferMap;
    ForwardedBufferMap forwardedBuffers;

    /*
     * When we assign a unique id to cell in the nursery, that almost always
     * means that the cell will be in a hash table, and thus, held live,
     * automatically moving the uid from the nursery to its new home in
     * tenured. It is possible, if rare, for an object that acquired a uid to
     * be dead before the next collection, in which case we need to know to
     * remove it when we sweep.
     *
     * Note: we store the pointers as Cell* here, resulting in an ugly cast in
     *       sweep. This is because this structure is used to help implement
     *       stable object hashing and we have to break the cycle somehow.
     */
    using CellsWithUniqueIdSet = HashSet<gc::Cell*, PointerHasher<gc::Cell*, 3>, SystemAllocPolicy>;
    CellsWithUniqueIdSet cellsWithUid_;

    /* Head of the singly-linked list of queued sweep actions. */
    struct SweepAction;
    SweepAction* sweepActions_;

    /* Dictionary-mode objects to sweep if they die in the nursery. */
    using NativeObjectVector = Vector<NativeObject*, 0, SystemAllocPolicy>;
    NativeObjectVector dictionaryModeObjects_;

#ifdef JS_GC_ZEAL
    struct Canary;
    Canary* lastCanary_;
#endif

    NurseryChunk* allocChunk();

    NurseryChunk& chunk(unsigned index) const {
        return *chunks_[index];
    }

    void setCurrentChunk(unsigned chunkno);
    void setStartPosition();

    void updateNumChunks(unsigned newCount);
    void updateNumChunksLocked(unsigned newCount,
                               gc::AutoMaybeStartBackgroundAllocation& maybeBgAlloc,
                               AutoLockGC& lock);

    MOZ_ALWAYS_INLINE uintptr_t allocationEnd() const {
        MOZ_ASSERT(numChunks() > 0);
        return chunks_.back()->end();
    }

    MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {
        MOZ_ASSERT(runtime_);
        MOZ_ASSERT(currentEnd_ == chunk(currentChunk_).end());
        return currentEnd_;
    }
    void* addressOfCurrentEnd() const {
        MOZ_ASSERT(runtime_);
        return (void*)&currentEnd_;
    }

    uintptr_t position() const { return position_; }
    void* addressOfPosition() const { return (void*)&position_; }

    JSRuntime* runtime() const { return runtime_; }

    /* Allocates a new GC thing from the tenured generation during minor GC. */
    gc::TenuredCell* allocateFromTenured(JS::Zone* zone, gc::AllocKind thingKind);

    /* Common internal allocator function. */
    void* allocate(size_t size);

    double doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
                        gc::TenureCountCache& tenureCounts);

    /*
     * Move the object at |src| in the Nursery to an already-allocated cell
     * |dst| in Tenured.
     */
    void collectToFixedPoint(TenuringTracer& trc, gc::TenureCountCache& tenureCounts);

    /* Handle relocation of slots/elements pointers stored in Ion frames. */
    void setForwardingPointer(void* oldData, void* newData, bool direct);

    void setSlotsForwardingPointer(HeapSlot* oldSlots, HeapSlot* newSlots, uint32_t nslots);
    void setElementsForwardingPointer(ObjectElements* oldHeader, ObjectElements* newHeader,
                                      uint32_t nelems);

    /* Free malloced pointers owned by freed things in the nursery. */
    void freeMallocedBuffers();

    /*
     * Frees all non-live nursery-allocated things at the end of a minor
     * collection.
     */
    void sweep();

    void runSweepActions();
    void sweepDictionaryModeObjects();

    /* Change the allocable space provided by the nursery. */
    void maybeResizeNursery(JS::gcreason::Reason reason, double promotionRate);
    void growAllocableSpace();
    void shrinkAllocableSpace();
    void minimizeAllocableSpace();

    /* Profile recording and printing. */
    void startProfile(ProfileKey key);
    void endProfile(ProfileKey key);
    void maybeStartProfile(ProfileKey key);
    void maybeEndProfile(ProfileKey key);
    static void printProfileHeader();
    static void printProfileTimes(const ProfileTimes& times);

    friend class TenuringTracer;
    friend class gc::MinorCollectionTracer;
    friend class jit::MacroAssembler;
};
+
+} /* namespace js */
+
+#endif /* gc_Nursery_h */
diff --git a/js/src/gc/NurseryAwareHashMap.h b/js/src/gc/NurseryAwareHashMap.h
new file mode 100644
index 000000000..5ade2e341
--- /dev/null
+++ b/js/src/gc/NurseryAwareHashMap.h
@@ -0,0 +1,178 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_NurseryAwareHashMap_h
+#define gc_NurseryAwareHashMap_h
+
+namespace js {
+
+namespace detail {
+// This class only handles the incremental case and does not deal with nursery
+// pointers. The only users should be for NurseryAwareHashMap; it is defined
+// externally because we need a GCPolicy for its use in the contained map.
template <typename T>
class UnsafeBareReadBarriered : public ReadBarrieredBase<T>
{
  public:
    UnsafeBareReadBarriered() : ReadBarrieredBase<T>(JS::GCPolicy<T>::initial()) {}
    MOZ_IMPLICIT UnsafeBareReadBarriered(const T& v) : ReadBarrieredBase<T>(v) {}
    // NOTE(review): the copy constructor is marked explicit, which is
    // unusual; presumably intentional to force deliberate copies — confirm.
    explicit UnsafeBareReadBarriered(const UnsafeBareReadBarriered& v) : ReadBarrieredBase<T>(v) {}
    UnsafeBareReadBarriered(UnsafeBareReadBarriered&& v)
      : ReadBarrieredBase<T>(mozilla::Move(v))
    {}

    UnsafeBareReadBarriered& operator=(const UnsafeBareReadBarriered& v) {
        this->value = v.value;
        return *this;
    }

    UnsafeBareReadBarriered& operator=(const T& v) {
        this->value = v;
        return *this;
    }

    // Read with the barrier: returns the initial (empty) value for
    // non-markable contents, otherwise triggers the read barrier first.
    const T get() const {
        if (!InternalBarrierMethods<T>::isMarkable(this->value))
            return JS::GCPolicy<T>::initial();
        this->read();
        return this->value;
    }

    explicit operator bool() const {
        return bool(this->value);
    }

    // Barrier-free accessors for use during GC, when triggering a read
    // barrier would be incorrect.
    const T unbarrieredGet() const { return this->value; }
    T* unsafeGet() { return &this->value; }
    T const* unsafeGet() const { return &this->value; }
};
+} // namespace detail
+
+// The "nursery aware" hash map is a special case of GCHashMap that is able to
+// treat nursery allocated members weakly during a minor GC: e.g. it allows for
+// nursery allocated objects to be collected during nursery GC where a normal
+// hash table treats such edges strongly.
+//
+// Doing this requires some strong constraints on what can be stored in this
+// table and how it can be accessed. At the moment, this table assumes that
+// all values contain a strong reference to the key. It also requires the
+ policy to contain |isTenured| and |needsSweep| members, which is fairly
+// non-standard. This limits its usefulness to the CrossCompartmentMap at the
+// moment, but might serve as a useful base for other tables in future.
template <typename Key,
          typename Value,
          typename HashPolicy = DefaultHasher<Key>,
          typename AllocPolicy = TempAllocPolicy>
class NurseryAwareHashMap
{
    using BarrieredValue = detail::UnsafeBareReadBarriered<Value>;
    using MapType = GCRekeyableHashMap<Key, BarrieredValue, HashPolicy, AllocPolicy>;
    MapType map;

    // Keep a list of all keys for which JS::GCPolicy<Key>::isTenured is false.
    // This lets us avoid a full traversal of the map on each minor GC, keeping
    // the minor GC times proportional to the nursery heap size.
    Vector<Key, 0, AllocPolicy> nurseryEntries;

  public:
    using Lookup = typename MapType::Lookup;
    using Ptr = typename MapType::Ptr;
    using Range = typename MapType::Range;

    explicit NurseryAwareHashMap(AllocPolicy a = AllocPolicy()) : map(a) {}

    MOZ_MUST_USE bool init(uint32_t len = 16) { return map.init(len); }

    bool empty() const { return map.empty(); }
    Ptr lookup(const Lookup& l) const { return map.lookup(l); }
    void remove(Ptr p) { map.remove(p); }
    Range all() const { return map.all(); }
    struct Enum : public MapType::Enum {
        explicit Enum(NurseryAwareHashMap& namap) : MapType::Enum(namap.map) {}
    };
    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return map.sizeOfExcludingThis(mallocSizeOf);
    }
    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
        return map.sizeOfIncludingThis(mallocSizeOf);
    }

    // Insert or overwrite the entry for |k|. If either the key or the value
    // is not tenured, the key is also recorded in nurseryEntries so
    // sweepAfterMinorGC can revisit it.
    MOZ_MUST_USE bool put(const Key& k, const Value& v) {
        auto p = map.lookupForAdd(k);
        if (p) {
            if (!JS::GCPolicy<Key>::isTenured(k) || !JS::GCPolicy<Value>::isTenured(v)) {
                if (!nurseryEntries.append(k))
                    return false;
            }
            p->value() = v;
            return true;
        }

        bool ok = map.add(p, k, v);
        if (!ok)
            return false;

        if (!JS::GCPolicy<Key>::isTenured(k) || !JS::GCPolicy<Value>::isTenured(v)) {
            // Roll back the insertion on OOM so the map and the nursery
            // list stay consistent.
            if (!nurseryEntries.append(k)) {
                map.remove(k);
                return false;
            }
        }

        return true;
    }

    // Visit only the entries recorded in nurseryEntries: drop those whose
    // value died in the minor GC, and rekey those whose key was moved.
    void sweepAfterMinorGC(JSTracer* trc) {
        for (auto& key : nurseryEntries) {
            auto p = map.lookup(key);
            if (!p)
                continue;

            // Drop the entry if the value is not marked.
            if (JS::GCPolicy<BarrieredValue>::needsSweep(&p->value())) {
                map.remove(key);
                continue;
            }

            // Update and relocate the key, if the value is still needed.
            //
            // Note that this currently assumes that all Value will contain a
            // strong reference to Key, as per its use as the
            // CrossCompartmentWrapperMap. We may need to make the following
            // behavior more dynamic if we use this map in other nursery-aware
            // contexts.
            Key copy(key);
            mozilla::DebugOnly<bool> sweepKey = JS::GCPolicy<Key>::needsSweep(&copy);
            MOZ_ASSERT(!sweepKey);
            map.rekeyIfMoved(key, copy);
        }
        nurseryEntries.clear();
    }

    void sweep() {
        MOZ_ASSERT(nurseryEntries.empty());
        map.sweep();
    }
};
+
+} // namespace js
+
+namespace JS {
// GCPolicy specialization so UnsafeBareReadBarriered<T> values can live in
// GC hash maps: trace through the wrapper, and report an entry as needing
// sweeping when its referent is about to be finalized.
template <typename T>
struct GCPolicy<js::detail::UnsafeBareReadBarriered<T>>
{
    static void trace(JSTracer* trc, js::detail::UnsafeBareReadBarriered<T>* thingp,
                      const char* name)
    {
        js::TraceEdge(trc, thingp, name);
    }
    static bool needsSweep(js::detail::UnsafeBareReadBarriered<T>* thingp) {
        return js::gc::IsAboutToBeFinalized(thingp);
    }
};
+} // namespace JS
+
+#endif // gc_NurseryAwareHashMap_h
diff --git a/js/src/gc/Policy.h b/js/src/gc/Policy.h
new file mode 100644
index 000000000..74b34d9c8
--- /dev/null
+++ b/js/src/gc/Policy.h
@@ -0,0 +1,159 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS Garbage Collector. */
+
+#ifndef gc_Policy_h
+#define gc_Policy_h
+
+#include "mozilla/TypeTraits.h"
+#include "gc/Barrier.h"
+#include "gc/Marking.h"
+#include "js/GCPolicyAPI.h"
+
+// Forward declare the types we're defining policies for. This file is
+// included in all places that define GC things, so the real definitions
+// will be available when we do template expansion, allowing for use of
+// static members in the underlying types. We cannot, however, use
+// static_assert to verify relations between types.
+class JSLinearString;
+namespace js {
+class AccessorShape;
+class ArgumentsObject;
+class ArrayBufferObject;
+class ArrayBufferObjectMaybeShared;
+class ArrayBufferViewObject;
+class ArrayObject;
+class BaseShape;
+class DebugEnvironmentProxy;
+class DebuggerFrame;
+class ExportEntryObject;
+class EnvironmentObject;
+class GlobalObject;
+class ImportEntryObject;
+class LazyScript;
+class LexicalEnvironmentObject;
+class ModuleEnvironmentObject;
+class ModuleNamespaceObject;
+class ModuleObject;
+class NativeObject;
+class ObjectGroup;
+class PlainObject;
+class PropertyName;
+class RegExpObject;
+class SavedFrame;
+class Scope;
+class EnvironmentObject;
+class ScriptSourceObject;
+class Shape;
+class SharedArrayBufferObject;
+class StructTypeDescr;
+class UnownedBaseShape;
+class WasmMemoryObject;
+namespace jit {
+class JitCode;
+} // namespace jit
+} // namespace js
+
+// Expand the given macro D for each valid GC reference type.
+#define FOR_EACH_INTERNAL_GC_POINTER_TYPE(D) \
+ D(JSFlatString*) \
+ D(JSLinearString*) \
+ D(js::AccessorShape*) \
+ D(js::ArgumentsObject*) \
+ D(js::ArrayBufferObject*) \
+ D(js::ArrayBufferObjectMaybeShared*) \
+ D(js::ArrayBufferViewObject*) \
+ D(js::ArrayObject*) \
+ D(js::BaseShape*) \
+ D(js::DebugEnvironmentProxy*) \
+ D(js::DebuggerFrame*) \
+ D(js::ExportEntryObject*) \
+ D(js::EnvironmentObject*) \
+ D(js::GlobalObject*) \
+ D(js::ImportEntryObject*) \
+ D(js::LazyScript*) \
+ D(js::LexicalEnvironmentObject*) \
+ D(js::ModuleEnvironmentObject*) \
+ D(js::ModuleNamespaceObject*) \
+ D(js::ModuleObject*) \
+ D(js::NativeObject*) \
+ D(js::ObjectGroup*) \
+ D(js::PlainObject*) \
+ D(js::PropertyName*) \
+ D(js::RegExpObject*) \
+ D(js::SavedFrame*) \
+ D(js::Scope*) \
+ D(js::ScriptSourceObject*) \
+ D(js::Shape*) \
+ D(js::SharedArrayBufferObject*) \
+ D(js::StructTypeDescr*) \
+ D(js::UnownedBaseShape*) \
+ D(js::WasmInstanceObject*) \
+ D(js::WasmMemoryObject*) \
+ D(js::WasmTableObject*) \
+ D(js::jit::JitCode*)
+
+// Expand the given macro D for each internal tagged GC pointer type.
+#define FOR_EACH_INTERNAL_TAGGED_GC_POINTER_TYPE(D) \
+ D(js::TaggedProto)
+
+// Expand the macro D for every GC reference type that we know about.
+#define FOR_EACH_GC_POINTER_TYPE(D) \
+ FOR_EACH_PUBLIC_GC_POINTER_TYPE(D) \
+ FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(D) \
+ FOR_EACH_INTERNAL_GC_POINTER_TYPE(D) \
+ FOR_EACH_INTERNAL_TAGGED_GC_POINTER_TYPE(D)
+
+namespace js {
+
+// Define the GCPolicy for all internal pointers.
+template <typename T>
+struct InternalGCPointerPolicy {
+ using Type = typename mozilla::RemovePointer<T>::Type;
+ static T initial() { return nullptr; }
+ static void preBarrier(T v) { Type::writeBarrierPre(v); }
+ static void postBarrier(T* vp, T prev, T next) { Type::writeBarrierPost(vp, prev, next); }
+ static void readBarrier(T v) { Type::readBarrier(v); }
+ static void trace(JSTracer* trc, T* vp, const char* name) {
+ TraceManuallyBarrieredEdge(trc, vp, name);
+ }
+};
+
+} // namespace js
+
+namespace JS {
+
+#define DEFINE_INTERNAL_GC_POLICY(type) \
+ template <> struct GCPolicy<type> : public js::InternalGCPointerPolicy<type> {};
+FOR_EACH_INTERNAL_GC_POINTER_TYPE(DEFINE_INTERNAL_GC_POLICY)
+#undef DEFINE_INTERNAL_GC_POLICY
+
+template <typename T>
+struct GCPolicy<js::HeapPtr<T>>
+{
+ static void trace(JSTracer* trc, js::HeapPtr<T>* thingp, const char* name) {
+ js::TraceEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::HeapPtr<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+};
+
+template <typename T>
+struct GCPolicy<js::ReadBarriered<T>>
+{
+ static void trace(JSTracer* trc, js::ReadBarriered<T>* thingp, const char* name) {
+ js::TraceEdge(trc, thingp, name);
+ }
+ static bool needsSweep(js::ReadBarriered<T>* thingp) {
+ return js::gc::IsAboutToBeFinalized(thingp);
+ }
+};
+
+} // namespace JS
+
+#endif // gc_Policy_h
diff --git a/js/src/gc/RootMarking.cpp b/js/src/gc/RootMarking.cpp
new file mode 100644
index 000000000..93264084b
--- /dev/null
+++ b/js/src/gc/RootMarking.cpp
@@ -0,0 +1,543 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "mozilla/ArrayUtils.h"
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "jscntxt.h"
+#include "jsgc.h"
+#include "jsprf.h"
+#include "jstypes.h"
+#include "jswatchpoint.h"
+
+#include "builtin/MapObject.h"
+#include "frontend/BytecodeCompiler.h"
+#include "gc/GCInternals.h"
+#include "gc/Marking.h"
+#include "jit/MacroAssembler.h"
+#include "js/HashTable.h"
+#include "vm/Debugger.h"
+#include "vm/JSONParser.h"
+
+#include "jsgcinlines.h"
+#include "jsobjinlines.h"
+
+using namespace js;
+using namespace js::gc;
+
+using mozilla::ArrayEnd;
+
+using JS::AutoGCRooter;
+
+typedef RootedValueMap::Range RootRange;
+typedef RootedValueMap::Entry RootEntry;
+typedef RootedValueMap::Enum RootEnum;
+
+template <typename T>
+using TraceFunction = void (*)(JSTracer* trc, T* ref, const char* name);
+
+// For more detail see JS::Rooted::ptr and js::DispatchWrapper.
+//
+// The JS::RootKind::Traceable list contains a bunch of totally disparate
+// types, but the instantiations of DispatchWrapper below need /something/ in
+// the type field. We use the following type as a compatible stand-in. No
+// actual methods from ConcreteTraceable type are actually used at runtime --
+// the real trace function has been stored inline in the DispatchWrapper.
+struct ConcreteTraceable {
+ ConcreteTraceable() { MOZ_CRASH("instantiation of ConcreteTraceable"); }
+ void trace(JSTracer*) {}
+};
+
+template <typename T, TraceFunction<T> TraceFn = TraceNullableRoot>
+static inline void
+MarkExactStackRootList(JSTracer* trc, JS::Rooted<void*>* rooter, const char* name)
+{
+ while (rooter) {
+ T* addr = reinterpret_cast<JS::Rooted<T>*>(rooter)->address();
+ TraceFn(trc, addr, name);
+ rooter = rooter->previous();
+ }
+}
+
+static inline void
+TraceStackRoots(JSTracer* trc, RootedListHeads& stackRoots)
+{
+#define MARK_ROOTS(name, type, _) \
+ MarkExactStackRootList<type*>(trc, stackRoots[JS::RootKind::name], "exact-" #name);
+JS_FOR_EACH_TRACEKIND(MARK_ROOTS)
+#undef MARK_ROOTS
+ MarkExactStackRootList<jsid>(trc, stackRoots[JS::RootKind::Id], "exact-id");
+ MarkExactStackRootList<Value>(trc, stackRoots[JS::RootKind::Value], "exact-value");
+ MarkExactStackRootList<ConcreteTraceable,
+ js::DispatchWrapper<ConcreteTraceable>::TraceWrapped>(
+ trc, stackRoots[JS::RootKind::Traceable], "Traceable");
+}
+
+void
+js::RootLists::traceStackRoots(JSTracer* trc)
+{
+ TraceStackRoots(trc, stackRoots_);
+}
+
+static void
+MarkExactStackRoots(JSRuntime* rt, JSTracer* trc)
+{
+ for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next())
+ TraceStackRoots(trc, zone->stackRoots_);
+ rt->contextFromMainThread()->roots.traceStackRoots(trc);
+}
+
+template <typename T, TraceFunction<T> TraceFn = TraceNullableRoot>
+static inline void
+MarkPersistentRootedList(JSTracer* trc, mozilla::LinkedList<PersistentRooted<void*>>& list,
+ const char* name)
+{
+ for (PersistentRooted<void*>* r : list)
+ TraceFn(trc, reinterpret_cast<PersistentRooted<T>*>(r)->address(), name);
+}
+
+void
+js::RootLists::tracePersistentRoots(JSTracer* trc)
+{
+#define MARK_ROOTS(name, type, _) \
+ MarkPersistentRootedList<type*>(trc, heapRoots_[JS::RootKind::name], "persistent-" #name);
+JS_FOR_EACH_TRACEKIND(MARK_ROOTS)
+#undef MARK_ROOTS
+ MarkPersistentRootedList<jsid>(trc, heapRoots_[JS::RootKind::Id], "persistent-id");
+ MarkPersistentRootedList<Value>(trc, heapRoots_[JS::RootKind::Value], "persistent-value");
+ MarkPersistentRootedList<ConcreteTraceable,
+ js::DispatchWrapper<ConcreteTraceable>::TraceWrapped>(trc,
+ heapRoots_[JS::RootKind::Traceable], "persistent-traceable");
+}
+
+static void
+MarkPersistentRooted(JSRuntime* rt, JSTracer* trc)
+{
+ rt->contextFromMainThread()->roots.tracePersistentRoots(trc);
+}
+
+template <typename T>
+static void
+FinishPersistentRootedChain(mozilla::LinkedList<PersistentRooted<void*>>& listArg)
+{
+ auto& list = reinterpret_cast<mozilla::LinkedList<PersistentRooted<T>>&>(listArg);
+ while (!list.isEmpty())
+ list.getFirst()->reset();
+}
+
+void
+js::RootLists::finishPersistentRoots()
+{
+#define FINISH_ROOT_LIST(name, type, _) \
+ FinishPersistentRootedChain<type*>(heapRoots_[JS::RootKind::name]);
+JS_FOR_EACH_TRACEKIND(FINISH_ROOT_LIST)
+#undef FINISH_ROOT_LIST
+ FinishPersistentRootedChain<jsid>(heapRoots_[JS::RootKind::Id]);
+ FinishPersistentRootedChain<Value>(heapRoots_[JS::RootKind::Value]);
+
+ // Note that we do not finalize the Traceable list as we do not know how to
+    // safely clear members. We instead assert that none escape the RootLists.
+ // See the comment on RootLists::~RootLists for details.
+}
+
+inline void
+AutoGCRooter::trace(JSTracer* trc)
+{
+ switch (tag_) {
+ case PARSER:
+ frontend::MarkParser(trc, this);
+ return;
+
+ case VALARRAY: {
+ /*
+ * We don't know the template size parameter, but we can safely treat it
+ * as an AutoValueArray<1> because the length is stored separately.
+ */
+ AutoValueArray<1>* array = static_cast<AutoValueArray<1>*>(this);
+ TraceRootRange(trc, array->length(), array->begin(), "js::AutoValueArray");
+ return;
+ }
+
+ case IONMASM: {
+ static_cast<js::jit::MacroAssembler::AutoRooter*>(this)->masm()->trace(trc);
+ return;
+ }
+
+ case WRAPPER: {
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we mark
+ * wrapper roots in every slice. This is because of some rule-breaking
+ * in RemapAllWrappersForObject; see comment there.
+ */
+ TraceManuallyBarrieredEdge(trc, &static_cast<AutoWrapperRooter*>(this)->value.get(),
+ "JS::AutoWrapperRooter.value");
+ return;
+ }
+
+ case WRAPVECTOR: {
+ AutoWrapperVector::VectorImpl& vector = static_cast<AutoWrapperVector*>(this)->vector;
+ /*
+ * We need to use TraceManuallyBarrieredEdge here because we mark
+ * wrapper roots in every slice. This is because of some rule-breaking
+ * in RemapAllWrappersForObject; see comment there.
+ */
+ for (WrapperValue* p = vector.begin(); p < vector.end(); p++)
+ TraceManuallyBarrieredEdge(trc, &p->get(), "js::AutoWrapperVector.vector");
+ return;
+ }
+
+ case CUSTOM:
+ static_cast<JS::CustomAutoRooter*>(this)->trace(trc);
+ return;
+ }
+
+ MOZ_ASSERT(tag_ >= 0);
+ if (Value* vp = static_cast<AutoArrayRooter*>(this)->array)
+ TraceRootRange(trc, tag_, vp, "JS::AutoArrayRooter.array");
+}
+
+/* static */ void
+AutoGCRooter::traceAll(JSTracer* trc)
+{
+ for (AutoGCRooter* gcr = trc->runtime()->contextFromMainThread()->roots.autoGCRooters_; gcr; gcr = gcr->down)
+ gcr->trace(trc);
+}
+
+/* static */ void
+AutoGCRooter::traceAllWrappers(JSTracer* trc)
+{
+ JSContext* cx = trc->runtime()->contextFromMainThread();
+
+ for (AutoGCRooter* gcr = cx->roots.autoGCRooters_; gcr; gcr = gcr->down) {
+ if (gcr->tag_ == WRAPVECTOR || gcr->tag_ == WRAPPER)
+ gcr->trace(trc);
+ }
+}
+
+void
+StackShape::trace(JSTracer* trc)
+{
+ if (base)
+ TraceRoot(trc, &base, "StackShape base");
+
+ TraceRoot(trc, (jsid*) &propid, "StackShape id");
+
+ if ((attrs & JSPROP_GETTER) && rawGetter)
+ TraceRoot(trc, (JSObject**)&rawGetter, "StackShape getter");
+
+ if ((attrs & JSPROP_SETTER) && rawSetter)
+ TraceRoot(trc, (JSObject**)&rawSetter, "StackShape setter");
+}
+
+void
+PropertyDescriptor::trace(JSTracer* trc)
+{
+ if (obj)
+ TraceRoot(trc, &obj, "Descriptor::obj");
+ TraceRoot(trc, &value, "Descriptor::value");
+ if ((attrs & JSPROP_GETTER) && getter) {
+ JSObject* tmp = JS_FUNC_TO_DATA_PTR(JSObject*, getter);
+ TraceRoot(trc, &tmp, "Descriptor::get");
+ getter = JS_DATA_TO_FUNC_PTR(JSGetterOp, tmp);
+ }
+ if ((attrs & JSPROP_SETTER) && setter) {
+ JSObject* tmp = JS_FUNC_TO_DATA_PTR(JSObject*, setter);
+ TraceRoot(trc, &tmp, "Descriptor::set");
+ setter = JS_DATA_TO_FUNC_PTR(JSSetterOp, tmp);
+ }
+}
+
+void
+js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
+{
+ // FinishRoots will have asserted that every root that we do not expect
+ // is gone, so we can simply skip traceRuntime here.
+ if (rt->isBeingDestroyed())
+ return;
+
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+ if (rt->atomsCompartment(lock)->zone()->isCollecting())
+ traceRuntimeAtoms(trc, lock);
+ JSCompartment::traceIncomingCrossCompartmentEdgesForZoneGC(trc);
+ traceRuntimeCommon(trc, MarkRuntime, lock);
+}
+
+void
+js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAccess& lock)
+{
+ // Note that we *must* trace the runtime during the SHUTDOWN_GC's minor GC
+ // despite having called FinishRoots already. This is because FinishRoots
+ // does not clear the crossCompartmentWrapper map. It cannot do this
+ // because Proxy's trace for CrossCompartmentWrappers asserts presence in
+ // the map. And we can reach its trace function despite having finished the
+ // roots via the edges stored by the pre-barrier verifier when we finish
+ // the verifier for the last time.
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+
+ // FIXME: As per bug 1298816 comment 12, we should be able to remove this.
+ jit::JitRuntime::MarkJitcodeGlobalTableUnconditionally(trc);
+
+ traceRuntimeCommon(trc, TraceRuntime, lock);
+}
+
+void
+js::TraceRuntime(JSTracer* trc)
+{
+ MOZ_ASSERT(!trc->isMarkingTracer());
+
+ JSRuntime* rt = trc->runtime();
+ rt->gc.evictNursery();
+ AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
+ gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
+ rt->gc.traceRuntime(trc, prep.session().lock);
+}
+
+void
+js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock)
+{
+ MOZ_ASSERT(!rt->isBeingDestroyed());
+
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
+ traceRuntimeAtoms(trc, lock);
+ traceRuntimeCommon(trc, TraceRuntime, lock);
+}
+
+void
+js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock)
+{
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_RUNTIME_DATA);
+ MarkPermanentAtoms(trc);
+ MarkAtoms(trc, lock);
+ MarkWellKnownSymbols(trc);
+ jit::JitRuntime::Mark(trc, lock);
+}
+
+void
+js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
+ AutoLockForExclusiveAccess& lock)
+{
+ MOZ_ASSERT(!rt->mainThread.suppressGC);
+
+ {
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_STACK);
+
+ // Trace active interpreter and JIT stack roots.
+ MarkInterpreterActivations(rt, trc);
+ jit::MarkJitActivations(rt, trc);
+
+ // Trace legacy C stack roots.
+ AutoGCRooter::traceAll(trc);
+
+ for (RootRange r = rootsHash.all(); !r.empty(); r.popFront()) {
+ const RootEntry& entry = r.front();
+ TraceRoot(trc, entry.key(), entry.value());
+ }
+
+ // Trace C stack roots.
+ MarkExactStackRoots(rt, trc);
+ }
+
+ // Trace runtime global roots.
+ MarkPersistentRooted(rt, trc);
+
+ // Trace the self-hosting global compartment.
+ rt->markSelfHostingGlobal(trc);
+
+ // Trace the shared Intl data.
+ rt->traceSharedIntlData(trc);
+
+ // Trace anything in the single context. Note that this is actually the
+ // same struct as the JSRuntime, but is still split for historical reasons.
+ rt->contextFromMainThread()->mark(trc);
+
+ // Trace all compartment roots, but not the compartment itself; it is
+ // marked via the parent pointer if traceRoots actually traces anything.
+ for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
+ c->traceRoots(trc, traceOrMark);
+
+ // Trace SPS.
+ rt->spsProfiler.trace(trc);
+
+ // Trace helper thread roots.
+ HelperThreadState().trace(trc);
+
+ // Trace the embedding's black and gray roots.
+ if (!rt->isHeapMinorCollecting()) {
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_EMBEDDING);
+
+ /*
+ * The embedding can register additional roots here.
+ *
+ * We don't need to trace these in a minor GC because all pointers into
+ * the nursery should be in the store buffer, and we want to avoid the
+ * time taken to trace all these roots.
+ */
+ for (size_t i = 0; i < blackRootTracers.length(); i++) {
+ const Callback<JSTraceDataOp>& e = blackRootTracers[i];
+ (*e.op)(trc, e.data);
+ }
+
+ /* During GC, we don't mark gray roots at this stage. */
+ if (JSTraceDataOp op = grayRootTracer.op) {
+ if (traceOrMark == TraceRuntime)
+ (*op)(trc, grayRootTracer.data);
+ }
+ }
+}
+
+#ifdef DEBUG
+class AssertNoRootsTracer : public JS::CallbackTracer
+{
+ void onChild(const JS::GCCellPtr& thing) override {
+ MOZ_CRASH("There should not be any roots after finishRoots");
+ }
+
+ public:
+ AssertNoRootsTracer(JSRuntime* rt, WeakMapTraceKind weakTraceKind)
+ : JS::CallbackTracer(rt, weakTraceKind)
+ {}
+};
+#endif // DEBUG
+
+void
+js::gc::GCRuntime::finishRoots()
+{
+ rt->finishAtoms();
+
+ if (rootsHash.initialized())
+ rootsHash.clear();
+
+ rt->contextFromMainThread()->roots.finishPersistentRoots();
+
+ rt->finishSelfHosting();
+
+ for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next())
+ c->finishRoots();
+
+#ifdef DEBUG
+ // The nsWrapperCache may not be empty before our shutdown GC, so we have
+ // to skip that table when verifying that we are fully unrooted.
+ auto prior = grayRootTracer;
+ grayRootTracer = Callback<JSTraceDataOp>(nullptr, nullptr);
+
+ AssertNoRootsTracer trc(rt, TraceWeakMapKeysValues);
+ AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
+ gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
+ traceRuntime(&trc, prep.session().lock);
+
+ // Restore the wrapper tracing so that we leak instead of leaving dangling
+ // pointers.
+ grayRootTracer = prior;
+#endif // DEBUG
+}
+
+// Append traced things to a buffer on the zone for use later in the GC.
+// See the comment in GCRuntime.h above grayBufferState for details.
+class BufferGrayRootsTracer : public JS::CallbackTracer
+{
+ // Set to false if we OOM while buffering gray roots.
+ bool bufferingGrayRootsFailed;
+
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ public:
+ explicit BufferGrayRootsTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt), bufferingGrayRootsFailed(false)
+ {}
+
+ bool failed() const { return bufferingGrayRootsFailed; }
+
+#ifdef DEBUG
+ TracerKind getTracerKind() const override { return TracerKind::GrayBuffering; }
+#endif
+};
+
+#ifdef DEBUG
+// Return true if this trace is happening on behalf of gray buffering during
+// the marking phase of incremental GC.
+bool
+js::IsBufferGrayRootsTracer(JSTracer* trc)
+{
+ return trc->isCallbackTracer() &&
+ trc->asCallbackTracer()->getTracerKind() == JS::CallbackTracer::TracerKind::GrayBuffering;
+}
+#endif
+
+void
+js::gc::GCRuntime::bufferGrayRoots()
+{
+ // Precondition: the state has been reset to "unused" after the last GC
+ // and the zone's buffers have been cleared.
+ MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
+ for (GCZonesIter zone(rt); !zone.done(); zone.next())
+ MOZ_ASSERT(zone->gcGrayRoots.empty());
+
+
+ BufferGrayRootsTracer grayBufferer(rt);
+ if (JSTraceDataOp op = grayRootTracer.op)
+ (*op)(&grayBufferer, grayRootTracer.data);
+
+ // Propagate the failure flag from the marker to the runtime.
+ if (grayBufferer.failed()) {
+ grayBufferState = GrayBufferState::Failed;
+ resetBufferedGrayRoots();
+ } else {
+ grayBufferState = GrayBufferState::Okay;
+ }
+}
+
+struct SetMaybeAliveFunctor {
+ template <typename T> void operator()(T* t) { SetMaybeAliveFlag(t); }
+};
+
+void
+BufferGrayRootsTracer::onChild(const JS::GCCellPtr& thing)
+{
+ MOZ_ASSERT(runtime()->isHeapBusy());
+ MOZ_RELEASE_ASSERT(thing);
+ // Check if |thing| is corrupt by calling a method that touches the heap.
+ MOZ_RELEASE_ASSERT(thing.asCell()->getTraceKind() <= JS::TraceKind::Null);
+
+ if (bufferingGrayRootsFailed)
+ return;
+
+ gc::TenuredCell* tenured = gc::TenuredCell::fromPointer(thing.asCell());
+
+ Zone* zone = tenured->zone();
+ if (zone->isCollecting()) {
+ // See the comment on SetMaybeAliveFlag to see why we only do this for
+ // objects and scripts. We rely on gray root buffering for this to work,
+ // but we only need to worry about uncollected dead compartments during
+ // incremental GCs (when we do gray root buffering).
+ DispatchTyped(SetMaybeAliveFunctor(), thing);
+
+ if (!zone->gcGrayRoots.append(tenured))
+ bufferingGrayRootsFailed = true;
+ }
+}
+
+void
+GCRuntime::markBufferedGrayRoots(JS::Zone* zone)
+{
+ MOZ_ASSERT(grayBufferState == GrayBufferState::Okay);
+ MOZ_ASSERT(zone->isGCMarkingGray() || zone->isGCCompacting());
+
+ for (auto cell : zone->gcGrayRoots)
+ TraceManuallyBarrieredGenericPointerEdge(&marker, &cell, "buffered gray root");
+}
+
+void
+GCRuntime::resetBufferedGrayRoots() const
+{
+ MOZ_ASSERT(grayBufferState != GrayBufferState::Okay,
+ "Do not clear the gray buffers unless we are Failed or becoming Unused");
+ for (GCZonesIter zone(rt); !zone.done(); zone.next())
+ zone->gcGrayRoots.clearAndFree();
+}
+
diff --git a/js/src/gc/Rooting.h b/js/src/gc/Rooting.h
new file mode 100644
index 000000000..7a179aa28
--- /dev/null
+++ b/js/src/gc/Rooting.h
@@ -0,0 +1,82 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Rooting_h
+#define gc_Rooting_h
+
+#include "js/GCVector.h"
+#include "js/RootingAPI.h"
+
+class JSAtom;
+class JSLinearString;
+
+namespace js {
+
+class PropertyName;
+class NativeObject;
+class ArrayObject;
+class GlobalObject;
+class PlainObject;
+class ScriptSourceObject;
+class SavedFrame;
+class Shape;
+class ObjectGroup;
+class DebuggerEnvironment;
+class DebuggerFrame;
+class DebuggerObject;
+class Scope;
+
+// These are internal counterparts to the public types such as HandleObject.
+
+typedef JS::Handle<NativeObject*> HandleNativeObject;
+typedef JS::Handle<Shape*> HandleShape;
+typedef JS::Handle<ObjectGroup*> HandleObjectGroup;
+typedef JS::Handle<JSAtom*> HandleAtom;
+typedef JS::Handle<JSLinearString*> HandleLinearString;
+typedef JS::Handle<PropertyName*> HandlePropertyName;
+typedef JS::Handle<ArrayObject*> HandleArrayObject;
+typedef JS::Handle<PlainObject*> HandlePlainObject;
+typedef JS::Handle<SavedFrame*> HandleSavedFrame;
+typedef JS::Handle<ScriptSourceObject*> HandleScriptSource;
+typedef JS::Handle<DebuggerEnvironment*> HandleDebuggerEnvironment;
+typedef JS::Handle<DebuggerFrame*> HandleDebuggerFrame;
+typedef JS::Handle<DebuggerObject*> HandleDebuggerObject;
+typedef JS::Handle<Scope*> HandleScope;
+
+typedef JS::MutableHandle<Shape*> MutableHandleShape;
+typedef JS::MutableHandle<JSAtom*> MutableHandleAtom;
+typedef JS::MutableHandle<NativeObject*> MutableHandleNativeObject;
+typedef JS::MutableHandle<PlainObject*> MutableHandlePlainObject;
+typedef JS::MutableHandle<SavedFrame*> MutableHandleSavedFrame;
+typedef JS::MutableHandle<DebuggerEnvironment*> MutableHandleDebuggerEnvironment;
+typedef JS::MutableHandle<DebuggerFrame*> MutableHandleDebuggerFrame;
+typedef JS::MutableHandle<DebuggerObject*> MutableHandleDebuggerObject;
+typedef JS::MutableHandle<Scope*> MutableHandleScope;
+
+typedef JS::Rooted<NativeObject*> RootedNativeObject;
+typedef JS::Rooted<Shape*> RootedShape;
+typedef JS::Rooted<ObjectGroup*> RootedObjectGroup;
+typedef JS::Rooted<JSAtom*> RootedAtom;
+typedef JS::Rooted<JSLinearString*> RootedLinearString;
+typedef JS::Rooted<PropertyName*> RootedPropertyName;
+typedef JS::Rooted<ArrayObject*> RootedArrayObject;
+typedef JS::Rooted<GlobalObject*> RootedGlobalObject;
+typedef JS::Rooted<PlainObject*> RootedPlainObject;
+typedef JS::Rooted<SavedFrame*> RootedSavedFrame;
+typedef JS::Rooted<ScriptSourceObject*> RootedScriptSource;
+typedef JS::Rooted<DebuggerEnvironment*> RootedDebuggerEnvironment;
+typedef JS::Rooted<DebuggerFrame*> RootedDebuggerFrame;
+typedef JS::Rooted<DebuggerObject*> RootedDebuggerObject;
+typedef JS::Rooted<Scope*> RootedScope;
+
+typedef JS::GCVector<JSFunction*> FunctionVector;
+typedef JS::GCVector<PropertyName*> PropertyNameVector;
+typedef JS::GCVector<Shape*> ShapeVector;
+typedef JS::GCVector<JSString*> StringVector;
+
+} /* namespace js */
+
+#endif /* gc_Rooting_h */
diff --git a/js/src/gc/Statistics.cpp b/js/src/gc/Statistics.cpp
new file mode 100644
index 000000000..19f9986dd
--- /dev/null
+++ b/js/src/gc/Statistics.cpp
@@ -0,0 +1,1383 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Statistics.h"
+
+#include "mozilla/ArrayUtils.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/Sprintf.h"
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+
+#include "jsprf.h"
+#include "jsutil.h"
+
+#include "gc/Memory.h"
+#include "vm/Debugger.h"
+#include "vm/HelperThreads.h"
+#include "vm/Runtime.h"
+#include "vm/Time.h"
+
+using namespace js;
+using namespace js::gc;
+using namespace js::gcstats;
+
+using mozilla::DebugOnly;
+using mozilla::MakeRange;
+using mozilla::PodArrayZero;
+using mozilla::PodZero;
+
+/*
+ * If this fails, then you can either delete this assertion and allow all
+ * larger-numbered reasons to pile up in the last telemetry bucket, or switch
+ * to GC_REASON_3 and bump the max value.
+ */
+JS_STATIC_ASSERT(JS::gcreason::NUM_TELEMETRY_REASONS >= JS::gcreason::NUM_REASONS);
+
+// Return a human-readable name for a GC invocation kind. Only GC_NORMAL and
+// GC_SHRINK are valid inputs; anything else asserts in debug builds.
+const char*
+js::gcstats::ExplainInvocationKind(JSGCInvocationKind gckind)
+{
+ MOZ_ASSERT(gckind == GC_NORMAL || gckind == GC_SHRINK);
+ if (gckind == GC_NORMAL)
+ return "Normal";
+ else
+ return "Shrinking";
+}
+
+// Map a JS::gcreason::Reason enumerator to its identifier string (used in
+// logs and the JSON/compact formatters below). Crashes on out-of-range input.
+JS_PUBLIC_API(const char*)
+JS::gcreason::ExplainReason(JS::gcreason::Reason reason)
+{
+ switch (reason) {
+#define SWITCH_REASON(name) \
+ case JS::gcreason::name: \
+ return #name;
+ GCREASONS(SWITCH_REASON)
+
+ default:
+ MOZ_CRASH("bad GC reason");
+#undef SWITCH_REASON
+ }
+}
+
+// Map a gc::AbortReason (why an incremental GC was reset / made
+// non-incremental) to its identifier string. Crashes on out-of-range input.
+const char*
+js::gcstats::ExplainAbortReason(gc::AbortReason reason)
+{
+ switch (reason) {
+#define SWITCH_REASON(name) \
+ case gc::AbortReason::name: \
+ return #name;
+ GC_ABORT_REASONS(SWITCH_REASON)
+
+ default:
+ MOZ_CRASH("bad GC abort reason");
+#undef SWITCH_REASON
+ }
+}
+
+// Convert a duration in microseconds to (fractional) milliseconds for
+// printing. The terse name keeps the many format calls below readable.
+static double
+t(int64_t t)
+{
+ return double(t) / PRMJ_USEC_PER_MSEC;
+}
+
+// Static description of one GC phase: its enum value, display name, single
+// parent (or PHASE_NO_PARENT / PHASE_MULTI_PARENTS), and telemetry bucket id.
+struct PhaseInfo
+{
+ Phase index;
+ const char* name;
+ Phase parent;
+ const uint8_t telemetryBucket;
+};
+
+// The zeroth entry in the timing arrays is used for phases that have a
+// unique lineage.
+static const size_t PHASE_DAG_NONE = 0;
+
+// These are really just fields of PhaseInfo, but I have to initialize them
+// programmatically, which prevents making phases[] const. (And marking these
+// fields mutable does not work on Windows; the whole thing gets created in
+// read-only memory anyway.)
+struct ExtraPhaseInfo
+{
+ // Depth in the tree of each phase type
+ size_t depth;
+
+ // Index into the set of parallel arrays of timing data, for parents with
+ // at least one multi-parented child
+ size_t dagSlot;
+};
+
+static const Phase PHASE_NO_PARENT = PHASE_LIMIT;
+
+// Edges from each parent that shares the multi-parented PHASE_MARK_ROOTS
+// subtree. Statistics::initialize() turns these into per-parent dag slots.
+struct DagChildEdge {
+ Phase parent;
+ Phase child;
+} dagChildEdges[] = {
+ { PHASE_MARK, PHASE_MARK_ROOTS },
+ { PHASE_MINOR_GC, PHASE_MARK_ROOTS },
+ { PHASE_TRACE_HEAP, PHASE_MARK_ROOTS },
+ { PHASE_EVICT_NURSERY, PHASE_MARK_ROOTS },
+ { PHASE_COMPACT_UPDATE, PHASE_MARK_ROOTS }
+};
+
+/*
+ * Note that PHASE_MUTATOR, PHASE_GC_BEGIN, and PHASE_GC_END never have any
+ * child phases. If beginPhase is called while one of these is active, they
+ * will automatically be suspended and resumed when the phase stack is next
+ * empty. Timings for these phases are thus exclusive of any other phase.
+ */
+
+// Master phase table. Ordering matters: Statistics::initialize() computes
+// each phase's depth by walking this list with a parent stack, so every
+// child must appear after its parent, and multi-parented (PHASE_MARK_ROOTS)
+// subtrees must be listed contiguously at the end.
+static const PhaseInfo phases[] = {
+ { PHASE_MUTATOR, "Mutator Running", PHASE_NO_PARENT, 0 },
+ { PHASE_GC_BEGIN, "Begin Callback", PHASE_NO_PARENT, 1 },
+ { PHASE_WAIT_BACKGROUND_THREAD, "Wait Background Thread", PHASE_NO_PARENT, 2 },
+ { PHASE_MARK_DISCARD_CODE, "Mark Discard Code", PHASE_NO_PARENT, 3 },
+ { PHASE_RELAZIFY_FUNCTIONS, "Relazify Functions", PHASE_NO_PARENT, 4 },
+ { PHASE_PURGE, "Purge", PHASE_NO_PARENT, 5 },
+ { PHASE_MARK, "Mark", PHASE_NO_PARENT, 6 },
+ { PHASE_UNMARK, "Unmark", PHASE_MARK, 7 },
+ /* PHASE_MARK_ROOTS */
+ { PHASE_MARK_DELAYED, "Mark Delayed", PHASE_MARK, 8 },
+ { PHASE_SWEEP, "Sweep", PHASE_NO_PARENT, 9 },
+ { PHASE_SWEEP_MARK, "Mark During Sweeping", PHASE_SWEEP, 10 },
+ { PHASE_SWEEP_MARK_TYPES, "Mark Types During Sweeping", PHASE_SWEEP_MARK, 11 },
+ { PHASE_SWEEP_MARK_INCOMING_BLACK, "Mark Incoming Black Pointers", PHASE_SWEEP_MARK, 12 },
+ { PHASE_SWEEP_MARK_WEAK, "Mark Weak", PHASE_SWEEP_MARK, 13 },
+ { PHASE_SWEEP_MARK_INCOMING_GRAY, "Mark Incoming Gray Pointers", PHASE_SWEEP_MARK, 14 },
+ { PHASE_SWEEP_MARK_GRAY, "Mark Gray", PHASE_SWEEP_MARK, 15 },
+ { PHASE_SWEEP_MARK_GRAY_WEAK, "Mark Gray and Weak", PHASE_SWEEP_MARK, 16 },
+ { PHASE_FINALIZE_START, "Finalize Start Callbacks", PHASE_SWEEP, 17 },
+ { PHASE_WEAK_ZONEGROUP_CALLBACK, "Per-Slice Weak Callback", PHASE_FINALIZE_START, 57 },
+ { PHASE_WEAK_COMPARTMENT_CALLBACK, "Per-Compartment Weak Callback", PHASE_FINALIZE_START, 58 },
+ { PHASE_SWEEP_ATOMS, "Sweep Atoms", PHASE_SWEEP, 18 },
+ { PHASE_SWEEP_SYMBOL_REGISTRY, "Sweep Symbol Registry", PHASE_SWEEP, 19 },
+ { PHASE_SWEEP_COMPARTMENTS, "Sweep Compartments", PHASE_SWEEP, 20 },
+ { PHASE_SWEEP_DISCARD_CODE, "Sweep Discard Code", PHASE_SWEEP_COMPARTMENTS, 21 },
+ { PHASE_SWEEP_INNER_VIEWS, "Sweep Inner Views", PHASE_SWEEP_COMPARTMENTS, 22 },
+ { PHASE_SWEEP_CC_WRAPPER, "Sweep Cross Compartment Wrappers", PHASE_SWEEP_COMPARTMENTS, 23 },
+ { PHASE_SWEEP_BASE_SHAPE, "Sweep Base Shapes", PHASE_SWEEP_COMPARTMENTS, 24 },
+ { PHASE_SWEEP_INITIAL_SHAPE, "Sweep Initial Shapes", PHASE_SWEEP_COMPARTMENTS, 25 },
+ { PHASE_SWEEP_TYPE_OBJECT, "Sweep Type Objects", PHASE_SWEEP_COMPARTMENTS, 26 },
+ { PHASE_SWEEP_BREAKPOINT, "Sweep Breakpoints", PHASE_SWEEP_COMPARTMENTS, 27 },
+ { PHASE_SWEEP_REGEXP, "Sweep Regexps", PHASE_SWEEP_COMPARTMENTS, 28 },
+ { PHASE_SWEEP_MISC, "Sweep Miscellaneous", PHASE_SWEEP_COMPARTMENTS, 29 },
+ { PHASE_SWEEP_TYPES, "Sweep type information", PHASE_SWEEP_COMPARTMENTS, 30 },
+ { PHASE_SWEEP_TYPES_BEGIN, "Sweep type tables and compilations", PHASE_SWEEP_TYPES, 31 },
+ { PHASE_SWEEP_TYPES_END, "Free type arena", PHASE_SWEEP_TYPES, 32 },
+ { PHASE_SWEEP_OBJECT, "Sweep Object", PHASE_SWEEP, 33 },
+ { PHASE_SWEEP_STRING, "Sweep String", PHASE_SWEEP, 34 },
+ { PHASE_SWEEP_SCRIPT, "Sweep Script", PHASE_SWEEP, 35 },
+ { PHASE_SWEEP_SCOPE, "Sweep Scope", PHASE_SWEEP, 59 },
+ { PHASE_SWEEP_SHAPE, "Sweep Shape", PHASE_SWEEP, 36 },
+ { PHASE_SWEEP_JITCODE, "Sweep JIT code", PHASE_SWEEP, 37 },
+ { PHASE_FINALIZE_END, "Finalize End Callback", PHASE_SWEEP, 38 },
+ { PHASE_DESTROY, "Deallocate", PHASE_SWEEP, 39 },
+ { PHASE_COMPACT, "Compact", PHASE_NO_PARENT, 40 },
+ { PHASE_COMPACT_MOVE, "Compact Move", PHASE_COMPACT, 41 },
+ { PHASE_COMPACT_UPDATE, "Compact Update", PHASE_COMPACT, 42 },
+ /* PHASE_MARK_ROOTS */
+ { PHASE_COMPACT_UPDATE_CELLS, "Compact Update Cells", PHASE_COMPACT_UPDATE, 43 },
+ { PHASE_GC_END, "End Callback", PHASE_NO_PARENT, 44 },
+ { PHASE_MINOR_GC, "All Minor GCs", PHASE_NO_PARENT, 45 },
+ /* PHASE_MARK_ROOTS */
+ { PHASE_EVICT_NURSERY, "Minor GCs to Evict Nursery", PHASE_NO_PARENT, 46 },
+ /* PHASE_MARK_ROOTS */
+ { PHASE_TRACE_HEAP, "Trace Heap", PHASE_NO_PARENT, 47 },
+ /* PHASE_MARK_ROOTS */
+ { PHASE_BARRIER, "Barriers", PHASE_NO_PARENT, 55 },
+ { PHASE_UNMARK_GRAY, "Unmark gray", PHASE_BARRIER, 56 },
+ { PHASE_MARK_ROOTS, "Mark Roots", PHASE_MULTI_PARENTS, 48 },
+ { PHASE_BUFFER_GRAY_ROOTS, "Buffer Gray Roots", PHASE_MARK_ROOTS, 49 },
+ { PHASE_MARK_CCWS, "Mark Cross Compartment Wrappers", PHASE_MARK_ROOTS, 50 },
+ { PHASE_MARK_STACK, "Mark C and JS stacks", PHASE_MARK_ROOTS, 51 },
+ { PHASE_MARK_RUNTIME_DATA, "Mark Runtime-wide Data", PHASE_MARK_ROOTS, 52 },
+ { PHASE_MARK_EMBEDDING, "Mark Embedding", PHASE_MARK_ROOTS, 53 },
+ { PHASE_MARK_COMPARTMENTS, "Mark Compartments", PHASE_MARK_ROOTS, 54 },
+ { PHASE_PURGE_SHAPE_TABLES, "Purge ShapeTables", PHASE_NO_PARENT, 60 },
+
+ { PHASE_LIMIT, nullptr, PHASE_NO_PARENT, 60 }
+
+ // Current number of telemetryBuckets is 60. If you insert new phases
+ // somewhere, start at that number and count up. Do not change any existing
+ // numbers.
+};
+
+// Per-phase computed data (depth, dag slot); filled in by initialize().
+static ExtraPhaseInfo phaseExtra[PHASE_LIMIT] = { { 0, 0 } };
+
+// Mapping from all nodes with a multi-parented child to a Vector of all
+// multi-parented children and their descendants. (Single-parented children will
+// not show up in this list.)
+static mozilla::Vector<Phase, 0, SystemAllocPolicy> dagDescendants[Statistics::NumTimingArrays];
+
+// Preorder iterator over all phases in the expanded tree. Positions are
+// returned as <phase,dagSlot> pairs (dagSlot will be zero aka PHASE_DAG_NONE
+// for the top nodes with a single path from the parent, and 1 or more for
+// nodes in multiparented subtrees).
+struct AllPhaseIterator {
+ // If 'descendants' is empty, the current Phase position.
+ int current;
+
+ // The depth of the current multiparented node that we are processing, or
+ // zero if we are pointing to the top portion of the tree.
+ int baseLevel;
+
+ // When looking at multiparented descendants, the dag slot (index into
+ // PhaseTimeTables) containing the entries for the current parent.
+ size_t activeSlot;
+
+ // When iterating over a multiparented subtree, the list of (remaining)
+ // subtree nodes.
+ mozilla::Vector<Phase, 0, SystemAllocPolicy>::Range descendants;
+
+ // The table parameter is unused; iteration order depends only on the
+ // static phases[]/phaseExtra/dagDescendants tables.
+ explicit AllPhaseIterator(const Statistics::PhaseTimeTable table)
+ : current(0)
+ , baseLevel(0)
+ , activeSlot(PHASE_DAG_NONE)
+ , descendants(dagDescendants[PHASE_DAG_NONE].all()) /* empty range */
+ {
+ }
+
+ // Report the current position. 'level' (if requested) is the indentation
+ // depth: the phase's static depth plus the depth of the active subtree.
+ void get(Phase* phase, size_t* dagSlot, size_t* level = nullptr) {
+ MOZ_ASSERT(!done());
+ *dagSlot = activeSlot;
+ *phase = descendants.empty() ? Phase(current) : descendants.front();
+ if (level)
+ *level = phaseExtra[*phase].depth + baseLevel;
+ }
+
+ // Step to the next phase in preorder, descending into a multiparented
+ // subtree when the current phase owns a dag slot.
+ void advance() {
+ MOZ_ASSERT(!done());
+
+ if (!descendants.empty()) {
+ // Currently iterating over a multiparented subtree.
+ descendants.popFront();
+ if (!descendants.empty())
+ return;
+
+ // Just before leaving the last child, reset the iterator to look
+ // at "main" phases (in PHASE_DAG_NONE) instead of multiparented
+ // subtree phases.
+ ++current;
+ activeSlot = PHASE_DAG_NONE;
+ baseLevel = 0;
+ return;
+ }
+
+ if (phaseExtra[current].dagSlot != PHASE_DAG_NONE) {
+ // The current phase has a shared subtree. Load them up into
+ // 'descendants' and advance to the first child.
+ activeSlot = phaseExtra[current].dagSlot;
+ descendants = dagDescendants[activeSlot].all();
+ MOZ_ASSERT(!descendants.empty());
+ baseLevel += phaseExtra[current].depth + 1;
+ return;
+ }
+
+ ++current;
+ }
+
+ // Iteration ends at the first multiparented entry in phases[]; those
+ // entries are only visited via the dagDescendants ranges above.
+ bool done() const {
+ return phases[current].parent == PHASE_MULTI_PARENTS;
+ }
+};
+
+// Compute the total duration and longest single pause over all recorded
+// slices. NOTE(review): despite being const, this updates the (presumably
+// mutable) maxPauseInInterval accumulator as a side effect — callers of the
+// MMU/interval APIs rely on this; confirm the member is declared mutable.
+void
+Statistics::gcDuration(int64_t* total, int64_t* maxPause) const
+{
+ *total = *maxPause = 0;
+ for (const SliceData* slice = slices.begin(); slice != slices.end(); slice++) {
+ *total += slice->duration();
+ if (slice->duration() > *maxPause)
+ *maxPause = slice->duration();
+ }
+ if (*maxPause > maxPauseInInterval)
+ maxPauseInInterval = *maxPause;
+}
+
+// Compute the total and maximum time spent sweeping strongly connected
+// compartment groups in this GC, from the per-group sccTimes vector.
+void
+Statistics::sccDurations(int64_t* total, int64_t* maxPause)
+{
+ *total = *maxPause = 0;
+ for (size_t i = 0; i < sccTimes.length(); i++) {
+ *total += sccTimes[i];
+ *maxPause = Max(*maxPause, sccTimes[i]);
+ }
+}
+
+typedef Vector<UniqueChars, 8, SystemAllocPolicy> FragmentVector;
+
+// Concatenate all non-null fragments into one heap-allocated string,
+// inserting 'separator' between consecutive entries (including around null
+// fragments, whose text contributes nothing).
+static UniqueChars
+Join(const FragmentVector& fragments, const char* separator = "") {
+ const size_t separatorLength = strlen(separator);
+ size_t length = 0;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ length += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1))
+ length += separatorLength;
+ }
+
+ // NOTE(review): the allocation result is not null-checked; on OOM the
+ // store below writes through nullptr. Consider bailing out with
+ // UniqueChars(nullptr) when js_pod_malloc fails.
+ char* joined = js_pod_malloc<char>(length + 1);
+ joined[length] = '\0';
+
+ char* cursor = joined;
+ for (size_t i = 0; i < fragments.length(); ++i) {
+ if (fragments[i])
+ strcpy(cursor, fragments[i].get());
+ cursor += fragments[i] ? strlen(fragments[i].get()) : 0;
+ if (i < (fragments.length() - 1)) {
+ if (separatorLength)
+ strcpy(cursor, separator);
+ cursor += separatorLength;
+ }
+ }
+
+ return UniqueChars(joined);
+}
+
+// Total time attributed to all children of 'phase' within one timing array
+// slot: single-parented children are found by scanning forward through
+// phases[] while depth stays below ours; multi-parented children are summed
+// from this phase's dag slot.
+static int64_t
+SumChildTimes(size_t phaseSlot, Phase phase, const Statistics::PhaseTimeTable phaseTimes)
+{
+ // Sum the contributions from single-parented children.
+ int64_t total = 0;
+ size_t depth = phaseExtra[phase].depth;
+ for (unsigned i = phase + 1; i < PHASE_LIMIT && phaseExtra[i].depth > depth; i++) {
+ if (phases[i].parent == phase)
+ total += phaseTimes[phaseSlot][i];
+ }
+
+ // Sum the contributions from multi-parented children.
+ size_t dagSlot = phaseExtra[phase].dagSlot;
+ if (dagSlot != PHASE_DAG_NONE) {
+ for (auto edge : dagChildEdges) {
+ if (edge.parent == phase)
+ total += phaseTimes[dagSlot][edge.child];
+ }
+ }
+ return total;
+}
+
+// Format a one-line summary of the most recent slice (pause, budget, reason,
+// reset state) followed by its significant phase times. Returns null if no
+// slice data was recorded (e.g. after OOM) or on allocation failure.
+UniqueChars
+Statistics::formatCompactSliceMessage() const
+{
+ // Skip if we OOM'ed.
+ if (slices.length() == 0)
+ return UniqueChars(nullptr);
+
+ const size_t index = slices.length() - 1;
+ const SliceData& slice = slices[index];
+
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+ "GC Slice %u - Pause: %.3fms of %s budget (@ %.3fms); Reason: %s; Reset: %s%s; Times: ";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, index,
+ t(slice.duration()), budgetDescription, t(slice.start - slices[0].start),
+ ExplainReason(slice.reason),
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "");
+
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString(buffer)) ||
+ !fragments.append(formatCompactSlicePhaseTimes(slices[index].phaseTimes)))
+ {
+ return UniqueChars(nullptr);
+ }
+ return Join(fragments);
+}
+
+// Format a one-line whole-GC summary: pause/MMU figures (or the
+// non-incremental reason), zone/compartment/heap counters, and — for
+// shrinking GCs — relocation volume. Returns null on allocation failure.
+UniqueChars
+Statistics::formatCompactSummaryMessage() const
+{
+ const double bytesPerMiB = 1024 * 1024;
+
+ FragmentVector fragments;
+ if (!fragments.append(DuplicateString("Summary - ")))
+ return UniqueChars(nullptr);
+
+ int64_t total, longest;
+ gcDuration(&total, &longest);
+
+ const double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
+ const double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
+
+ char buffer[1024];
+ if (!nonincremental()) {
+ SprintfLiteral(buffer,
+ "Max Pause: %.3fms; MMU 20ms: %.1f%%; MMU 50ms: %.1f%%; Total: %.3fms; ",
+ t(longest), mmu20 * 100., mmu50 * 100., t(total));
+ } else {
+ SprintfLiteral(buffer, "Non-Incremental: %.3fms (%s); ",
+ t(total), ExplainAbortReason(nonincrementalReason_));
+ }
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+
+ SprintfLiteral(buffer,
+ "Zones: %d of %d (-%d); Compartments: %d of %d (-%d); HeapSize: %.3f MiB; " \
+ "HeapChange (abs): %+d (%d); ",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount, zoneStats.sweptZoneCount,
+ zoneStats.collectedCompartmentCount, zoneStats.compartmentCount,
+ zoneStats.sweptCompartmentCount,
+ double(preBytes) / bytesPerMiB,
+ counts[STAT_NEW_CHUNK] - counts[STAT_DESTROY_CHUNK],
+ counts[STAT_NEW_CHUNK] + counts[STAT_DESTROY_CHUNK]);
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+
+ MOZ_ASSERT_IF(counts[STAT_ARENA_RELOCATED], gckind == GC_SHRINK);
+ if (gckind == GC_SHRINK) {
+ SprintfLiteral(buffer,
+ "Kind: %s; Relocated: %.3f MiB; ",
+ ExplainInvocationKind(gckind),
+ double(ArenaSize * counts[STAT_ARENA_RELOCATED]) / bytesPerMiB);
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+ }
+
+ return Join(fragments);
+}
+
+// Format comma-separated "Phase: N.NNNms" entries for every phase whose own
+// time exceeds 100us, plus an "Other" entry when unattributed child time is
+// also significant. Returns null on allocation failure.
+UniqueChars
+Statistics::formatCompactSlicePhaseTimes(const PhaseTimeTable phaseTimes) const
+{
+ static const int64_t MaxUnaccountedTimeUS = 100;
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (AllPhaseIterator iter(phaseTimes); !iter.done(); iter.advance()) {
+ Phase phase;
+ size_t dagSlot;
+ size_t level;
+ iter.get(&phase, &dagSlot, &level);
+ MOZ_ASSERT(level < 4);
+
+ int64_t ownTime = phaseTimes[dagSlot][phase];
+ int64_t childTime = SumChildTimes(dagSlot, phase, phaseTimes);
+ if (ownTime > MaxUnaccountedTimeUS) {
+ SprintfLiteral(buffer, "%s: %.3fms", phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedTimeUS) {
+ MOZ_ASSERT(level < 3);
+ SprintfLiteral(buffer, "%s: %.3fms", "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ return Join(fragments, ", ");
+}
+
+// Build the full multi-line GC report: overall description, then (for
+// multi-slice GCs) per-slice descriptions and phase times, then totals.
+// Returns null on allocation failure.
+UniqueChars
+Statistics::formatDetailedMessage()
+{
+ FragmentVector fragments;
+
+ if (!fragments.append(formatDetailedDescription()))
+ return UniqueChars(nullptr);
+
+ // Per-slice breakdowns are only interesting when there was more than one.
+ if (slices.length() > 1) {
+ for (unsigned i = 0; i < slices.length(); i++) {
+ if (!fragments.append(formatDetailedSliceDescription(i, slices[i])))
+ return UniqueChars(nullptr);
+ if (!fragments.append(formatDetailedPhaseTimes(slices[i].phaseTimes)))
+ return UniqueChars(nullptr);
+ }
+ }
+ if (!fragments.append(formatDetailedTotals()))
+ return UniqueChars(nullptr);
+ if (!fragments.append(formatDetailedPhaseTimes(phaseTimes)))
+ return UniqueChars(nullptr);
+
+ return Join(fragments);
+}
+
+// Format the whole-GC header block of the detailed report (kind, reason,
+// incrementality, zone/compartment counts, MMU, SCC sweep times, heap size,
+// chunk delta, relocation volume).
+UniqueChars
+Statistics::formatDetailedDescription()
+{
+ const double bytesPerMiB = 1024 * 1024;
+
+ int64_t sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+
+ double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
+ double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
+
+ const char* format =
+"=================================================================\n\
+ Invocation Kind: %s\n\
+ Reason: %s\n\
+ Incremental: %s%s\n\
+ Zones Collected: %d of %d (-%d)\n\
+ Compartments Collected: %d of %d (-%d)\n\
+ MinorGCs since last GC: %d\n\
+ Store Buffer Overflows: %d\n\
+ MMU 20ms:%.1f%%; 50ms:%.1f%%\n\
+ SCC Sweep Total (MaxPause): %.3fms (%.3fms)\n\
+ HeapSize: %.3f MiB\n\
+ Chunk Delta (magnitude): %+d (%d)\n\
+ Arenas Relocated: %.3f MiB\n\
+";
+ char buffer[1024];
+ SprintfLiteral(buffer, format,
+ ExplainInvocationKind(gckind),
+ ExplainReason(slices[0].reason),
+ nonincremental() ? "no - " : "yes",
+ nonincremental() ? ExplainAbortReason(nonincrementalReason_) : "",
+ zoneStats.collectedZoneCount, zoneStats.zoneCount, zoneStats.sweptZoneCount,
+ zoneStats.collectedCompartmentCount, zoneStats.compartmentCount,
+ zoneStats.sweptCompartmentCount,
+ counts[STAT_MINOR_GC],
+ counts[STAT_STOREBUFFER_OVERFLOW],
+ mmu20 * 100., mmu50 * 100.,
+ t(sccTotal), t(sccLongest),
+ double(preBytes) / bytesPerMiB,
+ counts[STAT_NEW_CHUNK] - counts[STAT_DESTROY_CHUNK],
+ counts[STAT_NEW_CHUNK] + counts[STAT_DESTROY_CHUNK],
+ double(ArenaSize * counts[STAT_ARENA_RELOCATED]) / bytesPerMiB);
+ return DuplicateString(buffer);
+}
+
+// Format the per-slice header block of the detailed report (reason, reset
+// state, GC state transition, page faults, pause vs. budget).
+// NOTE(review): the "Page Faults" specifier is %ld but the argument is cast
+// to uint64_t — mismatched on platforms where long is 32-bit; should be
+// PRIu64 (or %llu with an unsigned long long cast).
+UniqueChars
+Statistics::formatDetailedSliceDescription(unsigned i, const SliceData& slice)
+{
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+
+ const char* format =
+"\
+ ---- Slice %u ----\n\
+ Reason: %s\n\
+ Reset: %s%s\n\
+ State: %s -> %s\n\
+ Page Faults: %ld\n\
+ Pause: %.3fms of %s budget (@ %.3fms)\n\
+";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, i, ExplainReason(slice.reason),
+ slice.wasReset() ? "yes - " : "no",
+ slice.wasReset() ? ExplainAbortReason(slice.resetReason) : "",
+ gc::StateName(slice.initialState), gc::StateName(slice.finalState),
+ uint64_t(slice.endFaults - slice.startFaults),
+ t(slice.duration()), budgetDescription, t(slice.start - slices[0].start));
+ return DuplicateString(buffer);
+}
+
+// Format the indented phase-time tree for one timing table, one line per
+// phase with nonzero own time, plus an "Other" line when more than 50us of a
+// parent's time is not attributed to any child.
+UniqueChars
+Statistics::formatDetailedPhaseTimes(const PhaseTimeTable phaseTimes)
+{
+ static const char* LevelToIndent[] = { "", " ", " ", " " };
+ static const int64_t MaxUnaccountedChildTimeUS = 50;
+
+ FragmentVector fragments;
+ char buffer[128];
+ for (AllPhaseIterator iter(phaseTimes); !iter.done(); iter.advance()) {
+ Phase phase;
+ size_t dagSlot;
+ size_t level;
+ iter.get(&phase, &dagSlot, &level);
+ MOZ_ASSERT(level < 4);
+
+ int64_t ownTime = phaseTimes[dagSlot][phase];
+ int64_t childTime = SumChildTimes(dagSlot, phase, phaseTimes);
+ if (ownTime > 0) {
+ SprintfLiteral(buffer, " %s%s: %.3fms\n",
+ LevelToIndent[level], phases[phase].name, t(ownTime));
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+
+ if (childTime && (ownTime - childTime) > MaxUnaccountedChildTimeUS) {
+ MOZ_ASSERT(level < 3);
+ SprintfLiteral(buffer, " %s%s: %.3fms\n",
+ LevelToIndent[level + 1], "Other", t(ownTime - childTime));
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+ }
+ }
+ }
+ return Join(fragments);
+}
+
+// Format the closing totals block of the detailed report (total time and
+// longest pause over all slices).
+UniqueChars
+Statistics::formatDetailedTotals()
+{
+ int64_t total, longest;
+ gcDuration(&total, &longest);
+
+ const char* format =
+"\
+ ---- Totals ----\n\
+ Total Time: %.3fms\n\
+ Max Pause: %.3fms\n\
+";
+ char buffer[1024];
+ SprintfLiteral(buffer, format, t(total), t(longest));
+ return DuplicateString(buffer);
+}
+
+// Assemble the complete JSON report: GC-level description, per-slice objects
+// with their phase times, and a totals object. Must not be called after an
+// OOM during collection (aborted). Returns null on allocation failure.
+UniqueChars
+Statistics::formatJsonMessage(uint64_t timestamp)
+{
+ MOZ_ASSERT(!aborted);
+
+ FragmentVector fragments;
+
+ if (!fragments.append(DuplicateString("{")) ||
+ !fragments.append(formatJsonDescription(timestamp)) ||
+ !fragments.append(DuplicateString("\"slices\":[")))
+ {
+ return UniqueChars(nullptr);
+ }
+
+ for (unsigned i = 0; i < slices.length(); i++) {
+ if (!fragments.append(DuplicateString("{")) ||
+ !fragments.append(formatJsonSliceDescription(i, slices[i])) ||
+ !fragments.append(DuplicateString("\"times\":{")) ||
+ !fragments.append(formatJsonPhaseTimes(slices[i].phaseTimes)) ||
+ !fragments.append(DuplicateString("}}")) ||
+ (i < (slices.length() - 1) && !fragments.append(DuplicateString(","))))
+ {
+ return UniqueChars(nullptr);
+ }
+ }
+
+ if (!fragments.append(DuplicateString("],\"totals\":{")) ||
+ !fragments.append(formatJsonPhaseTimes(phaseTimes)) ||
+ !fragments.append(DuplicateString("}}")))
+ {
+ return UniqueChars(nullptr);
+ }
+
+ return Join(fragments);
+}
+
+// Format the GC-level key/value prefix of the JSON report (trailing comma
+// included; the caller appends the slices array after it).
+// NOTE(review): several %llu.%03llu specifiers are fed int64_t expressions
+// (longest/total/sccTotal/sccLongest) without a cast — only 'timestamp' is
+// cast. Undefined behavior where int64_t != unsigned long long; prefer
+// PRId64/PRIu64 or explicit casts as in formatJsonPhaseTimes below.
+UniqueChars
+Statistics::formatJsonDescription(uint64_t timestamp)
+{
+ int64_t total, longest;
+ gcDuration(&total, &longest);
+
+ int64_t sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+
+ double mmu20 = computeMMU(20 * PRMJ_USEC_PER_MSEC);
+ double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
+
+ const char *format =
+ "\"timestamp\":%llu,"
+ "\"max_pause\":%llu.%03llu,"
+ "\"total_time\":%llu.%03llu,"
+ "\"zones_collected\":%d,"
+ "\"total_zones\":%d,"
+ "\"total_compartments\":%d,"
+ "\"minor_gcs\":%d,"
+ "\"store_buffer_overflows\":%d,"
+ "\"mmu_20ms\":%d,"
+ "\"mmu_50ms\":%d,"
+ "\"scc_sweep_total\":%llu.%03llu,"
+ "\"scc_sweep_max_pause\":%llu.%03llu,"
+ "\"nonincremental_reason\":\"%s\","
+ "\"allocated\":%u,"
+ "\"added_chunks\":%d,"
+ "\"removed_chunks\":%d,";
+ char buffer[1024];
+ SprintfLiteral(buffer, format,
+ (unsigned long long)timestamp,
+ longest / 1000, longest % 1000,
+ total / 1000, total % 1000,
+ zoneStats.collectedZoneCount,
+ zoneStats.zoneCount,
+ zoneStats.compartmentCount,
+ counts[STAT_MINOR_GC],
+ counts[STAT_STOREBUFFER_OVERFLOW],
+ int(mmu20 * 100),
+ int(mmu50 * 100),
+ sccTotal / 1000, sccTotal % 1000,
+ sccLongest / 1000, sccLongest % 1000,
+ ExplainAbortReason(nonincrementalReason_),
+ unsigned(preBytes / 1024 / 1024),
+ counts[STAT_NEW_CHUNK],
+ counts[STAT_DESTROY_CHUNK]);
+ return DuplicateString(buffer);
+}
+
+// Format the per-slice key/value prefix of one JSON slice object (trailing
+// comma included; the caller appends the "times" object after it).
+// NOTE(review): as in formatJsonDescription, int64_t values (duration, when,
+// pageFaults, slice.start/end) are passed for %llu specifiers without casts;
+// prefer PRId64 or explicit unsigned long long casts.
+UniqueChars
+Statistics::formatJsonSliceDescription(unsigned i, const SliceData& slice)
+{
+ int64_t duration = slice.duration();
+ int64_t when = slice.start - slices[0].start;
+ char budgetDescription[200];
+ slice.budget.describe(budgetDescription, sizeof(budgetDescription) - 1);
+ int64_t pageFaults = slice.endFaults - slice.startFaults;
+
+ const char* format =
+ "\"slice\":%d,"
+ "\"pause\":%llu.%03llu,"
+ "\"when\":%llu.%03llu,"
+ "\"reason\":\"%s\","
+ "\"initial_state\":\"%s\","
+ "\"final_state\":\"%s\","
+ "\"budget\":\"%s\","
+ "\"page_faults\":%llu,"
+ "\"start_timestamp\":%llu,"
+ "\"end_timestamp\":%llu,";
+ char buffer[1024];
+ SprintfLiteral(buffer, format,
+ i,
+ duration / 1000, duration % 1000,
+ when / 1000, when % 1000,
+ ExplainReason(slice.reason),
+ gc::StateName(slice.initialState),
+ gc::StateName(slice.finalState),
+ budgetDescription,
+ pageFaults,
+ slice.start,
+ slice.end);
+ return DuplicateString(buffer);
+}
+
+// Turn a phase display name into a JSON-safe key: lowercase letters kept,
+// everything else replaced with '_'.
+// NOTE(review): (1) strdup's result is not null-checked before being written
+// through; (2) passing a plain char to isalpha/isupper/tolower is UB when
+// char is signed and the value is negative — cast through unsigned char;
+// (3) strdup allocates with malloc while UniqueChars presumably frees with
+// js_free — TODO confirm the deleter is malloc-compatible in this build.
+UniqueChars
+FilterJsonKey(const char*const buffer)
+{
+ char* mut = strdup(buffer);
+ char* c = mut;
+ while (*c) {
+ if (!isalpha(*c))
+ *c = '_';
+ else if (isupper(*c))
+ *c = tolower(*c);
+ ++c;
+ }
+ return UniqueChars(mut);
+}
+
+// Format a comma-separated list of "key":seconds.millis entries for every
+// phase with nonzero own time in the given timing table.
+UniqueChars
+Statistics::formatJsonPhaseTimes(const PhaseTimeTable phaseTimes)
+{
+ FragmentVector fragments;
+ char buffer[128];
+ for (AllPhaseIterator iter(phaseTimes); !iter.done(); iter.advance()) {
+ Phase phase;
+ size_t dagSlot;
+ iter.get(&phase, &dagSlot);
+
+ UniqueChars name = FilterJsonKey(phases[phase].name);
+ int64_t ownTime = phaseTimes[dagSlot][phase];
+ if (ownTime > 0) {
+ SprintfLiteral(buffer, "\"%s\":%" PRId64 ".%03" PRId64,
+ name.get(), ownTime / 1000, ownTime % 1000);
+
+ if (!fragments.append(DuplicateString(buffer)))
+ return UniqueChars(nullptr);
+ }
+ }
+ return Join(fragments, ",");
+}
+
+// Construct per-runtime GC statistics state: zero all timing tables, then
+// honor two environment variables — MOZ_GCTIMER selects the log destination
+// ("none"/"stdout"/"stderr"/a file path opened for append), and
+// JS_GC_PROFILE enables reporting of major GCs longer than N ms.
+Statistics::Statistics(JSRuntime* rt)
+ : runtime(rt),
+ startupTime(PRMJ_Now()),
+ fp(nullptr),
+ gcDepth(0),
+ nonincrementalReason_(gc::AbortReason::None),
+ timedGCStart(0),
+ preBytes(0),
+ maxPauseInInterval(0),
+ phaseNestingDepth(0),
+ activeDagSlot(PHASE_DAG_NONE),
+ suspended(0),
+ sliceCallback(nullptr),
+ nurseryCollectionCallback(nullptr),
+ aborted(false),
+ enableProfiling_(false),
+ sliceCount_(0)
+{
+ PodArrayZero(phaseTotals);
+ PodArrayZero(counts);
+ PodArrayZero(phaseStartTimes);
+ for (auto d : MakeRange(NumTimingArrays))
+ PodArrayZero(phaseTimes[d]);
+
+ const char* env = getenv("MOZ_GCTIMER");
+ if (env) {
+ if (strcmp(env, "none") == 0) {
+ fp = nullptr;
+ } else if (strcmp(env, "stdout") == 0) {
+ fp = stdout;
+ } else if (strcmp(env, "stderr") == 0) {
+ fp = stderr;
+ } else {
+ fp = fopen(env, "a");
+ if (!fp)
+ MOZ_CRASH("Failed to open MOZ_GCTIMER log file.");
+ }
+ }
+
+ env = getenv("JS_GC_PROFILE");
+ if (env) {
+ if (0 == strcmp(env, "help")) {
+ fprintf(stderr, "JS_GC_PROFILE=N\n"
+ "\tReport major GC's taking more than N milliseconds.\n");
+ exit(0);
+ }
+ enableProfiling_ = true;
+ // NOTE(review): atoi silently yields 0 for non-numeric input, which
+ // makes every GC "over threshold"; strtol with error checking would
+ // distinguish that case.
+ profileThreshold_ = atoi(env);
+ }
+
+ PodZero(&totalTimes_);
+}
+
+// Close the MOZ_GCTIMER log file if we opened one (but never the shared
+// stdout/stderr streams).
+Statistics::~Statistics()
+{
+ if (fp && fp != stdout && fp != stderr)
+ fclose(fp);
+}
+
+// One-time static setup: sanity-check the phases[] table (indexes in order,
+// telemetry buckets unique — O(PHASE_LIMIT^2), fine at startup), build the
+// dagDescendants lists for each parent of a multi-parented subtree, and
+// compute each phase's tree depth. Returns false on OOM.
+/* static */ bool
+Statistics::initialize()
+{
+ for (size_t i = 0; i < PHASE_LIMIT; i++) {
+ MOZ_ASSERT(phases[i].index == i);
+ for (size_t j = 0; j < PHASE_LIMIT; j++)
+ MOZ_ASSERT_IF(i != j, phases[i].telemetryBucket != phases[j].telemetryBucket);
+ }
+
+ // Create a static table of descendants for every phase with multiple
+ // children. This assumes that all descendants come linearly in the
+ // list, which is reasonable since full dags are not supported; any
+ // path from the leaf to the root must encounter at most one node with
+ // multiple parents.
+ size_t dagSlot = 0;
+ for (size_t i = 0; i < mozilla::ArrayLength(dagChildEdges); i++) {
+ Phase parent = dagChildEdges[i].parent;
+ if (!phaseExtra[parent].dagSlot)
+ phaseExtra[parent].dagSlot = ++dagSlot;
+
+ Phase child = dagChildEdges[i].child;
+ MOZ_ASSERT(phases[child].parent == PHASE_MULTI_PARENTS);
+ int j = child;
+ do {
+ if (!dagDescendants[phaseExtra[parent].dagSlot].append(Phase(j)))
+ return false;
+ j++;
+ } while (j != PHASE_LIMIT && phases[j].parent != PHASE_MULTI_PARENTS);
+ }
+ MOZ_ASSERT(dagSlot <= MaxMultiparentPhases - 1);
+
+ // Fill in the depth of each node in the tree. Multi-parented nodes
+ // have depth 0.
+ mozilla::Vector<Phase, 0, SystemAllocPolicy> stack;
+ if (!stack.append(PHASE_LIMIT)) // Dummy entry to avoid special-casing the first node
+ return false;
+ for (int i = 0; i < PHASE_LIMIT; i++) {
+ if (phases[i].parent == PHASE_NO_PARENT ||
+ phases[i].parent == PHASE_MULTI_PARENTS)
+ {
+ stack.clear();
+ } else {
+ while (stack.back() != phases[i].parent)
+ stack.popBack();
+ }
+ phaseExtra[i].depth = stack.length();
+ if (!stack.append(Phase(i)))
+ return false;
+ }
+
+ return true;
+}
+
+// Install a new per-slice callback, returning the previous one so the
+// embedder can chain or restore it.
+JS::GCSliceCallback
+Statistics::setSliceCallback(JS::GCSliceCallback newCallback)
+{
+ JS::GCSliceCallback oldCallback = sliceCallback;
+ sliceCallback = newCallback;
+ return oldCallback;
+}
+
+// Install a new nursery-collection callback, returning the previous one.
+JS::GCNurseryCollectionCallback
+Statistics::setNurseryCollectionCallback(JS::GCNurseryCollectionCallback newCallback)
+{
+ auto oldCallback = nurseryCollectionCallback;
+ nurseryCollectionCallback = newCallback;
+ return oldCallback;
+}
+
+// Return the longest pause seen since the last clear, and reset the
+// accumulator (which gcDuration() keeps up to date).
+int64_t
+Statistics::clearMaxGCPauseAccumulator()
+{
+ int64_t prior = maxPauseInInterval;
+ maxPauseInInterval = 0;
+ return prior;
+}
+
+// Peek at the longest pause seen since the last clear, without resetting.
+int64_t
+Statistics::getMaxGCPauseSinceClear()
+{
+ return maxPauseInInterval;
+}
+
+// Sum up the time for a phase, including instances of the phase with different
+// parents.
+static int64_t
+SumPhase(Phase phase, const Statistics::PhaseTimeTable times)
+{
+ int64_t sum = 0;
+ for (auto i : MakeRange(Statistics::NumTimingArrays))
+ sum += times[i][phase];
+ return sum;
+}
+
+// Find the phase with the largest summed time across all timing arrays;
+// returns PHASE_NONE when every phase recorded zero time.
+static Phase
+LongestPhase(const Statistics::PhaseTimeTable times)
+{
+ int64_t longestTime = 0;
+ Phase longestPhase = PHASE_NONE;
+ for (size_t i = 0; i < PHASE_LIMIT; ++i) {
+ int64_t phaseTime = SumPhase(Phase(i), times);
+ if (phaseTime > longestTime) {
+ longestTime = phaseTime;
+ longestPhase = Phase(i);
+ }
+ }
+ return longestPhase;
+}
+
+// Write the detailed report (or an OOM notice if stats collection aborted)
+// to the MOZ_GCTIMER stream. Caller guarantees fp is non-null (endGC checks).
+void
+Statistics::printStats()
+{
+ if (aborted) {
+ fprintf(fp, "OOM during GC statistics collection. The report is unavailable for this GC.\n");
+ } else {
+ UniqueChars msg = formatDetailedMessage();
+ if (msg)
+ fprintf(fp, "GC(T+%.3fs) %s\n", t(slices[0].start - startupTime) / 1000.0, msg.get());
+ }
+ fflush(fp);
+}
+
+// Reset per-collection state at the start of a new major GC and record the
+// pre-GC heap size for later reporting.
+void
+Statistics::beginGC(JSGCInvocationKind kind)
+{
+ slices.clearAndFree();
+ sccTimes.clearAndFree();
+ gckind = kind;
+ nonincrementalReason_ = gc::AbortReason::None;
+
+ preBytes = runtime->gc.usage.gcBytes();
+}
+
+// Finish a major GC: fold this GC's phase times into the long-running
+// totals, report a battery of telemetry probes, print the detailed log if
+// enabled, and clear the OOM flag when this was the outermost GC.
+void
+Statistics::endGC()
+{
+ for (auto j : MakeRange(NumTimingArrays))
+ for (int i = 0; i < PHASE_LIMIT; i++)
+ phaseTotals[j][i] += phaseTimes[j][i];
+
+ int64_t total, longest;
+ gcDuration(&total, &longest);
+
+ int64_t sccTotal, sccLongest;
+ sccDurations(&sccTotal, &sccLongest);
+
+ runtime->addTelemetry(JS_TELEMETRY_GC_IS_ZONE_GC, !zoneStats.isCollectingAllZones());
+ runtime->addTelemetry(JS_TELEMETRY_GC_MS, t(total));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MAX_PAUSE_MS, t(longest));
+ int64_t markTotal = SumPhase(PHASE_MARK, phaseTimes);
+ int64_t markRootsTotal = SumPhase(PHASE_MARK_ROOTS, phaseTimes);
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_MS, t(markTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_SWEEP_MS, t(phaseTimes[PHASE_DAG_NONE][PHASE_SWEEP]));
+ if (runtime->gc.isCompactingGc()) {
+ runtime->addTelemetry(JS_TELEMETRY_GC_COMPACT_MS,
+ t(phaseTimes[PHASE_DAG_NONE][PHASE_COMPACT]));
+ }
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_ROOTS_MS, t(markRootsTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_MARK_GRAY_MS, t(phaseTimes[PHASE_DAG_NONE][PHASE_SWEEP_MARK_GRAY]));
+ runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL, nonincremental());
+ if (nonincremental())
+ runtime->addTelemetry(JS_TELEMETRY_GC_NON_INCREMENTAL_REASON, uint32_t(nonincrementalReason_));
+ runtime->addTelemetry(JS_TELEMETRY_GC_INCREMENTAL_DISABLED, !runtime->gc.isIncrementalGCAllowed());
+ runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_TOTAL_MS, t(sccTotal));
+ runtime->addTelemetry(JS_TELEMETRY_GC_SCC_SWEEP_MAX_PAUSE_MS, t(sccLongest));
+
+ // MMU is only meaningful when slice data is complete.
+ if (!aborted) {
+ double mmu50 = computeMMU(50 * PRMJ_USEC_PER_MSEC);
+ runtime->addTelemetry(JS_TELEMETRY_GC_MMU_50, mmu50 * 100);
+ }
+
+ if (fp)
+ printStats();
+
+ // Clear the OOM flag but only if we are not in a nested GC.
+ if (gcDepth == 1)
+ aborted = false;
+}
+
+// Count a minor GC and notify the embedder's nursery callback that a
+// nursery collection is starting.
+void
+Statistics::beginNurseryCollection(JS::gcreason::Reason reason)
+{
+ count(STAT_MINOR_GC);
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(runtime->contextFromMainThread(),
+ JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START,
+ reason);
+ }
+}
+
+// Notify the embedder's nursery callback that a nursery collection finished.
+void
+Statistics::endNurseryCollection(JS::gcreason::Reason reason)
+{
+ if (nurseryCollectionCallback) {
+ (*nurseryCollectionCallback)(runtime->contextFromMainThread(),
+ JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END,
+ reason);
+ }
+}
+
+void
+Statistics::beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
+ SliceBudget budget, JS::gcreason::Reason reason)
+{
+ gcDepth++;
+ this->zoneStats = zoneStats;
+
+ bool first = !runtime->gc.isIncrementalGCInProgress();
+ if (first)
+ beginGC(gckind);
+
+ SliceData data(budget, reason, PRMJ_Now(), JS_GetCurrentEmbedderTime(), GetPageFaultCount(),
+ runtime->gc.state());
+ if (!slices.append(data)) {
+ // If we are OOM, set a flag to indicate we have missing slice data.
+ aborted = true;
+ return;
+ }
+
+ runtime->addTelemetry(JS_TELEMETRY_GC_REASON, reason);
+
+ // Slice callbacks should only fire for the outermost level.
+ if (gcDepth == 1) {
+ bool wasFullGC = zoneStats.isCollectingAllZones();
+ if (sliceCallback)
+ (*sliceCallback)(runtime->contextFromMainThread(),
+ first ? JS::GC_CYCLE_BEGIN : JS::GC_SLICE_BEGIN,
+ JS::GCDescription(!wasFullGC, gckind, reason));
+ }
+}
+
+void
+Statistics::endSlice()
+{
+ if (!aborted) {
+ slices.back().end = PRMJ_Now();
+ slices.back().endTimestamp = JS_GetCurrentEmbedderTime();
+ slices.back().endFaults = GetPageFaultCount();
+ slices.back().finalState = runtime->gc.state();
+
+ int64_t sliceTime = slices.back().end - slices.back().start;
+ runtime->addTelemetry(JS_TELEMETRY_GC_SLICE_MS, t(sliceTime));
+ runtime->addTelemetry(JS_TELEMETRY_GC_RESET, slices.back().wasReset());
+ if (slices.back().wasReset())
+ runtime->addTelemetry(JS_TELEMETRY_GC_RESET_REASON, uint32_t(slices.back().resetReason));
+
+ if (slices.back().budget.isTimeBudget()) {
+ int64_t budget_ms = slices.back().budget.timeBudget.budget;
+ runtime->addTelemetry(JS_TELEMETRY_GC_BUDGET_MS, budget_ms);
+ if (budget_ms == runtime->gc.defaultSliceBudget())
+ runtime->addTelemetry(JS_TELEMETRY_GC_ANIMATION_MS, t(sliceTime));
+
+ // Record any phase that goes more than 2x over its budget.
+ if (sliceTime > 2 * budget_ms * 1000) {
+ Phase longest = LongestPhase(slices.back().phaseTimes);
+ runtime->addTelemetry(JS_TELEMETRY_GC_SLOW_PHASE, phases[longest].telemetryBucket);
+ }
+ }
+
+ sliceCount_++;
+ }
+
+ bool last = !runtime->gc.isIncrementalGCInProgress();
+ if (last)
+ endGC();
+
+ if (enableProfiling_ && !aborted && slices.back().duration() >= profileThreshold_)
+ printSliceProfile();
+
+ // Slice callbacks should only fire for the outermost level.
+ if (gcDepth == 1 && !aborted) {
+ bool wasFullGC = zoneStats.isCollectingAllZones();
+ if (sliceCallback)
+ (*sliceCallback)(runtime->contextFromMainThread(),
+ last ? JS::GC_CYCLE_END : JS::GC_SLICE_END,
+ JS::GCDescription(!wasFullGC, gckind, slices.back().reason));
+ }
+
+ /* Do this after the slice callback since it uses these values. */
+ if (last) {
+ PodArrayZero(counts);
+
+ // Clear the timers at the end of a GC because we accumulate time in
+ // between GCs for some (which come before PHASE_GC_BEGIN in the list).
+ PodZero(&phaseStartTimes[PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN);
+ for (size_t d = PHASE_DAG_NONE; d < NumTimingArrays; d++)
+ PodZero(&phaseTimes[d][PHASE_GC_BEGIN], PHASE_LIMIT - PHASE_GC_BEGIN);
+ }
+
+ gcDepth--;
+ MOZ_ASSERT(gcDepth >= 0);
+}
+
+bool
+Statistics::startTimingMutator()
+{
+ if (phaseNestingDepth != 0) {
+ // Should only be called from outside of GC.
+ MOZ_ASSERT(phaseNestingDepth == 1);
+ MOZ_ASSERT(phaseNesting[0] == PHASE_MUTATOR);
+ return false;
+ }
+
+ MOZ_ASSERT(suspended == 0);
+
+ timedGCTime = 0;
+ phaseStartTimes[PHASE_MUTATOR] = 0;
+ phaseTimes[PHASE_DAG_NONE][PHASE_MUTATOR] = 0;
+ timedGCStart = 0;
+
+ beginPhase(PHASE_MUTATOR);
+ return true;
+}
+
+bool
+Statistics::stopTimingMutator(double& mutator_ms, double& gc_ms)
+{
+ // This should only be called from outside of GC, while timing the mutator.
+ if (phaseNestingDepth != 1 || phaseNesting[0] != PHASE_MUTATOR)
+ return false;
+
+ endPhase(PHASE_MUTATOR);
+ mutator_ms = t(phaseTimes[PHASE_DAG_NONE][PHASE_MUTATOR]);
+ gc_ms = t(timedGCTime);
+
+ return true;
+}
+
+void
+Statistics::suspendPhases(Phase suspension)
+{
+ MOZ_ASSERT(suspension == PHASE_EXPLICIT_SUSPENSION || suspension == PHASE_IMPLICIT_SUSPENSION);
+ while (phaseNestingDepth) {
+ MOZ_ASSERT(suspended < mozilla::ArrayLength(suspendedPhases));
+ Phase parent = phaseNesting[phaseNestingDepth - 1];
+ suspendedPhases[suspended++] = parent;
+ recordPhaseEnd(parent);
+ }
+ suspendedPhases[suspended++] = suspension;
+}
+
+void
+Statistics::resumePhases()
+{
+ DebugOnly<Phase> popped = suspendedPhases[--suspended];
+ MOZ_ASSERT(popped == PHASE_EXPLICIT_SUSPENSION || popped == PHASE_IMPLICIT_SUSPENSION);
+ while (suspended &&
+ suspendedPhases[suspended - 1] != PHASE_EXPLICIT_SUSPENSION &&
+ suspendedPhases[suspended - 1] != PHASE_IMPLICIT_SUSPENSION)
+ {
+ Phase resumePhase = suspendedPhases[--suspended];
+ if (resumePhase == PHASE_MUTATOR)
+ timedGCTime += PRMJ_Now() - timedGCStart;
+ beginPhase(resumePhase);
+ }
+}
+
+void
+Statistics::beginPhase(Phase phase)
+{
+ Phase parent = phaseNestingDepth ? phaseNesting[phaseNestingDepth - 1] : PHASE_NO_PARENT;
+
+ // Re-entry is allowed during callbacks, so pause callback phases while
+ // other phases are in progress, auto-resuming after they end. As a result,
+ // nested GC time will not be accounted against the callback phases.
+ //
+ // Reuse this mechanism for managing PHASE_MUTATOR.
+ if (parent == PHASE_GC_BEGIN || parent == PHASE_GC_END || parent == PHASE_MUTATOR) {
+ suspendPhases(PHASE_IMPLICIT_SUSPENSION);
+ parent = phaseNestingDepth ? phaseNesting[phaseNestingDepth - 1] : PHASE_NO_PARENT;
+ }
+
+ // Guard against any other re-entry.
+ MOZ_ASSERT(!phaseStartTimes[phase]);
+
+ MOZ_ASSERT(phases[phase].index == phase);
+ MOZ_ASSERT(phaseNestingDepth < MAX_NESTING);
+ MOZ_ASSERT(phases[phase].parent == parent || phases[phase].parent == PHASE_MULTI_PARENTS);
+
+ phaseNesting[phaseNestingDepth] = phase;
+ phaseNestingDepth++;
+
+ if (phases[phase].parent == PHASE_MULTI_PARENTS)
+ activeDagSlot = phaseExtra[parent].dagSlot;
+
+ phaseStartTimes[phase] = PRMJ_Now();
+}
+
+void
+Statistics::recordPhaseEnd(Phase phase)
+{
+ int64_t now = PRMJ_Now();
+
+ if (phase == PHASE_MUTATOR)
+ timedGCStart = now;
+
+ phaseNestingDepth--;
+
+ int64_t t = now - phaseStartTimes[phase];
+ if (!slices.empty())
+ slices.back().phaseTimes[activeDagSlot][phase] += t;
+ phaseTimes[activeDagSlot][phase] += t;
+ phaseStartTimes[phase] = 0;
+}
+
+void
+Statistics::endPhase(Phase phase)
+{
+ recordPhaseEnd(phase);
+
+ if (phases[phase].parent == PHASE_MULTI_PARENTS)
+ activeDagSlot = PHASE_DAG_NONE;
+
+ // When emptying the stack, we may need to resume a callback phase
+ // (PHASE_GC_BEGIN/END) or return to timing the mutator (PHASE_MUTATOR).
+ if (phaseNestingDepth == 0 && suspended > 0 && suspendedPhases[suspended - 1] == PHASE_IMPLICIT_SUSPENSION)
+ resumePhases();
+}
+
+void
+Statistics::endParallelPhase(Phase phase, const GCParallelTask* task)
+{
+ phaseNestingDepth--;
+
+ if (!slices.empty())
+ slices.back().phaseTimes[PHASE_DAG_NONE][phase] += task->duration();
+ phaseTimes[PHASE_DAG_NONE][phase] += task->duration();
+ phaseStartTimes[phase] = 0;
+}
+
+int64_t
+Statistics::beginSCC()
+{
+ return PRMJ_Now();
+}
+
+void
+Statistics::endSCC(unsigned scc, int64_t start)
+{
+ if (scc >= sccTimes.length() && !sccTimes.resize(scc + 1))
+ return;
+
+ sccTimes[scc] += PRMJ_Now() - start;
+}
+
+/*
+ * MMU (minimum mutator utilization) is a measure of how much garbage collection
+ * is affecting the responsiveness of the system. MMU measurements are given
+ * with respect to a certain window size. If we report MMU(50ms) = 80%, then
+ * that means that, for any 50ms window of time, at least 80% of the window is
+ * devoted to the mutator. In other words, the GC is running for at most 20% of
+ * the window, or 10ms. The GC can run multiple slices during the 50ms window
+ * as long as the total time it spends is at most 10ms.
+ */
+double
+Statistics::computeMMU(int64_t window) const
+{
+ MOZ_ASSERT(!slices.empty());
+
+ int64_t gc = slices[0].end - slices[0].start;
+ int64_t gcMax = gc;
+
+ if (gc >= window)
+ return 0.0;
+
+ int startIndex = 0;
+ for (size_t endIndex = 1; endIndex < slices.length(); endIndex++) {
+ gc += slices[endIndex].end - slices[endIndex].start;
+
+ while (slices[endIndex].end - slices[startIndex].end >= window) {
+ gc -= slices[startIndex].end - slices[startIndex].start;
+ startIndex++;
+ }
+
+ int64_t cur = gc;
+ if (slices[endIndex].end - slices[startIndex].start > window)
+ cur -= (slices[endIndex].end - slices[startIndex].start - window);
+ if (cur > gcMax)
+ gcMax = cur;
+ }
+
+ return double(window - gcMax) / window;
+}
+
+/* static */ void
+Statistics::printProfileHeader()
+{
+ fprintf(stderr, " %6s", "total");
+#define PRINT_PROFILE_HEADER(name, text, phase) \
+ fprintf(stderr, " %6s", text);
+FOR_EACH_GC_PROFILE_TIME(PRINT_PROFILE_HEADER)
+#undef PRINT_PROFILE_HEADER
+ fprintf(stderr, "\n");
+}
+
+/* static */ void
+Statistics::printProfileTimes(const ProfileTimes& times)
+{
+ for (auto time : times)
+ fprintf(stderr, " %6" PRIi64, time / PRMJ_USEC_PER_MSEC);
+ fprintf(stderr, "\n");
+}
+
+void
+Statistics::printSliceProfile()
+{
+ const SliceData& slice = slices.back();
+
+ static int printedHeader = 0;
+ if ((printedHeader++ % 200) == 0) {
+ fprintf(stderr, "MajorGC: Reason States ");
+ printProfileHeader();
+ }
+
+ fprintf(stderr, "MajorGC: %20s %1d -> %1d ",
+ ExplainReason(slice.reason), int(slice.initialState), int(slice.finalState));
+
+ ProfileTimes times;
+ times[ProfileKey::Total] = slice.duration();
+ totalTimes_[ProfileKey::Total] += times[ProfileKey::Total];
+
+#define GET_PROFILE_TIME(name, text, phase) \
+ times[ProfileKey::name] = slice.phaseTimes[PHASE_DAG_NONE][phase]; \
+ totalTimes_[ProfileKey::name] += times[ProfileKey::name];
+FOR_EACH_GC_PROFILE_TIME(GET_PROFILE_TIME)
+#undef GET_PROFILE_TIME
+
+ printProfileTimes(times);
+}
+
+void
+Statistics::printTotalProfileTimes()
+{
+ if (enableProfiling_) {
+ fprintf(stderr, "MajorGC TOTALS: %7" PRIu64 " slices: ", sliceCount_);
+ printProfileTimes(totalTimes_);
+ }
+}
+
diff --git a/js/src/gc/Statistics.h b/js/src/gc/Statistics.h
new file mode 100644
index 000000000..c9e5871e3
--- /dev/null
+++ b/js/src/gc/Statistics.h
@@ -0,0 +1,505 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Statistics_h
+#define gc_Statistics_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/IntegerRange.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/PodOperations.h"
+
+#include "jsalloc.h"
+#include "jsgc.h"
+#include "jspubtd.h"
+
+#include "js/GCAPI.h"
+#include "js/Vector.h"
+
+using mozilla::Maybe;
+
+namespace js {
+
+class GCParallelTask;
+
+namespace gcstats {
+
+enum Phase : uint8_t {
+ PHASE_MUTATOR,
+ PHASE_GC_BEGIN,
+ PHASE_WAIT_BACKGROUND_THREAD,
+ PHASE_MARK_DISCARD_CODE,
+ PHASE_RELAZIFY_FUNCTIONS,
+ PHASE_PURGE,
+ PHASE_MARK,
+ PHASE_UNMARK,
+ PHASE_MARK_DELAYED,
+ PHASE_SWEEP,
+ PHASE_SWEEP_MARK,
+ PHASE_SWEEP_MARK_TYPES,
+ PHASE_SWEEP_MARK_INCOMING_BLACK,
+ PHASE_SWEEP_MARK_WEAK,
+ PHASE_SWEEP_MARK_INCOMING_GRAY,
+ PHASE_SWEEP_MARK_GRAY,
+ PHASE_SWEEP_MARK_GRAY_WEAK,
+ PHASE_FINALIZE_START,
+ PHASE_WEAK_ZONEGROUP_CALLBACK,
+ PHASE_WEAK_COMPARTMENT_CALLBACK,
+ PHASE_SWEEP_ATOMS,
+ PHASE_SWEEP_SYMBOL_REGISTRY,
+ PHASE_SWEEP_COMPARTMENTS,
+ PHASE_SWEEP_DISCARD_CODE,
+ PHASE_SWEEP_INNER_VIEWS,
+ PHASE_SWEEP_CC_WRAPPER,
+ PHASE_SWEEP_BASE_SHAPE,
+ PHASE_SWEEP_INITIAL_SHAPE,
+ PHASE_SWEEP_TYPE_OBJECT,
+ PHASE_SWEEP_BREAKPOINT,
+ PHASE_SWEEP_REGEXP,
+ PHASE_SWEEP_MISC,
+ PHASE_SWEEP_TYPES,
+ PHASE_SWEEP_TYPES_BEGIN,
+ PHASE_SWEEP_TYPES_END,
+ PHASE_SWEEP_OBJECT,
+ PHASE_SWEEP_STRING,
+ PHASE_SWEEP_SCRIPT,
+ PHASE_SWEEP_SCOPE,
+ PHASE_SWEEP_SHAPE,
+ PHASE_SWEEP_JITCODE,
+ PHASE_FINALIZE_END,
+ PHASE_DESTROY,
+ PHASE_COMPACT,
+ PHASE_COMPACT_MOVE,
+ PHASE_COMPACT_UPDATE,
+ PHASE_COMPACT_UPDATE_CELLS,
+ PHASE_GC_END,
+ PHASE_MINOR_GC,
+ PHASE_EVICT_NURSERY,
+ PHASE_TRACE_HEAP,
+ PHASE_BARRIER,
+ PHASE_UNMARK_GRAY,
+ PHASE_MARK_ROOTS,
+ PHASE_BUFFER_GRAY_ROOTS,
+ PHASE_MARK_CCWS,
+ PHASE_MARK_STACK,
+ PHASE_MARK_RUNTIME_DATA,
+ PHASE_MARK_EMBEDDING,
+ PHASE_MARK_COMPARTMENTS,
+ PHASE_PURGE_SHAPE_TABLES,
+
+ PHASE_LIMIT,
+ PHASE_NONE = PHASE_LIMIT,
+ PHASE_EXPLICIT_SUSPENSION = PHASE_LIMIT,
+ PHASE_IMPLICIT_SUSPENSION,
+ PHASE_MULTI_PARENTS
+};
+
+enum Stat {
+ STAT_NEW_CHUNK,
+ STAT_DESTROY_CHUNK,
+ STAT_MINOR_GC,
+
+ // Number of times a 'put' into a store buffer overflowed, triggering a
+ // compaction.
+ STAT_STOREBUFFER_OVERFLOW,
+
+ // Number of arenas relocated by compacting GC.
+ STAT_ARENA_RELOCATED,
+
+ STAT_LIMIT
+};
+
+struct ZoneGCStats
+{
+ /* Number of zones collected in this GC. */
+ int collectedZoneCount;
+
+ /* Total number of zones in the Runtime at the start of this GC. */
+ int zoneCount;
+
+ /* Number of zones swept in this GC. */
+ int sweptZoneCount;
+
+ /* Total number of compartments in all zones collected. */
+ int collectedCompartmentCount;
+
+ /* Total number of compartments in the Runtime at the start of this GC. */
+ int compartmentCount;
+
+ /* Total number of compartments swept by this GC. */
+ int sweptCompartmentCount;
+
+ bool isCollectingAllZones() const { return collectedZoneCount == zoneCount; }
+
+ ZoneGCStats()
+ : collectedZoneCount(0), zoneCount(0), sweptZoneCount(0),
+ collectedCompartmentCount(0), compartmentCount(0), sweptCompartmentCount(0)
+ {}
+};
+
+#define FOR_EACH_GC_PROFILE_TIME(_) \
+ _(BeginCallback, "beginCB", PHASE_GC_BEGIN) \
+ _(WaitBgThread, "waitBG", PHASE_WAIT_BACKGROUND_THREAD) \
+ _(DiscardCode, "discard", PHASE_MARK_DISCARD_CODE) \
+ _(RelazifyFunc, "relazify", PHASE_RELAZIFY_FUNCTIONS) \
+ _(PurgeTables, "purgeTables", PHASE_PURGE_SHAPE_TABLES) \
+ _(Purge, "purge", PHASE_PURGE) \
+ _(Mark, "mark", PHASE_MARK) \
+ _(Sweep, "sweep", PHASE_SWEEP) \
+ _(Compact, "compact", PHASE_COMPACT) \
+ _(EndCallback, "endCB", PHASE_GC_END) \
+ _(Barriers, "barriers", PHASE_BARRIER)
+
+const char* ExplainAbortReason(gc::AbortReason reason);
+const char* ExplainInvocationKind(JSGCInvocationKind gckind);
+
+/*
+ * Struct for collecting timing statistics on a "phase tree". The tree is
+ * specified as a limited DAG, but the timings are collected for the whole tree
+ * that you would get by expanding out the DAG by duplicating subtrees rooted
+ * at nodes with multiple parents.
+ *
+ * During execution, a child phase can be activated multiple times, and the
+ * total time will be accumulated. (So for example, you can start and end
+ * PHASE_MARK_ROOTS multiple times before completing the parent phase.)
+ *
+ * Incremental GC is represented by recording separate timing results for each
+ * slice within the overall GC.
+ */
+struct Statistics
+{
+ /*
+ * Phases are allowed to have multiple parents, though any path from root
+ * to leaf is allowed at most one multi-parented phase. We keep a full set
+ * of timings for each of the multi-parented phases, to be able to record
+ * all the timings in the expanded tree induced by our dag.
+ *
+ * Note that this wastes quite a bit of space, since we have a whole
+ * separate array of timing data containing all the phases. We could be
+ * more clever and keep an array of pointers biased by the offset of the
+ * multi-parented phase, and thereby preserve the simple
+ * timings[slot][PHASE_*] indexing. But the complexity doesn't seem worth
+ * the few hundred bytes of savings. If we want to extend things to full
+ * DAGs, this decision should be reconsidered.
+ */
+ static const size_t MaxMultiparentPhases = 6;
+ static const size_t NumTimingArrays = MaxMultiparentPhases + 1;
+
+ /* Create a convenient type for referring to tables of phase times. */
+ using PhaseTimeTable = int64_t[NumTimingArrays][PHASE_LIMIT];
+
+ static MOZ_MUST_USE bool initialize();
+
+ explicit Statistics(JSRuntime* rt);
+ ~Statistics();
+
+ void beginPhase(Phase phase);
+ void endPhase(Phase phase);
+ void endParallelPhase(Phase phase, const GCParallelTask* task);
+
+ // Occasionally, we may be in the middle of something that is tracked by
+ // this class, and we need to do something unusual (eg evict the nursery)
+ // that doesn't normally nest within the current phase. Suspend the
+ // currently tracked phase stack, at which time the caller is free to do
+ // other tracked operations.
+ //
+ // This also happens internally with PHASE_GC_BEGIN and other "non-GC"
+ // phases. While in these phases, any beginPhase will automatically suspend
+ // the non-GC phase, until that inner stack is complete, at which time it
+ // will automatically resume the non-GC phase. Explicit suspensions do not
+ // get auto-resumed.
+ void suspendPhases(Phase suspension = PHASE_EXPLICIT_SUSPENSION);
+
+ // Resume a suspended stack of phases.
+ void resumePhases();
+
+ void beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
+ SliceBudget budget, JS::gcreason::Reason reason);
+ void endSlice();
+
+ MOZ_MUST_USE bool startTimingMutator();
+ MOZ_MUST_USE bool stopTimingMutator(double& mutator_ms, double& gc_ms);
+
+ // Note when we sweep a zone or compartment.
+ void sweptZone() { ++zoneStats.sweptZoneCount; }
+ void sweptCompartment() { ++zoneStats.sweptCompartmentCount; }
+
+ void reset(gc::AbortReason reason) {
+ MOZ_ASSERT(reason != gc::AbortReason::None);
+ if (!aborted)
+ slices.back().resetReason = reason;
+ }
+
+ void nonincremental(gc::AbortReason reason) {
+ MOZ_ASSERT(reason != gc::AbortReason::None);
+ nonincrementalReason_ = reason;
+ }
+
+ bool nonincremental() const {
+ return nonincrementalReason_ != gc::AbortReason::None;
+ }
+
+ const char* nonincrementalReason() const {
+ return ExplainAbortReason(nonincrementalReason_);
+ }
+
+ void count(Stat s) {
+ MOZ_ASSERT(s < STAT_LIMIT);
+ counts[s]++;
+ }
+
+ void beginNurseryCollection(JS::gcreason::Reason reason);
+ void endNurseryCollection(JS::gcreason::Reason reason);
+
+ int64_t beginSCC();
+ void endSCC(unsigned scc, int64_t start);
+
+ UniqueChars formatCompactSliceMessage() const;
+ UniqueChars formatCompactSummaryMessage() const;
+ UniqueChars formatJsonMessage(uint64_t timestamp);
+ UniqueChars formatDetailedMessage();
+
+ JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
+ JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
+ JS::GCNurseryCollectionCallback callback);
+
+ int64_t clearMaxGCPauseAccumulator();
+ int64_t getMaxGCPauseSinceClear();
+
+ // Return the current phase, suppressing the synthetic PHASE_MUTATOR phase.
+ Phase currentPhase() {
+ if (phaseNestingDepth == 0)
+ return PHASE_NONE;
+ if (phaseNestingDepth == 1)
+ return phaseNesting[0] == PHASE_MUTATOR ? PHASE_NONE : phaseNesting[0];
+ return phaseNesting[phaseNestingDepth - 1];
+ }
+
+ static const size_t MAX_NESTING = 20;
+
+ struct SliceData {
+ SliceData(SliceBudget budget, JS::gcreason::Reason reason, int64_t start,
+ double startTimestamp, size_t startFaults, gc::State initialState)
+ : budget(budget), reason(reason),
+ initialState(initialState),
+ finalState(gc::State::NotActive),
+ resetReason(gc::AbortReason::None),
+ start(start), startTimestamp(startTimestamp),
+ startFaults(startFaults)
+ {
+ for (auto i : mozilla::MakeRange(NumTimingArrays))
+ mozilla::PodArrayZero(phaseTimes[i]);
+ }
+
+ SliceBudget budget;
+ JS::gcreason::Reason reason;
+ gc::State initialState, finalState;
+ gc::AbortReason resetReason;
+ int64_t start, end;
+ double startTimestamp, endTimestamp;
+ size_t startFaults, endFaults;
+ PhaseTimeTable phaseTimes;
+
+ int64_t duration() const { return end - start; }
+ bool wasReset() const { return resetReason != gc::AbortReason::None; }
+ };
+
+ typedef Vector<SliceData, 8, SystemAllocPolicy> SliceDataVector;
+ typedef SliceDataVector::ConstRange SliceRange;
+
+ SliceRange sliceRange() const { return slices.all(); }
+ size_t slicesLength() const { return slices.length(); }
+
+ /* Print total profile times on shutdown. */
+ void printTotalProfileTimes();
+
+ private:
+ JSRuntime* runtime;
+
+ int64_t startupTime;
+
+ /* File pointer used for MOZ_GCTIMER output. */
+ FILE* fp;
+
+ /*
+ * GCs can't really nest, but a second GC can be triggered from within the
+ * JSGC_END callback.
+ */
+ int gcDepth;
+
+ ZoneGCStats zoneStats;
+
+ JSGCInvocationKind gckind;
+
+ gc::AbortReason nonincrementalReason_;
+
+ SliceDataVector slices;
+
+ /* Most recent time when the given phase started. */
+ int64_t phaseStartTimes[PHASE_LIMIT];
+
+ /* Bookkeeping for GC timings when timingMutator is true */
+ int64_t timedGCStart;
+ int64_t timedGCTime;
+
+ /* Total time in a given phase for this GC. */
+ PhaseTimeTable phaseTimes;
+
+ /* Total time in a given phase over all GCs. */
+ PhaseTimeTable phaseTotals;
+
+ /* Number of events of this type for this GC. */
+ unsigned int counts[STAT_LIMIT];
+
+ /* Allocated space before the GC started. */
+ size_t preBytes;
+
+ /* Records the maximum GC pause in an API-controlled interval (in us). */
+ mutable int64_t maxPauseInInterval;
+
+ /* Phases that are currently on stack. */
+ Phase phaseNesting[MAX_NESTING];
+ size_t phaseNestingDepth;
+ size_t activeDagSlot;
+
+ /*
+ * Certain phases can interrupt the phase stack, eg callback phases. When
+ * this happens, we move the suspended phases over to a separate list,
+ * terminated by a dummy PHASE_SUSPENSION phase (so that we can nest
+ * suspensions by suspending multiple stacks with a PHASE_SUSPENSION in
+ * between).
+ */
+ Phase suspendedPhases[MAX_NESTING * 3];
+ size_t suspended;
+
+ /* Sweep times for SCCs of compartments. */
+ Vector<int64_t, 0, SystemAllocPolicy> sccTimes;
+
+ JS::GCSliceCallback sliceCallback;
+ JS::GCNurseryCollectionCallback nurseryCollectionCallback;
+
+ /*
+ * True if we saw an OOM while allocating slices. The statistics for this
+ * GC will be invalid.
+ */
+ bool aborted;
+
+ /* Profiling data. */
+
+ enum class ProfileKey
+ {
+ Total,
+#define DEFINE_TIME_KEY(name, text, phase) \
+ name,
+FOR_EACH_GC_PROFILE_TIME(DEFINE_TIME_KEY)
+#undef DEFINE_TIME_KEY
+ KeyCount
+ };
+
+ using ProfileTimes = mozilla::EnumeratedArray<ProfileKey, ProfileKey::KeyCount, int64_t>;
+
+ int64_t profileThreshold_;
+ bool enableProfiling_;
+ ProfileTimes totalTimes_;
+ uint64_t sliceCount_;
+
+ void beginGC(JSGCInvocationKind kind);
+ void endGC();
+
+ void recordPhaseEnd(Phase phase);
+
+ void gcDuration(int64_t* total, int64_t* maxPause) const;
+ void sccDurations(int64_t* total, int64_t* maxPause);
+ void printStats();
+
+ UniqueChars formatCompactSlicePhaseTimes(const PhaseTimeTable phaseTimes) const;
+
+ UniqueChars formatDetailedDescription();
+ UniqueChars formatDetailedSliceDescription(unsigned i, const SliceData& slice);
+ UniqueChars formatDetailedPhaseTimes(const PhaseTimeTable phaseTimes);
+ UniqueChars formatDetailedTotals();
+
+ UniqueChars formatJsonDescription(uint64_t timestamp);
+ UniqueChars formatJsonSliceDescription(unsigned i, const SliceData& slice);
+ UniqueChars formatJsonPhaseTimes(const PhaseTimeTable phaseTimes);
+
+ double computeMMU(int64_t resolution) const;
+
+ void printSliceProfile();
+ static void printProfileHeader();
+ static void printProfileTimes(const ProfileTimes& times);
+};
+
+struct MOZ_RAII AutoGCSlice
+{
+ AutoGCSlice(Statistics& stats, const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
+ SliceBudget budget, JS::gcreason::Reason reason)
+ : stats(stats)
+ {
+ stats.beginSlice(zoneStats, gckind, budget, reason);
+ }
+ ~AutoGCSlice() { stats.endSlice(); }
+
+ Statistics& stats;
+};
+
+struct MOZ_RAII AutoPhase
+{
+ AutoPhase(Statistics& stats, Phase phase)
+ : stats(stats), task(nullptr), phase(phase), enabled(true)
+ {
+ stats.beginPhase(phase);
+ }
+
+ AutoPhase(Statistics& stats, bool condition, Phase phase)
+ : stats(stats), task(nullptr), phase(phase), enabled(condition)
+ {
+ if (enabled)
+ stats.beginPhase(phase);
+ }
+
+ AutoPhase(Statistics& stats, const GCParallelTask& task, Phase phase)
+ : stats(stats), task(&task), phase(phase), enabled(true)
+ {
+ if (enabled)
+ stats.beginPhase(phase);
+ }
+
+ ~AutoPhase() {
+ if (enabled) {
+ if (task)
+ stats.endParallelPhase(phase, task);
+ else
+ stats.endPhase(phase);
+ }
+ }
+
+ Statistics& stats;
+ const GCParallelTask* task;
+ Phase phase;
+ bool enabled;
+};
+
+struct MOZ_RAII AutoSCC
+{
+ AutoSCC(Statistics& stats, unsigned scc)
+ : stats(stats), scc(scc)
+ {
+ start = stats.beginSCC();
+ }
+ ~AutoSCC() {
+ stats.endSCC(scc, start);
+ }
+
+ Statistics& stats;
+ unsigned scc;
+ int64_t start;
+};
+
+} /* namespace gcstats */
+} /* namespace js */
+
+#endif /* gc_Statistics_h */
diff --git a/js/src/gc/StoreBuffer-inl.h b/js/src/gc/StoreBuffer-inl.h
new file mode 100644
index 000000000..5887c52a9
--- /dev/null
+++ b/js/src/gc/StoreBuffer-inl.h
@@ -0,0 +1,75 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_inl_h
+#define gc_StoreBuffer_inl_h
+
+#include "gc/StoreBuffer.h"
+
+#include "gc/Heap.h"
+
+namespace js {
+namespace gc {
+
+inline /* static */ size_t
+ArenaCellSet::getCellIndex(const TenuredCell* cell)
+{
+ MOZ_ASSERT((uintptr_t(cell) & ~ArenaMask) % CellSize == 0);
+ return (uintptr_t(cell) & ArenaMask) / CellSize;
+}
+
+inline /* static */ void
+ArenaCellSet::getWordIndexAndMask(size_t cellIndex, size_t* wordp, uint32_t* maskp)
+{
+ BitArray<ArenaCellCount>::getIndexAndMask(cellIndex, wordp, maskp);
+}
+
+inline bool
+ArenaCellSet::hasCell(size_t cellIndex) const
+{
+ MOZ_ASSERT(cellIndex < ArenaCellCount);
+ return bits.get(cellIndex);
+}
+
+inline void
+ArenaCellSet::putCell(size_t cellIndex)
+{
+ MOZ_ASSERT(cellIndex < ArenaCellCount);
+ bits.set(cellIndex);
+}
+
+inline void
+ArenaCellSet::check() const
+{
+#ifdef DEBUG
+ bool bitsZero = bits.isAllClear();
+ MOZ_ASSERT(isEmpty() == bitsZero);
+ MOZ_ASSERT(isEmpty() == !arena);
+ MOZ_ASSERT_IF(!isEmpty(), arena->bufferedCells == this);
+#endif
+}
+
+inline void
+StoreBuffer::putWholeCell(Cell* cell)
+{
+ MOZ_ASSERT(cell->isTenured());
+
+ Arena* arena = cell->asTenured().arena();
+ ArenaCellSet* cells = arena->bufferedCells;
+ if (cells->isEmpty()) {
+ cells = AllocateWholeCellSet(arena);
+ if (!cells)
+ return;
+ }
+
+ cells->putCell(&cell->asTenured());
+ cells->check();
+}
+
+} // namespace gc
+} // namespace js
+
+#endif // gc_StoreBuffer_inl_h
diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp
new file mode 100644
index 000000000..af13778f0
--- /dev/null
+++ b/js/src/gc/StoreBuffer.cpp
@@ -0,0 +1,153 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/StoreBuffer-inl.h"
+
+#include "mozilla/Assertions.h"
+
+#include "jscompartment.h"
+
+#include "gc/Statistics.h"
+#include "vm/ArgumentsObject.h"
+#include "vm/Runtime.h"
+
+#include "jsgcinlines.h"
+
+using namespace js;
+using namespace js::gc;
+
+void
+StoreBuffer::GenericBuffer::trace(StoreBuffer* owner, JSTracer* trc)
+{
+ mozilla::ReentrancyGuard g(*owner);
+ MOZ_ASSERT(owner->isEnabled());
+ if (!storage_)
+ return;
+
+ for (LifoAlloc::Enum e(*storage_); !e.empty();) {
+ unsigned size = *e.get<unsigned>();
+ e.popFront<unsigned>();
+ BufferableRef* edge = e.get<BufferableRef>(size);
+ edge->trace(trc);
+ e.popFront(size);
+ }
+}
+
+bool
+StoreBuffer::enable()
+{
+ if (enabled_)
+ return true;
+
+ if (!bufferVal.init() ||
+ !bufferCell.init() ||
+ !bufferSlot.init() ||
+ !bufferGeneric.init())
+ {
+ return false;
+ }
+
+ enabled_ = true;
+ return true;
+}
+
+void
+StoreBuffer::disable()
+{
+ if (!enabled_)
+ return;
+
+ aboutToOverflow_ = false;
+
+ enabled_ = false;
+}
+
+void
+StoreBuffer::clear()
+{
+ if (!enabled_)
+ return;
+
+ aboutToOverflow_ = false;
+ cancelIonCompilations_ = false;
+
+ bufferVal.clear();
+ bufferCell.clear();
+ bufferSlot.clear();
+ bufferGeneric.clear();
+
+ for (ArenaCellSet* set = bufferWholeCell; set; set = set->next)
+ set->arena->bufferedCells = nullptr;
+ bufferWholeCell = nullptr;
+}
+
+void
+StoreBuffer::setAboutToOverflow()
+{
+ if (!aboutToOverflow_) {
+ aboutToOverflow_ = true;
+ runtime_->gc.stats.count(gcstats::STAT_STOREBUFFER_OVERFLOW);
+ }
+ runtime_->gc.requestMinorGC(JS::gcreason::FULL_STORE_BUFFER);
+}
+
+void
+StoreBuffer::addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::GCSizes
+*sizes)
+{
+ sizes->storeBufferVals += bufferVal.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferCells += bufferCell.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferSlots += bufferSlot.sizeOfExcludingThis(mallocSizeOf);
+ sizes->storeBufferGenerics += bufferGeneric.sizeOfExcludingThis(mallocSizeOf);
+
+ for (ArenaCellSet* set = bufferWholeCell; set; set = set->next)
+ sizes->storeBufferWholeCells += sizeof(ArenaCellSet);
+}
+
+void
+StoreBuffer::addToWholeCellBuffer(ArenaCellSet* set)
+{
+ set->next = bufferWholeCell;
+ bufferWholeCell = set;
+}
+
+ArenaCellSet ArenaCellSet::Empty(nullptr);
+
+ArenaCellSet::ArenaCellSet(Arena* arena)
+ : arena(arena), next(nullptr)
+{
+ bits.clear(false);
+}
+
+ArenaCellSet*
+js::gc::AllocateWholeCellSet(Arena* arena)
+{
+ Zone* zone = arena->zone;
+ JSRuntime* rt = zone->runtimeFromMainThread();
+ if (!rt->gc.nursery.isEnabled())
+ return nullptr;
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ Nursery& nursery = rt->gc.nursery;
+ void* data = nursery.allocateBuffer(zone, sizeof(ArenaCellSet));
+ if (!data) {
+ oomUnsafe.crash("Failed to allocate WholeCellSet");
+ return nullptr;
+ }
+
+ if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
+ rt->gc.storeBuffer.setAboutToOverflow();
+
+ auto cells = static_cast<ArenaCellSet*>(data);
+ new (cells) ArenaCellSet(arena);
+ arena->bufferedCells = cells;
+ rt->gc.storeBuffer.addToWholeCellBuffer(cells);
+ return cells;
+}
+
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::ValueEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::CellPtrEdge>;
+template struct StoreBuffer::MonoTypeBuffer<StoreBuffer::SlotsEdge>;
diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h
new file mode 100644
index 000000000..f8d9031f1
--- /dev/null
+++ b/js/src/gc/StoreBuffer.h
@@ -0,0 +1,499 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_StoreBuffer_h
+#define gc_StoreBuffer_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/ReentrancyGuard.h"
+
+#include <algorithm>
+
+#include "jsalloc.h"
+
+#include "ds/LifoAlloc.h"
+#include "gc/Nursery.h"
+#include "js/MemoryMetrics.h"
+
+namespace js {
+namespace gc {
+
+class ArenaCellSet;
+
+/*
+ * BufferableRef represents an abstract reference for use in the generational
+ * GC's remembered set. Entries in the store buffer that cannot be represented
+ * with the simple pointer-to-a-pointer scheme must derive from this class and
+ * use the generic store buffer interface.
+ *
+ * A single BufferableRef entry in the generic buffer can represent many entries
+ * in the remembered set. For example js::OrderedHashTableRef represents all
+ * the incoming edges corresponding to keys in an ordered hash table.
+ */
class BufferableRef
{
  public:
    virtual void trace(JSTracer* trc) = 0;
    // Generic entries cannot be filtered cheaply, so they are always assumed
    // to possibly be in the remembered set.
    bool maybeInRememberedSet(const Nursery&) const { return true; }
};
+
+typedef HashSet<void*, PointerHasher<void*, 3>, SystemAllocPolicy> EdgeSet;
+
+/* The size of a single block of store buffer storage space. */
+static const size_t LifoAllocBlockSize = 1 << 13; /* 8KiB */
+
+/*
+ * The StoreBuffer observes all writes that occur in the system and performs
+ * efficient filtering of them to derive a remembered set for nursery GC.
+ */
+class StoreBuffer
+{
+ friend class mozilla::ReentrancyGuard;
+
+ /* The size at which a block is about to overflow. */
+ static const size_t LowAvailableThreshold = size_t(LifoAllocBlockSize / 2.0);
+
+ /*
+ * This buffer holds only a single type of edge. Using this buffer is more
+ * efficient than the generic buffer when many writes will be to the same
+ * type of edge: e.g. Value or Cell*.
+ */
+ template<typename T>
+ struct MonoTypeBuffer
+ {
+ /* The canonical set of stores. */
+ typedef HashSet<T, typename T::Hasher, SystemAllocPolicy> StoreSet;
+ StoreSet stores_;
+
+ /*
+ * A one element cache in front of the canonical set to speed up
+ * temporary instances of HeapPtr.
+ */
+ T last_;
+
+ /* Maximum number of entries before we request a minor GC. */
+ const static size_t MaxEntries = 48 * 1024 / sizeof(T);
+
+ explicit MonoTypeBuffer() : last_(T()) {}
+ ~MonoTypeBuffer() { stores_.finish(); }
+
+ MOZ_MUST_USE bool init() {
+ if (!stores_.initialized() && !stores_.init())
+ return false;
+ clear();
+ return true;
+ }
+
+ void clear() {
+ last_ = T();
+ if (stores_.initialized())
+ stores_.clear();
+ }
+
+ /* Add one item to the buffer. */
+ void put(StoreBuffer* owner, const T& t) {
+ MOZ_ASSERT(stores_.initialized());
+ sinkStore(owner);
+ last_ = t;
+ }
+
+ /* Remove an item from the store buffer. */
+ void unput(StoreBuffer* owner, const T& v) {
+ // Fast, hashless remove of last put.
+ if (last_ == v) {
+ last_ = T();
+ return;
+ }
+ stores_.remove(v);
+ }
+
+ /* Move any buffered stores to the canonical store set. */
+ void sinkStore(StoreBuffer* owner) {
+ MOZ_ASSERT(stores_.initialized());
+ if (last_) {
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ if (!stores_.put(last_))
+ oomUnsafe.crash("Failed to allocate for MonoTypeBuffer::put.");
+ }
+ last_ = T();
+
+ if (MOZ_UNLIKELY(stores_.count() > MaxEntries))
+ owner->setAboutToOverflow();
+ }
+
+ bool has(StoreBuffer* owner, const T& v) {
+ sinkStore(owner);
+ return stores_.has(v);
+ }
+
+ /* Trace the source of all edges in the store buffer. */
+ void trace(StoreBuffer* owner, TenuringTracer& mover);
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return stores_.sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ private:
+ MonoTypeBuffer& operator=(const MonoTypeBuffer& other) = delete;
+ };
+
+ struct GenericBuffer
+ {
+ LifoAlloc* storage_;
+
+ explicit GenericBuffer() : storage_(nullptr) {}
+ ~GenericBuffer() { js_delete(storage_); }
+
+ MOZ_MUST_USE bool init() {
+ if (!storage_)
+ storage_ = js_new<LifoAlloc>(LifoAllocBlockSize);
+ clear();
+ return bool(storage_);
+ }
+
+ void clear() {
+ if (!storage_)
+ return;
+
+ storage_->used() ? storage_->releaseAll() : storage_->freeAll();
+ }
+
+ bool isAboutToOverflow() const {
+ return !storage_->isEmpty() &&
+ storage_->availableInCurrentChunk() < LowAvailableThreshold;
+ }
+
+ /* Trace all generic edges. */
+ void trace(StoreBuffer* owner, JSTracer* trc);
+
+ template <typename T>
+ void put(StoreBuffer* owner, const T& t) {
+ MOZ_ASSERT(storage_);
+
+ /* Ensure T is derived from BufferableRef. */
+ (void)static_cast<const BufferableRef*>(&t);
+
+ AutoEnterOOMUnsafeRegion oomUnsafe;
+ unsigned size = sizeof(T);
+ unsigned* sizep = storage_->pod_malloc<unsigned>();
+ if (!sizep)
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+ *sizep = size;
+
+ T* tp = storage_->new_<T>(t);
+ if (!tp)
+ oomUnsafe.crash("Failed to allocate for GenericBuffer::put.");
+
+ if (isAboutToOverflow())
+ owner->setAboutToOverflow();
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
+ return storage_ ? storage_->sizeOfIncludingThis(mallocSizeOf) : 0;
+ }
+
+ bool isEmpty() {
+ return !storage_ || storage_->isEmpty();
+ }
+
+ private:
+ GenericBuffer& operator=(const GenericBuffer& other) = delete;
+ };
+
+ template <typename Edge>
+ struct PointerEdgeHasher
+ {
+ typedef Edge Lookup;
+ static HashNumber hash(const Lookup& l) { return uintptr_t(l.edge) >> 3; }
+ static bool match(const Edge& k, const Lookup& l) { return k == l; }
+ };
+
+ struct CellPtrEdge
+ {
+ Cell** edge;
+
+ CellPtrEdge() : edge(nullptr) {}
+ explicit CellPtrEdge(Cell** v) : edge(v) {}
+ bool operator==(const CellPtrEdge& other) const { return edge == other.edge; }
+ bool operator!=(const CellPtrEdge& other) const { return edge != other.edge; }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(*edge));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ CellPtrEdge tagged() const { return CellPtrEdge((Cell**)(uintptr_t(edge) | 1)); }
+ CellPtrEdge untagged() const { return CellPtrEdge((Cell**)(uintptr_t(edge) & ~1)); }
+ bool isTagged() const { return bool(uintptr_t(edge) & 1); }
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ typedef PointerEdgeHasher<CellPtrEdge> Hasher;
+ };
+
+ struct ValueEdge
+ {
+ JS::Value* edge;
+
+ ValueEdge() : edge(nullptr) {}
+ explicit ValueEdge(JS::Value* v) : edge(v) {}
+ bool operator==(const ValueEdge& other) const { return edge == other.edge; }
+ bool operator!=(const ValueEdge& other) const { return edge != other.edge; }
+
+ Cell* deref() const { return edge->isGCThing() ? static_cast<Cell*>(edge->toGCThing()) : nullptr; }
+
+ bool maybeInRememberedSet(const Nursery& nursery) const {
+ MOZ_ASSERT(IsInsideNursery(deref()));
+ return !nursery.isInside(edge);
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ ValueEdge tagged() const { return ValueEdge((JS::Value*)(uintptr_t(edge) | 1)); }
+ ValueEdge untagged() const { return ValueEdge((JS::Value*)(uintptr_t(edge) & ~1)); }
+ bool isTagged() const { return bool(uintptr_t(edge) & 1); }
+
+ explicit operator bool() const { return edge != nullptr; }
+
+ typedef PointerEdgeHasher<ValueEdge> Hasher;
+ };
+
+ struct SlotsEdge
+ {
+ // These definitions must match those in HeapSlot::Kind.
+ const static int SlotKind = 0;
+ const static int ElementKind = 1;
+
+ uintptr_t objectAndKind_; // NativeObject* | Kind
+ int32_t start_;
+ int32_t count_;
+
+ SlotsEdge() : objectAndKind_(0), start_(0), count_(0) {}
+ SlotsEdge(NativeObject* object, int kind, int32_t start, int32_t count)
+ : objectAndKind_(uintptr_t(object) | kind), start_(start), count_(count)
+ {
+ MOZ_ASSERT((uintptr_t(object) & 1) == 0);
+ MOZ_ASSERT(kind <= 1);
+ MOZ_ASSERT(start >= 0);
+ MOZ_ASSERT(count > 0);
+ }
+
+ NativeObject* object() const { return reinterpret_cast<NativeObject*>(objectAndKind_ & ~1); }
+ int kind() const { return (int)(objectAndKind_ & 1); }
+
+ bool operator==(const SlotsEdge& other) const {
+ return objectAndKind_ == other.objectAndKind_ &&
+ start_ == other.start_ &&
+ count_ == other.count_;
+ }
+
+ bool operator!=(const SlotsEdge& other) const {
+ return !(*this == other);
+ }
+
+ // True if this SlotsEdge range overlaps with the other SlotsEdge range,
+ // false if they do not overlap.
+ bool overlaps(const SlotsEdge& other) const {
+ if (objectAndKind_ != other.objectAndKind_)
+ return false;
+
+ // Widen our range by one on each side so that we consider
+ // adjacent-but-not-actually-overlapping ranges as overlapping. This
+ // is particularly useful for coalescing a series of increasing or
+ // decreasing single index writes 0, 1, 2, ..., N into a SlotsEdge
+ // range of elements [0, N].
+ auto end = start_ + count_ + 1;
+ auto start = start_ - 1;
+
+ auto otherEnd = other.start_ + other.count_;
+ return (start <= other.start_ && other.start_ <= end) ||
+ (start <= otherEnd && otherEnd <= end);
+ }
+
+ // Destructively make this SlotsEdge range the union of the other
+ // SlotsEdge range and this one. A precondition is that the ranges must
+ // overlap.
+ void merge(const SlotsEdge& other) {
+ MOZ_ASSERT(overlaps(other));
+ auto end = Max(start_ + count_, other.start_ + other.count_);
+ start_ = Min(start_, other.start_);
+ count_ = end - start_;
+ }
+
+ bool maybeInRememberedSet(const Nursery& n) const {
+ return !IsInsideNursery(reinterpret_cast<Cell*>(object()));
+ }
+
+ void trace(TenuringTracer& mover) const;
+
+ explicit operator bool() const { return objectAndKind_ != 0; }
+
+ typedef struct {
+ typedef SlotsEdge Lookup;
+ static HashNumber hash(const Lookup& l) { return l.objectAndKind_ ^ l.start_ ^ l.count_; }
+ static bool match(const SlotsEdge& k, const Lookup& l) { return k == l; }
+ } Hasher;
+ };
+
+ template <typename Buffer, typename Edge>
+ void unput(Buffer& buffer, const Edge& edge) {
+ MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ if (!isEnabled())
+ return;
+ mozilla::ReentrancyGuard g(*this);
+ buffer.unput(this, edge);
+ }
+
+ template <typename Buffer, typename Edge>
+ void put(Buffer& buffer, const Edge& edge) {
+ MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
+ MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+ if (!isEnabled())
+ return;
+ mozilla::ReentrancyGuard g(*this);
+ if (edge.maybeInRememberedSet(nursery_))
+ buffer.put(this, edge);
+ }
+
+ MonoTypeBuffer<ValueEdge> bufferVal;
+ MonoTypeBuffer<CellPtrEdge> bufferCell;
+ MonoTypeBuffer<SlotsEdge> bufferSlot;
+ ArenaCellSet* bufferWholeCell;
+ GenericBuffer bufferGeneric;
+ bool cancelIonCompilations_;
+
+ JSRuntime* runtime_;
+ const Nursery& nursery_;
+
+ bool aboutToOverflow_;
+ bool enabled_;
+#ifdef DEBUG
+ bool mEntered; /* For ReentrancyGuard. */
+#endif
+
+ public:
+ explicit StoreBuffer(JSRuntime* rt, const Nursery& nursery)
+ : bufferVal(), bufferCell(), bufferSlot(), bufferWholeCell(nullptr), bufferGeneric(),
+ cancelIonCompilations_(false), runtime_(rt), nursery_(nursery), aboutToOverflow_(false),
+ enabled_(false)
+#ifdef DEBUG
+ , mEntered(false)
+#endif
+ {
+ }
+
+ MOZ_MUST_USE bool enable();
+ void disable();
+ bool isEnabled() const { return enabled_; }
+
+ void clear();
+
+ /* Get the overflowed status. */
+ bool isAboutToOverflow() const { return aboutToOverflow_; }
+
+ bool cancelIonCompilations() const { return cancelIonCompilations_; }
+
+ /* Insert a single edge into the buffer/remembered set. */
+ void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
+ void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
+ void putCell(Cell** cellp) { put(bufferCell, CellPtrEdge(cellp)); }
+ void unputCell(Cell** cellp) { unput(bufferCell, CellPtrEdge(cellp)); }
+ void putSlot(NativeObject* obj, int kind, int32_t start, int32_t count) {
+ SlotsEdge edge(obj, kind, start, count);
+ if (bufferSlot.last_.overlaps(edge))
+ bufferSlot.last_.merge(edge);
+ else
+ put(bufferSlot, edge);
+ }
+ inline void putWholeCell(Cell* cell);
+
+ /* Insert an entry into the generic buffer. */
+ template <typename T>
+ void putGeneric(const T& t) { put(bufferGeneric, t);}
+
+ void setShouldCancelIonCompilations() {
+ cancelIonCompilations_ = true;
+ }
+
+ /* Methods to trace the source of all edges in the store buffer. */
+ void traceValues(TenuringTracer& mover) { bufferVal.trace(this, mover); }
+ void traceCells(TenuringTracer& mover) { bufferCell.trace(this, mover); }
+ void traceSlots(TenuringTracer& mover) { bufferSlot.trace(this, mover); }
+ void traceGenericEntries(JSTracer *trc) { bufferGeneric.trace(this, trc); }
+
+ void traceWholeCells(TenuringTracer& mover);
+ void traceWholeCell(TenuringTracer& mover, JS::TraceKind kind, Cell* cell);
+
+ /* For use by our owned buffers and for testing. */
+ void setAboutToOverflow();
+
+ void addToWholeCellBuffer(ArenaCellSet* set);
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, JS::GCSizes* sizes);
+};
+
+// A set of cells in an arena used to implement the whole cell store buffer.
class ArenaCellSet
{
    friend class StoreBuffer;

    // The arena this relates to.
    Arena* arena;

    // Pointer to next set forming a linked list.
    ArenaCellSet* next;

    // Bit vector for each possible cell start position.
    BitArray<ArenaCellCount> bits;

  public:
    explicit ArenaCellSet(Arena* arena);

    bool hasCell(const TenuredCell* cell) const {
        return hasCell(getCellIndex(cell));
    }

    void putCell(const TenuredCell* cell) {
        putCell(getCellIndex(cell));
    }

    // True only for the shared Empty sentinel defined below.
    bool isEmpty() const {
        return this == &Empty;
    }

    // Overloads taking a precomputed cell index (see getCellIndex).
    bool hasCell(size_t cellIndex) const;

    void putCell(size_t cellIndex);

    void check() const;

    // Sentinel object used for all empty sets.
    static ArenaCellSet Empty;

    static size_t getCellIndex(const TenuredCell* cell);
    static void getWordIndexAndMask(size_t cellIndex, size_t* wordp, uint32_t* maskp);

    // Attempt to trigger a minor GC if free space in the nursery (where these
    // objects are allocated) falls below this threshold.
    static const size_t NurseryFreeThresholdBytes = 64 * 1024;

    // Field offsets — NOTE(review): presumably consumed by generated (JIT)
    // barrier code; confirm against the callers of these accessors.
    static size_t offsetOfArena() {
        return offsetof(ArenaCellSet, arena);
    }
    static size_t offsetOfBits() {
        return offsetof(ArenaCellSet, bits);
    }
};
+
+ArenaCellSet* AllocateWholeCellSet(Arena* arena);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* gc_StoreBuffer_h */
diff --git a/js/src/gc/Tracer.cpp b/js/src/gc/Tracer.cpp
new file mode 100644
index 000000000..63cd9b08a
--- /dev/null
+++ b/js/src/gc/Tracer.cpp
@@ -0,0 +1,432 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Tracer.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/SizePrintfMacros.h"
+
+#include "jsapi.h"
+#include "jsfun.h"
+#include "jsgc.h"
+#include "jsprf.h"
+#include "jsscript.h"
+#include "jsutil.h"
+#include "NamespaceImports.h"
+
+#include "gc/GCInternals.h"
+#include "gc/Marking.h"
+#include "gc/Zone.h"
+
+#include "vm/Shape.h"
+#include "vm/Symbol.h"
+
+#include "jscompartmentinlines.h"
+#include "jsgcinlines.h"
+
+#include "vm/ObjectGroup-inl.h"
+
+using namespace js;
+using namespace js::gc;
+using mozilla::DebugOnly;
+
+namespace js {
+template<typename T>
+void
+CheckTracedThing(JSTracer* trc, T thing);
+} // namespace js
+
+
+/*** Callback Tracer Dispatch ********************************************************************/
+
// Generic dispatch for a pointer-typed edge: sanity-check the target, install
// the edge name as tracing context, forward to the tracer's onEdge hook, and
// return the (possibly updated) edge value.
template <typename T>
T
DoCallback(JS::CallbackTracer* trc, T* thingp, const char* name)
{
    CheckTracedThing(trc, *thingp);
    JS::AutoTracingName ctx(trc, name);
    trc->dispatchToOnEdge(thingp);
    return *thingp;
}
// Explicitly instantiate DoCallback for every concrete GC thing pointer type.
#define INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS(name, type, _) \
    template type* DoCallback<type*>(JS::CallbackTracer*, type**, const char*);
JS_FOR_EACH_TRACEKIND(INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS);
#undef INSTANTIATE_ALL_VALID_TRACE_FUNCTIONS
+
// Functor for DispatchTyped: applies DoCallback to the pointer contained in a
// tagged type (Value/jsid/TaggedProto) and rewraps the result.
template <typename S>
struct DoCallbackFunctor : public IdentityDefaultAdaptor<S> {
    template <typename T> S operator()(T* t, JS::CallbackTracer* trc, const char* name) {
        return js::gc::RewrapTaggedPointer<S, T>::wrap(DoCallback(trc, &t, name));
    }
};
+
// Value edges dispatch on the contained GC thing type, then store back the
// rewrapped result.
template <>
Value
DoCallback<Value>(JS::CallbackTracer* trc, Value* vp, const char* name)
{
    *vp = DispatchTyped(DoCallbackFunctor<Value>(), *vp, trc, name);
    return *vp;
}
+
// jsid edges: same tagged-pointer dispatch as Value.
template <>
jsid
DoCallback<jsid>(JS::CallbackTracer* trc, jsid* idp, const char* name)
{
    *idp = DispatchTyped(DoCallbackFunctor<jsid>(), *idp, trc, name);
    return *idp;
}
+
// TaggedProto edges: same tagged-pointer dispatch as Value.
template <>
TaggedProto
DoCallback<TaggedProto>(JS::CallbackTracer* trc, TaggedProto* protop, const char* name)
{
    *protop = DispatchTyped(DoCallbackFunctor<TaggedProto>(), *protop, trc, name);
    return *protop;
}
+
+void
+JS::CallbackTracer::getTracingEdgeName(char* buffer, size_t bufferSize)
+{
+ MOZ_ASSERT(bufferSize > 0);
+ if (contextFunctor_) {
+ (*contextFunctor_)(this, buffer, bufferSize);
+ return;
+ }
+ if (contextIndex_ != InvalidIndex) {
+ snprintf(buffer, bufferSize, "%s[%" PRIuSIZE "]", contextName_, contextIndex_);
+ return;
+ }
+ snprintf(buffer, bufferSize, "%s", contextName_);
+}
+
+
+/*** Public Tracing API **************************************************************************/
+
JS_PUBLIC_API(void)
JS::TraceChildren(JSTracer* trc, GCCellPtr thing)
{
    // Public wrapper: unpack the GCCellPtr and defer to the internal version.
    js::TraceChildren(trc, thing.asCell(), thing.kind());
}
+
// Calls T::traceChildren on an untyped cell pointer; used with
// DispatchTraceKindTyped below.
struct TraceChildrenFunctor {
    template <typename T>
    void operator()(JSTracer* trc, void* thing) {
        static_cast<T*>(thing)->traceChildren(trc);
    }
};
+
void
js::TraceChildren(JSTracer* trc, void* thing, JS::TraceKind kind)
{
    MOZ_ASSERT(thing);
    // Dispatch on the dynamic trace kind to the concrete type's traceChildren.
    TraceChildrenFunctor f;
    DispatchTraceKindTyped(f, kind, trc, thing);
}
+
namespace {
// Traces a wrapped GC thing, but only when its compartment is in the target
// set; applied to each cross-compartment wrapper key.
struct TraceIncomingFunctor {
    JSTracer* trc_;
    const JS::CompartmentSet& compartments_;
    TraceIncomingFunctor(JSTracer* trc, const JS::CompartmentSet& compartments)
      : trc_(trc), compartments_(compartments)
    {}
    template <typename T>
    void operator()(T tp) {
        if (!compartments_.has((*tp)->compartment()))
            return;
        TraceManuallyBarrieredEdge(trc_, tp, "cross-compartment wrapper");
    }
    // StringWrappers are just used to avoid copying strings
    // across zones multiple times, and don't hold a strong
    // reference.
    void operator()(JSString** tp) {}
};
} // namespace (anonymous)
+
// Trace wrappers held by compartments *outside* |compartments| whose wrapped
// thing lives *inside* it (the filter is in TraceIncomingFunctor).
JS_PUBLIC_API(void)
JS::TraceIncomingCCWs(JSTracer* trc, const JS::CompartmentSet& compartments)
{
    for (js::CompartmentsIter comp(trc->runtime(), SkipAtoms); !comp.done(); comp.next()) {
        if (compartments.has(comp))
            continue;

        for (JSCompartment::WrapperEnum e(comp); !e.empty(); e.popFront()) {
            // Tracing must not change the key; assert it stays identical.
            mozilla::DebugOnly<const CrossCompartmentKey> prior = e.front().key();
            e.front().mutableKey().applyToWrapped(TraceIncomingFunctor(trc, compartments));
            MOZ_ASSERT(e.front().key() == prior);
        }
    }
}
+
+
+/*** Cycle Collector Helpers **********************************************************************/
+
+// This function is used by the Cycle Collector (CC) to trace through -- or in
+// CC parlance, traverse -- a Shape tree. The CC does not care about Shapes or
+// BaseShapes, only the JSObjects held live by them. Thus, we walk the Shape
+// lineage, but only report non-Shape things. This effectively makes the entire
+// shape lineage into a single node in the CC, saving tremendous amounts of
+// space and time in its algorithms.
+//
+// The algorithm implemented here uses only bounded stack space. This would be
+// possible to implement outside the engine, but would require much extra
+// infrastructure and many, many more slow GOT lookups. We have implemented it
+// inside SpiderMonkey, despite the lack of general applicability, for the
+// simplicity and performance of Firefox's embedding of this engine.
void
gc::TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape)
{
    // Walk the whole shape lineage iteratively; see the block comment above
    // for why only non-Shape things are reported to the CC.
    do {
        MOZ_ASSERT(shape->base());
        shape->base()->assertConsistency();

        TraceEdge(trc, &shape->propidRef(), "propid");

        if (shape->hasGetterObject()) {
            // Trace via a temporary; the callback must not move the object
            // (asserted below).
            JSObject* tmp = shape->getterObject();
            DoCallback(trc, &tmp, "getter");
            MOZ_ASSERT(tmp == shape->getterObject());
        }

        if (shape->hasSetterObject()) {
            JSObject* tmp = shape->setterObject();
            DoCallback(trc, &tmp, "setter");
            MOZ_ASSERT(tmp == shape->setterObject());
        }

        shape = shape->previous();
    } while (shape);
}
+
+// Object groups can point to other object groups via an UnboxedLayout or the
+// the original unboxed group link. There can potentially be deep or cyclic
+// chains of such groups to trace through without going through a thing that
+// participates in cycle collection. These need to be handled iteratively to
+// avoid blowing the stack when running the cycle collector's callback tracer.
struct ObjectGroupCycleCollectorTracer : public JS::CallbackTracer
{
    explicit ObjectGroupCycleCollectorTracer(JS::CallbackTracer* innerTracer)
      : JS::CallbackTracer(innerTracer->runtime(), DoNotTraceWeakMaps),
        innerTracer(innerTracer)
    {}

    void onChild(const JS::GCCellPtr& thing) override;

    // The cycle collector's own tracer, invoked for the things it cares about.
    JS::CallbackTracer* innerTracer;
    // Groups already visited, and groups still pending iterative tracing.
    Vector<ObjectGroup*, 4, SystemAllocPolicy> seen, worklist;
};
+
void
ObjectGroupCycleCollectorTracer::onChild(const JS::GCCellPtr& thing)
{
    if (thing.is<BaseShape>()) {
        // The CC does not care about BaseShapes, and no additional GC things
        // will be reached by following this edge.
        return;
    }

    if (thing.is<JSObject>() || thing.is<JSScript>()) {
        // Invoke the inner cycle collector callback on this child. It will not
        // recurse back into TraceChildren.
        innerTracer->onChild(thing);
        return;
    }

    if (thing.is<ObjectGroup>()) {
        // If this group is required to be in an ObjectGroup chain, trace it
        // via the provided worklist rather than continuing to recurse.
        ObjectGroup& group = thing.as<ObjectGroup>();
        if (group.maybeUnboxedLayout()) {
            // Skip groups we have already queued.
            for (size_t i = 0; i < seen.length(); i++) {
                if (seen[i] == &group)
                    return;
            }
            if (seen.append(&group) && worklist.append(&group)) {
                return;
            } else {
                // If append fails, keep tracing normally. The worst that will
                // happen is we end up overrecursing.
            }
        }
    }

    // All other kinds: recurse into the thing's children with this tracer.
    TraceChildren(this, thing.asCell(), thing.kind());
}
+
void
gc::TraceCycleCollectorChildren(JS::CallbackTracer* trc, ObjectGroup* group)
{
    MOZ_ASSERT(trc->isCallbackTracer());

    // Early return if this group is not required to be in an ObjectGroup chain.
    if (!group->maybeUnboxedLayout())
        return group->traceChildren(trc);

    // Trace iteratively via a worklist to bound stack depth (see the comment
    // above ObjectGroupCycleCollectorTracer).
    ObjectGroupCycleCollectorTracer groupTracer(trc->asCallbackTracer());
    group->traceChildren(&groupTracer);

    while (!groupTracer.worklist.empty()) {
        ObjectGroup* innerGroup = groupTracer.worklist.popCopy();
        innerGroup->traceChildren(&groupTracer);
    }
}
+
+
+/*** Traced Edge Printer *************************************************************************/
+
// Number of decimal digits needed to print |num|; 0 still requires one digit.
static size_t
CountDecimalDigits(size_t num)
{
    size_t digits = 1;
    while (num >= 10) {
        num /= 10;
        digits++;
    }
    return digits;
}
+
// Render a human-readable description of a GC thing into |buf|: its kind name,
// optionally followed by per-kind details. Always NUL-terminates; truncates
// when |bufsize| is too small.
JS_PUBLIC_API(void)
JS_GetTraceThingInfo(char* buf, size_t bufsize, JSTracer* trc, void* thing,
                     JS::TraceKind kind, bool details)
{
    const char* name = nullptr; /* silence uninitialized warning */
    size_t n;

    if (bufsize == 0)
        return;

    switch (kind) {
      case JS::TraceKind::Object:
      {
        name = static_cast<JSObject*>(thing)->getClass()->name;
        break;
      }

      case JS::TraceKind::Script:
        name = "script";
        break;

      case JS::TraceKind::String:
        name = ((JSString*)thing)->isDependent()
               ? "substring"
               : "string";
        break;

      case JS::TraceKind::Symbol:
        name = "symbol";
        break;

      case JS::TraceKind::BaseShape:
        name = "base_shape";
        break;

      case JS::TraceKind::JitCode:
        name = "jitcode";
        break;

      case JS::TraceKind::LazyScript:
        name = "lazyscript";
        break;

      case JS::TraceKind::Shape:
        name = "shape";
        break;

      case JS::TraceKind::ObjectGroup:
        name = "object_group";
        break;

      default:
        name = "INVALID";
        break;
    }

    // Copy the kind name (truncated to fit) and advance buf/bufsize to the
    // remaining space for the details section below.
    n = strlen(name);
    if (n > bufsize - 1)
        n = bufsize - 1;
    js_memcpy(buf, name, n + 1);
    buf += n;
    bufsize -= n;
    *buf = '\0';

    if (details && bufsize > 2) {
        switch (kind) {
          case JS::TraceKind::Object:
          {
            JSObject* obj = (JSObject*)thing;
            if (obj->is<JSFunction>()) {
                JSFunction* fun = &obj->as<JSFunction>();
                if (fun->displayAtom()) {
                    *buf++ = ' ';
                    bufsize--;
                    PutEscapedString(buf, bufsize, fun->displayAtom(), 0);
                }
            } else if (obj->getClass()->flags & JSCLASS_HAS_PRIVATE) {
                snprintf(buf, bufsize, " %p", obj->as<NativeObject>().getPrivate());
            } else {
                snprintf(buf, bufsize, " <no private>");
            }
            break;
          }

          case JS::TraceKind::Script:
          {
            JSScript* script = static_cast<JSScript*>(thing);
            snprintf(buf, bufsize, " %s:%" PRIuSIZE, script->filename(), script->lineno());
            break;
          }

          case JS::TraceKind::String:
          {
            *buf++ = ' ';
            bufsize--;
            JSString* str = (JSString*)thing;

            if (str->isLinear()) {
                // Decide up front whether the escaped contents will fit, so
                // the length prefix can advertise truncation.
                bool willFit = str->length() + strlen("<length > ") +
                               CountDecimalDigits(str->length()) < bufsize;

                n = snprintf(buf, bufsize, "<length %" PRIuSIZE "%s> ",
                             str->length(),
                             willFit ? "" : " (truncated)");
                buf += n;
                bufsize -= n;

                PutEscapedString(buf, bufsize, &str->asLinear(), 0);
            } else {
                snprintf(buf, bufsize, "<rope: length %" PRIuSIZE ">", str->length());
            }
            break;
          }

          case JS::TraceKind::Symbol:
          {
            JS::Symbol* sym = static_cast<JS::Symbol*>(thing);
            if (JSString* desc = sym->description()) {
                if (desc->isLinear()) {
                    *buf++ = ' ';
                    bufsize--;
                    PutEscapedString(buf, bufsize, &desc->asLinear(), 0);
                } else {
                    snprintf(buf, bufsize, "<nonlinear desc>");
                }
            } else {
                snprintf(buf, bufsize, "<null>");
            }
            break;
          }

          default:
            break;
        }
    }
    // Guarantee NUL termination even if a snprintf above truncated.
    buf[bufsize - 1] = '\0';
}
+
// Convenience constructor: derive the runtime from the context.
JS::CallbackTracer::CallbackTracer(JSContext* cx, WeakMapTraceKind weakTraceKind)
  : CallbackTracer(cx->runtime(), weakTraceKind)
{}
diff --git a/js/src/gc/Tracer.h b/js/src/gc/Tracer.h
new file mode 100644
index 000000000..05c0a4e2e
--- /dev/null
+++ b/js/src/gc/Tracer.h
@@ -0,0 +1,159 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Tracer_h
+#define js_Tracer_h
+
+#include "jsfriendapi.h"
+
+#include "gc/Barrier.h"
+
+namespace js {
+
+// Internal Tracing API
+//
+// Tracing is an abstract visitation of each edge in a JS heap graph.[1] The
+// most common (and performance sensitive) use of this infrastructure is for GC
+// "marking" as part of the mark-and-sweep collector; however, this
+// infrastructure is much more general than that and is used for many other
+// purposes as well.
+//
+// One commonly misunderstood subtlety of the tracing architecture is the role
+// of graph vertices versus graph edges. Graph vertices are the heap
+// allocations -- GC things -- that are returned by Allocate. Graph edges are
+// pointers -- including tagged pointers like Value and jsid -- that link the
+// allocations into a complex heap. The tracing API deals *only* with edges.
+// Any action taken on the target of a graph edge is independent of the tracing
+// itself.
+//
+// Another common misunderstanding relates to the role of the JSTracer. The
+// JSTracer instance determines what tracing does when visiting an edge; it
+// does not itself participate in the tracing process, other than to be passed
+// through as opaque data. It works like a closure in that respect.
+//
+// Tracing implementations internal to SpiderMonkey should use these interfaces
+// instead of the public interfaces in js/TracingAPI.h. Unlike the public
+// tracing methods, these work on internal types and avoid an external call.
+//
+// Note that the implementations for these methods are, surprisingly, in
+// js/src/gc/Marking.cpp. This is so that the compiler can inline as much as
+// possible in the common, marking pathways. Conceptually, however, they remain
+// as part of the generic "tracing" architecture, rather than the more specific
+// marking implementation of tracing.
+//
+// 1 - In SpiderMonkey, we call this concept tracing rather than visiting
+// because "visiting" is already used by the compiler. Also, it's been
+// called "tracing" forever and changing it would be extremely difficult at
+// this point.
+
+// Trace through an edge in the live object graph on behalf of tracing. The
+// effect of tracing the edge depends on the JSTracer being used. For pointer
+// types, |*thingp| must not be null.
+template <typename T>
+void
+TraceEdge(JSTracer* trc, WriteBarrieredBase<T>* thingp, const char* name);
+
+template <typename T>
+void
+TraceEdge(JSTracer* trc, ReadBarriered<T>* thingp, const char* name);
+
+// Trace through an edge in the live object graph on behalf of tracing.
+template <typename T>
+void
+TraceNullableEdge(JSTracer* trc, WriteBarrieredBase<T>* thingp, const char* name);
+
+// Trace through a "root" edge. These edges are the initial edges in the object
+// graph traversal. Root edges are asserted to only be traversed in the initial
+// phase of a GC.
+template <typename T>
+void
+TraceRoot(JSTracer* trc, T* thingp, const char* name);
+
+template <typename T>
+void
+TraceRoot(JSTracer* trc, ReadBarriered<T>* thingp, const char* name);
+
+// Identical to TraceRoot, except that this variant will not crash if |*thingp|
+// is null.
+template <typename T>
+void
+TraceNullableRoot(JSTracer* trc, T* thingp, const char* name);
+
+template <typename T>
+void
+TraceNullableRoot(JSTracer* trc, ReadBarriered<T>* thingp, const char* name);
+
+// Like TraceEdge, but for edges that do not use one of the automatic barrier
+// classes and, thus, must be treated specially for moving GC. This method is
+// separate from TraceEdge to make accidental use of such edges more obvious.
+template <typename T>
+void
+TraceManuallyBarrieredEdge(JSTracer* trc, T* thingp, const char* name);
+
+// Visits a WeakRef, but does not trace its referents. If *thingp is not marked
+// at the end of marking, it is replaced by nullptr. This method records
+// thingp, so the edge location must not change after this function is called.
+template <typename T>
+void
+TraceWeakEdge(JSTracer* trc, WeakRef<T>* thingp, const char* name);
+
+// Trace all edges contained in the given array.
+template <typename T>
+void
+TraceRange(JSTracer* trc, size_t len, WriteBarrieredBase<T>* vec, const char* name);
+
+// Trace all root edges in the given array.
+template <typename T>
+void
+TraceRootRange(JSTracer* trc, size_t len, T* vec, const char* name);
+
+// Trace an edge that crosses compartment boundaries. If the compartment of the
+// destination thing is not being GC'd, then the edge will not be traced.
+template <typename T>
+void
+TraceCrossCompartmentEdge(JSTracer* trc, JSObject* src, WriteBarrieredBase<T>* dst,
+ const char* name);
+
+// As above but with manual barriers.
+template <typename T>
+void
+TraceManuallyBarrieredCrossCompartmentEdge(JSTracer* trc, JSObject* src, T* dst,
+ const char* name);
+
+// Permanent atoms and well-known symbols are shared between runtimes and must
+// use a separate marking path so that we can filter them out of normal heap
+// tracing.
+template <typename T>
+void
+TraceProcessGlobalRoot(JSTracer* trc, T* thing, const char* name);
+
+// Trace a root edge that uses the base GC thing type, instead of a more
+// specific type.
+void
+TraceGenericPointerRoot(JSTracer* trc, gc::Cell** thingp, const char* name);
+
+// Trace a non-root edge that uses the base GC thing type, instead of a more
+// specific type.
+void
+TraceManuallyBarrieredGenericPointerEdge(JSTracer* trc, gc::Cell** thingp, const char* name);
+
+// Deprecated. Please use one of the strongly typed variants above.
+void
+TraceChildren(JSTracer* trc, void* thing, JS::TraceKind kind);
+
+namespace gc {
+
+// Trace through a shape or group iteratively during cycle collection to avoid
+// deep or infinite recursion.
+void
+TraceCycleCollectorChildren(JS::CallbackTracer* trc, Shape* shape);
+void
+TraceCycleCollectorChildren(JS::CallbackTracer* trc, ObjectGroup* group);
+
+} // namespace gc
+} // namespace js
+
+#endif /* js_Tracer_h */
diff --git a/js/src/gc/Verifier.cpp b/js/src/gc/Verifier.cpp
new file mode 100644
index 000000000..dd4031606
--- /dev/null
+++ b/js/src/gc/Verifier.cpp
@@ -0,0 +1,569 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifdef MOZ_VALGRIND
+# include <valgrind/memcheck.h>
+#endif
+
+#include "mozilla/IntegerPrintfMacros.h"
+#include "mozilla/Sprintf.h"
+
+#include "jscntxt.h"
+#include "jsgc.h"
+#include "jsprf.h"
+
+#include "gc/GCInternals.h"
+#include "gc/Zone.h"
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+
+#include "jscntxtinlines.h"
+#include "jsgcinlines.h"
+
+using namespace js;
+using namespace js::gc;
+
+#ifdef JS_GC_ZEAL
+
+/*
+ * Write barrier verification
+ *
+ * The next few functions are for write barrier verification.
+ *
+ * The VerifyBarriers function is a shorthand. It checks if a verification phase
+ * is currently running. If not, it starts one. Otherwise, it ends the current
+ * phase and starts a new one.
+ *
+ * The user can adjust the frequency of verifications, which causes
+ * VerifyBarriers to be a no-op for all but one out of every N calls. However,
+ * if the |always| parameter is true, it starts a new phase no matter what.
+ *
+ * Pre-Barrier Verifier:
+ * When StartVerifyBarriers is called, a snapshot is taken of all objects in
+ * the GC heap and saved in an explicit graph data structure. Later,
+ * EndVerifyBarriers traverses the heap again. Any pointer values that were in
+ * the snapshot and are no longer found must be marked; otherwise an assertion
+ * triggers. Note that we must not GC in between starting and finishing a
+ * verification phase.
+ */
+
+struct EdgeValue
+{
+ void* thing;
+ JS::TraceKind kind;
+ const char* label;
+};
+
+struct VerifyNode
+{
+ void* thing;
+ JS::TraceKind kind;
+ uint32_t count;
+ EdgeValue edges[1];
+};
+
+typedef HashMap<void*, VerifyNode*, DefaultHasher<void*>, SystemAllocPolicy> NodeMap;
+
+/*
+ * The verifier data structures are simple. The entire graph is stored in a
+ * single block of memory. At the beginning is a VerifyNode for the root
+ * node. It is followed by a sequence of EdgeValues--the exact number is given
+ * in the node. After the edges come more nodes and their edges.
+ *
+ * The edgeptr and term fields are used to allocate out of the block of memory
+ * for the graph. If we run out of memory (i.e., if edgeptr goes beyond term),
+ * we just abandon the verification.
+ *
+ * The nodemap field is a hashtable that maps from the address of the GC thing
+ * to the VerifyNode that represents it.
+ */
+class js::VerifyPreTracer final : public JS::CallbackTracer
+{
+ JS::AutoDisableGenerationalGC noggc;
+
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ public:
+ /* The gcNumber when the verification began. */
+ uint64_t number;
+
+ /* This counts up to gcZealFrequency to decide whether to verify. */
+ int count;
+
+ /* This graph represents the initial GC "snapshot". */
+ VerifyNode* curnode;
+ VerifyNode* root;
+ char* edgeptr;
+ char* term;
+ NodeMap nodemap;
+
+ explicit VerifyPreTracer(JSRuntime* rt)
+ : JS::CallbackTracer(rt), noggc(rt), number(rt->gc.gcNumber()), count(0), curnode(nullptr),
+ root(nullptr), edgeptr(nullptr), term(nullptr)
+ {}
+
+ ~VerifyPreTracer() {
+ js_free(root);
+ }
+};
+
+/*
+ * This function builds up the heap snapshot by adding edges to the current
+ * node.
+ */
+void
+VerifyPreTracer::onChild(const JS::GCCellPtr& thing)
+{
+ MOZ_ASSERT(!IsInsideNursery(thing.asCell()));
+
+ // Skip things in other runtimes.
+ if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime())
+ return;
+
+ edgeptr += sizeof(EdgeValue);
+ if (edgeptr >= term) {
+ edgeptr = term;
+ return;
+ }
+
+ VerifyNode* node = curnode;
+ uint32_t i = node->count;
+
+ node->edges[i].thing = thing.asCell();
+ node->edges[i].kind = thing.kind();
+ node->edges[i].label = contextName();
+ node->count++;
+}
+
+static VerifyNode*
+MakeNode(VerifyPreTracer* trc, void* thing, JS::TraceKind kind)
+{
+ NodeMap::AddPtr p = trc->nodemap.lookupForAdd(thing);
+ if (!p) {
+ VerifyNode* node = (VerifyNode*)trc->edgeptr;
+ trc->edgeptr += sizeof(VerifyNode) - sizeof(EdgeValue);
+ if (trc->edgeptr >= trc->term) {
+ trc->edgeptr = trc->term;
+ return nullptr;
+ }
+
+ node->thing = thing;
+ node->count = 0;
+ node->kind = kind;
+ if (!trc->nodemap.add(p, thing, node)) {
+ trc->edgeptr = trc->term;
+ return nullptr;
+ }
+
+ return node;
+ }
+ return nullptr;
+}
+
+static VerifyNode*
+NextNode(VerifyNode* node)
+{
+ if (node->count == 0)
+ return (VerifyNode*)((char*)node + sizeof(VerifyNode) - sizeof(EdgeValue));
+ else
+ return (VerifyNode*)((char*)node + sizeof(VerifyNode) +
+ sizeof(EdgeValue)*(node->count - 1));
+}
+
+void
+gc::GCRuntime::startVerifyPreBarriers()
+{
+ if (verifyPreData || isIncrementalGCInProgress())
+ return;
+
+ if (IsIncrementalGCUnsafe(rt) != AbortReason::None)
+ return;
+
+ number++;
+
+ VerifyPreTracer* trc = js_new<VerifyPreTracer>(rt);
+ if (!trc)
+ return;
+
+ AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
+
+ for (auto chunk = allNonEmptyChunks(); !chunk.done(); chunk.next())
+ chunk->bitmap.clear();
+
+ gcstats::AutoPhase ap(stats, gcstats::PHASE_TRACE_HEAP);
+
+ const size_t size = 64 * 1024 * 1024;
+ trc->root = (VerifyNode*)js_malloc(size);
+ if (!trc->root)
+ goto oom;
+ trc->edgeptr = (char*)trc->root;
+ trc->term = trc->edgeptr + size;
+
+ if (!trc->nodemap.init())
+ goto oom;
+
+ /* Create the root node. */
+ trc->curnode = MakeNode(trc, nullptr, JS::TraceKind(0));
+
+ incrementalState = State::MarkRoots;
+
+ /* Make all the roots be edges emanating from the root node. */
+ traceRuntime(trc, prep.session().lock);
+
+ VerifyNode* node;
+ node = trc->curnode;
+ if (trc->edgeptr == trc->term)
+ goto oom;
+
+ /* For each edge, make a node for it if one doesn't already exist. */
+ while ((char*)node < trc->edgeptr) {
+ for (uint32_t i = 0; i < node->count; i++) {
+ EdgeValue& e = node->edges[i];
+ VerifyNode* child = MakeNode(trc, e.thing, e.kind);
+ if (child) {
+ trc->curnode = child;
+ js::TraceChildren(trc, e.thing, e.kind);
+ }
+ if (trc->edgeptr == trc->term)
+ goto oom;
+ }
+
+ node = NextNode(node);
+ }
+
+ verifyPreData = trc;
+ incrementalState = State::Mark;
+ marker.start();
+
+ for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
+ PurgeJITCaches(zone);
+ if (!zone->usedByExclusiveThread) {
+ zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
+ zone->arenas.purge();
+ }
+ }
+
+ return;
+
+oom:
+ incrementalState = State::NotActive;
+ js_delete(trc);
+ verifyPreData = nullptr;
+}
+
+static bool
+IsMarkedOrAllocated(TenuredCell* cell)
+{
+ return cell->isMarked() || cell->arena()->allocatedDuringIncremental;
+}
+
+struct CheckEdgeTracer : public JS::CallbackTracer {
+ VerifyNode* node;
+ explicit CheckEdgeTracer(JSRuntime* rt) : JS::CallbackTracer(rt), node(nullptr) {}
+ void onChild(const JS::GCCellPtr& thing) override;
+};
+
+static const uint32_t MAX_VERIFIER_EDGES = 1000;
+
+/*
+ * This function is called by endVerifyPreBarriers for every heap edge. If the
+ * edge already existed in the original snapshot, we "cancel it out" by
+ * overwriting it with nullptr. endVerifyPreBarriers later asserts that the
+ * remaining non-nullptr edges (i.e., the ones from the original snapshot that
+ * must have been modified) must point to marked objects.
+ */
+void
+CheckEdgeTracer::onChild(const JS::GCCellPtr& thing)
+{
+ // Skip things in other runtimes.
+ if (thing.asCell()->asTenured().runtimeFromAnyThread() != runtime())
+ return;
+
+ /* Avoid n^2 behavior. */
+ if (node->count > MAX_VERIFIER_EDGES)
+ return;
+
+ for (uint32_t i = 0; i < node->count; i++) {
+ if (node->edges[i].thing == thing.asCell()) {
+ MOZ_ASSERT(node->edges[i].kind == thing.kind());
+ node->edges[i].thing = nullptr;
+ return;
+ }
+ }
+}
+
+void
+js::gc::AssertSafeToSkipBarrier(TenuredCell* thing)
+{
+ Zone* zone = thing->zoneFromAnyThread();
+ MOZ_ASSERT(!zone->needsIncrementalBarrier() || zone->isAtomsZone());
+}
+
+static bool
+IsMarkedOrAllocated(const EdgeValue& edge)
+{
+ if (!edge.thing || IsMarkedOrAllocated(TenuredCell::fromPointer(edge.thing)))
+ return true;
+
+ // Permanent atoms and well-known symbols aren't marked during graph traversal.
+ if (edge.kind == JS::TraceKind::String && static_cast<JSString*>(edge.thing)->isPermanentAtom())
+ return true;
+ if (edge.kind == JS::TraceKind::Symbol && static_cast<JS::Symbol*>(edge.thing)->isWellKnownSymbol())
+ return true;
+
+ return false;
+}
+
+void
+gc::GCRuntime::endVerifyPreBarriers()
+{
+ VerifyPreTracer* trc = verifyPreData;
+
+ if (!trc)
+ return;
+
+ MOZ_ASSERT(!JS::IsGenerationalGCEnabled(rt));
+
+ AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
+
+ bool compartmentCreated = false;
+
+ /* We need to disable barriers before tracing, which may invoke barriers. */
+ for (ZonesIter zone(rt, WithAtoms); !zone.done(); zone.next()) {
+ if (!zone->needsIncrementalBarrier())
+ compartmentCreated = true;
+
+ zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
+ PurgeJITCaches(zone);
+ }
+
+    /*
+     * We need to bump gcNumber so that the JIT knows that jitcode has
+     * been discarded.
+     */
+ MOZ_ASSERT(trc->number == number);
+ number++;
+
+ verifyPreData = nullptr;
+ incrementalState = State::NotActive;
+
+ if (!compartmentCreated && IsIncrementalGCUnsafe(rt) == AbortReason::None) {
+ CheckEdgeTracer cetrc(rt);
+
+ /* Start after the roots. */
+ VerifyNode* node = NextNode(trc->root);
+ while ((char*)node < trc->edgeptr) {
+ cetrc.node = node;
+ js::TraceChildren(&cetrc, node->thing, node->kind);
+
+ if (node->count <= MAX_VERIFIER_EDGES) {
+ for (uint32_t i = 0; i < node->count; i++) {
+ EdgeValue& edge = node->edges[i];
+ if (!IsMarkedOrAllocated(edge)) {
+ char msgbuf[1024];
+ SprintfLiteral(msgbuf,
+ "[barrier verifier] Unmarked edge: %s %p '%s' edge to %s %p",
+ JS::GCTraceKindToAscii(node->kind), node->thing,
+ edge.label,
+ JS::GCTraceKindToAscii(edge.kind), edge.thing);
+ MOZ_ReportAssertionFailure(msgbuf, __FILE__, __LINE__);
+ MOZ_CRASH();
+ }
+ }
+ }
+
+ node = NextNode(node);
+ }
+ }
+
+ marker.reset();
+ marker.stop();
+
+ js_delete(trc);
+}
+
+/*** Barrier Verifier Scheduling ***/
+
+void
+gc::GCRuntime::verifyPreBarriers()
+{
+ if (verifyPreData)
+ endVerifyPreBarriers();
+ else
+ startVerifyPreBarriers();
+}
+
+void
+gc::VerifyBarriers(JSRuntime* rt, VerifierType type)
+{
+ if (type == PreBarrierVerifier)
+ rt->gc.verifyPreBarriers();
+}
+
+void
+gc::GCRuntime::maybeVerifyPreBarriers(bool always)
+{
+ if (!hasZealMode(ZealMode::VerifierPre))
+ return;
+
+ if (rt->mainThread.suppressGC)
+ return;
+
+ if (verifyPreData) {
+ if (++verifyPreData->count < zealFrequency && !always)
+ return;
+
+ endVerifyPreBarriers();
+ }
+
+ startVerifyPreBarriers();
+}
+
+void
+js::gc::MaybeVerifyBarriers(JSContext* cx, bool always)
+{
+ GCRuntime* gc = &cx->runtime()->gc;
+ gc->maybeVerifyPreBarriers(always);
+}
+
+void
+js::gc::GCRuntime::finishVerifier()
+{
+ if (verifyPreData) {
+ js_delete(verifyPreData);
+ verifyPreData = nullptr;
+ }
+}
+
+#endif /* JS_GC_ZEAL */
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+
+class CheckHeapTracer : public JS::CallbackTracer
+{
+ public:
+ explicit CheckHeapTracer(JSRuntime* rt);
+ bool init();
+ void check(AutoLockForExclusiveAccess& lock);
+
+ private:
+ void onChild(const JS::GCCellPtr& thing) override;
+
+ struct WorkItem {
+ WorkItem(JS::GCCellPtr thing, const char* name, int parentIndex)
+ : thing(thing), name(name), parentIndex(parentIndex), processed(false)
+ {}
+
+ JS::GCCellPtr thing;
+ const char* name;
+ int parentIndex;
+ bool processed;
+ };
+
+ JSRuntime* rt;
+ bool oom;
+ size_t failures;
+ HashSet<Cell*, DefaultHasher<Cell*>, SystemAllocPolicy> visited;
+ Vector<WorkItem, 0, SystemAllocPolicy> stack;
+ int parentIndex;
+};
+
+CheckHeapTracer::CheckHeapTracer(JSRuntime* rt)
+ : CallbackTracer(rt, TraceWeakMapKeysValues),
+ rt(rt),
+ oom(false),
+ failures(0),
+ parentIndex(-1)
+{
+#ifdef DEBUG
+ setCheckEdges(false);
+#endif
+}
+
+bool
+CheckHeapTracer::init()
+{
+ return visited.init();
+}
+
+inline static bool
+IsValidGCThingPointer(Cell* cell)
+{
+ return (uintptr_t(cell) & CellMask) == 0;
+}
+
+void
+CheckHeapTracer::onChild(const JS::GCCellPtr& thing)
+{
+ Cell* cell = thing.asCell();
+ if (visited.lookup(cell))
+ return;
+
+ if (!visited.put(cell)) {
+ oom = true;
+ return;
+ }
+
+ if (!IsValidGCThingPointer(cell) || !IsGCThingValidAfterMovingGC(cell))
+ {
+ failures++;
+ fprintf(stderr, "Bad pointer %p\n", cell);
+ const char* name = contextName();
+ for (int index = parentIndex; index != -1; index = stack[index].parentIndex) {
+ const WorkItem& parent = stack[index];
+ cell = parent.thing.asCell();
+ fprintf(stderr, " from %s %p %s edge\n",
+ GCTraceKindToAscii(cell->getTraceKind()), cell, name);
+ name = parent.name;
+ }
+ fprintf(stderr, " from root %s\n", name);
+ return;
+ }
+
+ WorkItem item(thing, contextName(), parentIndex);
+ if (!stack.append(item))
+ oom = true;
+}
+
+void
+CheckHeapTracer::check(AutoLockForExclusiveAccess& lock)
+{
+ // The analysis thinks that traceRuntime might GC by calling a GC callback.
+ JS::AutoSuppressGCAnalysis nogc;
+ if (!rt->isBeingDestroyed())
+ rt->gc.traceRuntime(this, lock);
+
+ while (!stack.empty()) {
+ WorkItem item = stack.back();
+ if (item.processed) {
+ stack.popBack();
+ } else {
+ parentIndex = stack.length() - 1;
+ TraceChildren(this, item.thing);
+ stack.back().processed = true;
+ }
+ }
+
+ if (oom)
+ return;
+
+ if (failures) {
+ fprintf(stderr, "Heap check: %" PRIuSIZE " failure(s) out of %" PRIu32 " pointers checked\n",
+ failures, visited.count());
+ }
+ MOZ_RELEASE_ASSERT(failures == 0);
+}
+
+void
+js::gc::CheckHeapAfterGC(JSRuntime* rt)
+{
+ AutoTraceSession session(rt, JS::HeapState::Tracing);
+ CheckHeapTracer tracer(rt);
+ if (tracer.init())
+ tracer.check(session.lock);
+}
+
+#endif /* JSGC_HASH_TABLE_CHECKS */
diff --git a/js/src/gc/Zone.cpp b/js/src/gc/Zone.cpp
new file mode 100644
index 000000000..ed099341c
--- /dev/null
+++ b/js/src/gc/Zone.cpp
@@ -0,0 +1,471 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#include "gc/Zone.h"
+
+#include "jsgc.h"
+
+#include "gc/Policy.h"
+#include "jit/BaselineJIT.h"
+#include "jit/Ion.h"
+#include "jit/JitCompartment.h"
+#include "vm/Debugger.h"
+#include "vm/Runtime.h"
+
+#include "jscompartmentinlines.h"
+#include "jsgcinlines.h"
+
+using namespace js;
+using namespace js::gc;
+
+Zone * const Zone::NotOnList = reinterpret_cast<Zone*>(1);
+
+JS::Zone::Zone(JSRuntime* rt)
+ : JS::shadow::Zone(rt, &rt->gc.marker),
+ debuggers(nullptr),
+ suppressAllocationMetadataBuilder(false),
+ arenas(rt),
+ types(this),
+ compartments(),
+ gcGrayRoots(),
+ gcWeakKeys(SystemAllocPolicy(), rt->randomHashCodeScrambler()),
+ typeDescrObjects(this, SystemAllocPolicy()),
+ gcMallocBytes(0),
+ gcMallocGCTriggered(false),
+ usage(&rt->gc.usage),
+ gcDelayBytes(0),
+ propertyTree(this),
+ baseShapes(this, BaseShapeSet()),
+ initialShapes(this, InitialShapeSet()),
+ data(nullptr),
+ isSystem(false),
+ usedByExclusiveThread(false),
+ active(false),
+ jitZone_(nullptr),
+ gcState_(NoGC),
+ gcScheduled_(false),
+ gcPreserveCode_(false),
+ jitUsingBarriers_(false),
+ keepShapeTables_(false),
+ listNext_(NotOnList)
+{
+ /* Ensure that there are no vtables to mess us up here. */
+ MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
+ static_cast<JS::shadow::Zone*>(this));
+
+ AutoLockGC lock(rt);
+ threshold.updateAfterGC(8192, GC_NORMAL, rt->gc.tunables, rt->gc.schedulingState, lock);
+ setGCMaxMallocBytes(rt->gc.maxMallocBytesAllocated() * 0.9);
+}
+
+Zone::~Zone()
+{
+ JSRuntime* rt = runtimeFromMainThread();
+ if (this == rt->gc.systemZone)
+ rt->gc.systemZone = nullptr;
+
+ js_delete(debuggers);
+ js_delete(jitZone_);
+
+#ifdef DEBUG
+ // Avoid assertion destroying the weak map list if the embedding leaked GC things.
+ if (!rt->gc.shutdownCollectedEverything())
+ gcWeakMapList.clear();
+#endif
+}
+
+bool Zone::init(bool isSystemArg)
+{
+ isSystem = isSystemArg;
+ return uniqueIds_.init() &&
+ gcZoneGroupEdges.init() &&
+ gcWeakKeys.init() &&
+ typeDescrObjects.init();
+}
+
+void
+Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
+{
+ if (updateJit == UpdateJit && needs != jitUsingBarriers_) {
+ jit::ToggleBarriers(this, needs);
+ jitUsingBarriers_ = needs;
+ }
+
+ MOZ_ASSERT_IF(needs && isAtomsZone(), !runtimeFromMainThread()->exclusiveThreadsPresent());
+ MOZ_ASSERT_IF(needs, canCollect());
+ needsIncrementalBarrier_ = needs;
+}
+
+void
+Zone::resetGCMallocBytes()
+{
+ gcMallocBytes = ptrdiff_t(gcMaxMallocBytes);
+ gcMallocGCTriggered = false;
+}
+
+void
+Zone::setGCMaxMallocBytes(size_t value)
+{
+    /*
+     * For compatibility, treat any value that exceeds PTRDIFF_MAX as
+     * PTRDIFF_MAX (i.e. clamp it to the largest value a ptrdiff_t can hold).
+     */
+ gcMaxMallocBytes = (ptrdiff_t(value) >= 0) ? value : size_t(-1) >> 1;
+ resetGCMallocBytes();
+}
+
+void
+Zone::onTooMuchMalloc()
+{
+ if (!gcMallocGCTriggered) {
+ GCRuntime& gc = runtimeFromAnyThread()->gc;
+ gcMallocGCTriggered = gc.triggerZoneGC(this, JS::gcreason::TOO_MUCH_MALLOC);
+ }
+}
+
+void
+Zone::beginSweepTypes(FreeOp* fop, bool releaseTypes)
+{
+ // Periodically release observed types for all scripts. This is safe to
+ // do when there are no frames for the zone on the stack.
+ if (active)
+ releaseTypes = false;
+
+ AutoClearTypeInferenceStateOnOOM oom(this);
+ types.beginSweep(fop, releaseTypes, oom);
+}
+
+Zone::DebuggerVector*
+Zone::getOrCreateDebuggers(JSContext* cx)
+{
+ if (debuggers)
+ return debuggers;
+
+ debuggers = js_new<DebuggerVector>();
+ if (!debuggers)
+ ReportOutOfMemory(cx);
+ return debuggers;
+}
+
+void
+Zone::sweepBreakpoints(FreeOp* fop)
+{
+ if (fop->runtime()->debuggerList.isEmpty())
+ return;
+
+ /*
+ * Sweep all compartments in a zone at the same time, since there is no way
+ * to iterate over the scripts belonging to a single compartment in a zone.
+ */
+
+ MOZ_ASSERT(isGCSweepingOrCompacting());
+ for (auto iter = cellIter<JSScript>(); !iter.done(); iter.next()) {
+ JSScript* script = iter;
+ if (!script->hasAnyBreakpointsOrStepMode())
+ continue;
+
+ bool scriptGone = IsAboutToBeFinalizedUnbarriered(&script);
+ MOZ_ASSERT(script == iter);
+ for (unsigned i = 0; i < script->length(); i++) {
+ BreakpointSite* site = script->getBreakpointSite(script->offsetToPC(i));
+ if (!site)
+ continue;
+
+ Breakpoint* nextbp;
+ for (Breakpoint* bp = site->firstBreakpoint(); bp; bp = nextbp) {
+ nextbp = bp->nextInSite();
+ GCPtrNativeObject& dbgobj = bp->debugger->toJSObjectRef();
+
+ // If we are sweeping, then we expect the script and the
+ // debugger object to be swept in the same zone group, except if
+ // the breakpoint was added after we computed the zone
+ // groups. In this case both script and debugger object must be
+ // live.
+ MOZ_ASSERT_IF(isGCSweeping() && dbgobj->zone()->isCollecting(),
+ dbgobj->zone()->isGCSweeping() ||
+ (!scriptGone && dbgobj->asTenured().isMarked()));
+
+ bool dying = scriptGone || IsAboutToBeFinalized(&dbgobj);
+ MOZ_ASSERT_IF(!dying, !IsAboutToBeFinalized(&bp->getHandlerRef()));
+ if (dying)
+ bp->destroy(fop);
+ }
+ }
+ }
+}
+
+void
+Zone::sweepWeakMaps()
+{
+ /* Finalize unreachable (key,value) pairs in all weak maps. */
+ WeakMapBase::sweepZone(this);
+}
+
+void
+Zone::discardJitCode(FreeOp* fop, bool discardBaselineCode)
+{
+ if (!jitZone())
+ return;
+
+ if (isPreservingCode()) {
+ PurgeJITCaches(this);
+ } else {
+
+ if (discardBaselineCode) {
+#ifdef DEBUG
+ /* Assert no baseline scripts are marked as active. */
+ for (auto script = cellIter<JSScript>(); !script.done(); script.next())
+ MOZ_ASSERT_IF(script->hasBaselineScript(), !script->baselineScript()->active());
+#endif
+
+ /* Mark baseline scripts on the stack as active. */
+ jit::MarkActiveBaselineScripts(this);
+ }
+
+ /* Only mark OSI points if code is being discarded. */
+ jit::InvalidateAll(fop, this);
+
+ for (auto script = cellIter<JSScript>(); !script.done(); script.next()) {
+ jit::FinishInvalidation(fop, script);
+
+ /*
+ * Discard baseline script if it's not marked as active. Note that
+ * this also resets the active flag.
+ */
+ if (discardBaselineCode)
+ jit::FinishDiscardBaselineScript(fop, script);
+
+            /*
+             * Warm-up counters for scripts are reset on GC. After discarding
+             * code we need to let scripts warm back up to get information such
+             * as which opcodes are setting array holes or accessing getters.
+             */
+ script->resetWarmUpCounter();
+ }
+
+ /*
+ * When scripts contains pointers to nursery things, the store buffer
+ * can contain entries that point into the optimized stub space. Since
+ * this method can be called outside the context of a GC, this situation
+ * could result in us trying to mark invalid store buffer entries.
+ *
+ * Defer freeing any allocated blocks until after the next minor GC.
+ */
+ if (discardBaselineCode)
+ jitZone()->optimizedStubSpace()->freeAllAfterMinorGC(fop->runtime());
+ }
+}
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+void
+JS::Zone::checkUniqueIdTableAfterMovingGC()
+{
+ for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront())
+ js::gc::CheckGCThingAfterMovingGC(e.front().key());
+}
+#endif
+
+uint64_t
+Zone::gcNumber()
+{
+ // Zones in use by exclusive threads are not collected, and threads using
+ // them cannot access the main runtime's gcNumber without racing.
+ return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gc.gcNumber();
+}
+
+js::jit::JitZone*
+Zone::createJitZone(JSContext* cx)
+{
+ MOZ_ASSERT(!jitZone_);
+
+ if (!cx->runtime()->getJitRuntime(cx))
+ return nullptr;
+
+ jitZone_ = cx->new_<js::jit::JitZone>();
+ return jitZone_;
+}
+
+bool
+Zone::hasMarkedCompartments()
+{
+ for (CompartmentsInZoneIter comp(this); !comp.done(); comp.next()) {
+ if (comp->marked)
+ return true;
+ }
+ return false;
+}
+
+bool
+Zone::canCollect()
+{
+ // Zones cannot be collected while in use by other threads.
+ if (usedByExclusiveThread)
+ return false;
+ JSRuntime* rt = runtimeFromAnyThread();
+ if (isAtomsZone() && rt->exclusiveThreadsPresent())
+ return false;
+ return true;
+}
+
+void
+Zone::notifyObservingDebuggers()
+{
+ for (CompartmentsInZoneIter comps(this); !comps.done(); comps.next()) {
+ JSRuntime* rt = runtimeFromAnyThread();
+ RootedGlobalObject global(rt->contextFromMainThread(), comps->unsafeUnbarrieredMaybeGlobal());
+ if (!global)
+ continue;
+
+ GlobalObject::DebuggerVector* dbgs = global->getDebuggers();
+ if (!dbgs)
+ continue;
+
+ for (GlobalObject::DebuggerVector::Range r = dbgs->all(); !r.empty(); r.popFront()) {
+ if (!r.front()->debuggeeIsBeingCollected(rt->gc.majorGCCount())) {
+#ifdef DEBUG
+ fprintf(stderr,
+ "OOM while notifying observing Debuggers of a GC: The onGarbageCollection\n"
+ "hook will not be fired for this GC for some Debuggers!\n");
+#endif
+ return;
+ }
+ }
+ }
+}
+
+bool
+js::ZonesIter::atAtomsZone(JSRuntime* rt)
+{
+ return rt->isAtomsZone(*it);
+}
+
+bool
+Zone::isOnList() const
+{
+ return listNext_ != NotOnList;
+}
+
+Zone*
+Zone::nextZone() const
+{
+ MOZ_ASSERT(isOnList());
+ return listNext_;
+}
+
+void
+Zone::clearTables()
+{
+ if (baseShapes.initialized())
+ baseShapes.clear();
+ if (initialShapes.initialized())
+ initialShapes.clear();
+}
+
+void
+Zone::fixupAfterMovingGC()
+{
+ fixupInitialShapeTable();
+}
+
+ZoneList::ZoneList()
+ : head(nullptr), tail(nullptr)
+{}
+
+ZoneList::ZoneList(Zone* zone)
+ : head(zone), tail(zone)
+{
+ MOZ_RELEASE_ASSERT(!zone->isOnList());
+ zone->listNext_ = nullptr;
+}
+
+ZoneList::~ZoneList()
+{
+ MOZ_ASSERT(isEmpty());
+}
+
+void
+ZoneList::check() const
+{
+#ifdef DEBUG
+ MOZ_ASSERT((head == nullptr) == (tail == nullptr));
+ if (!head)
+ return;
+
+ Zone* zone = head;
+ for (;;) {
+ MOZ_ASSERT(zone && zone->isOnList());
+ if (zone == tail)
+ break;
+ zone = zone->listNext_;
+ }
+ MOZ_ASSERT(!zone->listNext_);
+#endif
+}
+
+bool
+ZoneList::isEmpty() const
+{
+ return head == nullptr;
+}
+
+Zone*
+ZoneList::front() const
+{
+ MOZ_ASSERT(!isEmpty());
+ MOZ_ASSERT(head->isOnList());
+ return head;
+}
+
+void
+ZoneList::append(Zone* zone)
+{
+ ZoneList singleZone(zone);
+ transferFrom(singleZone);
+}
+
+void
+ZoneList::transferFrom(ZoneList& other)
+{
+ check();
+ other.check();
+ MOZ_ASSERT(tail != other.tail);
+
+ if (tail)
+ tail->listNext_ = other.head;
+ else
+ head = other.head;
+ tail = other.tail;
+
+ other.head = nullptr;
+ other.tail = nullptr;
+}
+
+void
+ZoneList::removeFront()
+{
+ MOZ_ASSERT(!isEmpty());
+ check();
+
+ Zone* front = head;
+ head = head->listNext_;
+ if (!head)
+ tail = nullptr;
+
+ front->listNext_ = Zone::NotOnList;
+}
+
+void
+ZoneList::clear()
+{
+ while (!isEmpty())
+ removeFront();
+}
+
+JS_PUBLIC_API(void)
+JS::shadow::RegisterWeakCache(JS::Zone* zone, WeakCache<void*>* cachep)
+{
+ zone->registerWeakCache(cachep);
+}
diff --git a/js/src/gc/Zone.h b/js/src/gc/Zone.h
new file mode 100644
index 000000000..a3a6dc07f
--- /dev/null
+++ b/js/src/gc/Zone.h
@@ -0,0 +1,743 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef gc_Zone_h
+#define gc_Zone_h
+
+#include "mozilla/Atomics.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jscntxt.h"
+
+#include "ds/SplayTree.h"
+#include "gc/FindSCCs.h"
+#include "gc/GCRuntime.h"
+#include "js/GCHashTable.h"
+#include "js/TracingAPI.h"
+#include "vm/MallocProvider.h"
+#include "vm/TypeInference.h"
+
+namespace js {
+
+namespace jit {
+class JitZone;
+} // namespace jit
+
+namespace gc {
+
+// This class encapsulates the data that determines when we need to do a zone GC.
+class ZoneHeapThreshold
+{
+    // The "growth factor" for computing our next thresholds after a GC.
+    double gcHeapGrowthFactor_;
+
+    // GC trigger threshold for allocations on the GC heap.
+    // Relaxed atomic: read from other threads; no ordering required.
+    mozilla::Atomic<size_t, mozilla::Relaxed> gcTriggerBytes_;
+
+  public:
+    ZoneHeapThreshold()
+      : gcHeapGrowthFactor_(3.0),
+        gcTriggerBytes_(0)
+    {}
+
+    double gcHeapGrowthFactor() const { return gcHeapGrowthFactor_; }
+    size_t gcTriggerBytes() const { return gcTriggerBytes_; }
+    // Allocation threshold (as a fraction of gcTriggerBytes) at which an
+    // incremental slice should be triggered; depends on GC frequency mode.
+    double allocTrigger(bool highFrequencyGC) const;
+
+    // Recompute the growth factor and trigger bytes after a collection.
+    void updateAfterGC(size_t lastBytes, JSGCInvocationKind gckind,
+                       const GCSchedulingTunables& tunables, const GCSchedulingState& state,
+                       const AutoLockGC& lock);
+    // Lower the trigger when an arena is returned to the system.
+    void updateForRemovedArena(const GCSchedulingTunables& tunables);
+
+  private:
+    static double computeZoneHeapGrowthFactorForHeapSize(size_t lastBytes,
+                                                         const GCSchedulingTunables& tunables,
+                                                         const GCSchedulingState& state);
+    static size_t computeZoneTriggerBytes(double growthFactor, size_t lastBytes,
+                                          JSGCInvocationKind gckind,
+                                          const GCSchedulingTunables& tunables,
+                                          const AutoLockGC& lock);
+};
+
+// ComponentFinder specialization used to compute strongly-connected components
+// of the zone graph (zone groups) during GC; holds the exclusive access lock
+// for the duration of the traversal.
+struct ZoneComponentFinder : public ComponentFinder<JS::Zone, ZoneComponentFinder>
+{
+    ZoneComponentFinder(uintptr_t sl, AutoLockForExclusiveAccess& lock)
+      : ComponentFinder<JS::Zone, ZoneComponentFinder>(sl), lock(lock)
+    {}
+
+    AutoLockForExclusiveAccess& lock;
+};
+
+// GCHashMap sweep policy for the unique-id table: decides whether an entry's
+// cell is dead and the entry should be removed.
+struct UniqueIdGCPolicy {
+    static bool needsSweep(Cell** cell, uint64_t* value);
+};
+
+// Maps a Cell* to a unique, 64bit id.
+using UniqueIdMap = GCHashMap<Cell*,
+                              uint64_t,
+                              PointerHasher<Cell*, 3>,
+                              SystemAllocPolicy,
+                              UniqueIdGCPolicy>;
+
+// Produce the next runtime-wide unique cell id.
+extern uint64_t NextCellUniqueId(JSRuntime* rt);
+
+template <typename T>
+class ZoneCellIter;
+
+} // namespace gc
+} // namespace js
+
+namespace JS {
+
+// A zone is a collection of compartments. Every compartment belongs to exactly
+// one zone. In Firefox, there is roughly one zone per tab along with a system
+// zone for everything else. Zones mainly serve as boundaries for garbage
+// collection. Unlike compartments, they have no special security properties.
+//
+// Every GC thing belongs to exactly one zone. GC things from the same zone but
+// different compartments can share an arena (4k page). GC things from different
+// zones cannot be stored in the same arena. The garbage collector is capable of
+// collecting one zone at a time; it cannot collect at the granularity of
+// compartments.
+//
+// GC things are tied to zones and compartments as follows:
+//
+// - JSObjects belong to a compartment and cannot be shared between
+// compartments. If an object needs to point to a JSObject in a different
+// compartment, regardless of zone, it must go through a cross-compartment
+// wrapper. Each compartment keeps track of its outgoing wrappers in a table.
+// JSObjects find their compartment via their ObjectGroup.
+//
+// - JSStrings do not belong to any particular compartment, but they do belong
+// to a zone. Thus, two different compartments in the same zone can point to a
+// JSString. When a string needs to be wrapped, we copy it if it's in a
+// different zone and do nothing if it's in the same zone. Thus, transferring
+// strings within a zone is very efficient.
+//
+// - Shapes and base shapes belong to a zone and are shared between compartments
+// in that zone where possible. Accessor shapes store getter and setter
+// JSObjects which belong to a single compartment, so these shapes and all
+// their descendants can't be shared with other compartments.
+//
+// - Scripts are also compartment-local and cannot be shared. A script points to
+// its compartment.
+//
+// - ObjectGroup and JitCode objects belong to a compartment and cannot be
+// shared. There is no mechanism to obtain the compartment from a JitCode
+// object.
+//
+// A zone remains alive as long as any GC things in the zone are alive. A
+// compartment remains alive as long as any JSObjects, scripts, shapes, or base
+// shapes within it are alive.
+//
+// We always guarantee that a zone has at least one live compartment by refusing
+// to delete the last compartment in a live zone.
+struct Zone : public JS::shadow::Zone,
+              public js::gc::GraphNodeBase<JS::Zone>,
+              public js::MallocProvider<JS::Zone>
+{
+    explicit Zone(JSRuntime* rt);
+    ~Zone();
+    MOZ_MUST_USE bool init(bool isSystem);
+
+    // Record edges from this zone to other zones for zone-group computation.
+    void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);
+
+    void discardJitCode(js::FreeOp* fop, bool discardBaselineCode = true);
+
+    void addSizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf,
+                                size_t* typePool,
+                                size_t* baselineStubsOptimized,
+                                size_t* uniqueIdMap,
+                                size_t* shapeTables);
+
+    void resetGCMallocBytes();
+    void setGCMaxMallocBytes(size_t value);
+    void updateMallocCounter(size_t nbytes) {
+        // Note: this code may be run from worker threads. We tolerate any
+        // thread races when updating gcMallocBytes.
+        gcMallocBytes -= ptrdiff_t(nbytes);
+        if (MOZ_UNLIKELY(isTooMuchMalloc()))
+            onTooMuchMalloc();
+    }
+
+    // Iterate over all cells in the zone. See the definition of ZoneCellIter
+    // in jsgcinlines.h for the possible arguments and documentation.
+    template <typename T, typename... Args>
+    js::gc::ZoneCellIter<T> cellIter(Args&&... args) {
+        return js::gc::ZoneCellIter<T>(const_cast<Zone*>(this), mozilla::Forward<Args>(args)...);
+    }
+
+    // The counter runs downwards from gcMaxMallocBytes; crossing zero means
+    // too much malloc memory is held.
+    bool isTooMuchMalloc() const { return gcMallocBytes <= 0; }
+    void onTooMuchMalloc();
+
+    MOZ_MUST_USE void* onOutOfMemory(js::AllocFunction allocFunc, size_t nbytes,
+                                     void* reallocPtr = nullptr) {
+        // Off-main-thread allocations cannot run the main thread's OOM
+        // handling (e.g. a last-ditch GC), so just fail.
+        if (!js::CurrentThreadCanAccessRuntime(runtime_))
+            return nullptr;
+        return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
+    }
+    void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
+
+    void beginSweepTypes(js::FreeOp* fop, bool releaseTypes);
+
+    bool hasMarkedCompartments();
+
+    void scheduleGC() { MOZ_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
+    void unscheduleGC() { gcScheduled_ = false; }
+    bool isGCScheduled() { return gcScheduled_ && canCollect(); }
+
+    void setPreservingCode(bool preserving) { gcPreserveCode_ = preserving; }
+    bool isPreservingCode() const { return gcPreserveCode_; }
+
+    bool canCollect();
+
+    void notifyObservingDebuggers();
+
+    // Per-zone GC phase. A zone not participating in the current collection
+    // stays in NoGC.
+    enum GCState {
+        NoGC,
+        Mark,
+        MarkGray,
+        Sweep,
+        Finished,
+        Compact
+    };
+    void setGCState(GCState state) {
+        MOZ_ASSERT(runtimeFromMainThread()->isHeapBusy());
+        MOZ_ASSERT_IF(state != NoGC, canCollect());
+        gcState_ = state;
+        // Entering Finished is the point at which debuggers observing this
+        // zone are notified the collection is complete.
+        if (state == Finished)
+            notifyObservingDebuggers();
+    }
+
+    bool isCollecting() const {
+        // Outside of an active collection, treat the zone as "collecting"
+        // whenever incremental barriers are required.
+        if (runtimeFromMainThread()->isHeapCollecting())
+            return gcState_ != NoGC;
+        else
+            return needsIncrementalBarrier();
+    }
+
+    bool isCollectingFromAnyThread() const {
+        if (runtimeFromAnyThread()->isHeapCollecting())
+            return gcState_ != NoGC;
+        else
+            return needsIncrementalBarrier();
+    }
+
+    // If this returns true, all object tracing must be done with a GC marking
+    // tracer.
+    bool requireGCTracer() const {
+        JSRuntime* rt = runtimeFromAnyThread();
+        return rt->isHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
+    }
+
+    bool isGCMarking() {
+        if (runtimeFromMainThread()->isHeapCollecting())
+            return gcState_ == Mark || gcState_ == MarkGray;
+        else
+            return needsIncrementalBarrier();
+    }
+
+    GCState gcState() const { return gcState_; }
+    bool wasGCStarted() const { return gcState_ != NoGC; }
+    bool isGCMarkingBlack() { return gcState_ == Mark; }
+    bool isGCMarkingGray() { return gcState_ == MarkGray; }
+    bool isGCSweeping() { return gcState_ == Sweep; }
+    bool isGCFinished() { return gcState_ == Finished; }
+    bool isGCCompacting() { return gcState_ == Compact; }
+    bool isGCSweepingOrCompacting() { return gcState_ == Sweep || gcState_ == Compact; }
+
+    // Get a number that is incremented whenever this zone is collected, and
+    // possibly at other times too.
+    uint64_t gcNumber();
+
+    // Whether JIT code compiled now must embed incremental barrier checks.
+    bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
+    bool compileBarriers(bool needsIncrementalBarrier) const {
+        return needsIncrementalBarrier ||
+               runtimeFromMainThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
+    }
+
+    enum ShouldUpdateJit { DontUpdateJit, UpdateJit };
+    void setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit);
+    const bool* addressOfNeedsIncrementalBarrier() const { return &needsIncrementalBarrier_; }
+
+    // Lazily create the per-zone JIT data on first use.
+    js::jit::JitZone* getJitZone(JSContext* cx) { return jitZone_ ? jitZone_ : createJitZone(cx); }
+    js::jit::JitZone* jitZone() { return jitZone_; }
+
+    bool isAtomsZone() const { return runtimeFromAnyThread()->isAtomsZone(this); }
+    bool isSelfHostingZone() const { return runtimeFromAnyThread()->isSelfHostingZone(this); }
+
+    void prepareForCompacting();
+
+#ifdef DEBUG
+    // For testing purposes, return the index of the zone group which this zone
+    // was swept in in the last GC.
+    unsigned lastZoneGroupIndex() { return gcLastZoneGroupIndex; }
+#endif
+
+    using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;
+
+  private:
+    // Debuggers observing this zone; null until first requested.
+    DebuggerVector* debuggers;
+
+    void sweepBreakpoints(js::FreeOp* fop);
+    void sweepUniqueIds(js::FreeOp* fop);
+    void sweepWeakMaps();
+    void sweepCompartments(js::FreeOp* fop, bool keepAtleastOne, bool lastGC);
+
+    js::jit::JitZone* createJitZone(JSContext* cx);
+
+    bool isQueuedForBackgroundSweep() {
+        // The background-sweep queue is the only ZoneList a zone is put on
+        // here, so list membership doubles as the queued flag.
+        return isOnList();
+    }
+
+    // Side map for storing a unique ids for cells, independent of address.
+    js::gc::UniqueIdMap uniqueIds_;
+
+  public:
+    bool hasDebuggers() const { return debuggers && debuggers->length(); }
+    DebuggerVector* getDebuggers() const { return debuggers; }
+    DebuggerVector* getOrCreateDebuggers(JSContext* cx);
+
+    void clearTables();
+
+    /*
+     * When true, skip calling the metadata callback. We use this:
+     * - to avoid invoking the callback recursively;
+     * - to avoid observing lazy prototype setup (which confuses callbacks that
+     *   want to use the types being set up!);
+     * - to avoid attaching allocation stacks to allocation stack nodes, which
+     *   is silly
+     * And so on.
+     */
+    bool suppressAllocationMetadataBuilder;
+
+    js::gc::ArenaLists arenas;
+
+    js::TypeZone types;
+
+    /* Live weakmaps in this zone. */
+    mozilla::LinkedList<js::WeakMapBase> gcWeakMapList;
+
+    // The set of compartments in this zone.
+    typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
+    CompartmentVector compartments;
+
+    // This zone's gray roots.
+    typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
+    GrayRootVector gcGrayRoots;
+
+    // This zone's weak edges found via graph traversal during marking,
+    // preserved for re-scanning during sweeping.
+    using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
+    WeakEdges gcWeakRefs;
+
+    // List of non-ephemeron weak containers to sweep during beginSweepingZoneGroup.
+    mozilla::LinkedList<WeakCache<void*>> weakCaches_;
+    void registerWeakCache(WeakCache<void*>* cachep) {
+        weakCaches_.insertBack(cachep);
+    }
+
+    /*
+     * Mapping from not yet marked keys to a vector of all values that the key
+     * maps to in any live weak map.
+     */
+    js::gc::WeakKeyTable gcWeakKeys;
+
+    // A set of edges from this zone to other zones.
+    //
+    // This is used during GC while calculating zone groups to record edges that
+    // can't be determined by examining this zone by itself.
+    ZoneSet gcZoneGroupEdges;
+
+    // Keep track of all TypeDescr and related objects in this compartment.
+    // This is used by the GC to trace them all first when compacting, since the
+    // TypedObject trace hook may access these objects.
+    using TypeDescrObjectSet = js::GCHashSet<js::HeapPtr<JSObject*>,
+                                             js::MovableCellHasher<js::HeapPtr<JSObject*>>,
+                                             js::SystemAllocPolicy>;
+    JS::WeakCache<TypeDescrObjectSet> typeDescrObjects;
+
+
+    // Malloc counter to measure memory pressure for GC scheduling. It runs from
+    // gcMaxMallocBytes down to zero. This counter should be used only when it's
+    // not possible to know the size of a free.
+    mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;
+
+    // GC trigger threshold for allocations on the C heap.
+    size_t gcMaxMallocBytes;
+
+    // Whether a GC has been triggered as a result of gcMallocBytes falling
+    // below zero.
+    //
+    // This should be a bool, but Atomic only supports 32-bit and pointer-sized
+    // types.
+    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;
+
+    // Track heap usage under this Zone.
+    js::gc::HeapUsage usage;
+
+    // Thresholds used to trigger GC.
+    js::gc::ZoneHeapThreshold threshold;
+
+    // Amount of data to allocate before triggering a new incremental slice for
+    // the current GC.
+    size_t gcDelayBytes;
+
+    // Shared Shape property tree.
+    js::PropertyTree propertyTree;
+
+    // Set of all unowned base shapes in the Zone.
+    JS::WeakCache<js::BaseShapeSet> baseShapes;
+
+    // Set of initial shapes in the Zone. For certain prototypes -- namely,
+    // those of various builtin classes -- there are two entries: one for a
+    // lookup via TaggedProto, and one for a lookup via JSProtoKey. See
+    // InitialShapeProto.
+    JS::WeakCache<js::InitialShapeSet> initialShapes;
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+    void checkInitialShapesTableAfterMovingGC();
+    void checkBaseShapeTableAfterMovingGC();
+#endif
+    void fixupInitialShapeTable();
+    void fixupAfterMovingGC();
+
+    // Per-zone data for use by an embedder.
+    void* data;
+
+    bool isSystem;
+
+    mozilla::Atomic<bool> usedByExclusiveThread;
+
+    // True when there are active frames.
+    bool active;
+
+#ifdef DEBUG
+    unsigned gcLastZoneGroupIndex;
+#endif
+
+    // Fold a 64-bit unique id down to a 32-bit hash number.
+    static js::HashNumber UniqueIdToHash(uint64_t uid) {
+        return js::HashNumber(uid >> 32) ^ js::HashNumber(uid & 0xFFFFFFFF);
+    }
+
+    // Creates a HashNumber based on getUniqueId. Returns false on OOM.
+    MOZ_MUST_USE bool getHashCode(js::gc::Cell* cell, js::HashNumber* hashp) {
+        uint64_t uid;
+        if (!getUniqueId(cell, &uid))
+            return false;
+        *hashp = UniqueIdToHash(uid);
+        return true;
+    }
+
+    // Puts an existing UID in |uidp|, or creates a new UID for this Cell and
+    // puts that into |uidp|. Returns false on OOM.
+    MOZ_MUST_USE bool getUniqueId(js::gc::Cell* cell, uint64_t* uidp) {
+        MOZ_ASSERT(uidp);
+        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+
+        // Get an existing uid, if one has been set.
+        auto p = uniqueIds_.lookupForAdd(cell);
+        if (p) {
+            *uidp = p->value();
+            return true;
+        }
+
+        // Set a new uid on the cell.
+        *uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
+        if (!uniqueIds_.add(p, cell, *uidp))
+            return false;
+
+        // If the cell was in the nursery, hopefully unlikely, then we need to
+        // tell the nursery about it so that it can sweep the uid if the thing
+        // does not get tenured.
+        if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell)) {
+            // Roll back the table entry so we do not leak a uid mapping for a
+            // cell the nursery cannot track.
+            uniqueIds_.remove(cell);
+            return false;
+        }
+
+        return true;
+    }
+
+    js::HashNumber getHashCodeInfallible(js::gc::Cell* cell) {
+        return UniqueIdToHash(getUniqueIdInfallible(cell));
+    }
+
+    // As getUniqueId, but crashes on OOM instead of returning failure.
+    uint64_t getUniqueIdInfallible(js::gc::Cell* cell) {
+        uint64_t uid;
+        js::AutoEnterOOMUnsafeRegion oomUnsafe;
+        if (!getUniqueId(cell, &uid))
+            oomUnsafe.crash("failed to allocate uid");
+        return uid;
+    }
+
+    // Return true if this cell has a UID associated with it.
+    MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell) {
+        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+        return uniqueIds_.has(cell);
+    }
+
+    // Transfer an id from another cell. This must only be called on behalf of a
+    // moving GC. This method is infallible.
+    void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
+        MOZ_ASSERT(src != tgt);
+        MOZ_ASSERT(!IsInsideNursery(tgt));
+        MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
+        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+        uniqueIds_.rekeyIfMoved(src, tgt);
+    }
+
+    // Remove any unique id associated with this Cell.
+    void removeUniqueId(js::gc::Cell* cell) {
+        MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
+        uniqueIds_.remove(cell);
+    }
+
+    // When finished parsing off-thread, transfer any UIDs we created in the
+    // off-thread zone into the target zone.
+    void adoptUniqueIds(JS::Zone* source) {
+        js::AutoEnterOOMUnsafeRegion oomUnsafe;
+        for (js::gc::UniqueIdMap::Enum e(source->uniqueIds_); !e.empty(); e.popFront()) {
+            MOZ_ASSERT(!uniqueIds_.has(e.front().key()));
+            if (!uniqueIds_.put(e.front().key(), e.front().value()))
+                oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
+        }
+        source->uniqueIds_.clear();
+    }
+
+    JSContext* contextFromMainThread() {
+        return runtime_->contextFromMainThread();
+    }
+
+#ifdef JSGC_HASH_TABLE_CHECKS
+    // Assert that the UniqueId table has been redirected successfully.
+    void checkUniqueIdTableAfterMovingGC();
+#endif
+
+    bool keepShapeTables() const {
+        return keepShapeTables_;
+    }
+    void setKeepShapeTables(bool b) {
+        keepShapeTables_ = b;
+    }
+
+  private:
+    // Lazily-created JIT data; see getJitZone().
+    js::jit::JitZone* jitZone_;
+
+    GCState gcState_;
+    bool gcScheduled_;
+    bool gcPreserveCode_;
+    bool jitUsingBarriers_;
+    bool keepShapeTables_;
+
+    // Allow zones to be linked into a list
+    friend class js::gc::ZoneList;
+    // Sentinel value for listNext_ meaning "not on any list".
+    static Zone * const NotOnList;
+    Zone* listNext_;
+    bool isOnList() const;
+    Zone* nextZone() const;
+
+    friend bool js::CurrentThreadCanAccessZone(Zone* zone);
+    friend class js::gc::GCRuntime;
+};
+
+} // namespace JS
+
+namespace js {
+
+// Using the atoms zone without holding the exclusive access lock is dangerous
+// because worker threads may be using it simultaneously. Therefore, it's
+// better to skip the atoms zone when iterating over zones. If you need to
+// iterate over the atoms zone, consider taking the exclusive access lock first.
+enum ZoneSelector {
+    WithAtoms, // include the atoms zone in iteration
+    SkipAtoms  // skip the atoms zone (safer without the exclusive access lock)
+};
+
+// Iterator over all zones in a runtime, optionally skipping the atoms zone
+// (assumed to be the first zone in the vector). Holds an AutoEnterIteration
+// marker for the duration of the iteration.
+class ZonesIter
+{
+    gc::AutoEnterIteration iterMarker;
+    JS::Zone** it;
+    JS::Zone** end;
+
+  public:
+    ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc) {
+        it = rt->gc.zones.begin();
+        end = rt->gc.zones.end();
+
+        if (selector == SkipAtoms) {
+            // The atoms zone is always first in the zones vector.
+            MOZ_ASSERT(atAtomsZone(rt));
+            it++;
+        }
+        // NOTE(review): next() skips zones with usedByExclusiveThread set,
+        // but the constructor does not apply that filter to the first zone --
+        // confirm callers tolerate this.
+    }
+
+    bool atAtomsZone(JSRuntime* rt);
+
+    bool done() const { return it == end; }
+
+    void next() {
+        MOZ_ASSERT(!done());
+        do {
+            it++;
+        } while (!done() && (*it)->usedByExclusiveThread);
+    }
+
+    JS::Zone* get() const {
+        MOZ_ASSERT(!done());
+        return *it;
+    }
+
+    operator JS::Zone*() const { return get(); }
+    JS::Zone* operator->() const { return get(); }
+};
+
+// Iterator over the compartments of a single zone.
+struct CompartmentsInZoneIter
+{
+    explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
+        it = zone->compartments.begin();
+    }
+
+    bool done() const {
+        MOZ_ASSERT(it);
+        // Bounds-check both ends: the compartments vector may move if it is
+        // mutated while a Maybe<>-stored iterator is alive.
+        return it < zone->compartments.begin() ||
+               it >= zone->compartments.end();
+    }
+    void next() {
+        MOZ_ASSERT(!done());
+        it++;
+    }
+
+    JSCompartment* get() const {
+        MOZ_ASSERT(it);
+        return *it;
+    }
+
+    operator JSCompartment*() const { return get(); }
+    JSCompartment* operator->() const { return get(); }
+
+  private:
+    JS::Zone* zone;
+    JSCompartment** it;
+
+    CompartmentsInZoneIter()
+      : zone(nullptr), it(nullptr)
+    {}
+
+    // This is for the benefit of CompartmentsIterT::comp.
+    friend class mozilla::Maybe<CompartmentsInZoneIter>;
+};
+
+// This iterator iterates over all the compartments in a given set of zones. The
+// set of zones is determined by iterating ZoneIterT.
+template<class ZonesIterT>
+class CompartmentsIterT
+{
+    gc::AutoEnterIteration iterMarker;
+    ZonesIterT zone;
+    // Inner per-zone iterator; disengaged between zones and at the end.
+    mozilla::Maybe<CompartmentsInZoneIter> comp;
+
+  public:
+    explicit CompartmentsIterT(JSRuntime* rt)
+      : iterMarker(&rt->gc), zone(rt)
+    {
+        if (zone.done())
+            comp.emplace();
+        else
+            comp.emplace(zone);
+    }
+
+    CompartmentsIterT(JSRuntime* rt, ZoneSelector selector)
+      : iterMarker(&rt->gc), zone(rt, selector)
+    {
+        if (zone.done())
+            comp.emplace();
+        else
+            comp.emplace(zone);
+    }
+
+    bool done() const { return zone.done(); }
+
+    void next() {
+        MOZ_ASSERT(!done());
+        MOZ_ASSERT(!comp.ref().done());
+        comp->next();
+        // When the current zone is exhausted, advance to the next zone and
+        // restart the inner iterator there.
+        if (comp->done()) {
+            comp.reset();
+            zone.next();
+            if (!zone.done())
+                comp.emplace(zone);
+        }
+    }
+
+    JSCompartment* get() const {
+        MOZ_ASSERT(!done());
+        return *comp;
+    }
+
+    operator JSCompartment*() const { return get(); }
+    JSCompartment* operator->() const { return get(); }
+};
+
+typedef CompartmentsIterT<ZonesIter> CompartmentsIter;
+
+/*
+ * Allocation policy that uses Zone::pod_malloc and friends, so that memory
+ * pressure is accounted for on the zone. This is suitable for memory associated
+ * with GC things allocated in the zone.
+ *
+ * Since it doesn't hold a JSContext (those may not live long enough), it can't
+ * report out-of-memory conditions itself; the caller must check for OOM and
+ * take the appropriate action.
+ *
+ * FIXME bug 647103 - replace these *AllocPolicy names.
+ */
+// Allocation policy forwarding to Zone::pod_malloc and friends so memory
+// pressure is accounted against the zone; see the comment block above.
+class ZoneAllocPolicy
+{
+    Zone* const zone;
+
+  public:
+    MOZ_IMPLICIT ZoneAllocPolicy(Zone* zone) : zone(zone) {}
+
+    template <typename T>
+    T* maybe_pod_malloc(size_t numElems) {
+        return zone->maybe_pod_malloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_calloc(size_t numElems) {
+        return zone->maybe_pod_calloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* maybe_pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return zone->maybe_pod_realloc<T>(p, oldSize, newSize);
+    }
+
+    template <typename T>
+    T* pod_malloc(size_t numElems) {
+        return zone->pod_malloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* pod_calloc(size_t numElems) {
+        return zone->pod_calloc<T>(numElems);
+    }
+
+    template <typename T>
+    T* pod_realloc(T* p, size_t oldSize, size_t newSize) {
+        return zone->pod_realloc<T>(p, oldSize, newSize);
+    }
+
+    // Frees go straight to js_free; the zone's counter is not credited back.
+    void free_(void* p) { js_free(p); }
+    void reportAllocOverflow() const {}
+
+    MOZ_MUST_USE bool checkSimulatedOOM() const {
+        return !js::oom::ShouldFailWithOOM();
+    }
+};
+
+} // namespace js
+
+#endif // gc_Zone_h