Improve dead compartment collection.

master
Fedor 2019-07-08 13:07:44 +03:00
parent 95de016256
commit ca72ef1006
11 changed files with 191 additions and 109 deletions

View File

@@ -61,6 +61,15 @@ IdToObjectMap::find(ObjectId id)
     return p->value();
 }
 
+JSObject*
+IdToObjectMap::findPreserveColor(ObjectId id)
+{
+    Table::Ptr p = table_.lookup(id);
+    if (!p)
+        return nullptr;
+    return p->value().unbarrieredGet();
+}
+
 bool
 IdToObjectMap::add(ObjectId id, JSObject* obj)
 {

@@ -757,4 +766,4 @@ CPOWManager*
 mozilla::jsipc::CPOWManagerFor(PJavaScriptChild* aChild)
 {
     return static_cast<JavaScriptChild*>(aChild);
 }
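
The new findPreserveColor() looks an object up without find()'s read barrier, so the lookup itself cannot mark a gray (possibly-dead) object black. A minimal sketch of the intended usage pattern, with a hypothetical helper standing in for the real caller shown in WrapperAnswer.cpp below:

    // Hypothetical caller: the pointer is only needed to key a removal, so the
    // object's mark color must be preserved rather than exposed to the mutator.
    void DropMappedObject(IdToObjectMap& objects, ObjectId id)
    {
        // findPreserveColor() skips the read barrier of find(); the object is
        // never handed out from here, so leaving it gray is safe.
        JSObject* obj = objects.findPreserveColor(id);
        if (obj)
            objects.remove(id);
    }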

View File

@@ -96,6 +96,7 @@ class IdToObjectMap
     bool add(ObjectId id, JSObject* obj);
     JSObject* find(ObjectId id);
+    JSObject* findPreserveColor(ObjectId id);
     void remove(ObjectId id);
     void clear();

@@ -233,4 +234,4 @@ class JavaScriptShared : public CPOWManager
 } // namespace jsipc
 } // namespace mozilla
 
 #endif

View File

@@ -789,10 +789,10 @@ WrapperAnswer::RecvDOMInstanceOf(const ObjectId& objId, const int& prototypeID,
 bool
 WrapperAnswer::RecvDropObject(const ObjectId& objId)
 {
-    JSObject* obj = objects_.find(objId);
+    JSObject* obj = objects_.findPreserveColor(objId);
     if (obj) {
         objectIdMap(objId.hasXrayWaiver()).remove(obj);
         objects_.remove(objId);
     }
     return true;
 }

View File

@@ -652,7 +652,7 @@ ArrayMetaTypeDescr::create(JSContext* cx,
     if (!CreateTraceList(cx, obj))
         return nullptr;
 
-    if (!cx->zone()->typeDescrObjects.put(obj)) {
+    if (!cx->zone()->addTypeDescrObject(cx, obj)) {
         ReportOutOfMemory(cx);
         return nullptr;
     }

@@ -993,8 +993,8 @@ StructMetaTypeDescr::create(JSContext* cx,
     if (!CreateTraceList(cx, descr))
         return nullptr;
 
-    if (!cx->zone()->typeDescrObjects.put(descr) ||
-        !cx->zone()->typeDescrObjects.put(fieldTypeVec))
+    if (!cx->zone()->addTypeDescrObject(cx, descr) ||
+        !cx->zone()->addTypeDescrObject(cx, fieldTypeVec))
     {
         ReportOutOfMemory(cx);
         return nullptr;

@@ -1165,10 +1165,8 @@ DefineSimpleTypeDescr(JSContext* cx,
     if (!CreateTraceList(cx, descr))
         return false;
 
-    if (!cx->zone()->typeDescrObjects.put(descr)) {
-        ReportOutOfMemory(cx);
+    if (!cx->zone()->addTypeDescrObject(cx, descr))
         return false;
-    }
 
     return true;
 }

@@ -3005,4 +3003,4 @@ TypeDescr::finalize(FreeOp* fop, JSObject* obj)
     TypeDescr& descr = obj->as<TypeDescr>();
     if (descr.hasTraceList())
         js_free(const_cast<int32_t*>(descr.traceList()));
 }

View File

@@ -900,7 +900,8 @@ class GCRuntime
     void requestMajorGC(JS::gcreason::Reason reason);
     SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
-    void budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock);
+    void budgetIncrementalGC(JS::gcreason::Reason reason, SliceBudget& budget,
+                             AutoLockForExclusiveAccess& lock);
     void resetIncrementalGC(AbortReason reason, AutoLockForExclusiveAccess& lock);
 
     // Assert if the system state is such that we should never

@@ -915,6 +916,7 @@ class GCRuntime
     void collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
     MOZ_MUST_USE bool gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
                               JS::gcreason::Reason reason);
+    bool shouldRepeatForDeadZone(JS::gcreason::Reason reason);
     void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
                                  AutoLockForExclusiveAccess& lock);

@@ -1348,4 +1350,4 @@ class MOZ_RAII AutoMaybeStartBackgroundAllocation
 } /* namespace js */
 
 #endif

View File

@@ -478,6 +478,7 @@ js::gc::GCRuntime::bufferGrayRoots()
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
         MOZ_ASSERT(zone->gcGrayRoots.empty());
 
+    gcstats::AutoPhase ap(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
     BufferGrayRootsTracer grayBufferer(rt);
     if (JSTraceDataOp op = grayRootTracer.op)

@@ -539,5 +540,4 @@ GCRuntime::resetBufferedGrayRoots() const
                "Do not clear the gray buffers unless we are Failed or becoming Unused");
     for (GCZonesIter zone(rt); !zone.done(); zone.next())
         zone->gcGrayRoots.clearAndFree();
 }
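
Moving the PHASE_BUFFER_GRAY_ROOTS accounting into bufferGrayRoots() itself attributes the time to that phase regardless of the call site. gcstats::AutoPhase is an RAII scope timer; a rough standalone sketch of the same pattern, assuming nothing from the SpiderMonkey stats machinery:

    // Illustrative RAII phase timer: constructing it starts timing a named
    // phase, and the destructor stops the clock when the scope is left.
    #include <chrono>
    #include <cstdio>

    class AutoPhaseTimer {
      public:
        explicit AutoPhaseTimer(const char* name)
          : name_(name), start_(std::chrono::steady_clock::now()) {}
        ~AutoPhaseTimer() {
            auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                          std::chrono::steady_clock::now() - start_).count();
            std::fprintf(stderr, "%s: %lld us\n", name_, static_cast<long long>(us));
        }
      private:
        const char* name_;
        std::chrono::steady_clock::time_point start_;
    };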

View File

@@ -370,6 +370,21 @@ Zone::fixupAfterMovingGC()
     fixupInitialShapeTable();
 }
 
+bool
+Zone::addTypeDescrObject(JSContext* cx, HandleObject obj)
+{
+    // Type descriptor objects are always tenured so we don't need post barriers
+    // on the set.
+    MOZ_ASSERT(!IsInsideNursery(obj));
+
+    if (!typeDescrObjects.put(obj)) {
+        ReportOutOfMemory(cx);
+        return false;
+    }
+
+    return true;
+}
+
 ZoneList::ZoneList()
   : head(nullptr), tail(nullptr)
 {}

@@ -468,4 +483,4 @@ JS_PUBLIC_API(void)
 JS::shadow::RegisterWeakCache(JS::Zone* zone, WeakCache<void*>* cachep)
 {
     zone->registerWeakCache(cachep);
 }

View File

@@ -349,10 +349,17 @@ struct Zone : public JS::shadow::Zone,
     // Keep track of all TypeDescr and related objects in this compartment.
     // This is used by the GC to trace them all first when compacting, since the
     // TypedObject trace hook may access these objects.
-    using TypeDescrObjectSet = js::GCHashSet<js::HeapPtr<JSObject*>,
-                                             js::MovableCellHasher<js::HeapPtr<JSObject*>>,
+    //
+    // There are no barriers here - the set contains only tenured objects so no
+    // post-barrier is required, and these are weak references so no pre-barrier
+    // is required.
+    using TypeDescrObjectSet = js::GCHashSet<JSObject*,
+                                             js::MovableCellHasher<JSObject*>,
                                              js::SystemAllocPolicy>;
     JS::WeakCache<TypeDescrObjectSet> typeDescrObjects;
 
+    bool addTypeDescrObject(JSContext* cx, HandleObject obj);
+
     // Malloc counter to measure memory pressure for GC scheduling. It runs from

@@ -734,4 +741,4 @@ class ZoneAllocPolicy
 } // namespace js
 
 #endif // gc_Zone_h

View File

@@ -1524,20 +1524,11 @@ GCMarker::delayMarkingChildren(const void* thing)
 }
 
 inline void
-ArenaLists::prepareForIncrementalGC(JSRuntime* rt)
+ArenaLists::prepareForIncrementalGC()
 {
-    for (auto i : AllAllocKinds()) {
-        FreeSpan* span = freeLists[i];
-        if (span != &placeholder) {
-            if (!span->isEmpty()) {
-                Arena* arena = span->getArena();
-                arena->allocatedDuringIncremental = true;
-                rt->gc.marker.delayMarkingArena(arena);
-            } else {
-                freeLists[i] = &placeholder;
-            }
-        }
-    }
+    purge();
+    for (auto i : AllAllocKinds())
+        arenaLists[i].moveCursorToEnd();
 }
 
 /* Compacting GC */

@@ -2251,7 +2242,7 @@ GCRuntime::updateTypeDescrObjects(MovingTracer* trc, Zone* zone)
 {
     zone->typeDescrObjects.sweep();
     for (auto r = zone->typeDescrObjects.all(); !r.empty(); r.popFront())
-        UpdateCellPointers(trc, r.front().get());
+        UpdateCellPointers(trc, r.front());
 }
 
 void
@@ -3579,6 +3570,23 @@ RelazifyFunctions(Zone* zone, AllocKind kind)
     }
 }
 
+static bool
+ShouldCollectZone(Zone* zone, JS::gcreason::Reason reason)
+{
+    // Normally we collect all scheduled zones.
+    if (reason != JS::gcreason::COMPARTMENT_REVIVED)
+        return zone->isGCScheduled();
+
+    // If we are repeating a GC because we noticed dead compartments haven't
+    // been collected, then only collect zones containing those compartments.
+    for (CompartmentsInZoneIter comp(zone); !comp.done(); comp.next()) {
+        if (comp->scheduledForDestruction)
+            return true;
+    }
+
+    return false;
+}
+
 bool
 GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAccess& lock)
 {
@@ -3602,7 +3610,7 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
 #endif
 
         /* Set up which zones will be collected. */
-        if (zone->isGCScheduled()) {
+        if (ShouldCollectZone(zone, reason)) {
             if (!zone->isAtomsZone()) {
                 any = true;
                 zone->setGCState(Zone::Mark);

@@ -3621,7 +3629,7 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
     for (CompartmentsIter c(rt, WithAtoms); !c.done(); c.next()) {
         c->marked = false;
         c->scheduledForDestruction = false;
-        c->maybeAlive = false;
+        c->maybeAlive = c->hasBeenEntered() || !c->zone()->isGCScheduled();
         if (shouldPreserveJITCode(c, currentTime, reason, canAllocateMoreCode))
             c->zone()->setPreservingCode(true);
     }

@@ -3640,6 +3648,12 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
      * keepAtoms() will only change on the main thread, which we are currently
      * on. If the value of keepAtoms() changes between GC slices, then we'll
      * cancel the incremental GC. See IsIncrementalGCSafe.
      */
     if (isFull && !rt->keepAtoms()) {
         Zone* atomsZone = rt->atomsCompartment(lock)->zone();

@@ -3655,15 +3669,12 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
         return false;
 
     /*
-     * At the end of each incremental slice, we call prepareForIncrementalGC,
-     * which marks objects in all arenas that we're currently allocating
-     * into. This can cause leaks if unreachable objects are in these
-     * arenas. This purge call ensures that we only mark arenas that have had
-     * allocations after the incremental GC started.
+     * Ensure that after the start of a collection we don't allocate into any
+     * existing arenas, as this can cause unreachable things to be marked.
      */
     if (isIncremental) {
         for (GCZonesIter zone(rt); !zone.done(); zone.next())
-            zone->arenas.purge();
+            zone->arenas.prepareForIncrementalGC();
     }
 
     MemProfiler::MarkTenuredStart(rt);
@@ -3747,13 +3758,11 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
     gcstats::AutoPhase ap2(stats, gcstats::PHASE_MARK_ROOTS);
 
     if (isIncremental) {
-        gcstats::AutoPhase ap3(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
-        bufferGrayRoots();
+        bufferGrayRoots();
+        markCompartments();
     }
 
-    markCompartments();
-
     return true;
 }
@@ -3766,9 +3775,14 @@ GCRuntime::markCompartments()
      * This code ensures that if a compartment is "dead", then it will be
      * collected in this GC. A compartment is considered dead if its maybeAlive
      * flag is false. The maybeAlive flag is set if:
-     * (1) the compartment has incoming cross-compartment edges, or
-     * (2) an object in the compartment was marked during root marking, either
-     *     as a black root or a gray root.
+     * (1) the compartment has been entered (set in beginMarkPhase() above)
+     * (2) the compartment is not being collected (set in beginMarkPhase()
+     *     above)
+     * (3) an object in the compartment was marked during root marking, either
+     *     as a black root or a gray root (set in RootMarking.cpp), or
+     * (4) the compartment has incoming cross-compartment edges from another
+     *     compartment that has maybeAlive set (set by this method).
+     *
      * If the maybeAlive is false, then we set the scheduledForDestruction flag.
      * At the end of the GC, we look for compartments where
      * scheduledForDestruction is true. These are compartments that were somehow

@@ -3786,26 +3800,37 @@ GCRuntime::markCompartments()
      * allocation and read barriers during JS_TransplantObject and the like.
      */
 
-    /* Set the maybeAlive flag based on cross-compartment edges. */
-    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
-        for (JSCompartment::WrapperEnum e(c); !e.empty(); e.popFront()) {
+    /* Propagate the maybeAlive flag via cross-compartment edges. */
+
+    Vector<JSCompartment*, 0, js::SystemAllocPolicy> workList;
+
+    for (CompartmentsIter comp(rt, SkipAtoms); !comp.done(); comp.next()) {
+        if (comp->maybeAlive) {
+            if (!workList.append(comp))
+                return;
+        }
+    }
+
+    while (!workList.empty()) {
+        JSCompartment* comp = workList.popCopy();
+        for (JSCompartment::WrapperEnum e(comp); !e.empty(); e.popFront()) {
             if (e.front().key().is<JSString*>())
                 continue;
             JSCompartment* dest = e.front().mutableKey().compartment();
-            if (dest)
+            if (dest && !dest->maybeAlive) {
                 dest->maybeAlive = true;
+                if (!workList.append(dest))
+                    return;
+            }
         }
     }
 
-    /*
-     * For black roots, code in gc/Marking.cpp will already have set maybeAlive
-     * during MarkRuntime.
-     */
-
-    /* Propogate maybeAlive to scheduleForDestruction. */
-    for (GCCompartmentsIter c(rt); !c.done(); c.next()) {
-        if (!c->maybeAlive && !rt->isAtomsCompartment(c))
-            c->scheduledForDestruction = true;
+    /* Set scheduleForDestruction based on maybeAlive. */
+
+    for (GCCompartmentsIter comp(rt); !comp.done(); comp.next()) {
+        MOZ_ASSERT(!comp->scheduledForDestruction);
+        if (!comp->maybeAlive && !rt->isAtomsCompartment(comp))
+            comp->scheduledForDestruction = true;
     }
 }
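
The rewritten markCompartments() is a work-list flood fill: every compartment whose maybeAlive flag was seeded true in beginMarkPhase() goes on the list, and liveness then propagates along outgoing cross-compartment wrapper edges until a fixed point. A simplified, standalone sketch of the same propagation over a generic graph (illustrative types, not SpiderMonkey's):

    #include <queue>
    #include <unordered_map>
    #include <unordered_set>
    #include <vector>

    // Each node starts out alive or not; liveness spreads along outgoing edges.
    struct Graph {
        std::unordered_map<int, std::vector<int>> edges;  // node -> reachable nodes
    };

    std::unordered_set<int> PropagateLiveness(const Graph& g,
                                              const std::unordered_set<int>& seeds)
    {
        std::unordered_set<int> alive(seeds);
        std::queue<int> workList;
        for (int n : seeds)
            workList.push(n);

        while (!workList.empty()) {
            int n = workList.front();
            workList.pop();
            auto it = g.edges.find(n);
            if (it == g.edges.end())
                continue;
            for (int dest : it->second) {
                // Only enqueue a node the first time it becomes alive, mirroring
                // the "if (dest && !dest->maybeAlive)" check in the patch.
                if (alive.insert(dest).second)
                    workList.push(dest);
            }
        }
        return alive;  // anything not in this set is scheduledForDestruction
    }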
@@ -5306,7 +5331,7 @@ AutoGCSlice::~AutoGCSlice()
     for (ZonesIter zone(runtime, WithAtoms); !zone.done(); zone.next()) {
         if (zone->isGCMarking()) {
            zone->setNeedsIncrementalBarrier(true, Zone::UpdateJit);
-            zone->arenas.prepareForIncrementalGC(runtime);
+            zone->arenas.purge();
         } else {
             zone->setNeedsIncrementalBarrier(false, Zone::UpdateJit);
         }

@@ -5487,9 +5512,9 @@
 gc::IsIncrementalGCUnsafe(JSRuntime* rt)
 {
     MOZ_ASSERT(!rt->mainThread.suppressGC);
 
     if (rt->keepAtoms())
         return gc::AbortReason::KeepAtomsSet;
 
     if (!rt->gc.isIncrementalGCAllowed())
         return gc::AbortReason::IncrementalDisabled;

@@ -5498,9 +5523,17 @@ gc::IsIncrementalGCUnsafe(JSRuntime* rt)
 }
 
 void
-GCRuntime::budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess& lock)
+GCRuntime::budgetIncrementalGC(JS::gcreason::Reason reason, SliceBudget& budget,
+                               AutoLockForExclusiveAccess& lock)
 {
     AbortReason unsafeReason = IsIncrementalGCUnsafe(rt);
+    if (unsafeReason == AbortReason::None) {
+        if (reason == JS::gcreason::COMPARTMENT_REVIVED)
+            unsafeReason = gc::AbortReason::CompartmentRevived;
+        else if (mode != JSGC_MODE_INCREMENTAL)
+            unsafeReason = gc::AbortReason::ModeChange;
+    }
+
     if (unsafeReason != AbortReason::None) {
         resetIncrementalGC(unsafeReason, lock);
         budget.makeUnlimited();
@@ -5508,12 +5541,7 @@ GCRuntime::budgetIncrementalGC(SliceBudget& budget, AutoLockForExclusiveAccess&
         return;
     }
 
-    if (mode != JSGC_MODE_INCREMENTAL) {
-        resetIncrementalGC(AbortReason::ModeChange, lock);
-        budget.makeUnlimited();
-        stats.nonincremental(AbortReason::ModeChange);
-        return;
-    }
-
     if (isTooMuchMalloc()) {
         budget.makeUnlimited();

@@ -5660,6 +5688,10 @@ GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::
     }
 
     State prevState = incrementalState;
 
     if (nonincrementalByAPI) {
         // Reset any in progress incremental GC if this was triggered via the

@@ -5672,7 +5704,7 @@ GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::
         stats.nonincremental(gc::AbortReason::NonIncrementalRequested);
         budget.makeUnlimited();
     } else {
-        budgetIncrementalGC(budget, session.lock);
+        budgetIncrementalGC(reason, budget, session.lock);
     }
 
     /* The GC was reset, so we need a do-over. */
@@ -5764,6 +5796,22 @@ GCRuntime::checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason)
     return true;
 }
 
+bool
+GCRuntime::shouldRepeatForDeadZone(JS::gcreason::Reason reason)
+{
+    MOZ_ASSERT_IF(reason == JS::gcreason::COMPARTMENT_REVIVED, !isIncremental);
+
+    if (!isIncremental || isIncrementalGCInProgress())
+        return false;
+
+    for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
+        if (c->scheduledForDestruction)
+            return true;
+    }
+
+    return false;
+}
+
 void
 GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::Reason reason)
 {
@@ -5782,27 +5830,23 @@ GCRuntime::collect(bool nonincrementalByAPI, SliceBudget budget, JS::gcreason::R
     do {
         poked = false;
         bool wasReset = gcCycle(nonincrementalByAPI, budget, reason);
 
-        /* Need to re-schedule all zones for GC. */
-        if (poked && cleanUpEverything)
+        bool repeatForDeadZone = false;
+        if (poked && cleanUpEverything) {
+            /* Need to re-schedule all zones for GC. */
             JS::PrepareForFullGC(rt->contextFromMainThread());
 
-        /*
-         * This code makes an extra effort to collect compartments that we
-         * thought were dead at the start of the GC. See the large comment in
-         * beginMarkPhase.
-         */
-        bool repeatForDeadZone = false;
-        if (!nonincrementalByAPI && !isIncrementalGCInProgress()) {
-            for (CompartmentsIter c(rt, SkipAtoms); !c.done(); c.next()) {
-                if (c->scheduledForDestruction) {
-                    nonincrementalByAPI = true;
-                    repeatForDeadZone = true;
-                    reason = JS::gcreason::COMPARTMENT_REVIVED;
-                    c->zone()->scheduleGC();
-                }
-            }
-        }
+        } else if (shouldRepeatForDeadZone(reason) && !wasReset) {
+            /*
+             * This code makes an extra effort to collect compartments that we
+             * thought were dead at the start of the GC. See the large comment
+             * in beginMarkPhase.
+             */
+            repeatForDeadZone = true;
+            reason = JS::gcreason::COMPARTMENT_REVIVED;
+        }
 
         /*
         * If we reset an existing GC, we need to start a new one. Also, we

@@ -7070,4 +7114,4 @@ js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell)
     }
 
     return detail::CellIsMarkedGray(tc);
 }
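
Taken together with shouldRepeatForDeadZone() above, the driver loop in GCRuntime::collect() now retries at most one extra cycle, under reason COMPARTMENT_REVIVED, when compartments thought dead survived the previous cycle. A simplified, hypothetical sketch of that control flow (runCycle and anyRevivedCompartment stand in for the real gcCycle() and compartment scan; this omits the other repeat conditions):

    #include <functional>

    enum class Reason { Api, CompartmentRevived };

    void CollectWithRevivedCheck(bool nonincrementalByAPI,
                                 const std::function<bool(bool, Reason)>& runCycle,
                                 const std::function<bool()>& anyRevivedCompartment)
    {
        Reason reason = Reason::Api;
        bool repeat = false;
        do {
            // runCycle returns true if an in-progress incremental GC was reset.
            bool wasReset = runCycle(nonincrementalByAPI, reason);

            repeat = !wasReset && anyRevivedCompartment();
            if (repeat) {
                // The next cycle's budgeting treats this reason as
                // non-incremental (see budgetIncrementalGC above).
                reason = Reason::CompartmentRevived;
            }
        } while (repeat);
    }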

View File

@@ -61,7 +61,8 @@ enum class State {
     D(ModeChange) \
     D(MallocBytesTrigger) \
     D(GCBytesTrigger) \
-    D(ZoneChange)
+    D(ZoneChange) \
+    D(CompartmentRevived)
 
 enum class AbortReason {
 #define MAKE_REASON(name) name,
     GC_ABORT_REASONS(MAKE_REASON)

@@ -353,7 +354,6 @@ struct SortedArenaListSegment
  * be treated as an invariant, however, as the free lists may be cleared,
  * leaving arenas previously used for allocation partially full. Sorting order
  * is restored during sweeping.
- * Arenas following the cursor should not be full.
  */
 class ArenaList {

@@ -453,6 +453,11 @@ class ArenaList {
         check();
         return !*cursorp_;
     }
 
+    void moveCursorToEnd() {
+        while (!isCursorAtEnd())
+            cursorp_ = &(*cursorp_)->next;
+    }
+
     // This can return nullptr.
     Arena* arenaAfterCursor() const {

@@ -739,7 +744,7 @@ class ArenaLists
             freeLists[i] = &placeholder;
     }
 
-    inline void prepareForIncrementalGC(JSRuntime* rt);
+    inline void prepareForIncrementalGC();
 
     /* Check if this arena is in use. */
     bool arenaIsInUse(Arena* arena, AllocKind kind) const {

@@ -1504,4 +1509,4 @@ UninlinedIsInsideNursery(const gc::Cell* cell);
 } /* namespace js */
 
 #endif /* jsgc_h */
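
ArenaList keeps its arenas on a singly linked list with a cursor: roughly, arenas in front of the cursor are full and allocation continues from the cursor onward. The new moveCursorToEnd() walks the cursor past every existing arena, which is what lets prepareForIncrementalGC() guarantee that allocations after the start of a collection land only in fresh arenas. A tiny self-contained sketch of the same cursor idiom (illustrative names, not the real Arena types):

    // Illustrative only: a cursor into a singly linked list, mirroring how
    // ArenaList tracks where new allocations go. "Moving the cursor to the
    // end" leaves every existing node before the cursor, so the allocator
    // will only use brand-new nodes appended after it.
    struct Node { Node* next = nullptr; };

    struct CursorList {
        Node* head = nullptr;
        Node** cursorp = &head;   // address of the link at the insertion point

        bool isCursorAtEnd() const { return !*cursorp; }

        void moveCursorToEnd() {
            while (!isCursorAtEnd())
                cursorp = &(*cursorp)->next;
        }
    };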

View File

@@ -185,17 +185,18 @@ WatchpointMap::markAll(JSTracer* trc)
 {
     for (Map::Enum e(map); !e.empty(); e.popFront()) {
         Map::Entry& entry = e.front();
-        WatchKey key = entry.key();
-        WatchKey prior = key;
-        MOZ_ASSERT(JSID_IS_STRING(prior.id) || JSID_IS_INT(prior.id) || JSID_IS_SYMBOL(prior.id));
+        JSObject* object = entry.key().object;
+        jsid id = entry.key().id;
+        JSObject* priorObject = object;
+        jsid priorId = id;
+        MOZ_ASSERT(JSID_IS_STRING(priorId) || JSID_IS_INT(priorId) || JSID_IS_SYMBOL(priorId));
 
-        TraceEdge(trc, const_cast<PreBarrieredObject*>(&key.object),
-                  "held Watchpoint object");
-        TraceEdge(trc, const_cast<PreBarrieredId*>(&key.id), "WatchKey::id");
+        TraceManuallyBarrieredEdge(trc, &object, "held Watchpoint object");
+        TraceManuallyBarrieredEdge(trc, &id, "WatchKey::id");
         TraceEdge(trc, &entry.value().closure, "Watchpoint::closure");
 
-        if (prior.object != key.object || prior.id != key.id)
-            e.rekeyFront(key);
+        if (priorObject != object || priorId != id)
+            e.rekeyFront(WatchKey(object, id));
     }
 }

@@ -242,4 +243,4 @@ WatchpointMap::trace(WeakMapTracer* trc)
                        JS::GCCellPtr(entry.key().object.get()),
                        JS::GCCellPtr(entry.value().closure.get()));
     }
 }