/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_GCRuntime_h
#define gc_GCRuntime_h

#include "mozilla/Atomics.h"
#include "mozilla/EnumSet.h"
#include "mozilla/Maybe.h"

#include "gc/ArenaList.h"
#include "gc/AtomMarking.h"
#include "gc/GCHelperState.h"
#include "gc/GCMarker.h"
#include "gc/GCParallelTask.h"
#include "gc/Nursery.h"
#include "gc/Scheduling.h"
#include "gc/Statistics.h"
#include "gc/StoreBuffer.h"
#include "js/GCAnnotations.h"
#include "js/UniquePtr.h"
#include "vm/AtomsTable.h"

namespace js {

class AutoLockGC;
class AutoLockGCBgAlloc;
class AutoLockHelperThreadState;
class VerifyPreTracer;

namespace gc {

using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;
using ZoneVector = Vector<JS::Zone*, 4, SystemAllocPolicy>;

class AutoCallGCCallbacks;
class AutoRunParallelTask;
class AutoTraceSession;
class MarkingValidator;
struct MovingTracer;
enum class ShouldCheckThresholds;
class SweepGroupsIter;
class WeakCacheSweepIterator;

enum IncrementalProgress
{
    NotFinished = 0,
    Finished
};

// Interface to a sweep action.
//
// Note that we don't need perfect forwarding for args here because the
// types are not deduced but come ultimately from the type of a function
// pointer passed to SweepFunc.
template <typename... Args>
struct SweepAction
{
    virtual ~SweepAction() {}
    virtual IncrementalProgress run(Args... args) = 0;
    virtual void assertFinished() const = 0;
};
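
// A minimal sketch (not part of this header) of how a sweep action might be
// implemented against this interface. The class name and body below are
// hypothetical; the real actions are constructed by initSweepActions() with
// the template arguments used for the sweepActions member further down:
//
//     struct ExampleSweepAction : SweepAction<GCRuntime*, FreeOp*, SliceBudget&>
//     {
//         IncrementalProgress run(GCRuntime* gc, FreeOp* fop, SliceBudget& budget) override {
//             // Do a bounded amount of work, then report whether more remains.
//             return budget.isOverBudget() ? NotFinished : Finished;
//         }
//         void assertFinished() const override {}
//     };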

class ChunkPool
{
    Chunk* head_;
    size_t count_;

  public:
    ChunkPool() : head_(nullptr), count_(0) {}
    ~ChunkPool() {
        // TODO: We should be able to assert that the chunk pool is empty but
        // this causes XPCShell test failures on Windows 2012. See bug 1379232.
    }

    bool empty() const { return !head_; }
    size_t count() const { return count_; }

    Chunk* head() { MOZ_ASSERT(head_); return head_; }
    Chunk* pop();
    void push(Chunk* chunk);
    Chunk* remove(Chunk* chunk);

#ifdef DEBUG
    bool contains(Chunk* chunk) const;
    bool verify() const;
#endif

    // Pool mutation does not invalidate an Iter unless the mutation
    // is of the Chunk currently being visited by the Iter.
    class Iter {
      public:
        explicit Iter(ChunkPool& pool) : current_(pool.head_) {}
        bool done() const { return !current_; }
        void next();
        Chunk* get() const { return current_; }
        operator Chunk*() const { return get(); }
        Chunk* operator->() const { return get(); }
      private:
        Chunk* current_;
    };
};
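
// A minimal usage sketch: pools are walked with ChunkPool::Iter in the usual
// done()/next() style. Per the comment above, mutating the pool is safe
// during iteration as long as the Chunk currently under the iterator is left
// alone:
//
//     for (ChunkPool::Iter iter(pool); !iter.done(); iter.next()) {
//         Chunk* chunk = iter.get();
//         // ... inspect chunk ...
//     }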

// Performs extra allocation off thread so that when memory is required on the
// main thread it will already be available and waiting.
class BackgroundAllocTask : public GCParallelTaskHelper<BackgroundAllocTask>
{
    // Guarded by the GC lock.
    GCLockData<ChunkPool&> chunkPool_;

    const bool enabled_;

  public:
    BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool);
    bool enabled() const { return enabled_; }

    void run();
};

// Search the provided Chunks for free arenas and decommit them.
class BackgroundDecommitTask : public GCParallelTaskHelper<BackgroundDecommitTask>
{
  public:
    using ChunkVector = mozilla::Vector<Chunk*>;

    explicit BackgroundDecommitTask(JSRuntime* rt) : GCParallelTaskHelper(rt) {}
    void setChunksToScan(ChunkVector& chunks);

    void run();

  private:
    MainThreadOrGCTaskData<ChunkVector> toDecommit;
};

template <typename F>
struct Callback {
    MainThreadOrGCTaskData<F> op;
    MainThreadOrGCTaskData<void*> data;

    Callback()
      : op(nullptr), data(nullptr)
    {}
    Callback(F op, void* data)
      : op(op), data(data)
    {}
};

template <typename F>
using CallbackVector = MainThreadData<Vector<Callback<F>, 4, SystemAllocPolicy>>;
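
// A minimal sketch of the intended call pattern: a Callback stores a function
// pointer together with the embedder-supplied data that must be threaded back
// through every invocation. For the GC callback this looks roughly like
// (hypothetical surrounding code):
//
//     if (gcCallback.op)
//         gcCallback.op(cx, status, gcCallback.data);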

template <typename T, typename Iter0, typename Iter1>
class ChainedIter
{
    Iter0 iter0_;
    Iter1 iter1_;

  public:
    ChainedIter(const Iter0& iter0, const Iter1& iter1)
      : iter0_(iter0), iter1_(iter1)
    {}

    bool done() const { return iter0_.done() && iter1_.done(); }
    void next() {
        MOZ_ASSERT(!done());
        if (!iter0_.done()) {
            iter0_.next();
        } else {
            MOZ_ASSERT(!iter1_.done());
            iter1_.next();
        }
    }
    T get() const {
        MOZ_ASSERT(!done());
        if (!iter0_.done())
            return iter0_.get();
        MOZ_ASSERT(!iter1_.done());
        return iter1_.get();
    }

    operator T() const { return get(); }
    T operator->() const { return get(); }
};
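
// A usage sketch: ChainedIter drains iter0_ completely before yielding from
// iter1_. GCRuntime::allNonEmptyChunks() below relies on this to present the
// available and full chunk pools as one sequence:
//
//     for (NonEmptyChunksIter chunk(allNonEmptyChunks(lock)); !chunk.done(); chunk.next()) {
//         // ... first the available chunks, then the full ones ...
//     }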

typedef HashMap<Value*, const char*, DefaultHasher<Value*>, SystemAllocPolicy> RootedValueMap;

using AllocKinds = mozilla::EnumSet<AllocKind>;

// A singly linked list of zones.
class ZoneList
{
    static Zone* const End;

    Zone* head;
    Zone* tail;

  public:
    ZoneList();
    ~ZoneList();

    bool isEmpty() const;
    Zone* front() const;

    void append(Zone* zone);
    void transferFrom(ZoneList& other);
    Zone* removeFront();
    void clear();

  private:
    explicit ZoneList(Zone* singleZone);
    void check() const;

    ZoneList(const ZoneList& other) = delete;
    ZoneList& operator=(const ZoneList& other) = delete;
};
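
// A usage sketch: a ZoneList is consumed from the front, which is how the
// background sweep path drains backgroundSweepZones (simplified):
//
//     while (!zones.isEmpty()) {
//         Zone* zone = zones.removeFront();
//         // ... sweep everything in the zone ...
//     }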

class GCRuntime
{
  public:
    explicit GCRuntime(JSRuntime* rt);
    MOZ_MUST_USE bool init(uint32_t maxbytes, uint32_t maxNurseryBytes);
    void finishRoots();
    void finish();

    inline bool hasZealMode(ZealMode mode);
    inline void clearZealMode(ZealMode mode);
    inline bool upcomingZealousGC();
    inline bool needZealousGC();
    inline bool hasIncrementalTwoSliceZealMode();

    MOZ_MUST_USE bool addRoot(Value* vp, const char* name);
    void removeRoot(Value* vp);
    void setMarkStackLimit(size_t limit, AutoLockGC& lock);

    MOZ_MUST_USE bool setParameter(JSGCParamKey key, uint32_t value, AutoLockGC& lock);
    void resetParameter(JSGCParamKey key, AutoLockGC& lock);
    uint32_t getParameter(JSGCParamKey key, const AutoLockGC& lock);

    MOZ_MUST_USE bool triggerGC(JS::gcreason::Reason reason);
    void maybeAllocTriggerZoneGC(Zone* zone, const AutoLockGC& lock);
    // The return value indicates whether we were able to do the GC.
    bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason,
                       size_t usedBytes, size_t thresholdBytes);
    void maybeGC(Zone* zone);
    // The return value indicates whether a major GC was performed.
    bool gcIfRequested();
    void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
    void startGC(JSGCInvocationKind gckind, JS::gcreason::Reason reason, int64_t millis = 0);
    void gcSlice(JS::gcreason::Reason reason, int64_t millis = 0);
    void finishGC(JS::gcreason::Reason reason);
    void abortGC();
    void startDebugGC(JSGCInvocationKind gckind, SliceBudget& budget);
    void debugGCSlice(SliceBudget& budget);

    void triggerFullGCForAtoms(JSContext* cx);

    void runDebugGC();
    void notifyRootsRemoved();

    enum TraceOrMarkRuntime {
        TraceRuntime,
        MarkRuntime
    };
    void traceRuntime(JSTracer* trc, AutoTraceSession& session);
    void traceRuntimeForMinorGC(JSTracer* trc, AutoTraceSession& session);

    void purgeRuntimeForMinorGC();

    void shrinkBuffers();
    void onOutOfMallocMemory();
    void onOutOfMallocMemory(const AutoLockGC& lock);

#ifdef JS_GC_ZEAL
    const void* addressOfZealModeBits() { return &zealModeBits; }
    void getZealBits(uint32_t* zealBits, uint32_t* frequency, uint32_t* nextScheduled);
    void setZeal(uint8_t zeal, uint32_t frequency);
    bool parseAndSetZeal(const char* str);
    void setNextScheduled(uint32_t count);
    void verifyPreBarriers();
    void maybeVerifyPreBarriers(bool always);
    bool selectForMarking(JSObject* object);
    void clearSelectedForMarking();
    void setDeterministic(bool enable);
#endif

#ifdef ENABLE_WASM_GC
    // If we run with wasm-gc enabled and there are wasm frames on the stack,
    // then GCs are suppressed and many APIs should not be available.
    // TODO (bug 1456824) This is temporary and should be removed once proper
    // GC support is implemented.
    static bool temporaryAbortIfWasmGc(JSContext* cx);
#else
    static bool temporaryAbortIfWasmGc(JSContext* cx) { return false; }
#endif

    uint64_t nextCellUniqueId() {
        MOZ_ASSERT(nextCellUniqueId_ > 0);
        uint64_t uid = ++nextCellUniqueId_;
        return uid;
    }

#ifdef DEBUG
    bool shutdownCollectedEverything() const {
        return arenasEmptyAtShutdown;
    }
#endif

  public:
    // Internal public interface
    State state() const { return incrementalState; }
    bool isHeapCompacting() const { return state() == State::Compact; }
    bool isForegroundSweeping() const { return state() == State::Sweep; }
    bool isBackgroundSweeping() { return helperState.isBackgroundSweeping(); }
    void waitBackgroundSweepEnd() { helperState.waitBackgroundSweepEnd(); }
    void waitBackgroundSweepOrAllocEnd() {
        helperState.waitBackgroundSweepEnd();
        allocTask.cancelAndWait();
    }

#ifdef DEBUG
    bool onBackgroundThread() { return helperState.onBackgroundThread(); }
#endif // DEBUG

    void lockGC() {
        lock.lock();
    }

    void unlockGC() {
        lock.unlock();
    }

#ifdef DEBUG
    bool currentThreadHasLockedGC() const {
        return lock.ownedByCurrentThread();
    }
#endif // DEBUG

    void setAlwaysPreserveCode() { alwaysPreserveCode = true; }

    bool isIncrementalGCAllowed() const { return incrementalAllowed; }
    void disallowIncrementalGC() { incrementalAllowed = false; }

    bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
    bool isIncrementalGCInProgress() const { return state() != State::NotActive; }

    bool isCompactingGCEnabled() const;

    bool isShrinkingGC() const { return invocationKind == GC_SHRINK; }

    bool initSweepActions();

    void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
    MOZ_MUST_USE bool addBlackRootsTracer(JSTraceDataOp traceOp, void* data);
    void removeBlackRootsTracer(JSTraceDataOp traceOp, void* data);

    int32_t getMallocBytes() const { return mallocCounter.bytes(); }
    size_t maxMallocBytesAllocated() const { return mallocCounter.maxBytes(); }
    void setMaxMallocBytes(size_t value, const AutoLockGC& lock);

    bool updateMallocCounter(size_t nbytes) {
        mallocCounter.update(nbytes);
        TriggerKind trigger = mallocCounter.shouldTriggerGC(tunables);
        if (MOZ_LIKELY(trigger == NoTrigger) || trigger <= mallocCounter.triggered())
            return false;

        if (!triggerGC(JS::gcreason::TOO_MUCH_MALLOC))
            return false;

        // Even though this method may be called off the main thread, it is
        // safe to access mallocCounter here since triggerGC() will return
        // false in that case.
        stats().recordTrigger(mallocCounter.bytes(), mallocCounter.maxBytes());

        mallocCounter.recordTrigger(trigger);
        return true;
    }

    void updateMallocCountersOnGCStart();

    void setGCCallback(JSGCCallback callback, void* data);
    void callGCCallback(JSGCStatus status) const;
    void setObjectsTenuredCallback(JSObjectsTenuredCallback callback, void* data);
    void callObjectsTenuredCallback();
    MOZ_MUST_USE bool addFinalizeCallback(JSFinalizeCallback callback, void* data);
    void removeFinalizeCallback(JSFinalizeCallback func);
    MOZ_MUST_USE bool addWeakPointerZonesCallback(JSWeakPointerZonesCallback callback, void* data);
    void removeWeakPointerZonesCallback(JSWeakPointerZonesCallback callback);
    MOZ_MUST_USE bool addWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback,
                                                        void* data);
    void removeWeakPointerCompartmentCallback(JSWeakPointerCompartmentCallback callback);
    JS::GCSliceCallback setSliceCallback(JS::GCSliceCallback callback);
    JS::GCNurseryCollectionCallback setNurseryCollectionCallback(
        JS::GCNurseryCollectionCallback callback);
    JS::DoCycleCollectionCallback setDoCycleCollectionCallback(JS::DoCycleCollectionCallback callback);
    void callDoCycleCollectionCallback(JSContext* cx);

    void setFullCompartmentChecks(bool enable);

    JS::Zone* getCurrentSweepGroup() { return currentSweepGroup; }

    uint64_t gcNumber() const { return number; }

    uint64_t minorGCCount() const { return minorGCNumber; }
    void incMinorGcNumber() { ++minorGCNumber; ++number; }

    uint64_t majorGCCount() const { return majorGCNumber; }
    void incMajorGcNumber() { ++majorGCNumber; ++number; }

    int64_t defaultSliceBudget() const { return defaultTimeBudget_; }

    bool isIncrementalGc() const { return isIncremental; }
    bool isFullGc() const { return isFull; }
    bool isCompactingGc() const { return isCompacting; }

    bool areGrayBitsValid() const { return grayBitsValid; }
    void setGrayBitsInvalid() { grayBitsValid = false; }

    bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }

    bool fullGCForAtomsRequested() const { return fullGCForAtomsRequested_; }

    double computeHeapGrowthFactor(size_t lastBytes);
    size_t computeTriggerBytes(double growthFactor, size_t lastBytes);

    JSGCMode gcMode() const { return mode; }
    void setGCMode(JSGCMode m) {
        mode = m;
        marker.setGCMode(mode);
    }

    inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
    inline void updateOnArenaFree();

    ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
    ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_.ref(); }
    ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
    const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_.ref(); }
    const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_.ref(); }
    const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_.ref(); }
    typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
    NonEmptyChunksIter allNonEmptyChunks(const AutoLockGC& lock) {
        return NonEmptyChunksIter(ChunkPool::Iter(availableChunks(lock)),
                                  ChunkPool::Iter(fullChunks(lock)));
    }

    Chunk* getOrAllocChunk(AutoLockGCBgAlloc& lock);
    void recycleChunk(Chunk* chunk, const AutoLockGC& lock);

#ifdef JS_GC_ZEAL
    void startVerifyPreBarriers();
    void endVerifyPreBarriers();
    void finishVerifier();
    bool isVerifyPreBarriersEnabled() const { return !!verifyPreData; }
    bool shouldYieldForZeal(ZealMode mode);
#else
    bool isVerifyPreBarriersEnabled() const { return false; }
#endif

    // Free certain LifoAlloc blocks when it is safe to do so.
    void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
    void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);

    // Public here for ReleaseArenaLists and FinalizeTypedArenas.
    void releaseArena(Arena* arena, const AutoLockGC& lock);

    void releaseHeldRelocatedArenas();
    void releaseHeldRelocatedArenasWithoutUnlocking(const AutoLockGC& lock);

    // Allocator
    template <AllowGC allowGC>
    MOZ_MUST_USE bool checkAllocatorState(JSContext* cx, AllocKind kind);
    template <AllowGC allowGC>
    JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
                                  const Class* clasp);
    template <AllowGC allowGC>
    static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
                                         size_t nDynamicSlots);
    template <typename T, AllowGC allowGC>
    static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
    template <AllowGC allowGC>
    JSString* tryNewNurseryString(JSContext* cx, size_t thingSize, AllocKind kind);
    static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);

    void bufferGrayRoots();

    /*
     * Concurrent sweep infrastructure.
     */
    void startTask(GCParallelTask& task, gcstats::PhaseKind phase,
                   AutoLockHelperThreadState& locked);
    void joinTask(GCParallelTask& task, gcstats::PhaseKind phase,
                  AutoLockHelperThreadState& locked);

    void mergeRealms(JS::Realm* source, JS::Realm* target);

  private:
    enum IncrementalResult
    {
        Reset = 0,
        Ok
    };

    // Delete an empty zone after its contents have been merged.
    void deleteEmptyZone(Zone* zone);

    // For ArenaLists::allocateFromArena()
    friend class ArenaLists;
    Chunk* pickChunk(AutoLockGCBgAlloc& lock);
    Arena* allocateArena(Chunk* chunk, Zone* zone, AllocKind kind,
                         ShouldCheckThresholds checkThresholds, const AutoLockGC& lock);

    void arenaAllocatedDuringGC(JS::Zone* zone, Arena* arena);

    // Allocator internals
    MOZ_MUST_USE bool gcIfNeededAtAllocation(JSContext* cx);
    template <typename T>
    static void checkIncrementalZoneState(JSContext* cx, T* t);
    static TenuredCell* refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind);
    static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind);
    static TenuredCell* refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind);

    /*
     * Return the list of chunks that can be released outside the GC lock.
     * Must be called either during the GC or with the GC lock taken.
     */
    friend class BackgroundDecommitTask;
    ChunkPool expireEmptyChunkPool(const AutoLockGC& lock);
    void freeEmptyChunks(const AutoLockGC& lock);
    void prepareToFreeChunk(ChunkInfo& info);

    friend class BackgroundAllocTask;
    bool wantBackgroundAllocation(const AutoLockGC& lock) const;
    void startBackgroundAllocTaskIfIdle();

    void requestMajorGC(JS::gcreason::Reason reason);
    SliceBudget defaultBudget(JS::gcreason::Reason reason, int64_t millis);
    IncrementalResult budgetIncrementalGC(bool nonincrementalByAPI, JS::gcreason::Reason reason,
                                          SliceBudget& budget, AutoTraceSession& session);
    IncrementalResult resetIncrementalGC(AbortReason reason, AutoTraceSession& session);

    // Assert if the system state is such that we should never
    // receive a request to do GC work.
    void checkCanCallAPI();

    // Check if the system state is such that GC has been suppressed
    // or otherwise delayed.
    MOZ_MUST_USE bool checkIfGCAllowedInCurrentState(JS::gcreason::Reason reason);

    gcstats::ZoneGCStats scanZonesBeforeGC();
    void collect(bool nonincrementalByAPI, SliceBudget budget,
                 JS::gcreason::Reason reason) JS_HAZ_GC_CALL;
    MOZ_MUST_USE IncrementalResult gcCycle(bool nonincrementalByAPI, SliceBudget& budget,
                                           JS::gcreason::Reason reason);
    bool shouldRepeatForDeadZone(JS::gcreason::Reason reason);
    void incrementalCollectSlice(SliceBudget& budget, JS::gcreason::Reason reason,
                                 AutoTraceSession& session);

    friend class AutoCallGCCallbacks;
    void maybeCallGCCallback(JSGCStatus status);

    void pushZealSelectedObjects();
    void purgeRuntime();
    MOZ_MUST_USE bool beginMarkPhase(JS::gcreason::Reason reason, AutoTraceSession& session);
    bool prepareZonesForCollection(JS::gcreason::Reason reason, bool* isFullOut,
                                   AutoLockForExclusiveAccess& lock);
    bool shouldPreserveJITCode(JS::Realm* realm, int64_t currentTime,
                               JS::gcreason::Reason reason, bool canAllocateMoreCode);
    void traceRuntimeForMajorGC(JSTracer* trc, AutoTraceSession& session);
    void traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock);
    void traceKeptAtoms(JSTracer* trc);
    void traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
                            AutoTraceSession& session);
    void maybeDoCycleCollection();
    void markCompartments();
    IncrementalProgress drainMarkStack(SliceBudget& sliceBudget, gcstats::PhaseKind phase);
    template <class CompartmentIterT> void markWeakReferences(gcstats::PhaseKind phase);
    void markWeakReferencesInCurrentGroup(gcstats::PhaseKind phase);
    template <class ZoneIterT, class CompartmentIterT>
    void markGrayReferences(gcstats::PhaseKind phase);
    void markBufferedGrayRoots(JS::Zone* zone);
    void markGrayReferencesInCurrentGroup(gcstats::PhaseKind phase);
    void markAllWeakReferences(gcstats::PhaseKind phase);
    void markAllGrayReferences(gcstats::PhaseKind phase);

    void beginSweepPhase(JS::gcreason::Reason reason, AutoTraceSession& session);
    void groupZonesForSweeping(JS::gcreason::Reason reason);
    MOZ_MUST_USE bool findInterZoneEdges();
    void getNextSweepGroup();
    IncrementalProgress endMarkingSweepGroup(FreeOp* fop, SliceBudget& budget);
    IncrementalProgress beginSweepingSweepGroup(FreeOp* fop, SliceBudget& budget);
#ifdef JS_GC_ZEAL
    IncrementalProgress maybeYieldForSweepingZeal(FreeOp* fop, SliceBudget& budget);
#endif
    bool shouldReleaseObservedTypes();
    void sweepDebuggerOnMainThread(FreeOp* fop);
    void sweepJitDataOnMainThread(FreeOp* fop);
    IncrementalProgress endSweepingSweepGroup(FreeOp* fop, SliceBudget& budget);
    IncrementalProgress performSweepActions(SliceBudget& sliceBudget);
    IncrementalProgress sweepTypeInformation(FreeOp* fop, SliceBudget& budget, Zone* zone);
    IncrementalProgress releaseSweptEmptyArenas(FreeOp* fop, SliceBudget& budget, Zone* zone);
    void startSweepingAtomsTable();
    IncrementalProgress sweepAtomsTable(FreeOp* fop, SliceBudget& budget);
    IncrementalProgress sweepWeakCaches(FreeOp* fop, SliceBudget& budget);
    IncrementalProgress finalizeAllocKind(FreeOp* fop, SliceBudget& budget, Zone* zone,
                                          AllocKind kind);
    IncrementalProgress sweepShapeTree(FreeOp* fop, SliceBudget& budget, Zone* zone);
    void endSweepPhase(bool lastGC);
    bool allCCVisibleZonesWereCollected() const;
    void sweepZones(FreeOp* fop, bool destroyingRuntime);
    void decommitAllWithoutUnlocking(const AutoLockGC& lock);
    void startDecommit();
    void queueZonesForBackgroundSweep(ZoneList& zones);
    void sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks);
    void assertBackgroundSweepingFinished();
    bool shouldCompact();
    void beginCompactPhase();
    IncrementalProgress compactPhase(JS::gcreason::Reason reason, SliceBudget& sliceBudget,
                                     AutoTraceSession& session);
    void endCompactPhase();
    void sweepTypesAfterCompacting(Zone* zone);
    void sweepZoneAfterCompacting(Zone* zone);
    MOZ_MUST_USE bool relocateArenas(Zone* zone, JS::gcreason::Reason reason,
                                     Arena*& relocatedListOut, SliceBudget& sliceBudget);
    void updateTypeDescrObjects(MovingTracer* trc, Zone* zone);
    void updateCellPointers(Zone* zone, AllocKinds kinds, size_t bgTaskCount);
    void updateAllCellPointers(MovingTracer* trc, Zone* zone);
    void updateZonePointersToRelocatedCells(Zone* zone);
    void updateRuntimePointersToRelocatedCells(AutoTraceSession& session);
    void protectAndHoldArenas(Arena* arenaList);
    void unprotectHeldRelocatedArenas();
    void releaseRelocatedArenas(Arena* arenaList);
    void releaseRelocatedArenasWithoutUnlocking(Arena* arenaList, const AutoLockGC& lock);
    void finishCollection();

    void computeNonIncrementalMarkingForValidation(AutoTraceSession& session);
    void validateIncrementalMarking();
    void finishMarkingValidation();

#ifdef DEBUG
    void checkForCompartmentMismatches();
#endif

    void callFinalizeCallbacks(FreeOp* fop, JSFinalizeStatus status) const;
    void callWeakPointerZonesCallbacks() const;
    void callWeakPointerCompartmentCallbacks(JS::Compartment* comp) const;

  public:
    JSRuntime* const rt;

    /* Embedders can use this zone and group however they wish. */
    UnprotectedData<JS::Zone*> systemZone;

    // All zones in the runtime, except the atoms zone.
  private:
    MainThreadOrGCTaskData<ZoneVector> zones_;
  public:
    ZoneVector& zones() { return zones_.ref(); }

    // The unique atoms zone.
    WriteOnceData<Zone*> atomsZone;

  private:
    UnprotectedData<gcstats::Statistics> stats_;
  public:
    gcstats::Statistics& stats() { return stats_.ref(); }

    GCMarker marker;

    Vector<JS::GCCellPtr, 0, SystemAllocPolicy> unmarkGrayStack;

    /* Track heap usage for this runtime. */
    HeapUsage usage;

    /* GC scheduling state and parameters. */
    GCSchedulingTunables tunables;
    GCSchedulingState schedulingState;

    // State used for managing atom mark bitmaps in each zone. Protected by
    // the exclusive access lock.
    AtomMarkingRuntime atomMarking;

  private:
    // When chunks are empty, they reside in the emptyChunks pool and are
    // re-used as needed or eventually expired if not re-used. The emptyChunks
    // pool gets refilled from the background allocation task heuristically so
    // that empty chunks should always be available for immediate allocation
    // without syscalls.
    GCLockData<ChunkPool> emptyChunks_;

    // Chunks which have had some, but not all, of their arenas allocated live
    // in the available chunk lists. When all available arenas in a chunk have
    // been allocated, the chunk is removed from the available list and moved
    // to the fullChunks pool. During a GC, if all arenas are free, the chunk
    // is moved back to the emptyChunks pool and scheduled for eventual
    // release.
    GCLockData<ChunkPool> availableChunks_;

    // When all arenas in a chunk are used, it is moved to the fullChunks pool
    // so as to reduce the cost of operations on the available lists.
    GCLockData<ChunkPool> fullChunks_;
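
    // A lifecycle sketch summarizing the comments above (the full-to-available
    // transition on arena free is inferred, not stated above):
    //
    //     emptyChunks_ --arena allocated--> availableChunks_ --last arena--> fullChunks_
    //          ^                                   ^                             |
    //          |                                   +--------arena freed---------+
    //          +-----------all arenas freed during GC, then expired--------------+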

    MainThreadData<RootedValueMap> rootsHash;

    // An incrementing id used to assign unique ids to cells that require one.
    mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;

    /*
     * Number of committed arenas in all GC chunks, including empty chunks.
     */
    mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
    MainThreadData<VerifyPreTracer*> verifyPreData;

  private:
    UnprotectedData<bool> chunkAllocationSinceLastGC;
    MainThreadData<int64_t> lastGCTime;

    /*
     * JSGC_MODE
     * prefs: javascript.options.mem.gc_per_zone and
     *        javascript.options.mem.gc_incremental.
     */
    MainThreadData<JSGCMode> mode;

    mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;

    /* During shutdown, the GC needs to clean up every possible object. */
    MainThreadData<bool> cleanUpEverything;

    // Gray marking must be done after all black marking is complete. However,
    // we do not have write barriers on XPConnect roots. Therefore, XPConnect
    // roots must be accumulated in the first slice of incremental GC. We
    // accumulate these roots in each zone's gcGrayRoots vector and then mark
    // them later, after black marking is complete for each compartment. This
    // accumulation can fail, but in that case we switch to non-incremental GC.
    enum class GrayBufferState {
        Unused,
        Okay,
        Failed
    };
    MainThreadOrGCTaskData<GrayBufferState> grayBufferState;
    bool hasValidGrayRootsBuffer() const { return grayBufferState == GrayBufferState::Okay; }

    // Clear each zone's gray buffers, but do not change the current state.
    void resetBufferedGrayRoots() const;

    // Reset the gray buffering state to Unused.
    void clearBufferedGrayRoots() {
        grayBufferState = GrayBufferState::Unused;
        resetBufferedGrayRoots();
    }

    /*
     * The gray bits can become invalid if UnmarkGray overflows the stack. A
     * full GC will reset this bit, since it fills in all the gray bits.
     */
    UnprotectedData<bool> grayBitsValid;

    mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;

  private:
    /* Perform full GC if rt->keepAtoms() becomes false. */
    MainThreadData<bool> fullGCForAtomsRequested_;

    /* Incremented at the start of every minor GC. */
    MainThreadData<uint64_t> minorGCNumber;

    /* Incremented at the start of every major GC. */
    MainThreadData<uint64_t> majorGCNumber;

    /* The major GC number at which to release observed type information. */
    MainThreadData<uint64_t> jitReleaseNumber;

    /* Incremented on every GC slice. */
    MainThreadData<uint64_t> number;

    /* Whether the currently running GC can finish in multiple slices. */
    MainThreadData<bool> isIncremental;

    /* Whether all zones are being collected in the first GC slice. */
    MainThreadData<bool> isFull;

    /* Whether the heap will be compacted at the end of GC. */
    MainThreadData<bool> isCompacting;

    /* The invocation kind of the current GC, taken from the first slice. */
    MainThreadData<JSGCInvocationKind> invocationKind;

    /* The initial GC reason, taken from the first slice. */
    MainThreadData<JS::gcreason::Reason> initialReason;

    /*
     * The current incremental GC phase. This is also used internally in
     * non-incremental GC.
     */
    MainThreadOrGCTaskData<State> incrementalState;

    /* The incremental state at the start of this slice. */
    MainThreadData<State> initialState;

#ifdef JS_GC_ZEAL
    /* Whether to pay attention to the zeal settings in this incremental slice. */
    MainThreadData<bool> useZeal;
#endif

    /* Indicates that the last incremental slice exhausted the mark stack. */
    MainThreadData<bool> lastMarkSlice;

    /* Whether it's currently safe to yield to the mutator in an incremental GC. */
    MainThreadData<bool> safeToYield;

    /* Whether any sweeping will take place in the separate GC helper thread. */
    MainThreadData<bool> sweepOnBackgroundThread;

    /* Whether observed type information is being released in the current GC. */
    MainThreadData<bool> releaseObservedTypes;

    /* Singly linked list of zones to be swept in the background. */
    MainThreadOrGCTaskData<ZoneList> backgroundSweepZones;

    /*
     * Free LIFO blocks are transferred to this allocator before being freed on
     * the background GC thread after sweeping.
     */
    MainThreadOrGCTaskData<LifoAlloc> blocksToFreeAfterSweeping;

  private:
    /* Index of current sweep group (for stats). */
    MainThreadData<unsigned> sweepGroupIndex;

    /*
     * Incremental sweep state.
     */

    MainThreadData<JS::Zone*> sweepGroups;
    MainThreadOrGCTaskData<JS::Zone*> currentSweepGroup;
    MainThreadData<UniquePtr<SweepAction<GCRuntime*, FreeOp*, SliceBudget&>>> sweepActions;
    MainThreadOrGCTaskData<JS::Zone*> sweepZone;
    MainThreadData<mozilla::Maybe<AtomSet::Enum>> maybeAtomsToSweep;
    MainThreadOrGCTaskData<JS::detail::WeakCacheBase*> sweepCache;
    MainThreadData<bool> abortSweepAfterCurrentGroup;

    friend class SweepGroupsIter;
    friend class WeakCacheSweepIterator;

    /*
     * Incremental compacting state.
     */
    MainThreadData<bool> startedCompacting;
    MainThreadData<ZoneList> zonesToMaybeCompact;
    MainThreadData<Arena*> relocatedArenasToRelease;

#ifdef JS_GC_ZEAL
    MainThreadData<MarkingValidator*> markingValidator;
#endif

    /*
     * Default budget for incremental GC slice. See js/SliceBudget.h.
     *
     * JSGC_SLICE_TIME_BUDGET
     * pref: javascript.options.mem.gc_incremental_slice_ms
     */
    MainThreadData<int64_t> defaultTimeBudget_;

    /*
     * We disable incremental GC if we encounter a Class with a trace hook
     * that does not implement write barriers.
     */
    MainThreadData<bool> incrementalAllowed;

    /*
     * Whether compacting GC is enabled globally.
     *
     * JSGC_COMPACTING_ENABLED
     * pref: javascript.options.mem.gc_compacting
     */
    MainThreadData<bool> compactingEnabled;

    MainThreadData<bool> rootsRemoved;

    /*
     * These options control the zealousness of the GC. At every allocation,
     * nextScheduled is decremented. When it reaches zero we do a full GC.
     *
     * At this point, if zeal_ is one of the types that trigger periodic
     * collection, then nextScheduled is reset to the value of zealFrequency.
     * Otherwise, no additional GCs take place.
     *
     * You can control these values in several ways:
     *   - Set the JS_GC_ZEAL environment variable
     *   - Call gczeal() or schedulegc() from inside shell-executed JS code
     *     (see the help for details)
     *
     * If gcZeal_ == 1 then we perform GCs in select places (during MaybeGC and
     * whenever we are notified that GC roots have been removed). This option
     * is mainly useful to embedders.
     *
     * We use zeal_ == 4 to enable write barrier verification. See the comment
     * in gc/Verifier.cpp for more information about this.
     *
     * zeal_ values from 8 to 10 periodically run different types of
     * incremental GC.
     *
     * zeal_ value 14 performs periodic shrinking collections.
     */
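    // For example (following the comment above): running with
    // JS_GC_ZEAL=14,100 selects zeal mode 14 with a frequency of 100, i.e. a
    // shrinking collection roughly every 100 allocations; gczeal(14, 100) in
    // the shell should have the same effect.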
#ifdef JS_GC_ZEAL
    static_assert(size_t(ZealMode::Count) <= 32,
                  "Too many zeal modes to store in a uint32_t");
    MainThreadData<uint32_t> zealModeBits;
    MainThreadData<int> zealFrequency;
    MainThreadData<int> nextScheduled;
    MainThreadData<bool> deterministicOnly;
    MainThreadData<int> incrementalLimit;

    MainThreadData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
#endif

    MainThreadData<bool> fullCompartmentChecks;

    MainThreadData<uint32_t> gcCallbackDepth;

    Callback<JSGCCallback> gcCallback;
    Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
    Callback<JSObjectsTenuredCallback> tenuredCallback;
    CallbackVector<JSFinalizeCallback> finalizeCallbacks;
    CallbackVector<JSWeakPointerZonesCallback> updateWeakPointerZonesCallbacks;
    CallbackVector<JSWeakPointerCompartmentCallback> updateWeakPointerCompartmentCallbacks;

    MemoryCounter mallocCounter;

    /*
     * The trace operations to trace embedding-specific GC roots. One is for
     * tracing through black roots and the other is for tracing through gray
     * roots. The black/gray distinction is only relevant to the cycle
     * collector.
     */
    CallbackVector<JSTraceDataOp> blackRootTracers;
    Callback<JSTraceDataOp> grayRootTracer;

    /* Always preserve JIT code during GCs, for testing. */
    MainThreadData<bool> alwaysPreserveCode;

#ifdef DEBUG
    MainThreadData<bool> arenasEmptyAtShutdown;
#endif

    /* Synchronize GC heap access among GC helper threads and the main thread. */
    friend class js::AutoLockGC;
    friend class js::AutoLockGCBgAlloc;
    js::Mutex lock;

    BackgroundAllocTask allocTask;
    BackgroundDecommitTask decommitTask;

    js::GCHelperState helperState;

    /*
     * During incremental sweeping, this field temporarily holds the arenas of
     * the current AllocKind being swept in order of increasing free space.
     */
    MainThreadData<SortedArenaList> incrementalSweepList;

  private:
    MainThreadData<Nursery> nursery_;
    MainThreadData<gc::StoreBuffer> storeBuffer_;
  public:
    Nursery& nursery() { return nursery_.ref(); }
    gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }

    // Free LIFO blocks are transferred to this allocator before being freed
    // after minor GC.
    MainThreadData<LifoAlloc> blocksToFreeAfterMinorGC;

    const void* addressOfNurseryPosition() {
        return nursery_.refNoCheck().addressOfPosition();
    }
    const void* addressOfNurseryCurrentEnd() {
        return nursery_.refNoCheck().addressOfCurrentEnd();
    }
    const void* addressOfStringNurseryCurrentEnd() {
        return nursery_.refNoCheck().addressOfCurrentStringEnd();
    }

    void minorGC(JS::gcreason::Reason reason,
                 gcstats::PhaseKind phase = gcstats::PhaseKind::MINOR_GC) JS_HAZ_GC_CALL;
    void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
        minorGC(reason, gcstats::PhaseKind::EVICT_NURSERY);
    }
    void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);

    friend class js::GCHelperState;
    friend class MarkingValidator;
    friend class AutoTraceSession;
    friend class AutoEnterIteration;
};

/* Prevent compartments and zones from being collected during iteration. */
class MOZ_RAII AutoEnterIteration {
    GCRuntime* gc;

  public:
    explicit AutoEnterIteration(GCRuntime* gc_) : gc(gc_) {
        ++gc->numActiveZoneIters;
    }

    ~AutoEnterIteration() {
        MOZ_ASSERT(gc->numActiveZoneIters);
        --gc->numActiveZoneIters;
    }
};
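
// A usage sketch (hypothetical surrounding code): instantiating the guard
// keeps numActiveZoneIters non-zero for the scope of a zone walk, so zones
// cannot be swept away mid-iteration:
//
//     {
//         AutoEnterIteration iterMarker(&rt->gc);
//         // ... iterate over zones/compartments ...
//     }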

#ifdef JS_GC_ZEAL

inline bool
GCRuntime::hasZealMode(ZealMode mode)
{
    static_assert(size_t(ZealMode::Limit) < sizeof(zealModeBits) * 8,
                  "Zeal modes must fit in zealModeBits");
    return zealModeBits & (1 << uint32_t(mode));
}

inline void
GCRuntime::clearZealMode(ZealMode mode)
{
    zealModeBits &= ~(1 << uint32_t(mode));
    MOZ_ASSERT(!hasZealMode(mode));
}

inline bool
GCRuntime::upcomingZealousGC() {
    return nextScheduled == 1;
}

inline bool
GCRuntime::needZealousGC() {
    if (nextScheduled > 0 && --nextScheduled == 0) {
        if (hasZealMode(ZealMode::Alloc) ||
            hasZealMode(ZealMode::GenerationalGC) ||
            hasZealMode(ZealMode::IncrementalMultipleSlices) ||
            hasZealMode(ZealMode::Compact) ||
            hasIncrementalTwoSliceZealMode())
        {
            nextScheduled = zealFrequency;
        }
        return true;
    }
    return false;
}

inline bool
GCRuntime::hasIncrementalTwoSliceZealMode() {
    return hasZealMode(ZealMode::YieldBeforeMarking) ||
           hasZealMode(ZealMode::YieldBeforeSweeping) ||
           hasZealMode(ZealMode::YieldBeforeSweepingAtoms) ||
           hasZealMode(ZealMode::YieldBeforeSweepingCaches) ||
           hasZealMode(ZealMode::YieldBeforeSweepingTypes) ||
           hasZealMode(ZealMode::YieldBeforeSweepingObjects) ||
           hasZealMode(ZealMode::YieldBeforeSweepingNonObjects) ||
           hasZealMode(ZealMode::YieldBeforeSweepingShapeTrees);
}

#else
inline bool GCRuntime::hasZealMode(ZealMode mode) { return false; }
inline void GCRuntime::clearZealMode(ZealMode mode) { }
inline bool GCRuntime::upcomingZealousGC() { return false; }
inline bool GCRuntime::needZealousGC() { return false; }
inline bool GCRuntime::hasIncrementalTwoSliceZealMode() { return false; }
#endif

} /* namespace gc */
} /* namespace js */

#endif