/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_Barrier_h
#define gc_Barrier_h

#include "NamespaceImports.h"

#include "gc/Cell.h"
#include "gc/StoreBuffer.h"
#include "js/HeapAPI.h"
#include "js/Id.h"
#include "js/RootingAPI.h"
#include "js/Value.h"

/*
 * A write barrier is a mechanism used by incremental or generational GCs to
 * ensure that every value that needs to be marked is marked. In general, the
 * write barrier should be invoked whenever a write can cause the set of things
 * traced through by the GC to change. This includes:
 *   - writes to object properties
 *   - writes to array slots
 *   - writes to fields like JSObject::shape_ that we trace through
 *   - writes to fields in private data
 *   - writes to non-markable fields like JSObject::private that point to
 *     markable data
 * The last category is the trickiest. Even though the private pointer does not
 * point to a GC thing, changing the private pointer may change the set of
 * objects that are traced by the GC. Therefore it needs a write barrier.
 *
 * Every barriered write should have the following form:
 *   <pre-barrier>
 *   obj->field = value; // do the actual write
 *   <post-barrier>
 * The pre-barrier is used for incremental GC and the post-barrier is for
 * generational GC.
 *
 *                              PRE-BARRIER
 *
 * To understand the pre-barrier, let's consider how incremental GC works. The
 * GC itself is divided into "slices". Between each slice, JS code is allowed to
 * run. Each slice should be short so that the user doesn't notice the
 * interruptions. In our GC, the structure of the slices is as follows:
 *
 * 1. ... JS work, which leads to a request to do GC ...
 * 2. [first GC slice, which performs all root marking and possibly more marking]
 * 3. ... more JS work is allowed to run ...
 * 4. [GC mark slice, which runs entirely in drainMarkStack]
 * 5. ... more JS work ...
 * 6. [GC mark slice, which runs entirely in drainMarkStack]
 * 7. ... more JS work ...
 * 8. [GC marking finishes; sweeping done non-incrementally; GC is done]
 * 9. ... JS continues uninterrupted now that GC is finished ...
 *
 * Of course, there may be a different number of slices depending on how much
 * marking is to be done.
 *
 * The danger inherent in this scheme is that the JS code in steps 3, 5, and 7
 * might change the heap in a way that causes the GC to collect an object that
 * is actually reachable. The write barrier prevents this from happening. We use
 * a variant of incremental GC called "snapshot at the beginning." This approach
 * guarantees the invariant that if an object is reachable in step 2, then we
 * will mark it eventually. The name comes from the idea that we take a
 * theoretical "snapshot" of all reachable objects in step 2; all objects in
 * that snapshot should eventually be marked. (Note that the write barrier
 * verifier code takes an actual snapshot.)
 *
 * The basic correctness invariant of a snapshot-at-the-beginning collector is
 * that any object reachable at the end of the GC (step 9) must either:
 *   (1) have been reachable at the beginning (step 2) and thus in the snapshot
 *   (2) or must have been newly allocated, in steps 3, 5, or 7.
 * To deal with case (2), any objects allocated during an incremental GC are
 * automatically marked black.
 *
 * This strategy is actually somewhat conservative: if an object becomes
 * unreachable between steps 2 and 8, it would be safe to collect it. We won't,
 * mainly for simplicity. (Also, note that the snapshot is entirely
 * theoretical. We don't actually do anything special in step 2 that we wouldn't
 * do in a non-incremental GC.)
 *
 * It's the pre-barrier's job to maintain the snapshot invariant. Consider the
 * write "obj->field = value". Let the prior value of obj->field be
 * value0. Since it's possible that value0 may have been what obj->field
 * contained in step 2, when the snapshot was taken, the barrier marks
 * value0. Note that it only does this if we're in the middle of an incremental
 * GC. Since this is rare, the cost of the write barrier is usually just an
 * extra branch.
 *
 * In practice, we implement the pre-barrier differently based on the type of
 * value0. E.g., see JSObject::writeBarrierPre, which is used if obj->field is
 * a JSObject*. It takes value0 as a parameter.
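 *
 * For illustration, a manually pre-barriered write of a JSObject* field might
 * look like this (a minimal sketch; setTarget and target_ are hypothetical,
 * not engine code):
 *
 *   void setTarget(JSObject* newTarget) {
 *       JSObject::writeBarrierPre(target_); // mark the prior value, value0
 *       target_ = newTarget;                // do the actual write
 *   }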
 *
 *                              READ-BARRIER
 *
 * Incremental GC requires that weak pointers have read barriers. The problem
 * happens when, during an incremental GC, some code reads a weak pointer and
 * writes it somewhere on the heap that has been marked black in a previous
 * slice. Since the weak pointer will not otherwise be marked and will be swept
 * and finalized in the last slice, this will leave the pointer just written
 * dangling after the GC. To solve this, we immediately mark black all weak
 * pointers that get read between slices so that it is safe to store them in an
 * already marked part of the heap, e.g. in Rooted.
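 *
 * For example (a sketch in terms of the ReadBarriered wrapper defined below;
 * the field name is hypothetical):
 *
 *   ReadBarriered<JSObject*> cachedObject_; // a weak edge
 *   JSObject* obj = cachedObject_.get();    // the read barrier marks the
 *                                           // target black during an
 *                                           // incremental GC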
 *
 *                              POST-BARRIER
 *
 * For generational GC, we want to be able to quickly collect the nursery in a
 * minor collection. Part of the way this is achieved is to only mark the
 * nursery itself; tenured things, which may form the majority of the heap, are
 * not traced through or marked. This leads to the problem of what to do about
 * tenured objects that have pointers into the nursery: if such things are not
 * marked, they may be discarded while there are still live objects which
 * reference them. The solution is to maintain information about these pointers,
 * and mark their targets when we start a minor collection.
 *
 * The pointers can be thought of as edges in the object graph, and the set of
 * edges from the tenured generation into the nursery is known as the
 * remembered set. Post barriers are used to track this remembered set.
 *
 * Whenever a slot which could contain such a pointer is written, we use a write
 * barrier to check if the edge created is in the remembered set, and if so we
 * insert it into the store buffer, which is the collector's representation of
 * the remembered set. This means that when we come to do a minor collection we
 * can examine the contents of the store buffer and mark any edge targets that
 * are in the nursery.
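 *
 * In rough pseudo-code (a sketch of the logic, not the exact engine code),
 * the post-barrier for a pointer write is:
 *
 *   obj->field = value;
 *   if (IsInsideNursery(value) && !IsInsideNursery(obj))
 *       storeBuffer.put(&obj->field); // remember the tenured -> nursery edge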
 *
 *                          IMPLEMENTATION DETAILS
 *
 * Since it would be awkward to change every write to memory into a function
 * call, this file contains a bunch of C++ classes and templates that use
 * operator overloading to take care of barriers automatically. In many cases,
 * all that's necessary to make some field be barriered is to replace
 *     Type* field;
 * with
 *     GCPtr<Type> field;
 *
 * One additional note: not all object writes need to be pre-barriered. Writes
 * to newly allocated objects do not need a pre-barrier. In these cases, we use
 * the "obj->field.init(value)" method instead of "obj->field = value". We use
 * the init naming idiom in many places to signify that a field is being
 * assigned for the first time.
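 *
 * For example (a sketch; the field is hypothetical):
 *
 *   obj->group_.init(group);  // no pre-barrier: obj was just allocated
 *   obj->group_ = newGroup;   // subsequent writes are fully barriered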
 *
 * This file implements the following classes, illustrated here:
 *
 * BarrieredBase             base class of all barriers
 *  |  |
 *  | WriteBarrieredBase     base class which provides common write operations
 *  |  |  |  |  |
 *  |  |  |  | PreBarriered  provides pre-barriers only
 *  |  |  |  |
 *  |  |  | GCPtr            provides pre- and post-barriers
 *  |  |  |
 *  |  | HeapPtr             provides pre- and post-barriers; is relocatable
 *  |  |                     and deletable for use inside C++ managed memory
 *  |  |
 *  | HeapSlot               similar to GCPtr, but tailored to slots storage
 *  |
 * ReadBarrieredBase         base class which provides common read operations
 *  |
 * ReadBarriered             provides read barriers only
 *
 *
 * The barrier logic itself is implemented on T::writeBarrier.*,
 * via:
 *
 * WriteBarrieredBase<T>::pre
 *  -> InternalBarrierMethods<T*>::preBarrier
 *      -> T::writeBarrierPre
 *  -> InternalBarrierMethods<Value>::preBarrier
 *  -> InternalBarrierMethods<jsid>::preBarrier
 *      -> InternalBarrierMethods<T*>::preBarrier
 *          -> T::writeBarrierPre
 *
 * GCPtr<T>::post and HeapPtr<T>::post
 *  -> InternalBarrierMethods<T*>::postBarrier
 *      -> T::writeBarrierPost
 *  -> InternalBarrierMethods<Value>::postBarrier
 *      -> StoreBuffer::put
 *
 * These classes are designed to be used by the internals of the JS engine.
 * Barriers designed to be used externally are provided in js/RootingAPI.h.
 * These external barriers call into the same post-barrier implementations at
 * InternalBarrierMethods<T>::post via an indirect call to Heap(.+)Barrier.
 *
 * These classes are designed to wrap GC thing pointers or values that act like
 * them (i.e. JS::Value and jsid). It is possible to use them for other types
 * by supplying the necessary barrier implementations, but this is not usually
 * necessary and should be done with caution.
 */

class JSFlatString;
class JSLinearString;

namespace js {

class AccessorShape;
class ArrayObject;
class ArgumentsObject;
class ArrayBufferObjectMaybeShared;
class ArrayBufferObject;
class ArrayBufferViewObject;
class SharedArrayBufferObject;
class BaseShape;
class DebugEnvironmentProxy;
class GlobalObject;
class LazyScript;
class ModuleObject;
class ModuleEnvironmentObject;
class ModuleNamespaceObject;
class NativeObject;
class PlainObject;
class PropertyName;
class SavedFrame;
class EnvironmentObject;
class ScriptSourceObject;
class Shape;
class UnownedBaseShape;
class ObjectGroup;

namespace jit {
class JitCode;
} // namespace jit

#ifdef DEBUG

// Barriers can't be triggered during backend Ion compilation, which may run on
// a helper thread.
bool
CurrentThreadIsIonCompiling();

bool
CurrentThreadIsIonCompilingSafeForMinorGC();

bool
CurrentThreadIsGCSweeping();

bool
IsMarkedBlack(JSObject* obj);

bool
CurrentThreadIsTouchingGrayThings();

#endif

struct MOZ_RAII AutoTouchingGrayThings
{
#ifdef DEBUG
    AutoTouchingGrayThings();
    ~AutoTouchingGrayThings();
#else
    AutoTouchingGrayThings() {}
#endif
};
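
// A hedged usage sketch (the function and its callee are hypothetical): in
// debug builds, instantiating AutoTouchingGrayThings for a scope tells the
// CheckTargetIsNotGray assertion below that gray pointers may legitimately
// be handled there:
//
//   void traceGrayEdge(JSObject* obj) {
//       AutoTouchingGrayThings atgt;
//       processPossiblyGrayObject(obj); // would otherwise assert in DEBUG
//   }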

template <typename T>
struct InternalBarrierMethods {};

template <typename T>
struct InternalBarrierMethods<T*>
{
    static bool isMarkable(T* v) { return v != nullptr; }

    static void preBarrier(T* v) { T::writeBarrierPre(v); }

    static void postBarrier(T** vp, T* prev, T* next) { T::writeBarrierPost(vp, prev, next); }

    static void readBarrier(T* v) { T::readBarrier(v); }

#ifdef DEBUG
    static bool thingIsNotGray(T* v) { return T::thingIsNotGray(v); }
#endif
};

template <typename S> struct PreBarrierFunctor : public VoidDefaultAdaptor<S> {
    template <typename T> void operator()(T* t);
};

template <typename S> struct ReadBarrierFunctor : public VoidDefaultAdaptor<S> {
    template <typename T> void operator()(T* t);
};

template <>
struct InternalBarrierMethods<Value>
{
    static bool isMarkable(const Value& v) { return v.isGCThing(); }

    static void preBarrier(const Value& v) {
        DispatchTyped(PreBarrierFunctor<Value>(), v);
    }

    static MOZ_ALWAYS_INLINE void postBarrier(Value* vp, const Value& prev, const Value& next) {
        MOZ_ASSERT(!CurrentThreadIsIonCompiling());
        MOZ_ASSERT(vp);

        // If the target needs an entry, add it.
        js::gc::StoreBuffer* sb;
        if ((next.isObject() || next.isString()) && (sb = next.toGCThing()->storeBuffer())) {
            // If we know that the prev has already inserted an entry, we can
            // skip doing the lookup to add the new entry. Note that we cannot
            // safely assert the presence of the entry because it may have been
            // added via a different store buffer.
            if ((prev.isObject() || prev.isString()) && prev.toGCThing()->storeBuffer())
                return;
            sb->putValue(vp);
            return;
        }
        // Remove the prev entry if the new value does not need it.
        if ((prev.isObject() || prev.isString()) && (sb = prev.toGCThing()->storeBuffer()))
            sb->unputValue(vp);
    }

    static void readBarrier(const Value& v) {
        DispatchTyped(ReadBarrierFunctor<Value>(), v);
    }

#ifdef DEBUG
    static bool thingIsNotGray(const Value& v) { return JS::ValueIsNotGray(v); }
#endif
};

template <>
struct InternalBarrierMethods<jsid>
{
    static bool isMarkable(jsid id) { return JSID_IS_GCTHING(id); }
    static void preBarrier(jsid id) { DispatchTyped(PreBarrierFunctor<jsid>(), id); }
    static void postBarrier(jsid* idp, jsid prev, jsid next) {}
#ifdef DEBUG
    static bool thingIsNotGray(jsid id) { return JS::IdIsNotGray(id); }
#endif
};

template <typename T>
static inline void
CheckTargetIsNotGray(const T& v)
{
    MOZ_ASSERT(InternalBarrierMethods<T>::thingIsNotGray(v) ||
               CurrentThreadIsTouchingGrayThings());
}

// Base class of all barrier types.
//
// This is marked non-memmovable since post barriers added by derived classes
// can add pointers to class instances to the store buffer.
template <typename T>
class MOZ_NON_MEMMOVABLE BarrieredBase
{
  protected:
    // BarrieredBase is not directly instantiable.
    explicit BarrieredBase(const T& v) : value(v) {}

    // BarrieredBase subclasses cannot be copy constructed by default.
    BarrieredBase(const BarrieredBase<T>& other) = default;

    // Storage for all barrier classes. |value| must be a GC thing reference
    // type: either a direct pointer to a GC thing or a supported tagged
    // pointer that can reference GC things, such as JS::Value or jsid. Nested
    // barrier types are NOT supported. See assertTypeConstraints.
    T value;

  public:
    // Note: this is public because C++ cannot friend to a specific template
    // instantiation. Friending to the generic template leads to a number of
    // unintended consequences, including template resolution ambiguity and a
    // circular dependency with Tracing.h.
    T* unsafeUnbarrieredForTracing() { return &value; }
};

// Base class for barriered pointer types that intercept only writes.
template <class T>
class WriteBarrieredBase : public BarrieredBase<T>,
                           public WrappedPtrOperations<T, WriteBarrieredBase<T>>
{
  protected:
    using BarrieredBase<T>::value;

    // WriteBarrieredBase is not directly instantiable.
    explicit WriteBarrieredBase(const T& v) : BarrieredBase<T>(v) {}

  public:
    using ElementType = T;

    DECLARE_POINTER_CONSTREF_OPS(T);

    // Use this if the automatic coercion to T isn't working.
    const T& get() const { return this->value; }

    // Use this if you want to change the value without invoking barriers.
    // Obviously this is dangerous unless you know the barrier is not needed.
    void unsafeSet(const T& v) { this->value = v; }

    // For users who need to manually barrier the raw types.
    static void writeBarrierPre(const T& v) { InternalBarrierMethods<T>::preBarrier(v); }

  protected:
    void pre() { InternalBarrierMethods<T>::preBarrier(this->value); }
    MOZ_ALWAYS_INLINE void post(const T& prev, const T& next) {
        InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
    }
};

/*
 * PreBarriered only automatically handles pre-barriers. Post-barriers must be
 * manually implemented when using this class. GCPtr and HeapPtr should be used
 * in all cases that do not require explicit low-level control of moving
 * behavior, e.g. for HashMap keys.
 */
template <class T>
class PreBarriered : public WriteBarrieredBase<T>
{
  public:
    PreBarriered() : WriteBarrieredBase<T>(JS::SafelyInitialized<T>()) {}
    /*
     * Allow implicit construction for use in generic contexts, such as
     * DebuggerWeakMap::markKeys.
     */
    MOZ_IMPLICIT PreBarriered(const T& v) : WriteBarrieredBase<T>(v) {}
    explicit PreBarriered(const PreBarriered<T>& v) : WriteBarrieredBase<T>(v.value) {}
    ~PreBarriered() { this->pre(); }

    void init(const T& v) {
        this->value = v;
    }

    /* Use to set the pointer to nullptr. */
    void clear() {
        this->pre();
        this->value = nullptr;
    }

    DECLARE_POINTER_ASSIGN_OPS(PreBarriered, T);

  private:
    void set(const T& v) {
        CheckTargetIsNotGray(v);
        this->pre();
        this->value = v;
    }
};
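
/*
 * A hedged usage sketch (the table is hypothetical): a map whose entries the
 * owner sweeps and post-barriers manually only needs automatic pre-barriers
 * on its keys, so PreBarriered is a natural key type:
 *
 *   HashMap<PreBarriered<JSObject*>, uint32_t> cache_;
 */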

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine.
 *
 * It must only be stored in memory that has GC lifetime. GCPtr must not be
 * used in contexts where it may be implicitly moved or deleted, e.g. most
 * containers.
 *
 * The post-barriers implemented by this class are faster than those
 * implemented by js::HeapPtr<T> or JS::Heap<T> at the cost of not
 * automatically handling deletion or movement.
 */
template <class T>
class GCPtr : public WriteBarrieredBase<T>
{
  public:
    GCPtr() : WriteBarrieredBase<T>(JS::SafelyInitialized<T>()) {}
    explicit GCPtr(const T& v) : WriteBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), v);
    }
    explicit GCPtr(const GCPtr<T>& v) : WriteBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), v);
    }
#ifdef DEBUG
    ~GCPtr() {
        // No barriers are necessary as this only happens when we are sweeping
        // or after GCManagedDeletePolicy has triggered the barriers for us and
        // cleared the pointer.
        //
        // If you get a crash here, you may need to make the containing object
        // use GCManagedDeletePolicy and use JS::DeletePolicy to destroy it.
        //
        // Note that when sweeping the wrapped pointer may already have been
        // freed by this point.
        MOZ_ASSERT(CurrentThreadIsGCSweeping() || this->value == JS::SafelyInitialized<T>());
        Poison(this, JS_FREED_HEAP_PTR_PATTERN, sizeof(*this), MemCheckKind::MakeNoAccess);
    }
#endif

    void init(const T& v) {
        CheckTargetIsNotGray(v);
        this->value = v;
        this->post(JS::SafelyInitialized<T>(), v);
    }

    DECLARE_POINTER_ASSIGN_OPS(GCPtr, T);

  private:
    void set(const T& v) {
        CheckTargetIsNotGray(v);
        this->pre();
        T tmp = this->value;
        this->value = v;
        this->post(tmp, this->value);
    }

    /*
     * Unlike HeapPtr<T>, GCPtr<T> must be managed with GC lifetimes.
     * Specifically, the memory used by the pointer itself must be live until
     * at least the next minor GC. For that reason, move semantics are invalid
     * and are deleted here. Please note that not all containers support move
     * semantics, so this does not completely prevent invalid uses.
     */
    GCPtr(GCPtr<T>&&) = delete;
    GCPtr<T>& operator=(GCPtr<T>&&) = delete;
};
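
/*
 * A hedged usage sketch (the class is hypothetical): GCPtr suits a field of
 * a GC-allocated object, whose storage lives and dies with its owner:
 *
 *   class MyCell : public js::gc::Cell {
 *       GCPtr<JSObject*> target_;
 *     public:
 *       void setTarget(JSObject* obj) { target_ = obj; } // barriered write
 *   };
 */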

/*
 * A pre- and post-barriered heap pointer, for use inside the JS engine. These
 * heap pointers can be stored in C++ containers like GCVector and GCHashMap.
 *
 * The GC sometimes keeps pointers to pointers to GC things --- for example, to
 * track references into the nursery. However, C++ containers like GCVector and
 * GCHashMap usually reserve the right to relocate their elements any time
 * they're modified, invalidating all pointers to the elements. HeapPtr
 * has a move constructor which knows how to keep the GC up to date if it is
 * moved to a new location.
 *
 * However, because of this additional communication with the GC, HeapPtr
 * is somewhat slower, so it should only be used in contexts where this ability
 * is necessary.
 *
 * Obviously, JSObjects, JSStrings, and the like get tenured and compacted, so
 * whatever pointers they contain get relocated, in the sense used here.
 * However, since the GC itself is moving those values, it takes care of its
 * internal pointers to those pointers itself. HeapPtr is only necessary
 * when the relocation would otherwise occur without the GC's knowledge.
 */
template <class T>
class HeapPtr : public WriteBarrieredBase<T>
{
  public:
    HeapPtr() : WriteBarrieredBase<T>(JS::SafelyInitialized<T>()) {}

    // Implicitly adding barriers is a reasonable default.
    MOZ_IMPLICIT HeapPtr(const T& v) : WriteBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), this->value);
    }

    /*
     * For HeapPtr, move semantics are equivalent to copy semantics. In
     * C++, a copy constructor taking const-ref is the way to get a single
     * function that will be used for both lvalue and rvalue copies, so we can
     * simply omit the rvalue variant.
     */
    MOZ_IMPLICIT HeapPtr(const HeapPtr<T>& v) : WriteBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), this->value);
    }

    ~HeapPtr() {
        this->pre();
        this->post(this->value, JS::SafelyInitialized<T>());
    }

    void init(const T& v) {
        CheckTargetIsNotGray(v);
        this->value = v;
        this->post(JS::SafelyInitialized<T>(), this->value);
    }

    DECLARE_POINTER_ASSIGN_OPS(HeapPtr, T);

    /* Make this friend so it can access pre() and post(). */
    template <class T1, class T2>
    friend inline void
    BarrieredSetPair(Zone* zone,
                     HeapPtr<T1*>& v1, T1* val1,
                     HeapPtr<T2*>& v2, T2* val2);

  protected:
    void set(const T& v) {
        CheckTargetIsNotGray(v);
        this->pre();
        postBarrieredSet(v);
    }

    void postBarrieredSet(const T& v) {
        CheckTargetIsNotGray(v);
        T tmp = this->value;
        this->value = v;
        this->post(tmp, this->value);
    }
};
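
/*
 * A hedged usage sketch: because HeapPtr tells the GC when it is moved or
 * destroyed, it can live inside relocatable storage (the vector here is
 * illustrative):
 *
 *   GCVector<HeapPtr<JSObject*>> objects_; // elements may move when the
 *                                          // vector grows; barriers stay
 *                                          // correct
 */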

// Base class for barriered pointer types that intercept reads and writes.
template <typename T>
class ReadBarrieredBase : public BarrieredBase<T>
{
  protected:
    // ReadBarrieredBase is not directly instantiable.
    explicit ReadBarrieredBase(const T& v) : BarrieredBase<T>(v) {}

  protected:
    void read() const { InternalBarrierMethods<T>::readBarrier(this->value); }
    void post(const T& prev, const T& next) {
        InternalBarrierMethods<T>::postBarrier(&this->value, prev, next);
    }
};

// Incremental GC requires that weak pointers have read barriers. See the block
// comment at the top of Barrier.h for a complete discussion of why.
//
// Note that this class also has post-barriers, so is safe to use with nursery
// pointers. However, when used as a hashtable key, care must still be taken to
// insert manual post-barriers on the table for rekeying if the key is based in
// any way on the address of the object.
template <typename T>
class ReadBarriered : public ReadBarrieredBase<T>,
                      public WrappedPtrOperations<T, ReadBarriered<T>>
{
  protected:
    using ReadBarrieredBase<T>::value;

  public:
    ReadBarriered() : ReadBarrieredBase<T>(JS::SafelyInitialized<T>()) {}

    // It is okay to add barriers implicitly.
    MOZ_IMPLICIT ReadBarriered(const T& v) : ReadBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), v);
    }

    // The copy constructor creates a new weak edge but the wrapped pointer does
    // not escape, so no read barrier is necessary.
    explicit ReadBarriered(const ReadBarriered& v) : ReadBarrieredBase<T>(v) {
        this->post(JS::SafelyInitialized<T>(), v.unbarrieredGet());
    }

    // Move retains the lifetime status of the source edge, so does not fire
    // the read barrier of the defunct edge.
    ReadBarriered(ReadBarriered&& v)
      : ReadBarrieredBase<T>(std::move(v))
    {
        this->post(JS::SafelyInitialized<T>(), v.value);
    }

    ~ReadBarriered() {
        this->post(this->value, JS::SafelyInitialized<T>());
    }

    ReadBarriered& operator=(const ReadBarriered& v) {
        CheckTargetIsNotGray(v.value);
        T prior = this->value;
        this->value = v.value;
        this->post(prior, v.value);
        return *this;
    }

    const T& get() const {
        if (InternalBarrierMethods<T>::isMarkable(this->value))
            this->read();
        return this->value;
    }

    const T& unbarrieredGet() const {
        return this->value;
    }

    explicit operator bool() const {
        return bool(this->value);
    }

    operator const T&() const { return get(); }

    const T& operator->() const { return get(); }

    T* unsafeGet() { return &this->value; }
    T const* unsafeGet() const { return &this->value; }

    void set(const T& v)
    {
        CheckTargetIsNotGray(v);
        T tmp = this->value;
        this->value = v;
        this->post(tmp, v);
    }
};

// A WeakRef pointer does not hold its target live and is automatically nulled
// out when the GC discovers that it is not reachable from any other path.
template <typename T>
using WeakRef = ReadBarriered<T>;
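
/*
 * A hedged usage sketch (the field and function are hypothetical): a weak
 * cache entry reads through the barrier before handing out the pointer:
 *
 *   WeakRef<JSScript*> cachedScript_;
 *
 *   JSScript* lookup() {
 *       // get() applies the read barrier: during an incremental GC it marks
 *       // the script so the result is safe to store in marked memory.
 *       return cachedScript_ ? cachedScript_.get() : nullptr;
 *   }
 */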

// A pre- and post-barriered Value that is specialized to be aware that it
// resides in a slots or elements vector. This allows it to be relocated in
// memory, but with substantially less overhead than a HeapPtr.
class HeapSlot : public WriteBarrieredBase<Value>
{
  public:
    enum Kind {
        Slot = 0,
        Element = 1
    };

    void init(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
        value = v;
        post(owner, kind, slot, v);
    }

    void destroy() {
        pre();
    }

#ifdef DEBUG
    bool preconditionForSet(NativeObject* owner, Kind kind, uint32_t slot) const;
    void assertPreconditionForWriteBarrierPost(NativeObject* obj, Kind kind, uint32_t slot,
                                               const Value& target) const;
#endif

    MOZ_ALWAYS_INLINE void set(NativeObject* owner, Kind kind, uint32_t slot, const Value& v) {
        MOZ_ASSERT(preconditionForSet(owner, kind, slot));
        pre();
        value = v;
        post(owner, kind, slot, v);
    }

  private:
    void post(NativeObject* owner, Kind kind, uint32_t slot, const Value& target) {
#ifdef DEBUG
        assertPreconditionForWriteBarrierPost(owner, kind, slot, target);
#endif
        if (this->value.isObject() || this->value.isString()) {
            gc::Cell* cell = this->value.toGCThing();
            if (cell->storeBuffer())
                cell->storeBuffer()->putSlot(owner, kind, slot, 1);
        }
    }
};
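
/*
 * A hedged usage sketch (the object and index are hypothetical): HeapSlot
 * writes go through set(), which takes the owning object and slot number so
 * the post-barrier can record the exact location in the store buffer:
 *
 *   HeapSlot* slots = ...; // an object's slot vector
 *   slots[2].set(obj, HeapSlot::Slot, 2, JS::Int32Value(5));
 */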

class HeapSlotArray
{
    HeapSlot* array;

    // Whether writes may be performed to the slots in this array. This helps
    // to control how object elements which may be copy on write are used.
#ifdef DEBUG
    bool allowWrite_;
#endif

  public:
    explicit HeapSlotArray(HeapSlot* array, bool allowWrite)
      : array(array)
#ifdef DEBUG
      , allowWrite_(allowWrite)
#endif
    {}

    operator const Value*() const {
        JS_STATIC_ASSERT(sizeof(GCPtr<Value>) == sizeof(Value));
        JS_STATIC_ASSERT(sizeof(HeapSlot) == sizeof(Value));
        return reinterpret_cast<const Value*>(array);
    }
    operator HeapSlot*() const { MOZ_ASSERT(allowWrite()); return array; }

    HeapSlotArray operator +(int offset) const { return HeapSlotArray(array + offset, allowWrite()); }
    HeapSlotArray operator +(uint32_t offset) const { return HeapSlotArray(array + offset, allowWrite()); }

  private:
    bool allowWrite() const {
#ifdef DEBUG
        return allowWrite_;
#else
        return true;
#endif
    }
};

/*
 * This is a hack for RegExpStatics::updateFromMatch. It allows us to do two
 * barriers with only one branch to check if we're in an incremental GC.
 */
template <class T1, class T2>
static inline void
BarrieredSetPair(Zone* zone,
                 HeapPtr<T1*>& v1, T1* val1,
                 HeapPtr<T2*>& v2, T2* val2)
{
    if (T1::needWriteBarrierPre(zone)) {
        v1.pre();
        v2.pre();
    }
    v1.postBarrieredSet(val1);
    v2.postBarrieredSet(val2);
}
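
/*
 * A hedged usage sketch (the fields are hypothetical): both writes share a
 * single "are we inside an incremental GC?" check:
 *
 *   BarrieredSetPair(zone, pendingInput_, newInput,
 *                          matchesInput_, newInput);
 */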

/*
 * ImmutableTenuredPtr is designed for one very narrow case: replacing
 * immutable raw pointers to GC-managed things, implicitly converting to a
 * handle type for ease of use. Pointers encapsulated by this type must:
 *
 *   be immutable (no incremental write barriers),
 *   never point into the nursery (no generational write barriers), and
 *   be traced via MarkRuntime (we use fromMarkedLocation).
 *
 * In short: you *really* need to know what you're doing before you use this
 * class!
 */
template <typename T>
class ImmutableTenuredPtr
{
    T value;

  public:
    operator T() const { return value; }
    T operator->() const { return value; }

    operator Handle<T>() const {
        return Handle<T>::fromMarkedLocation(&value);
    }

    void init(T ptr) {
        MOZ_ASSERT(ptr->isTenured());
        CheckTargetIsNotGray(ptr);
        value = ptr;
    }

    T get() const { return value; }
    const T* address() { return &value; }
};
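
/*
 * A hedged usage sketch (the names are hypothetical): the wrapped pointer is
 * initialized exactly once, to a tenured thing, and then treated as a Handle:
 *
 *   ImmutableTenuredPtr<PropertyName*> positiveInfinity_;
 *   positiveInfinity_.init(name);                // asserts name->isTenured()
 *   Handle<PropertyName*> h = positiveInfinity_; // implicit conversion
 */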

template <typename T>
struct MovableCellHasher<PreBarriered<T>>
{
    using Key = PreBarriered<T>;
    using Lookup = T;

    static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
    static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <typename T>
struct MovableCellHasher<HeapPtr<T>>
{
    using Key = HeapPtr<T>;
    using Lookup = T;

    static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
    static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) { return MovableCellHasher<T>::match(k, l); }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <typename T>
struct MovableCellHasher<ReadBarriered<T>>
{
    using Key = ReadBarriered<T>;
    using Lookup = T;

    static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
    static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
    static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
    static bool match(const Key& k, const Lookup& l) {
        return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
    }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Useful for hashtables with a GCPtr as key. */
template <class T>
struct GCPtrHasher
{
    typedef GCPtr<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.get() == l; }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

/* Specialized hashing policy for GCPtrs. */
template <class T>
struct DefaultHasher<GCPtr<T>> : GCPtrHasher<T> {};

template <class T>
struct PreBarrieredHasher
{
    typedef PreBarriered<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.get() == l; }
    static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
};

template <class T>
struct DefaultHasher<PreBarriered<T>> : PreBarrieredHasher<T> { };

/* Useful for hashtables with a ReadBarriered as key. */
template <class T>
struct ReadBarrieredHasher
{
    typedef ReadBarriered<T> Key;
    typedef T Lookup;

    static HashNumber hash(Lookup obj) { return DefaultHasher<T>::hash(obj); }
    static bool match(const Key& k, Lookup l) { return k.unbarrieredGet() == l; }
    static void rekey(Key& k, const Key& newKey) { k.set(newKey.unbarrieredGet()); }
};

/* Specialized hashing policy for ReadBarriereds. */
template <class T>
struct DefaultHasher<ReadBarriered<T>> : ReadBarrieredHasher<T> { };
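
/*
 * A hedged usage sketch: with the DefaultHasher specializations above, a
 * table can be keyed directly on a barriered pointer (the set is
 * illustrative):
 *
 *   HashSet<ReadBarriered<JSObject*>> liveObjects_; // uses ReadBarrieredHasher
 */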

class ArrayObject;
class ArrayBufferObject;
class GlobalObject;
class Scope;
class ScriptSourceObject;
class Shape;
class BaseShape;
class UnownedBaseShape;
class WasmInstanceObject;
class WasmTableObject;
namespace jit {
class JitCode;
} // namespace jit

typedef PreBarriered<JSObject*> PreBarrieredObject;
typedef PreBarriered<JSScript*> PreBarrieredScript;
typedef PreBarriered<jit::JitCode*> PreBarrieredJitCode;
typedef PreBarriered<JSString*> PreBarrieredString;
typedef PreBarriered<JSAtom*> PreBarrieredAtom;

typedef GCPtr<NativeObject*> GCPtrNativeObject;
typedef GCPtr<ArrayObject*> GCPtrArrayObject;
typedef GCPtr<ArrayBufferObjectMaybeShared*> GCPtrArrayBufferObjectMaybeShared;
typedef GCPtr<ArrayBufferObject*> GCPtrArrayBufferObject;
typedef GCPtr<BaseShape*> GCPtrBaseShape;
typedef GCPtr<JSAtom*> GCPtrAtom;
typedef GCPtr<JSFlatString*> GCPtrFlatString;
typedef GCPtr<JSFunction*> GCPtrFunction;
typedef GCPtr<JSLinearString*> GCPtrLinearString;
typedef GCPtr<JSObject*> GCPtrObject;
typedef GCPtr<JSScript*> GCPtrScript;
typedef GCPtr<JSString*> GCPtrString;
typedef GCPtr<ModuleObject*> GCPtrModuleObject;
typedef GCPtr<ModuleEnvironmentObject*> GCPtrModuleEnvironmentObject;
typedef GCPtr<ModuleNamespaceObject*> GCPtrModuleNamespaceObject;
typedef GCPtr<PlainObject*> GCPtrPlainObject;
typedef GCPtr<PropertyName*> GCPtrPropertyName;
typedef GCPtr<Shape*> GCPtrShape;
typedef GCPtr<UnownedBaseShape*> GCPtrUnownedBaseShape;
typedef GCPtr<jit::JitCode*> GCPtrJitCode;
typedef GCPtr<ObjectGroup*> GCPtrObjectGroup;
typedef GCPtr<Scope*> GCPtrScope;

typedef PreBarriered<Value> PreBarrieredValue;
typedef GCPtr<Value> GCPtrValue;

typedef PreBarriered<jsid> PreBarrieredId;
typedef GCPtr<jsid> GCPtrId;

typedef ImmutableTenuredPtr<PropertyName*> ImmutablePropertyNamePtr;
typedef ImmutableTenuredPtr<JS::Symbol*> ImmutableSymbolPtr;

typedef ReadBarriered<DebugEnvironmentProxy*> ReadBarrieredDebugEnvironmentProxy;
typedef ReadBarriered<GlobalObject*> ReadBarrieredGlobalObject;
typedef ReadBarriered<JSObject*> ReadBarrieredObject;
typedef ReadBarriered<JSFunction*> ReadBarrieredFunction;
typedef ReadBarriered<JSScript*> ReadBarrieredScript;
typedef ReadBarriered<ScriptSourceObject*> ReadBarrieredScriptSourceObject;
typedef ReadBarriered<Shape*> ReadBarrieredShape;
typedef ReadBarriered<jit::JitCode*> ReadBarrieredJitCode;
typedef ReadBarriered<ObjectGroup*> ReadBarrieredObjectGroup;
typedef ReadBarriered<JS::Symbol*> ReadBarrieredSymbol;
typedef ReadBarriered<WasmInstanceObject*> ReadBarrieredWasmInstanceObject;
typedef ReadBarriered<WasmTableObject*> ReadBarrieredWasmTableObject;

typedef ReadBarriered<Value> ReadBarrieredValue;

namespace detail {

template <typename T>
struct DefineComparisonOps<PreBarriered<T>> : mozilla::TrueType {
    static const T& get(const PreBarriered<T>& v) { return v.get(); }
};

template <typename T>
struct DefineComparisonOps<GCPtr<T>> : mozilla::TrueType {
    static const T& get(const GCPtr<T>& v) { return v.get(); }
};

template <typename T>
struct DefineComparisonOps<HeapPtr<T>> : mozilla::TrueType {
    static const T& get(const HeapPtr<T>& v) { return v.get(); }
};

template <typename T>
struct DefineComparisonOps<ReadBarriered<T>> : mozilla::TrueType {
    static const T& get(const ReadBarriered<T>& v) { return v.unbarrieredGet(); }
};

template <>
struct DefineComparisonOps<HeapSlot> : mozilla::TrueType {
    static const Value& get(const HeapSlot& v) { return v.get(); }
};

} /* namespace detail */

} /* namespace js */

#endif /* gc_Barrier_h */