Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #include "jit/MacroAssembler-inl.h"
8 :
9 : #include "mozilla/CheckedInt.h"
10 : #include "mozilla/MathAlgorithms.h"
11 :
12 : #include "jsfriendapi.h"
13 :
14 : #include "builtin/TypedObject.h"
15 : #include "gc/GCTrace.h"
16 : #include "jit/AtomicOp.h"
17 : #include "jit/Bailouts.h"
18 : #include "jit/BaselineFrame.h"
19 : #include "jit/BaselineIC.h"
20 : #include "jit/BaselineJIT.h"
21 : #include "jit/JitOptions.h"
22 : #include "jit/Lowering.h"
23 : #include "jit/MIR.h"
24 : #include "js/Conversions.h"
25 : #include "js/Printf.h"
26 : #include "vm/TraceLogging.h"
27 :
28 : #include "gc/Nursery-inl.h"
29 : #include "jit/shared/Lowering-shared-inl.h"
30 : #include "jit/TemplateObject-inl.h"
31 : #include "vm/Interpreter-inl.h"
32 : #include "vm/JSObject-inl.h"
33 : #include "vm/TypeInference-inl.h"
34 :
35 : using namespace js;
36 : using namespace js::jit;
37 :
38 : using JS::GenericNaN;
39 : using JS::ToInt32;
40 :
41 : using mozilla::CheckedUint32;
42 :
43 : template <typename T>
44 : static void
45 0 : EmitTypeCheck(MacroAssembler& masm, Assembler::Condition cond, const T& src, TypeSet::Type type,
46 : Label* label)
47 : {
48 0 : if (type.isAnyObject()) {
49 : masm.branchTestObject(cond, src, label);
50 : return;
51 : }
52 0 : switch (type.primitive()) {
53 : case JSVAL_TYPE_DOUBLE:
54 : // TI double type includes int32.
55 : masm.branchTestNumber(cond, src, label);
56 : break;
57 : case JSVAL_TYPE_INT32:
58 : masm.branchTestInt32(cond, src, label);
59 : break;
60 : case JSVAL_TYPE_BOOLEAN:
61 : masm.branchTestBoolean(cond, src, label);
62 : break;
63 : case JSVAL_TYPE_STRING:
64 : masm.branchTestString(cond, src, label);
65 : break;
66 : case JSVAL_TYPE_SYMBOL:
67 : masm.branchTestSymbol(cond, src, label);
68 : break;
69 : case JSVAL_TYPE_NULL:
70 : masm.branchTestNull(cond, src, label);
71 : break;
72 : case JSVAL_TYPE_UNDEFINED:
73 : masm.branchTestUndefined(cond, src, label);
74 : break;
75 : case JSVAL_TYPE_MAGIC:
76 : masm.branchTestMagic(cond, src, label);
77 : break;
78 : default:
79 0 : MOZ_CRASH("Unexpected type");
80 : }
81 : }
82 :
// Guard that the Value found at |address| is a member of |types|, jumping to
// |miss| otherwise. Emits one tag test per primitive type present in the set;
// if the set also names specific objects, ends with an object-tag test and
// (unless kind == TypeTagOnly) a guard on the concrete groups/singletons.
template <typename Source> void
MacroAssembler::guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
                             Register unboxScratch, Register objScratch,
                             Register spectreRegToZero, Label* miss)
{
    // unboxScratch may be InvalidReg on 32-bit platforms. It should only be
    // used for extracting the Value tag or payload.
    //
    // objScratch may be InvalidReg if the TypeSet does not contain specific
    // objects to guard on. It should only be used for guardObjectType.
    //
    // spectreRegToZero is a register that will be zeroed by guardObjectType on
    // speculatively executed paths.

    MOZ_ASSERT(kind == BarrierKind::TypeTagOnly || kind == BarrierKind::TypeSet);
    MOZ_ASSERT(!types->unknown());

    Label matched;
    // Every primitive tag test we might need to emit, in emission order.
    TypeSet::Type tests[8] = {
        TypeSet::Int32Type(),
        TypeSet::UndefinedType(),
        TypeSet::BooleanType(),
        TypeSet::StringType(),
        TypeSet::SymbolType(),
        TypeSet::NullType(),
        TypeSet::MagicArgType(),
        TypeSet::AnyObjectType()
    };

    // The double type also implies Int32.
    // So replace the int32 test with the double one.
    if (types->hasType(TypeSet::DoubleType())) {
        MOZ_ASSERT(types->hasType(TypeSet::Int32Type()));
        tests[0] = TypeSet::DoubleType();
    }

    // Count the branches we will emit so the final test can branch to |miss|
    // on mismatch instead of falling through to |matched|.
    unsigned numBranches = 0;
    for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
        if (types->hasType(tests[i]))
            numBranches++;
    }

    // Specific-object guards count as one extra branch (the object-tag test).
    if (!types->unknownObject() && types->getObjectCount() > 0)
        numBranches++;

    if (numBranches == 0) {
        // An empty type set matches nothing; always miss.
        MOZ_ASSERT(types->empty());
        jump(miss);
        return;
    }

    Register tag = extractTag(address, unboxScratch);

    // Emit all typed tests. All but the last test jump to |matched| on
    // success; the last one jumps to |miss| on failure.
    for (size_t i = 0; i < mozilla::ArrayLength(tests); i++) {
        if (!types->hasType(tests[i]))
            continue;

        if (--numBranches > 0)
            EmitTypeCheck(*this, Equal, tag, tests[i], &matched);
        else
            EmitTypeCheck(*this, NotEqual, tag, tests[i], miss);
    }

    // If we don't have specific objects to check for, we're done.
    if (numBranches == 0) {
        MOZ_ASSERT(types->unknownObject() || types->getObjectCount() == 0);
        bind(&matched);
        return;
    }

    // Test specific objects.
    MOZ_ASSERT(objScratch != InvalidReg);
    MOZ_ASSERT(objScratch != unboxScratch);

    MOZ_ASSERT(numBranches == 1);
    branchTestObject(NotEqual, tag, miss);

    if (kind != BarrierKind::TypeTagOnly) {
        Register obj = extractObject(address, unboxScratch);
        guardObjectType(obj, types, objScratch, spectreRegToZero, miss);
    } else {
#ifdef DEBUG
        // TypeTagOnly skips the object guard at runtime, but in DEBUG builds
        // verify the object would have matched (modulo group changes that
        // guardTypeSetMightBeIncomplete accounts for).
        Label fail;
        Register obj = extractObject(address, unboxScratch);
        guardObjectType(obj, types, objScratch, spectreRegToZero, &fail);
        jump(&matched);

        bind(&fail);
        guardTypeSetMightBeIncomplete(types, obj, objScratch, &matched);
        assumeUnreachable("Unexpected object type");
#endif
    }

    bind(&matched);
}
179 :
180 : #ifdef DEBUG
181 : // guardTypeSetMightBeIncomplete is only used in DEBUG builds. If this ever
182 : // changes, we need to make sure it's Spectre-safe.
// Jump to |label| if the type-set guard might legitimately have missed for
// |obj| because the object's group changed after the set was recorded.
// DEBUG-only; clobbers |scratch|.
void
MacroAssembler::guardTypeSetMightBeIncomplete(const TypeSet* types, Register obj,
                                              Register scratch, Label* label)
{
    // Type set guards might miss when an object's group changes. In this case
    // either its old group's properties will become unknown, or it will change
    // to a native object with an original unboxed group. Jump to label if this
    // might have happened for the input object.

    if (types->unknownObject()) {
        jump(label);
        return;
    }

    // Check whether |obj|'s group carries the original-unboxed-group addendum.
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    load32(Address(scratch, ObjectGroup::offsetOfFlags()), scratch);
    and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
    branch32(Assembler::Equal,
             scratch, Imm32(ObjectGroup::addendumOriginalUnboxedGroupValue()), label);

    // For every object/group named in the set, check whether its group has
    // had its properties marked unknown since the set was recorded.
    for (size_t i = 0; i < types->getObjectCount(); i++) {
        if (JSObject* singleton = getSingletonAndDelayBarrier(types, i)) {
            movePtr(ImmGCPtr(singleton), scratch);
            loadPtr(Address(scratch, JSObject::offsetOfGroup()), scratch);
        } else if (ObjectGroup* group = getGroupAndDelayBarrier(types, i)) {
            movePtr(ImmGCPtr(group), scratch);
        } else {
            // Entry was cleared; nothing to test.
            continue;
        }
        branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
                     Imm32(OBJECT_FLAG_UNKNOWN_PROPERTIES), label);
    }
}
216 : #endif
217 :
// Guard that |obj| is one of the singletons or object groups named in
// |types|, jumping to |miss| otherwise. With Spectre mitigations enabled,
// |spectreRegToZero| is zeroed on speculatively-executed mismatch paths so
// speculative loads through it are harmless.
void
MacroAssembler::guardObjectType(Register obj, const TypeSet* types, Register scratch,
                                Register spectreRegToZero, Label* miss)
{
    MOZ_ASSERT(obj != scratch);
    MOZ_ASSERT(!types->unknown());
    MOZ_ASSERT(!types->hasType(TypeSet::AnyObjectType()));
    MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);

    // Note: this method elides read barriers on values read from type sets, as
    // this may be called off thread during Ion compilation. This is
    // safe to do as the final JitCode object will be allocated during the
    // incremental GC (or the compilation canceled before we start sweeping),
    // see CodeGenerator::link. Other callers should use TypeSet::readBarrier
    // to trigger the barrier on the contents of type sets passed in here.
    Label matched;

    // First pass: count branches so the final comparison can jump to |miss|
    // on mismatch while earlier ones jump to |matched| on match.
    bool hasSingletons = false;
    bool hasObjectGroups = false;
    unsigned numBranches = 0;

    unsigned count = types->getObjectCount();
    for (unsigned i = 0; i < count; i++) {
        if (types->hasGroup(i)) {
            hasObjectGroups = true;
            numBranches++;
        } else if (types->hasSingleton(i)) {
            hasSingletons = true;
            numBranches++;
        }
    }

    if (numBranches == 0) {
        jump(miss);
        return;
    }

    // Under Spectre mitigations, |scratch| holds zero so spectreMovePtr can
    // conditionally wipe |spectreRegToZero| on mispredicted paths.
    if (JitOptions.spectreObjectMitigationsBarriers)
        move32(Imm32(0), scratch);

    if (hasSingletons) {
        for (unsigned i = 0; i < count; i++) {
            JSObject* singleton = getSingletonAndDelayBarrier(types, i);
            if (!singleton)
                continue;

            if (JitOptions.spectreObjectMitigationsBarriers) {
                if (--numBranches > 0) {
                    Label next;
                    branchPtr(NotEqual, obj, ImmGCPtr(singleton), &next);
                    spectreMovePtr(NotEqual, scratch, spectreRegToZero);
                    jump(&matched);
                    bind(&next);
                } else {
                    branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
                    spectreMovePtr(NotEqual, scratch, spectreRegToZero);
                }
            } else {
                if (--numBranches > 0)
                    branchPtr(Equal, obj, ImmGCPtr(singleton), &matched);
                else
                    branchPtr(NotEqual, obj, ImmGCPtr(singleton), miss);
            }
        }
    }

    if (hasObjectGroups) {
        comment("has object groups");

        // If Spectre mitigations are enabled, we use the scratch register as
        // zero register. Without mitigations we can use it to store the group.
        Address groupAddr(obj, JSObject::offsetOfGroup());
        if (!JitOptions.spectreObjectMitigationsBarriers)
            loadPtr(groupAddr, scratch);

        for (unsigned i = 0; i < count; i++) {
            ObjectGroup* group = getGroupAndDelayBarrier(types, i);
            if (!group)
                continue;

            // Record the group so the read barrier can be triggered at link
            // time (see the elided-barrier note above).
            if (!pendingObjectGroupReadBarriers_.append(group)) {
                setOOM();
                return;
            }

            if (JitOptions.spectreObjectMitigationsBarriers) {
                if (--numBranches > 0) {
                    Label next;
                    branchPtr(NotEqual, groupAddr, ImmGCPtr(group), &next);
                    spectreMovePtr(NotEqual, scratch, spectreRegToZero);
                    jump(&matched);
                    bind(&next);
                } else {
                    branchPtr(NotEqual, groupAddr, ImmGCPtr(group), miss);
                    spectreMovePtr(NotEqual, scratch, spectreRegToZero);
                }
            } else {
                if (--numBranches > 0)
                    branchPtr(Equal, scratch, ImmGCPtr(group), &matched);
                else
                    branchPtr(NotEqual, scratch, ImmGCPtr(group), miss);
            }
        }
    }

    MOZ_ASSERT(numBranches == 0);

    bind(&matched);
}
327 :
// Explicit instantiations for every value-source type guardTypeSet is called
// with elsewhere in the JIT.
template void MacroAssembler::guardTypeSet(const Address& address, const TypeSet* types,
                                           BarrierKind kind, Register unboxScratch,
                                           Register objScratch, Register spectreRegToZero,
                                           Label* miss);
template void MacroAssembler::guardTypeSet(const ValueOperand& value, const TypeSet* types,
                                           BarrierKind kind, Register unboxScratch,
                                           Register objScratch, Register spectreRegToZero,
                                           Label* miss);
template void MacroAssembler::guardTypeSet(const TypedOrValueRegister& value, const TypeSet* types,
                                           BarrierKind kind, Register unboxScratch,
                                           Register objScratch, Register spectreRegToZero,
                                           Label* miss);
340 :
341 : template<typename S, typename T>
342 : static void
343 0 : StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
344 : unsigned numElems)
345 : {
346 0 : switch (arrayType) {
347 : case Scalar::Float32:
348 : masm.storeFloat32(value, dest);
349 : break;
350 : case Scalar::Float64:
351 : masm.storeDouble(value, dest);
352 : break;
353 : case Scalar::Float32x4:
354 0 : switch (numElems) {
355 : case 1:
356 : masm.storeFloat32(value, dest);
357 : break;
358 : case 2:
359 : masm.storeDouble(value, dest);
360 : break;
361 : case 3:
362 0 : masm.storeFloat32x3(value, dest);
363 0 : break;
364 : case 4:
365 0 : masm.storeUnalignedSimd128Float(value, dest);
366 0 : break;
367 0 : default: MOZ_CRASH("unexpected number of elements in simd write");
368 : }
369 : break;
370 : case Scalar::Int32x4:
371 0 : switch (numElems) {
372 : case 1:
373 0 : masm.storeInt32x1(value, dest);
374 0 : break;
375 : case 2:
376 0 : masm.storeInt32x2(value, dest);
377 0 : break;
378 : case 3:
379 0 : masm.storeInt32x3(value, dest);
380 0 : break;
381 : case 4:
382 0 : masm.storeUnalignedSimd128Int(value, dest);
383 0 : break;
384 0 : default: MOZ_CRASH("unexpected number of elements in simd write");
385 : }
386 : break;
387 : case Scalar::Int8x16:
388 0 : MOZ_ASSERT(numElems == 16, "unexpected partial store");
389 0 : masm.storeUnalignedSimd128Int(value, dest);
390 0 : break;
391 : case Scalar::Int16x8:
392 0 : MOZ_ASSERT(numElems == 8, "unexpected partial store");
393 0 : masm.storeUnalignedSimd128Int(value, dest);
394 0 : break;
395 : default:
396 0 : MOZ_CRASH("Invalid typed array type");
397 : }
398 0 : }
399 :
// Public wrapper: store to a float typed-array element addressed by base+index.
void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                                       const BaseIndex& dest, unsigned numElems)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
}
// Public wrapper: store to a float typed-array element at a fixed address.
void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
                                       const Address& dest, unsigned numElems)
{
    StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
}
412 :
// Load a typed-array element from |src| into |dest|, sign/zero-extending or
// converting as the element type requires. |temp| is only used for the
// Uint32-into-float path; |fail| is taken when a Uint32 value does not fit in
// a signed int32 destination.
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
                                   Label* fail, bool canonicalizeDoubles, unsigned numElems)
{
    switch (arrayType) {
      case Scalar::Int8:
        load8SignExtend(src, dest.gpr());
        break;
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
        load8ZeroExtend(src, dest.gpr());
        break;
      case Scalar::Int16:
        load16SignExtend(src, dest.gpr());
        break;
      case Scalar::Uint16:
        load16ZeroExtend(src, dest.gpr());
        break;
      case Scalar::Int32:
        load32(src, dest.gpr());
        break;
      case Scalar::Uint32:
        if (dest.isFloat()) {
            // A float destination can represent the full uint32 range.
            load32(src, temp);
            convertUInt32ToDouble(temp, dest.fpu());
        } else {
            load32(src, dest.gpr());

            // Bail out if the value doesn't fit into a signed int32 value. This
            // is what allows MLoadUnboxedScalar to have a type() of
            // MIRType::Int32 for UInt32 array loads.
            branchTest32(Assembler::Signed, dest.gpr(), dest.gpr(), fail);
        }
        break;
      case Scalar::Float32:
        loadFloat32(src, dest.fpu());
        canonicalizeFloat(dest.fpu());
        break;
      case Scalar::Float64:
        loadDouble(src, dest.fpu());
        if (canonicalizeDoubles)
            canonicalizeDouble(dest.fpu());
        break;
      case Scalar::Int32x4:
        // Partial SIMD loads of 1-3 lanes are permitted for Int32x4.
        switch (numElems) {
          case 1:
            loadInt32x1(src, dest.fpu());
            break;
          case 2:
            loadInt32x2(src, dest.fpu());
            break;
          case 3:
            loadInt32x3(src, dest.fpu());
            break;
          case 4:
            loadUnalignedSimd128Int(src, dest.fpu());
            break;
          default: MOZ_CRASH("unexpected number of elements in SIMD load");
        }
        break;
      case Scalar::Float32x4:
        // 1- and 2-lane partial loads reuse the scalar float/double paths.
        switch (numElems) {
          case 1:
            loadFloat32(src, dest.fpu());
            break;
          case 2:
            loadDouble(src, dest.fpu());
            break;
          case 3:
            loadFloat32x3(src, dest.fpu());
            break;
          case 4:
            loadUnalignedSimd128Float(src, dest.fpu());
            break;
          default: MOZ_CRASH("unexpected number of elements in SIMD load");
        }
        break;
      case Scalar::Int8x16:
        MOZ_ASSERT(numElems == 16, "unexpected partial load");
        loadUnalignedSimd128Int(src, dest.fpu());
        break;
      case Scalar::Int16x8:
        MOZ_ASSERT(numElems == 8, "unexpected partial load");
        loadUnalignedSimd128Int(src, dest.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
503 :
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
                                                 Register temp, Label* fail, bool canonicalizeDoubles,
                                                 unsigned numElems);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
                                                 Register temp, Label* fail, bool canonicalizeDoubles,
                                                 unsigned numElems);
510 :
// Load a typed-array element from |src| and box it into the Value register
// |dest|. Uint32 values that exceed int32 range are either boxed as doubles
// (when |allowDouble|) or branch to |fail|.
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest,
                                   bool allowDouble, Register temp, Label* fail)
{
    switch (arrayType) {
      case Scalar::Int8:
      case Scalar::Uint8:
      case Scalar::Uint8Clamped:
      case Scalar::Int16:
      case Scalar::Uint16:
      case Scalar::Int32:
        // All of these fit in an int32; load raw and tag.
        loadFromTypedArray(arrayType, src, AnyRegister(dest.scratchReg()), InvalidReg, nullptr);
        tagValue(JSVAL_TYPE_INT32, dest.scratchReg(), dest);
        break;
      case Scalar::Uint32:
        // Don't clobber dest when we could fail, instead use temp.
        load32(src, temp);
        if (allowDouble) {
            // If the value fits in an int32, store an int32 type tag.
            // Else, convert the value to double and box it.
            Label done, isDouble;
            branchTest32(Assembler::Signed, temp, temp, &isDouble);
            {
                tagValue(JSVAL_TYPE_INT32, temp, dest);
                jump(&done);
            }
            bind(&isDouble);
            {
                convertUInt32ToDouble(temp, ScratchDoubleReg);
                boxDouble(ScratchDoubleReg, dest, ScratchDoubleReg);
            }
            bind(&done);
        } else {
            // Bailout if the value does not fit in an int32.
            branchTest32(Assembler::Signed, temp, temp, fail);
            tagValue(JSVAL_TYPE_INT32, temp, dest);
        }
        break;
      case Scalar::Float32:
        // Widen to double before boxing, as Values hold doubles.
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchFloat32Reg), dest.scratchReg(),
                           nullptr);
        convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
        boxDouble(ScratchDoubleReg, dest, ScratchDoubleReg);
        break;
      case Scalar::Float64:
        loadFromTypedArray(arrayType, src, AnyRegister(ScratchDoubleReg), dest.scratchReg(),
                           nullptr);
        boxDouble(ScratchDoubleReg, dest, ScratchDoubleReg);
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
565 :
// Explicit instantiations for the two addressing modes used by callers.
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, const ValueOperand& dest,
                                                 bool allowDouble, Register temp, Label* fail);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, const ValueOperand& dest,
                                                 bool allowDouble, Register temp, Label* fail);
570 :
571 : template <typename T>
572 : void
573 0 : MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)
574 : {
575 0 : switch (type) {
576 : case JSVAL_TYPE_INT32: {
577 : // Handle loading an int32 into a double reg.
578 0 : if (output.type() == MIRType::Double) {
579 0 : convertInt32ToDouble(address, output.typedReg().fpu());
580 0 : break;
581 : }
582 : MOZ_FALLTHROUGH;
583 : }
584 :
585 : case JSVAL_TYPE_BOOLEAN:
586 : case JSVAL_TYPE_STRING: {
587 0 : Register outReg;
588 0 : if (output.hasValue()) {
589 0 : outReg = output.valueReg().scratchReg();
590 : } else {
591 0 : MOZ_ASSERT(output.type() == MIRTypeFromValueType(type));
592 0 : outReg = output.typedReg().gpr();
593 : }
594 :
595 0 : switch (type) {
596 : case JSVAL_TYPE_BOOLEAN:
597 0 : load8ZeroExtend(address, outReg);
598 0 : break;
599 : case JSVAL_TYPE_INT32:
600 0 : load32(address, outReg);
601 0 : break;
602 : case JSVAL_TYPE_STRING:
603 0 : loadPtr(address, outReg);
604 0 : break;
605 : default:
606 0 : MOZ_CRASH();
607 : }
608 :
609 0 : if (output.hasValue())
610 0 : tagValue(type, outReg, output.valueReg());
611 : break;
612 : }
613 :
614 : case JSVAL_TYPE_OBJECT:
615 0 : if (output.hasValue()) {
616 0 : Register scratch = output.valueReg().scratchReg();
617 0 : loadPtr(address, scratch);
618 :
619 0 : Label notNull, done;
620 0 : branchPtr(Assembler::NotEqual, scratch, ImmWord(0), ¬Null);
621 :
622 0 : moveValue(NullValue(), output.valueReg());
623 0 : jump(&done);
624 :
625 0 : bind(¬Null);
626 0 : tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
627 :
628 0 : bind(&done);
629 : } else {
630 : // Reading null can't be possible here, as otherwise the result
631 : // would be a value (either because null has been read before or
632 : // because there is a barrier).
633 0 : Register reg = output.typedReg().gpr();
634 0 : loadPtr(address, reg);
635 : #ifdef DEBUG
636 0 : Label ok;
637 0 : branchTestPtr(Assembler::NonZero, reg, reg, &ok);
638 0 : assumeUnreachable("Null not possible");
639 0 : bind(&ok);
640 : #endif
641 : }
642 : break;
643 :
644 : case JSVAL_TYPE_DOUBLE:
645 : // Note: doubles in unboxed objects are not accessed through other
646 : // views and do not need canonicalization.
647 0 : if (output.hasValue())
648 0 : loadValue(address, output.valueReg());
649 : else
650 0 : loadDouble(address, output.typedReg().fpu());
651 : break;
652 :
653 : default:
654 0 : MOZ_CRASH();
655 : }
656 0 : }
657 :
// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssembler::loadUnboxedProperty(Address address, JSValueType type,
                                    TypedOrValueRegister output);

template void
MacroAssembler::loadUnboxedProperty(BaseIndex address, JSValueType type,
                                    TypedOrValueRegister output);
665 :
666 : static void
667 0 : StoreUnboxedFailure(MacroAssembler& masm, Label* failure)
668 : {
669 : // Storing a value to an unboxed property is a fallible operation and
670 : // the caller must provide a failure label if a particular unboxed store
671 : // might fail. Sometimes, however, a store that cannot succeed (such as
672 : // storing a string to an int32 property) will be marked as infallible.
673 : // This can only happen if the code involved is unreachable.
674 0 : if (failure)
675 0 : masm.jump(failure);
676 : else
677 0 : masm.assumeUnreachable("Incompatible write to unboxed property");
678 0 : }
679 :
680 : template <typename T>
681 : void
682 0 : MacroAssembler::storeUnboxedProperty(T address, JSValueType type,
683 : const ConstantOrRegister& value, Label* failure)
684 : {
685 0 : switch (type) {
686 : case JSVAL_TYPE_BOOLEAN:
687 0 : if (value.constant()) {
688 0 : if (value.value().isBoolean())
689 0 : store8(Imm32(value.value().toBoolean()), address);
690 : else
691 0 : StoreUnboxedFailure(*this, failure);
692 0 : } else if (value.reg().hasTyped()) {
693 0 : if (value.reg().type() == MIRType::Boolean)
694 0 : store8(value.reg().typedReg().gpr(), address);
695 : else
696 0 : StoreUnboxedFailure(*this, failure);
697 : } else {
698 0 : if (failure)
699 0 : branchTestBoolean(Assembler::NotEqual, value.reg().valueReg(), failure);
700 0 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 1, type);
701 : }
702 : break;
703 :
704 : case JSVAL_TYPE_INT32:
705 0 : if (value.constant()) {
706 0 : if (value.value().isInt32())
707 0 : store32(Imm32(value.value().toInt32()), address);
708 : else
709 0 : StoreUnboxedFailure(*this, failure);
710 0 : } else if (value.reg().hasTyped()) {
711 0 : if (value.reg().type() == MIRType::Int32)
712 0 : store32(value.reg().typedReg().gpr(), address);
713 : else
714 0 : StoreUnboxedFailure(*this, failure);
715 : } else {
716 0 : if (failure)
717 0 : branchTestInt32(Assembler::NotEqual, value.reg().valueReg(), failure);
718 0 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ 4, type);
719 : }
720 : break;
721 :
722 : case JSVAL_TYPE_DOUBLE:
723 0 : if (value.constant()) {
724 0 : if (value.value().isNumber()) {
725 0 : loadConstantDouble(value.value().toNumber(), ScratchDoubleReg);
726 : storeDouble(ScratchDoubleReg, address);
727 : } else {
728 0 : StoreUnboxedFailure(*this, failure);
729 : }
730 0 : } else if (value.reg().hasTyped()) {
731 0 : if (value.reg().type() == MIRType::Int32) {
732 0 : convertInt32ToDouble(value.reg().typedReg().gpr(), ScratchDoubleReg);
733 : storeDouble(ScratchDoubleReg, address);
734 0 : } else if (value.reg().type() == MIRType::Double) {
735 0 : storeDouble(value.reg().typedReg().fpu(), address);
736 : } else {
737 0 : StoreUnboxedFailure(*this, failure);
738 : }
739 : } else {
740 0 : ValueOperand reg = value.reg().valueReg();
741 0 : Label notInt32, end;
742 0 : branchTestInt32(Assembler::NotEqual, reg, ¬Int32);
743 0 : int32ValueToDouble(reg, ScratchDoubleReg);
744 0 : storeDouble(ScratchDoubleReg, address);
745 0 : jump(&end);
746 0 : bind(¬Int32);
747 0 : if (failure)
748 : branchTestDouble(Assembler::NotEqual, reg, failure);
749 0 : storeValue(reg, address);
750 0 : bind(&end);
751 : }
752 : break;
753 :
754 : case JSVAL_TYPE_OBJECT:
755 0 : if (value.constant()) {
756 0 : if (value.value().isObjectOrNull())
757 0 : storePtr(ImmGCPtr(value.value().toObjectOrNull()), address);
758 : else
759 0 : StoreUnboxedFailure(*this, failure);
760 0 : } else if (value.reg().hasTyped()) {
761 0 : MOZ_ASSERT(value.reg().type() != MIRType::Null);
762 0 : if (value.reg().type() == MIRType::Object)
763 0 : storePtr(value.reg().typedReg().gpr(), address);
764 : else
765 0 : StoreUnboxedFailure(*this, failure);
766 : } else {
767 0 : if (failure) {
768 0 : Label ok;
769 0 : branchTestNull(Assembler::Equal, value.reg().valueReg(), &ok);
770 0 : branchTestObject(Assembler::NotEqual, value.reg().valueReg(), failure);
771 0 : bind(&ok);
772 : }
773 0 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t),
774 : type);
775 : }
776 : break;
777 :
778 : case JSVAL_TYPE_STRING:
779 0 : if (value.constant()) {
780 0 : if (value.value().isString())
781 0 : storePtr(ImmGCPtr(value.value().toString()), address);
782 : else
783 0 : StoreUnboxedFailure(*this, failure);
784 0 : } else if (value.reg().hasTyped()) {
785 0 : if (value.reg().type() == MIRType::String)
786 0 : storePtr(value.reg().typedReg().gpr(), address);
787 : else
788 0 : StoreUnboxedFailure(*this, failure);
789 : } else {
790 0 : if (failure)
791 0 : branchTestString(Assembler::NotEqual, value.reg().valueReg(), failure);
792 0 : storeUnboxedPayload(value.reg().valueReg(), address, /* width = */ sizeof(uintptr_t),
793 : type);
794 : }
795 : break;
796 :
797 : default:
798 0 : MOZ_CRASH();
799 : }
800 0 : }
801 :
// Explicit instantiations for the two addressing modes used by callers.
template void
MacroAssembler::storeUnboxedProperty(Address address, JSValueType type,
                                     const ConstantOrRegister& value, Label* failure);

template void
MacroAssembler::storeUnboxedProperty(BaseIndex address, JSValueType type,
                                     const ConstantOrRegister& value, Label* failure);
809 :
// Inlined version of gc::CheckAllocatorState that checks the bare essentials
// and bails for anything that cannot be handled with our jit allocators.
// Jumps to |fail| (taking the out-of-line allocation path) whenever the
// inline allocation fast path is not permitted.
void
MacroAssembler::checkAllocatorState(Label* fail)
{
    // Don't execute the inline path if we are tracing allocations. This is a
    // compile-time check, so the jump (when emitted) is unconditional.
    if (js::gc::gcTracer.traceEnabled())
        jump(fail);

#ifdef JS_GC_ZEAL
    // Don't execute the inline path if gc zeal or tracing are active.
    branch32(Assembler::NotEqual,
             AbsoluteAddress(GetJitContext()->runtime->addressOfGCZealModeBits()), Imm32(0),
             fail);
#endif

    // Don't execute the inline path if the realm has an object metadata callback,
    // as the metadata to use for the object may vary between executions of the op.
    if (GetJitContext()->realm->hasAllocationMetadataBuilder())
        jump(fail);
}
831 :
832 : bool
833 0 : MacroAssembler::shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap)
834 : {
835 : // Note that Ion elides barriers on writes to objects known to be in the
836 : // nursery, so any allocation that can be made into the nursery must be made
837 : // into the nursery, even if the nursery is disabled. At runtime these will
838 : // take the out-of-line path, which is required to insert a barrier for the
839 : // initializing writes.
840 0 : return IsNurseryAllocable(allocKind) && initialHeap != gc::TenuredHeap;
841 : }
842 :
// Inline version of Nursery::allocateObject. If the object has dynamic slots,
// this fills in the slots_ pointer. On success |result| holds the new cell;
// |temp| is clobbered. Jumps to |fail| when the nursery cannot satisfy the
// request (full, disabled, or slots too large).
void
MacroAssembler::nurseryAllocateObject(Register result, Register temp, gc::AllocKind allocKind,
                                      size_t nDynamicSlots, Label* fail)
{
    MOZ_ASSERT(IsNurseryAllocable(allocKind));

    // We still need to allocate in the nursery, per the comment in
    // shouldNurseryAllocate; however, we need to insert into the
    // mallocedBuffers set, so bail to do the nursery allocation in the
    // interpreter.
    if (nDynamicSlots >= Nursery::MaxNurseryBufferSize / sizeof(Value)) {
        jump(fail);
        return;
    }

    // No explicit check for nursery.isEnabled() is needed, as the comparison
    // with the nursery's end will always fail in such cases.
    CompileZone* zone = GetJitContext()->realm->zone();
    int thingSize = int(gc::Arena::thingSize(allocKind));
    // Object cell plus its dynamic slots are allocated as one contiguous chunk.
    int totalSize = thingSize + nDynamicSlots * sizeof(HeapSlot);
    MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
    // Bump-pointer allocation: reserve |totalSize| bytes at the current
    // position, bailing if that would pass the nursery's end.
    loadPtr(AbsoluteAddress(zone->addressOfNurseryPosition()), result);
    computeEffectiveAddress(Address(result, totalSize), temp);
    branchPtr(Assembler::Below, AbsoluteAddress(zone->addressOfNurseryCurrentEnd()), temp, fail);
    storePtr(temp, AbsoluteAddress(zone->addressOfNurseryPosition()));

    // Dynamic slots live immediately after the object; link them up.
    if (nDynamicSlots) {
        computeEffectiveAddress(Address(result, thingSize), temp);
        storePtr(temp, Address(result, NativeObject::offsetOfSlots()));
    }
}
876 :
// Inlined version of FreeSpan::allocate. This does not fill in slots_.
// Allocates a tenured cell of |allocKind| from the zone's free list into
// |result|; |temp| is clobbered. Jumps to |fail| when no free span remains.
void
MacroAssembler::freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
{
    CompileZone* zone = GetJitContext()->realm->zone();
    int thingSize = int(gc::Arena::thingSize(allocKind));

    Label fallback;
    Label success;

    // Load the first and last offsets of |zone|'s free list for |allocKind|.
    // If there is no room remaining in the span, fall back to get the next one.
    loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
    load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfFirst()), result);
    load16ZeroExtend(Address(temp, js::gc::FreeSpan::offsetOfLast()), temp);
    branch32(Assembler::AboveOrEqual, result, temp, &fallback);

    // Bump the offset for the next allocation.
    add32(Imm32(thingSize), result);
    loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
    store16(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
    sub32(Imm32(thingSize), result);
    addPtr(temp, result); // Turn the offset into a pointer.
    jump(&success);

    bind(&fallback);
    // If there are no free spans left, we bail to finish the allocation. The
    // interpreter will call the GC allocator to set up a new arena to allocate
    // from, after which we can resume allocating in the jit.
    branchTest32(Assembler::Zero, result, result, fail);
    loadPtr(AbsoluteAddress(zone->addressOfFreeList(allocKind)), temp);
    addPtr(temp, result); // Turn the offset into a pointer.
    // |result| is both the allocated cell and (before this span was used up)
    // the location of the next span's header; save it across the update.
    Push(result);
    // Update the free list to point to the next span (which may be empty).
    load32(Address(result, 0), result);
    store32(result, Address(temp, js::gc::FreeSpan::offsetOfFirst()));
    Pop(result);

    bind(&success);
}
917 :
void
MacroAssembler::callMallocStub(size_t nbytes, Register result, Label* fail)
{
    // Emits a call to the runtime's malloc stub to allocate |nbytes| of
    // malloc-heap memory. On return |result| holds the allocation; a null
    // return jumps to |fail|. The stub's argument registers are preserved
    // across the call unless one of them is |result| itself.

    // These registers must match the ones in JitRuntime::generateMallocStub.
    const Register regReturn = CallTempReg0;
    const Register regZone = CallTempReg0;
    const Register regNBytes = CallTempReg1;

    MOZ_ASSERT(nbytes > 0);
    MOZ_ASSERT(nbytes <= INT32_MAX);

    if (regZone != result)
        push(regZone);
    if (regNBytes != result)
        push(regNBytes);

    move32(Imm32(nbytes), regNBytes);
    movePtr(ImmPtr(GetJitContext()->realm->zone()), regZone);
    call(GetJitContext()->runtime->jitRuntime()->mallocStub());
    if (regReturn != result)
        movePtr(regReturn, result);

    // Pops mirror the pushes above, in reverse order.
    if (regNBytes != result)
        pop(regNBytes);
    if (regZone != result)
        pop(regZone);

    branchTest32(Assembler::Zero, result, result, fail);
}
947 :
948 : void
949 0 : MacroAssembler::callFreeStub(Register slots)
950 : {
951 : // This register must match the one in JitRuntime::generateFreeStub.
952 0 : const Register regSlots = CallTempReg0;
953 :
954 0 : push(regSlots);
955 0 : movePtr(slots, regSlots);
956 0 : call(GetJitContext()->runtime->jitRuntime()->freeStub());
957 0 : pop(regSlots);
958 0 : }
959 :
// Inlined equivalent of gc::AllocateObject, without failure case handling.
void
MacroAssembler::allocateObject(Register result, Register temp, gc::AllocKind allocKind,
                               uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail)
{
    MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));

    checkAllocatorState(fail);

    // Prefer the nursery when the alloc kind and requested heap allow it.
    if (shouldNurseryAllocate(allocKind, initialHeap)) {
        MOZ_ASSERT(initialHeap == gc::DefaultHeap);
        return nurseryAllocateObject(result, temp, allocKind, nDynamicSlots, fail);
    }

    // Tenured allocation with no dynamic slots is a plain free-list pop.
    if (!nDynamicSlots)
        return freeListAllocate(result, temp, allocKind, fail);

    // Only NativeObject can have nDynamicSlots > 0 and reach here.

    // Malloc the dynamic-slots buffer first; |temp| receives the buffer.
    callMallocStub(nDynamicSlots * sizeof(GCPtrValue), temp, fail);

    Label failAlloc;
    Label success;

    // Spill the slots pointer across the cell allocation, which clobbers |temp|.
    push(temp);
    freeListAllocate(result, temp, allocKind, &failAlloc);

    pop(temp);
    storePtr(temp, Address(result, NativeObject::offsetOfSlots()));

    jump(&success);

    bind(&failAlloc);
    // Cell allocation failed: free the slots buffer before bailing so it
    // does not leak.
    pop(temp);
    callFreeStub(temp);
    jump(fail);

    bind(&success);
}
999 :
1000 : void
1001 0 : MacroAssembler::createGCObject(Register obj, Register temp, const TemplateObject& templateObj,
1002 : gc::InitialHeap initialHeap, Label* fail, bool initContents)
1003 : {
1004 0 : gc::AllocKind allocKind = templateObj.getAllocKind();
1005 0 : MOZ_ASSERT(gc::IsObjectAllocKind(allocKind));
1006 :
1007 0 : uint32_t nDynamicSlots = 0;
1008 0 : if (templateObj.isNative()) {
1009 0 : const NativeTemplateObject& ntemplate = templateObj.asNativeTemplateObject();
1010 0 : nDynamicSlots = ntemplate.numDynamicSlots();
1011 :
1012 : // Arrays with copy on write elements do not need fixed space for an
1013 : // elements header. The template object, which owns the original
1014 : // elements, might have another allocation kind.
1015 0 : if (ntemplate.denseElementsAreCopyOnWrite())
1016 0 : allocKind = gc::AllocKind::OBJECT0_BACKGROUND;
1017 : }
1018 :
1019 0 : allocateObject(obj, temp, allocKind, nDynamicSlots, initialHeap, fail);
1020 0 : initGCThing(obj, temp, templateObj, initContents);
1021 0 : }
1022 :
1023 :
// Inlined equivalent of gc::AllocateNonObject, without failure case handling.
// Non-object allocation does not need to worry about slots, so can take a
// simpler path.
void
MacroAssembler::allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail)
{
    // Bail to |fail| when inline allocation is currently disallowed, then pop
    // a cell of |allocKind| off the tenured free list into |result|.
    checkAllocatorState(fail);
    freeListAllocate(result, temp, allocKind, fail);
}
1033 :
1034 : // Inline version of Nursery::allocateString.
1035 : void
1036 0 : MacroAssembler::nurseryAllocateString(Register result, Register temp, gc::AllocKind allocKind,
1037 : Label* fail)
1038 : {
1039 0 : MOZ_ASSERT(IsNurseryAllocable(allocKind));
1040 :
1041 : // No explicit check for nursery.isEnabled() is needed, as the comparison
1042 : // with the nursery's end will always fail in such cases.
1043 :
1044 0 : CompileZone* zone = GetJitContext()->realm->zone();
1045 0 : int thingSize = int(gc::Arena::thingSize(allocKind));
1046 0 : int totalSize = js::Nursery::stringHeaderSize() + thingSize;
1047 0 : MOZ_ASSERT(totalSize % gc::CellAlignBytes == 0);
1048 :
1049 : // The nursery position (allocation pointer) and the nursery end are stored
1050 : // very close to each other. In practice, the zone will probably be close
1051 : // (within 32 bits) as well. If so, use relative offsets between them, to
1052 : // avoid multiple 64-bit immediate loads.
1053 0 : auto nurseryPosAddr = intptr_t(zone->addressOfStringNurseryPosition());
1054 0 : auto nurseryEndAddr = intptr_t(zone->addressOfStringNurseryCurrentEnd());
1055 0 : auto zoneAddr = intptr_t(zone);
1056 :
1057 0 : intptr_t maxOffset = std::max(std::abs(nurseryPosAddr - zoneAddr),
1058 0 : std::abs(nurseryEndAddr - zoneAddr));
1059 0 : if (maxOffset < (1 << 31)) {
1060 0 : movePtr(ImmPtr(zone), temp); // temp holds the Zone pointer from here on.
1061 0 : loadPtr(Address(temp, nurseryPosAddr - zoneAddr), result);
1062 0 : addPtr(Imm32(totalSize), result); // result points past this allocation.
1063 0 : branchPtr(Assembler::Below, Address(temp, nurseryEndAddr - zoneAddr), result, fail);
1064 0 : storePtr(result, Address(temp, nurseryPosAddr - zoneAddr)); // Update position.
1065 0 : subPtr(Imm32(thingSize), result); // Point result at Cell data.
1066 0 : storePtr(temp, Address(result, -js::Nursery::stringHeaderSize())); // Store Zone*
1067 : } else {
1068 : // Otherwise, the zone is far from the nursery pointers. But the
1069 : // nursery pos/end pointers are still near each other.
1070 0 : movePtr(ImmPtr(zone->addressOfNurseryPosition()), temp);
1071 0 : loadPtr(Address(temp, 0), result);
1072 0 : addPtr(Imm32(totalSize), result);
1073 0 : branchPtr(Assembler::Below, Address(temp, nurseryEndAddr - nurseryPosAddr), result, fail);
1074 0 : storePtr(result, Address(temp, 0));
1075 0 : subPtr(Imm32(thingSize), result);
1076 0 : storePtr(ImmPtr(zone), Address(result, -js::Nursery::stringHeaderSize()));
1077 : }
1078 0 : }
1079 :
1080 : // Inlined equivalent of gc::AllocateString, jumping to fail if nursery
1081 : // allocation requested but unsuccessful.
1082 : void
1083 0 : MacroAssembler::allocateString(Register result, Register temp, gc::AllocKind allocKind,
1084 : gc::InitialHeap initialHeap, Label* fail)
1085 : {
1086 0 : MOZ_ASSERT(allocKind == gc::AllocKind::STRING || allocKind == gc::AllocKind::FAT_INLINE_STRING);
1087 :
1088 0 : checkAllocatorState(fail);
1089 :
1090 0 : if (shouldNurseryAllocate(allocKind, initialHeap)) {
1091 0 : MOZ_ASSERT(initialHeap == gc::DefaultHeap);
1092 0 : return nurseryAllocateString(result, temp, allocKind, fail);
1093 : }
1094 :
1095 0 : freeListAllocate(result, temp, allocKind, fail);
1096 : }
1097 :
1098 : void
1099 0 : MacroAssembler::newGCString(Register result, Register temp, Label* fail, bool attemptNursery)
1100 : {
1101 0 : allocateString(result, temp, js::gc::AllocKind::STRING,
1102 0 : attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
1103 0 : }
1104 :
1105 : void
1106 0 : MacroAssembler::newGCFatInlineString(Register result, Register temp, Label* fail, bool attemptNursery)
1107 : {
1108 0 : allocateString(result, temp, js::gc::AllocKind::FAT_INLINE_STRING,
1109 0 : attemptNursery ? gc::DefaultHeap : gc::TenuredHeap, fail);
1110 0 : }
1111 :
1112 : void
1113 0 : MacroAssembler::copySlotsFromTemplate(Register obj, const NativeTemplateObject& templateObj,
1114 : uint32_t start, uint32_t end)
1115 : {
1116 0 : uint32_t nfixed = Min(templateObj.numFixedSlots(), end);
1117 0 : for (unsigned i = start; i < nfixed; i++) {
1118 : // Template objects are not exposed to script and therefore immutable.
1119 : // However, regexp template objects are sometimes used directly (when
1120 : // the cloning is not observable), and therefore we can end up with a
1121 : // non-zero lastIndex. Detect this case here and just substitute 0, to
1122 : // avoid racing with the main thread updating this slot.
1123 0 : Value v;
1124 0 : if (templateObj.isRegExpObject() && i == RegExpObject::lastIndexSlot())
1125 0 : v = Int32Value(0);
1126 : else
1127 0 : v = templateObj.getSlot(i);
1128 0 : storeValue(v, Address(obj, NativeObject::getFixedSlotOffset(i)));
1129 : }
1130 0 : }
1131 :
void
MacroAssembler::fillSlotsWithConstantValue(Address base, Register temp,
                                           uint32_t start, uint32_t end, const Value& v)
{
    // Store |v| into each slot of the range [start, end). |base| already
    // points at slot |start|; it is taken by value so its offset can be
    // advanced in place. Only |undefined| and the uninitialized-lexical magic
    // value are supported.
    MOZ_ASSERT(v.isUndefined() || IsUninitializedLexical(v));

    if (start >= end)
        return;

#ifdef JS_NUNBOX32
    // We only have a single spare register, so do the initialization as two
    // strided writes of the tag and body.
    Address addr = base;
    move32(Imm32(v.toNunboxPayload()), temp);
    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
        store32(temp, ToPayload(addr));

    addr = base;
    move32(Imm32(v.toNunboxTag()), temp);
    for (unsigned i = start; i < end; ++i, addr.offset += sizeof(GCPtrValue))
        store32(temp, ToType(addr));
#else
    // On 64-bit platforms a boxed Value fits in a single register, so one
    // store per slot suffices.
    moveValue(v, ValueOperand(temp));
    for (uint32_t i = start; i < end; ++i, base.offset += sizeof(GCPtrValue))
        storePtr(temp, base);
#endif
}
1159 :
1160 : void
1161 0 : MacroAssembler::fillSlotsWithUndefined(Address base, Register temp, uint32_t start, uint32_t end)
1162 : {
1163 0 : fillSlotsWithConstantValue(base, temp, start, end, UndefinedValue());
1164 0 : }
1165 :
1166 : void
1167 0 : MacroAssembler::fillSlotsWithUninitialized(Address base, Register temp, uint32_t start, uint32_t end)
1168 : {
1169 0 : fillSlotsWithConstantValue(base, temp, start, end, MagicValue(JS_UNINITIALIZED_LEXICAL));
1170 0 : }
1171 :
1172 : static void
1173 0 : FindStartOfUninitializedAndUndefinedSlots(const NativeTemplateObject& templateObj, uint32_t nslots,
1174 : uint32_t* startOfUninitialized,
1175 : uint32_t* startOfUndefined)
1176 : {
1177 0 : MOZ_ASSERT(nslots == templateObj.slotSpan());
1178 0 : MOZ_ASSERT(nslots > 0);
1179 :
1180 : uint32_t first = nslots;
1181 0 : for (; first != 0; --first) {
1182 0 : if (templateObj.getSlot(first - 1) != UndefinedValue())
1183 : break;
1184 : }
1185 0 : *startOfUndefined = first;
1186 :
1187 0 : if (first != 0 && IsUninitializedLexical(templateObj.getSlot(first - 1))) {
1188 0 : for (; first != 0; --first) {
1189 0 : if (!IsUninitializedLexical(templateObj.getSlot(first - 1)))
1190 : break;
1191 : }
1192 0 : *startOfUninitialized = first;
1193 : } else {
1194 0 : *startOfUninitialized = *startOfUndefined;
1195 : }
1196 0 : }
1197 :
// Runtime helper called from jitcode (via callWithABI in initTypedArraySlots)
// to allocate and zero a typed array's element buffer for |count| elements.
// Out-of-range counts leave a null buffer and zero length, which the jitcode
// caller detects and turns into a bailout to the slow path.
static void
AllocateObjectBufferWithInit(JSContext* cx, TypedArrayObject* obj, int32_t count)
{
    AutoUnsafeCallWithABI unsafe;

    // Clear the buffer pointer up front so a failed allocation below leaves
    // the object in a consistent state.
    obj->initPrivate(nullptr);

    // Negative numbers or zero will bail out to the slow path, which in turn will raise
    // an invalid argument exception or create a correct object with zero elements.
    if (count <= 0 || uint32_t(count) >= INT32_MAX / obj->bytesPerElement()) {
        obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(0));
        return;
    }

    obj->setFixedSlot(TypedArrayObject::LENGTH_SLOT, Int32Value(count));
    size_t nbytes;

    // Compute the (overflow-checked) byte size for the element type.
    switch (obj->type()) {
#define CREATE_TYPED_ARRAY(T, N) \
      case Scalar::N: \
        MOZ_ALWAYS_TRUE(js::CalculateAllocSize<T>(count, &nbytes)); \
        break;
JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
#undef CREATE_TYPED_ARRAY
      default:
        MOZ_CRASH("Unsupported TypedArray type");
    }

    MOZ_ASSERT((CheckedUint32(nbytes) + sizeof(Value)).isValid());

    // Round up so the nursery buffer stays Value-aligned.
    nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
    void* buf = cx->nursery().allocateBuffer(obj, nbytes);
    if (buf) {
        obj->initPrivate(buf);
        memset(buf, 0, nbytes);
    }
}
1235 :
void
MacroAssembler::initTypedArraySlots(Register obj, Register temp, Register lengthReg,
                                    LiveRegisterSet liveRegs, Label* fail,
                                    TypedArrayObject* templateObj, TypedArrayLength lengthKind)
{
    // Set up the data pointer and zeroed element storage of the freshly
    // allocated typed array |obj|. Small fixed-length arrays keep their
    // elements inline after the object's slots; otherwise a buffer is
    // obtained through an ABI call to AllocateObjectBufferWithInit, and a
    // null result jumps to |fail|.
    MOZ_ASSERT(templateObj->hasPrivate());
    MOZ_ASSERT(!templateObj->hasBuffer());

    size_t dataSlotOffset = TypedArrayObject::dataOffset();
    // Inline element data starts in the slot right after the data slot.
    size_t dataOffset = TypedArrayObject::dataOffset() + sizeof(HeapSlot);

    static_assert(TypedArrayObject::FIXED_DATA_START == TypedArrayObject::DATA_SLOT + 1,
                  "fixed inline element data assumed to begin after the data slot");

    // Initialise data elements to zero.
    int32_t length = templateObj->length();
    size_t nbytes = length * templateObj->bytesPerElement();

    if (lengthKind == TypedArrayLength::Fixed && dataOffset + nbytes <= JSObject::MAX_BYTE_SIZE) {
        MOZ_ASSERT(dataOffset + nbytes <= templateObj->tenuredSizeOfThis());

        // Store data elements inside the remaining JSObject slots.
        computeEffectiveAddress(Address(obj, dataOffset), temp);
        storePtr(temp, Address(obj, dataSlotOffset));

        // Write enough zero pointers into fixed data to zero every
        // element. (This zeroes past the end of a byte count that's
        // not a multiple of pointer size. That's okay, because fixed
        // data is a count of 8-byte HeapSlots (i.e. <= pointer size),
        // and we won't inline unless the desired memory fits in that
        // space.)
        static_assert(sizeof(HeapSlot) == 8, "Assumed 8 bytes alignment");

        size_t numZeroPointers = ((nbytes + 7) & ~0x7) / sizeof(char *);
        for (size_t i = 0; i < numZeroPointers; i++)
            storePtr(ImmWord(0), Address(obj, dataOffset + i * sizeof(char *)));
#ifdef DEBUG
        // Tag zero-length arrays so debug code can recognize their (otherwise
        // unused) inline data pointer.
        if (nbytes == 0)
            store8(Imm32(TypedArrayObject::ZeroLengthArrayData), Address(obj, dataSlotOffset));
#endif
    } else {
        // For a fixed length the length must be materialized into |lengthReg|
        // before the ABI call; for a dynamic length it is already there.
        if (lengthKind == TypedArrayLength::Fixed)
            move32(Imm32(length), lengthReg);

        // Allocate a buffer on the heap to store the data elements.
        liveRegs.addUnchecked(temp);
        liveRegs.addUnchecked(obj);
        liveRegs.addUnchecked(lengthReg);
        PushRegsInMask(liveRegs);
        setupUnalignedABICall(temp);
        loadJSContext(temp);
        passABIArg(temp);
        passABIArg(obj);
        passABIArg(lengthReg);
        callWithABI(JS_FUNC_TO_DATA_PTR(void*, AllocateObjectBufferWithInit));
        PopRegsInMask(liveRegs);

        // Fail when data elements is set to NULL.
        branchPtr(Assembler::Equal, Address(obj, dataSlotOffset), ImmWord(0), fail);
    }
}
1297 :
void
MacroAssembler::initGCSlots(Register obj, Register temp, const NativeTemplateObject& templateObj,
                            bool initContents)
{
    // Initialize the fixed and dynamic slots of the freshly allocated |obj|
    // from |templateObj|. When |initContents| is false, the trailing fixed
    // slots (undefined/uninitialized runs) are skipped; dynamic slots are
    // always written.

    // Slots of non-array objects are required to be initialized.
    // Use the values currently in the template object.
    uint32_t nslots = templateObj.slotSpan();
    if (nslots == 0)
        return;

    uint32_t nfixed = templateObj.numUsedFixedSlots();
    uint32_t ndynamic = templateObj.numDynamicSlots();

    // Attempt to group slot writes such that we minimize the amount of
    // duplicated data we need to embed in code and load into registers. In
    // general, most template object slots will be undefined except for any
    // reserved slots. Since reserved slots come first, we split the object
    // logically into independent non-UndefinedValue writes to the head and
    // duplicated writes of UndefinedValue to the tail. For the majority of
    // objects, the "tail" will be the entire slot range.
    //
    // The template object may be a CallObject, in which case we need to
    // account for uninitialized lexical slots as well as undefined
    // slots. Unitialized lexical slots appears in CallObjects if the function
    // has parameter expressions, in which case closed over parameters have
    // TDZ. Uninitialized slots come before undefined slots in CallObjects.
    uint32_t startOfUninitialized = nslots;
    uint32_t startOfUndefined = nslots;
    FindStartOfUninitializedAndUndefinedSlots(templateObj, nslots,
                                              &startOfUninitialized, &startOfUndefined);
    MOZ_ASSERT(startOfUninitialized <= nfixed); // Reserved slots must be fixed.
    MOZ_ASSERT(startOfUndefined >= startOfUninitialized);
    MOZ_ASSERT_IF(!templateObj.isCallObject(), startOfUninitialized == startOfUndefined);

    // Copy over any preserved reserved slots.
    copySlotsFromTemplate(obj, templateObj, 0, startOfUninitialized);

    // Fill the rest of the fixed slots with undefined and uninitialized.
    if (initContents) {
        size_t offset = NativeObject::getFixedSlotOffset(startOfUninitialized);
        fillSlotsWithUninitialized(Address(obj, offset), temp,
                                   startOfUninitialized, Min(startOfUndefined, nfixed));

        offset = NativeObject::getFixedSlotOffset(startOfUndefined);
        fillSlotsWithUndefined(Address(obj, offset), temp,
                               startOfUndefined, nfixed);
    }

    if (ndynamic) {
        // We are short one register to do this elegantly. Borrow the obj
        // register briefly for our slots base address.
        push(obj);
        loadPtr(Address(obj, NativeObject::offsetOfSlots()), obj);

        // Fill uninitialized slots if necessary. Otherwise initialize all
        // slots to undefined.
        // (Dynamic slot indices below are relative to the slots buffer, hence
        // the |- nfixed| adjustments.)
        if (startOfUndefined > nfixed) {
            MOZ_ASSERT(startOfUninitialized != startOfUndefined);
            fillSlotsWithUninitialized(Address(obj, 0), temp, 0, startOfUndefined - nfixed);
            size_t offset = (startOfUndefined - nfixed) * sizeof(Value);
            fillSlotsWithUndefined(Address(obj, offset), temp, startOfUndefined - nfixed, ndynamic);
        } else {
            fillSlotsWithUndefined(Address(obj, 0), temp, 0, ndynamic);
        }

        pop(obj);
    }
}
1366 :
#ifdef JS_GC_TRACE
// ABI-callable wrapper that lets jitcode report a newly created object to
// the GC tracer.
static void
TraceCreateObject(JSObject *obj)
{
    AutoUnsafeCallWithABI unsafe;
    js::gc::gcTracer.traceCreateObject(obj);
}
#endif
1375 :
void
MacroAssembler::initGCThing(Register obj, Register temp, const TemplateObject& templateObj,
                            bool initContents)
{
    // Fast initialization of an empty object returned by allocateObject().
    // Writes group, shape, slots/elements pointers and initial contents per
    // the template's flavor (native, inline typed object, unboxed plain).

    storePtr(ImmGCPtr(templateObj.group()), Address(obj, JSObject::offsetOfGroup()));

    if (gc::Cell* shape = templateObj.maybeShape())
        storePtr(ImmGCPtr(shape), Address(obj, ShapedObject::offsetOfShape()));

    if (templateObj.isNative()) {
        const NativeTemplateObject& ntemplate = templateObj.asNativeTemplateObject();
        MOZ_ASSERT_IF(!ntemplate.denseElementsAreCopyOnWrite(), !ntemplate.hasDynamicElements());
        MOZ_ASSERT_IF(ntemplate.convertDoubleElements(), ntemplate.isArrayObject());

        // If the object has dynamic slots, the slots member has already been
        // filled in.
        if (!ntemplate.hasDynamicSlots())
            storePtr(ImmPtr(nullptr), Address(obj, NativeObject::offsetOfSlots()));

        if (ntemplate.denseElementsAreCopyOnWrite()) {
            // Share the template's copy-on-write elements directly.
            storePtr(ImmPtr(ntemplate.getDenseElements()),
                     Address(obj, NativeObject::offsetOfElements()));
        } else if (ntemplate.isArrayObject()) {
            int elementsOffset = NativeObject::offsetOfFixedElements();

            // Point the elements pointer at the inline fixed-element storage.
            computeEffectiveAddress(Address(obj, elementsOffset), temp);
            storePtr(temp, Address(obj, NativeObject::offsetOfElements()));

            // Fill in the elements header.
            store32(Imm32(ntemplate.getDenseCapacity()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfCapacity()));
            store32(Imm32(ntemplate.getDenseInitializedLength()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfInitializedLength()));
            store32(Imm32(ntemplate.getArrayLength()),
                    Address(obj, elementsOffset + ObjectElements::offsetOfLength()));
            store32(Imm32(ntemplate.convertDoubleElements()
                          ? ObjectElements::CONVERT_DOUBLE_ELEMENTS
                          : 0),
                    Address(obj, elementsOffset + ObjectElements::offsetOfFlags()));
            MOZ_ASSERT(!ntemplate.hasPrivate());
        } else if (ntemplate.isArgumentsObject()) {
            // The caller will initialize the reserved slots.
            MOZ_ASSERT(!initContents);
            MOZ_ASSERT(!ntemplate.hasPrivate());
            storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));
        } else {
            // If the target type could be a TypedArray that maps shared memory
            // then this would need to store emptyObjectElementsShared in that case.
            MOZ_ASSERT(!ntemplate.isSharedMemory());

            storePtr(ImmPtr(emptyObjectElements), Address(obj, NativeObject::offsetOfElements()));

            initGCSlots(obj, temp, ntemplate, initContents);

            if (ntemplate.hasPrivate() && !ntemplate.isTypedArrayObject()) {
                uint32_t nfixed = ntemplate.numFixedSlots();
                Address privateSlot(obj, NativeObject::getPrivateDataOffset(nfixed));
                if (ntemplate.isRegExpObject()) {
                    // RegExpObject stores a GC thing (RegExpShared*) in its
                    // private slot, so we have to use ImmGCPtr.
                    storePtr(ImmGCPtr(ntemplate.regExpShared()), privateSlot);
                } else {
                    storePtr(ImmPtr(ntemplate.getPrivate()), privateSlot);
                }
            }
        }
    } else if (templateObj.isInlineTypedObject()) {
        JS::AutoAssertNoGC nogc; // off-thread, so cannot GC
        size_t nbytes = templateObj.getInlineTypedObjectSize();
        const uint8_t* memory = templateObj.getInlineTypedObjectMem(nogc);

        // Memcpy the contents of the template object to the new object.
        // (One word at a time; the final partial word copies whole-word data.)
        size_t offset = 0;
        while (nbytes) {
            uintptr_t value = *(uintptr_t*)(memory + offset);
            storePtr(ImmWord(value),
                     Address(obj, InlineTypedObject::offsetOfDataStart() + offset));
            nbytes = (nbytes < sizeof(uintptr_t)) ? 0 : nbytes - sizeof(uintptr_t);
            offset += sizeof(uintptr_t);
        }
    } else if (templateObj.isUnboxedPlainObject()) {
        MOZ_ASSERT(!templateObj.unboxedObjectHasExpando());
        storePtr(ImmPtr(nullptr), Address(obj, UnboxedPlainObject::offsetOfExpando()));
        if (initContents)
            initUnboxedObjectContents(obj, templateObj.unboxedObjectLayout());
    } else {
        MOZ_CRASH("Unknown object");
    }

#ifdef JS_GC_TRACE
    // Report the new object to the GC tracer via an ABI call, preserving all
    // volatile registers around it.
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);

    regs.takeUnchecked(obj);
    Register temp2 = regs.takeAnyGeneral();

    setupUnalignedABICall(temp2);
    passABIArg(obj);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceCreateObject));

    PopRegsInMask(save);
#endif
}
1482 :
void
MacroAssembler::initUnboxedObjectContents(Register object, const UnboxedLayout& layout)
{
    // Initialize reference fields of the object, per UnboxedPlainObject::create.
    // The trace list is a sequence of -1-terminated runs of byte offsets: the
    // first run is seeded with the empty atom, the second is nulled.
    if (const int32_t* list = layout.traceList()) {
        while (*list != -1) {
            storePtr(ImmGCPtr(GetJitContext()->runtime->names().empty),
                     Address(object, UnboxedPlainObject::offsetOfData() + *list));
            list++;
        }
        list++;
        while (*list != -1) {
            storePtr(ImmWord(0),
                     Address(object, UnboxedPlainObject::offsetOfData() + *list));
            list++;
        }
        // Unboxed objects don't have Values to initialize.
        MOZ_ASSERT(*(list + 1) == -1);
    }
}
1503 :
void
MacroAssembler::compareStrings(JSOp op, Register left, Register right, Register result,
                               Label* fail)
{
    // Emit an inline equality comparison of two strings, setting |result| to
    // the boolean outcome of |op| (one of ==/!=/===/!==). Cases that need a
    // character comparison in the VM — distinct non-atom strings of equal
    // length — jump to |fail|.
    MOZ_ASSERT(left != result);
    MOZ_ASSERT(right != result);
    MOZ_ASSERT(IsEqualityOp(op));

    Label done;
    Label notPointerEqual;
    // Fast path for identical strings.
    branchPtr(Assembler::NotEqual, left, right, &notPointerEqual);
    move32(Imm32(op == JSOP_EQ || op == JSOP_STRICTEQ), result);
    jump(&done);

    bind(&notPointerEqual);

    Label notAtom;
    // Optimize the equality operation to a pointer compare for two atoms.
    // (Atoms are deduplicated, so distinct atom pointers imply distinct
    // string contents.)
    Imm32 nonAtomBit(JSString::NON_ATOM_BIT);
    branchTest32(Assembler::NonZero, Address(left, JSString::offsetOfFlags()), nonAtomBit, &notAtom);
    branchTest32(Assembler::NonZero, Address(right, JSString::offsetOfFlags()), nonAtomBit, &notAtom);

    cmpPtrSet(JSOpToCondition(MCompare::Compare_String, op), left, right, result);
    jump(&done);

    bind(&notAtom);
    // Strings of different length can never be equal.
    // Equal lengths require comparing characters, so bail to |fail| then.
    loadStringLength(left, result);
    branch32(Assembler::Equal, Address(right, JSString::offsetOfLength()), result, fail);
    move32(Imm32(op == JSOP_NE || op == JSOP_STRICTNE), result);

    bind(&done);
}
1538 :
void
MacroAssembler::loadStringChars(Register str, Register dest, CharEncoding encoding)
{
    // Load a pointer to the character storage of the linear string |str| into
    // |dest|, handling both inline and out-of-line chars. Under Spectre
    // mitigations, a string of the wrong kind/encoding yields a (near-)null
    // pointer instead of a usable address.
    MOZ_ASSERT(str != dest);

    if (JitOptions.spectreStringMitigations) {
        if (encoding == CharEncoding::Latin1) {
            // If the string is a rope, zero the |str| register. The code below
            // depends on str->flags so this should block speculative execution.
            movePtr(ImmWord(0), dest);
            test32MovePtr(Assembler::Zero,
                          Address(str, JSString::offsetOfFlags()), Imm32(JSString::LINEAR_BIT),
                          dest, str);
        } else {
            // If we're loading TwoByte chars, there's an additional risk:
            // if the string has Latin1 chars, we could read out-of-bounds. To
            // prevent this, we check both the Linear and Latin1 bits. We don't
            // have a scratch register, so we use these flags also to block
            // speculative execution, similar to the use of 0 above.
            MOZ_ASSERT(encoding == CharEncoding::TwoByte);
            static constexpr uint32_t Mask = JSString::LINEAR_BIT | JSString::LATIN1_CHARS_BIT;
            static_assert(Mask < 1024,
                          "Mask should be a small, near-null value to ensure we "
                          "block speculative execution when it's used as string "
                          "pointer");
            move32(Imm32(Mask), dest);
            and32(Address(str, JSString::offsetOfFlags()), dest);
            cmp32MovePtr(Assembler::NotEqual, dest, Imm32(JSString::LINEAR_BIT),
                         dest, str);
        }
    }

    // Load the inline chars.
    computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);

    // If it's not an inline string, load the non-inline chars. Use a
    // conditional move to prevent speculative execution.
    test32LoadPtr(Assembler::Zero,
                  Address(str, JSString::offsetOfFlags()), Imm32(JSString::INLINE_CHARS_BIT),
                  Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1580 :
void
MacroAssembler::loadNonInlineStringChars(Register str, Register dest, CharEncoding encoding)
{
    // Load |str|'s out-of-line character pointer into |dest|. The caller is
    // expected to know the string is linear, non-inline, and of |encoding|;
    // under Spectre mitigations any violation yields a near-null pointer
    // instead.
    MOZ_ASSERT(str != dest);

    if (JitOptions.spectreStringMitigations) {
        // If the string is a rope, has inline chars, or has a different
        // character encoding, set str to a near-null value to prevent
        // speculative execution below (when reading str->nonInlineChars).

        static constexpr uint32_t Mask =
            JSString::LINEAR_BIT |
            JSString::INLINE_CHARS_BIT |
            JSString::LATIN1_CHARS_BIT;
        static_assert(Mask < 1024,
                      "Mask should be a small, near-null value to ensure we "
                      "block speculative execution when it's used as string "
                      "pointer");

        // Expected flag pattern: linear, plus the Latin1 bit iff the caller
        // asked for Latin1 chars.
        uint32_t expectedBits = JSString::LINEAR_BIT;
        if (encoding == CharEncoding::Latin1)
            expectedBits |= JSString::LATIN1_CHARS_BIT;

        move32(Imm32(Mask), dest);
        and32(Address(str, JSString::offsetOfFlags()), dest);

        cmp32MovePtr(Assembler::NotEqual, dest, Imm32(expectedBits),
                     dest, str);
    }

    loadPtr(Address(str, JSString::offsetOfNonInlineChars()), dest);
}
1613 :
1614 : void
1615 0 : MacroAssembler::storeNonInlineStringChars(Register chars, Register str)
1616 : {
1617 0 : MOZ_ASSERT(chars != str);
1618 0 : storePtr(chars, Address(str, JSString::offsetOfNonInlineChars()));
1619 0 : }
1620 :
1621 : void
1622 0 : MacroAssembler::loadInlineStringCharsForStore(Register str, Register dest)
1623 : {
1624 0 : computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);
1625 0 : }
1626 :
1627 : void
1628 0 : MacroAssembler::loadInlineStringChars(Register str, Register dest, CharEncoding encoding)
1629 : {
1630 0 : MOZ_ASSERT(str != dest);
1631 :
1632 0 : if (JitOptions.spectreStringMitigations) {
1633 : // Making this Spectre-safe is a bit complicated: using
1634 : // computeEffectiveAddress and then zeroing the output register if
1635 : // non-inline is not sufficient: when the index is very large, it would
1636 : // allow reading |nullptr + index|. Just fall back to loadStringChars
1637 : // for now.
1638 0 : loadStringChars(str, dest, encoding);
1639 : } else {
1640 0 : computeEffectiveAddress(Address(str, JSInlineString::offsetOfInlineStorage()), dest);
1641 : }
1642 0 : }
1643 :
1644 : void
1645 0 : MacroAssembler::loadRopeLeftChild(Register str, Register dest)
1646 : {
1647 0 : MOZ_ASSERT(str != dest);
1648 :
1649 0 : if (JitOptions.spectreStringMitigations) {
1650 : // Zero the output register if the input was not a rope.
1651 0 : movePtr(ImmWord(0), dest);
1652 0 : test32LoadPtr(Assembler::Zero,
1653 0 : Address(str, JSString::offsetOfFlags()), Imm32(JSString::LINEAR_BIT),
1654 0 : Address(str, JSRope::offsetOfLeft()), dest);
1655 : } else {
1656 0 : loadPtr(Address(str, JSRope::offsetOfLeft()), dest);
1657 : }
1658 0 : }
1659 :
1660 : void
1661 0 : MacroAssembler::storeRopeChildren(Register left, Register right, Register str)
1662 : {
1663 0 : storePtr(left, Address(str, JSRope::offsetOfLeft()));
1664 0 : storePtr(right, Address(str, JSRope::offsetOfRight()));
1665 0 : }
1666 :
1667 : void
1668 0 : MacroAssembler::loadDependentStringBase(Register str, Register dest)
1669 : {
1670 0 : MOZ_ASSERT(str != dest);
1671 :
1672 0 : if (JitOptions.spectreStringMitigations) {
1673 : // If the string does not have a base-string, zero the |str| register.
1674 : // The code below loads str->base so this should block speculative
1675 : // execution.
1676 0 : movePtr(ImmWord(0), dest);
1677 0 : test32MovePtr(Assembler::Zero,
1678 0 : Address(str, JSString::offsetOfFlags()), Imm32(JSString::HAS_BASE_BIT),
1679 0 : dest, str);
1680 : }
1681 :
1682 0 : loadPtr(Address(str, JSDependentString::offsetOfBase()), dest);
1683 0 : }
1684 :
1685 : void
1686 0 : MacroAssembler::leaNewDependentStringBase(Register str, Register dest)
1687 : {
1688 0 : MOZ_ASSERT(str != dest);
1689 :
1690 : // Spectre-safe because this is a newly allocated dependent string, thus we
1691 : // are certain of its type and the type of its base field.
1692 0 : computeEffectiveAddress(Address(str, JSDependentString::offsetOfBase()), dest);
1693 0 : }
1694 :
1695 : void
1696 0 : MacroAssembler::storeDependentStringBase(Register base, Register str)
1697 : {
1698 0 : storePtr(base, Address(str, JSDependentString::offsetOfBase()));
1699 0 : }
1700 :
// Load the code unit at |index| of string |str| into |output|, zero-extended.
// Jumps to |fail| when the character cannot be fetched inline: the index is
// out of the left child's range of a rope, or the left child is itself a
// rope. |scratch| is clobbered with the chars pointer.
void
MacroAssembler::loadStringChar(Register str, Register index, Register output, Register scratch,
                               Label* fail)
{
    MOZ_ASSERT(str != output);
    MOZ_ASSERT(str != index);
    MOZ_ASSERT(index != output);
    MOZ_ASSERT(output != scratch);

    movePtr(str, output);

    // This follows JSString::getChar.
    Label notRope;
    branchIfNotRope(str, &notRope);

    // For a rope, descend into the left child.
    loadRopeLeftChild(str, output);

    // Check if the index is contained in the leftChild.
    // Todo: Handle index in the rightChild.
    spectreBoundsCheck32(index, Address(output, JSString::offsetOfLength()), scratch, fail);

    // If the left side is another rope, give up.
    branchIfRope(output, fail);

    bind(&notRope);

    Label isLatin1, done;
    // We have to check the left/right side for ropes,
    // because a TwoByte rope might have a Latin1 child.
    branchLatin1String(output, &isLatin1);
    loadStringChars(output, scratch, CharEncoding::TwoByte);
    load16ZeroExtend(BaseIndex(scratch, index, TimesTwo), output);
    jump(&done);

    bind(&isLatin1);
    loadStringChars(output, scratch, CharEncoding::Latin1);
    load8ZeroExtend(BaseIndex(scratch, index, TimesOne), output);

    bind(&done);
}
1741 :
// Extract the cached integer index value from string |str| into |dest|.
// Jumps to |fail| when the string has no cached index (INDEX_VALUE_BIT
// clear); on the fail path |dest| holds the raw flags word.
void
MacroAssembler::loadStringIndexValue(Register str, Register dest, Label* fail)
{
    MOZ_ASSERT(str != dest);

    load32(Address(str, JSString::offsetOfFlags()), dest);

    // Does not have a cached index value.
    branchTest32(Assembler::Zero, dest, Imm32(JSString::INDEX_VALUE_BIT), fail);

    // Extract the index.
    rshift32(Imm32(JSString::INDEX_VALUE_SHIFT), dest);
}
1755 :
// Classify object |obj| for |typeof|: jump to |isCallable| ("function"),
// |isUndefined| (class emulates undefined), or |isObject| ("object").
// Proxies are punted to |slow|. |scratch| is clobbered (holds the Class,
// then its cOps pointer).
void
MacroAssembler::typeOfObject(Register obj, Register scratch, Label* slow,
                             Label* isObject, Label* isCallable, Label* isUndefined)
{
    loadObjClassUnsafe(obj, scratch);

    // Proxies can emulate undefined and have complex isCallable behavior.
    branchTestClassIsProxy(true, scratch, slow);

    // JSFunctions are always callable.
    branchPtr(Assembler::Equal, scratch, ImmPtr(&JSFunction::class_), isCallable);

    // Objects that emulate undefined.
    Address flags(scratch, Class::offsetOfFlags());
    branchTest32(Assembler::NonZero, flags, Imm32(JSCLASS_EMULATES_UNDEFINED), isUndefined);

    // Handle classes with a call hook: no cOps, or a null cOps->call,
    // means plain object; otherwise it is callable.
    branchPtr(Assembler::Equal, Address(scratch, offsetof(js::Class, cOps)), ImmPtr(nullptr),
              isObject);

    loadPtr(Address(scratch, offsetof(js::Class, cOps)), scratch);
    branchPtr(Assembler::Equal, Address(scratch, offsetof(js::ClassOps, call)), ImmPtr(nullptr),
              isObject);

    jump(isCallable);
}
1782 :
1783 : void
1784 0 : MacroAssembler::loadJSContext(Register dest)
1785 : {
1786 0 : JitContext* jcx = GetJitContext();
1787 0 : movePtr(ImmPtr(jcx->runtime->mainContextPtr()), dest);
1788 0 : }
1789 :
// Jump to |fail| when |group| has a TypeNewScript addendum whose
// preliminaryObjects list is null (i.e. its new-script analysis already
// ran). Groups without a new-script addendum fall through. |scratch| is
// clobbered.
void
MacroAssembler::guardGroupHasUnanalyzedNewScript(Register group, Register scratch, Label* fail)
{
    Label noNewScript;
    load32(Address(group, ObjectGroup::offsetOfFlags()), scratch);
    and32(Imm32(OBJECT_FLAG_ADDENDUM_MASK), scratch);
    branch32(Assembler::NotEqual, scratch,
             Imm32(uint32_t(ObjectGroup::Addendum_NewScript) << OBJECT_FLAG_ADDENDUM_SHIFT),
             &noNewScript);

    // Guard group->newScript()->preliminaryObjects is non-nullptr.
    loadPtr(Address(group, ObjectGroup::offsetOfAddendum()), scratch);
    branchPtr(Assembler::Equal,
              Address(scratch, TypeNewScript::offsetOfPreliminaryObjects()),
              ImmWord(0), fail);

    bind(&noNewScript);
}
1808 :
// Out-of-line ABI target used by generateBailoutTail to report a
// stack-overflow error on the context after an over-recursed bailout.
static void
BailoutReportOverRecursed(JSContext* cx)
{
    ReportOverRecursed(cx);
}
1814 :
1815 : void
1816 0 : MacroAssembler::generateBailoutTail(Register scratch, Register bailoutInfo)
1817 : {
1818 0 : loadJSContext(scratch);
1819 0 : enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
1820 :
1821 0 : Label baseline;
1822 :
1823 : // The return value from Bailout is tagged as:
1824 : // - 0x0: done (enter baseline)
1825 : // - 0x1: error (handle exception)
1826 : // - 0x2: overrecursed
1827 : JS_STATIC_ASSERT(BAILOUT_RETURN_OK == 0);
1828 : JS_STATIC_ASSERT(BAILOUT_RETURN_FATAL_ERROR == 1);
1829 : JS_STATIC_ASSERT(BAILOUT_RETURN_OVERRECURSED == 2);
1830 :
1831 0 : branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_OK), &baseline);
1832 0 : branch32(Equal, ReturnReg, Imm32(BAILOUT_RETURN_FATAL_ERROR), exceptionLabel());
1833 :
1834 : // Fall-through: overrecursed.
1835 : {
1836 0 : loadJSContext(ReturnReg);
1837 0 : setupUnalignedABICall(scratch);
1838 0 : passABIArg(ReturnReg);
1839 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, BailoutReportOverRecursed), MoveOp::GENERAL,
1840 0 : CheckUnsafeCallWithABI::DontCheckHasExitFrame);
1841 0 : jump(exceptionLabel());
1842 : }
1843 :
1844 0 : bind(&baseline);
1845 : {
1846 : // Prepare a register set for use in this case.
1847 0 : AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
1848 0 : MOZ_ASSERT_IF(!IsHiddenSP(getStackPointer()), !regs.has(AsRegister(getStackPointer())));
1849 0 : regs.take(bailoutInfo);
1850 :
1851 : // Reset SP to the point where clobbering starts.
1852 0 : loadStackPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, incomingStack)));
1853 :
1854 0 : Register copyCur = regs.takeAny();
1855 0 : Register copyEnd = regs.takeAny();
1856 0 : Register temp = regs.takeAny();
1857 :
1858 : // Copy data onto stack.
1859 0 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackTop)), copyCur);
1860 0 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, copyStackBottom)), copyEnd);
1861 : {
1862 0 : Label copyLoop;
1863 0 : Label endOfCopy;
1864 0 : bind(©Loop);
1865 0 : branchPtr(Assembler::BelowOrEqual, copyCur, copyEnd, &endOfCopy);
1866 0 : subPtr(Imm32(4), copyCur);
1867 0 : subFromStackPtr(Imm32(4));
1868 0 : load32(Address(copyCur, 0), temp);
1869 0 : store32(temp, Address(getStackPointer(), 0));
1870 0 : jump(©Loop);
1871 0 : bind(&endOfCopy);
1872 : }
1873 :
1874 : // Enter exit frame for the FinishBailoutToBaseline call.
1875 0 : loadPtr(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)), temp);
1876 0 : load32(Address(temp, BaselineFrame::reverseOffsetOfFrameSize()), temp);
1877 0 : makeFrameDescriptor(temp, JitFrame_BaselineJS, ExitFrameLayout::Size());
1878 0 : push(temp);
1879 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1880 : // No GC things to mark on the stack, push a bare token.
1881 0 : loadJSContext(scratch);
1882 0 : enterFakeExitFrame(scratch, scratch, ExitFrameType::Bare);
1883 :
1884 : // If monitorStub is non-null, handle resumeAddr appropriately.
1885 0 : Label noMonitor;
1886 0 : Label done;
1887 0 : branchPtr(Assembler::Equal,
1888 0 : Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)),
1889 : ImmPtr(nullptr),
1890 0 : &noMonitor);
1891 :
1892 : //
1893 : // Resuming into a monitoring stub chain.
1894 : //
1895 : {
1896 : // Save needed values onto stack temporarily.
1897 0 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
1898 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
1899 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1900 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, monitorStub)));
1901 :
1902 : // Call a stub to free allocated memory and create arguments objects.
1903 0 : setupUnalignedABICall(temp);
1904 0 : passABIArg(bailoutInfo);
1905 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline),
1906 0 : MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
1907 0 : branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
1908 :
1909 : // Restore values where they need to be and resume execution.
1910 0 : AllocatableGeneralRegisterSet enterMonRegs(GeneralRegisterSet::All());
1911 0 : enterMonRegs.take(R0);
1912 0 : enterMonRegs.take(ICStubReg);
1913 0 : enterMonRegs.take(BaselineFrameReg);
1914 0 : enterMonRegs.takeUnchecked(ICTailCallReg);
1915 :
1916 0 : pop(ICStubReg);
1917 0 : pop(ICTailCallReg);
1918 0 : pop(BaselineFrameReg);
1919 0 : popValue(R0);
1920 :
1921 : // Discard exit frame.
1922 0 : addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
1923 :
1924 : #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
1925 0 : push(ICTailCallReg);
1926 : #endif
1927 0 : jump(Address(ICStubReg, ICStub::offsetOfStubCode()));
1928 : }
1929 :
1930 : //
1931 : // Resuming into main jitcode.
1932 : //
1933 0 : bind(&noMonitor);
1934 : {
1935 : // Save needed values onto stack temporarily.
1936 0 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR0)));
1937 0 : pushValue(Address(bailoutInfo, offsetof(BaselineBailoutInfo, valueR1)));
1938 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeFramePtr)));
1939 0 : push(Address(bailoutInfo, offsetof(BaselineBailoutInfo, resumeAddr)));
1940 :
1941 : // Call a stub to free allocated memory and create arguments objects.
1942 0 : setupUnalignedABICall(temp);
1943 0 : passABIArg(bailoutInfo);
1944 0 : callWithABI(JS_FUNC_TO_DATA_PTR(void*, FinishBailoutToBaseline),
1945 0 : MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckHasExitFrame);
1946 0 : branchTest32(Zero, ReturnReg, ReturnReg, exceptionLabel());
1947 :
1948 : // Restore values where they need to be and resume execution.
1949 0 : AllocatableGeneralRegisterSet enterRegs(GeneralRegisterSet::All());
1950 0 : enterRegs.take(R0);
1951 0 : enterRegs.take(R1);
1952 0 : enterRegs.take(BaselineFrameReg);
1953 0 : Register jitcodeReg = enterRegs.takeAny();
1954 :
1955 0 : pop(jitcodeReg);
1956 0 : pop(BaselineFrameReg);
1957 0 : popValue(R1);
1958 0 : popValue(R0);
1959 :
1960 : // Discard exit frame.
1961 0 : addToStackPtr(Imm32(ExitFrameLayout::SizeWithFooter()));
1962 :
1963 0 : jump(jitcodeReg);
1964 : }
1965 : }
1966 0 : }
1967 :
// Debug-only: assert that |frameType| is one of the frame types allowed to
// precede a rectifier frame. No code is emitted in release builds.
void
MacroAssembler::assertRectifierFrameParentType(Register frameType)
{
#ifdef DEBUG
    {
        // Check the possible previous frame types here.
        Label checkOk;
        branch32(Assembler::Equal, frameType, Imm32(JitFrame_IonJS), &checkOk);
        branch32(Assembler::Equal, frameType, Imm32(JitFrame_BaselineStub), &checkOk);
        branch32(Assembler::Equal, frameType, Imm32(JitFrame_WasmToJSJit), &checkOk);
        branch32(Assembler::Equal, frameType, Imm32(JitFrame_CppToJSJit), &checkOk);
        assumeUnreachable("Unrecognized frame type preceding RectifierFrame.");
        bind(&checkOk);
    }
#endif
}
1984 :
1985 : void
1986 0 : MacroAssembler::loadJitCodeRaw(Register func, Register dest)
1987 : {
1988 0 : loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
1989 0 : loadPtr(Address(dest, JSScript::offsetOfJitCodeRaw()), dest);
1990 0 : }
1991 :
1992 : void
1993 0 : MacroAssembler::loadJitCodeNoArgCheck(Register func, Register dest)
1994 : {
1995 0 : loadPtr(Address(func, JSFunction::offsetOfScript()), dest);
1996 0 : loadPtr(Address(dest, JSScript::offsetOfJitCodeSkipArgCheck()), dest);
1997 0 : }
1998 :
1999 : void
2000 0 : MacroAssembler::loadBaselineFramePtr(Register framePtr, Register dest)
2001 : {
2002 0 : if (framePtr != dest)
2003 0 : movePtr(framePtr, dest);
2004 0 : subPtr(Imm32(BaselineFrame::Size()), dest);
2005 0 : }
2006 :
2007 : void
2008 0 : MacroAssembler::handleFailure()
2009 : {
2010 : // Re-entry code is irrelevant because the exception will leave the
2011 : // running function and never come back
2012 0 : TrampolinePtr excTail = GetJitContext()->runtime->jitRuntime()->getExceptionTail();
2013 0 : jump(excTail);
2014 0 : }
2015 :
#ifdef DEBUG
// Out-of-line ABI target for MacroAssembler::assumeUnreachable: report the
// assertion message; the caller emits a breakpoint right after the call.
static void
AssumeUnreachable_(const char* output) {
    MOZ_ReportAssertionFailure(output, __FILE__, __LINE__);
}
#endif
2022 :
// Emit code that reports |output| as an assertion failure (debug,
// non-wasm builds only) and then traps via a breakpoint. All volatile
// registers are saved and restored around the ABI call.
void
MacroAssembler::assumeUnreachable(const char* output)
{
#ifdef DEBUG
    if (!IsCompilingWasm()) {
        AllocatableRegisterSet regs(RegisterSet::Volatile());
        LiveRegisterSet save(regs.asLiveSet());
        PushRegsInMask(save);
        Register temp = regs.takeAnyGeneral();

        setupUnalignedABICall(temp);
        movePtr(ImmPtr(output), temp);
        passABIArg(temp);
        callWithABI(JS_FUNC_TO_DATA_PTR(void*, AssumeUnreachable_),
                    MoveOp::GENERAL,
                    CheckUnsafeCallWithABI::DontCheckOther);

        PopRegsInMask(save);
    }
#endif

    breakpoint();
}
2046 :
// Debug-only: assert (via assumeUnreachable) that testing |value| for Int32
// under |cond| succeeds. Emits nothing in release builds.
template<typename T>
void
MacroAssembler::assertTestInt32(Condition cond, const T& value, const char* output)
{
#ifdef DEBUG
    Label ok;
    branchTestInt32(cond, value, &ok);
    assumeUnreachable(output);
    bind(&ok);
#endif
}

template void MacroAssembler::assertTestInt32(Condition, const Address&, const char*);
2060 :
// Out-of-line ABI target for MacroAssembler::printf(const char*): print the
// string as-is to stderr.
static void
Printf0_(const char* output)
{
    AutoUnsafeCallWithABI unsafe;

    // Use stderr instead of stdout because this is only used for debug
    // output. stderr is less likely to interfere with the program's normal
    // output, and it's always unbuffered.
    fprintf(stderr, "%s", output);
}
2071 :
// Emit a debug printf of the literal string |output| (no format
// arguments). All volatile registers are preserved around the call.
void
MacroAssembler::printf(const char* output)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf0_));

    PopRegsInMask(save);
}
2088 :
// Out-of-line ABI target for MacroAssembler::printf(const char*, Register):
// format |value| through |output| and print to stderr. Crashes (rather than
// reporting) on OOM since this is debug-only code.
static void
Printf1_(const char* output, uintptr_t value)
{
    AutoUnsafeCallWithABI unsafe;
    AutoEnterOOMUnsafeRegion oomUnsafe;
    js::UniqueChars line = JS_sprintf_append(nullptr, output, value);
    if (!line)
        oomUnsafe.crash("OOM at masm.printf");
    fprintf(stderr, "%s", line.get());
}
2099 :
// Emit a debug printf of format string |output| with the runtime value in
// |value| as its single argument. All volatile registers are preserved.
void
MacroAssembler::printf(const char* output, Register value)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);

    // Keep |value| live; it must survive until it is passed as the second
    // ABI argument.
    regs.takeUnchecked(value);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    movePtr(ImmPtr(output), temp);
    passABIArg(temp);
    passABIArg(value);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, Printf1_));

    PopRegsInMask(save);
}
2119 :
2120 : #ifdef JS_TRACE_LOGGING
// Emit a TraceLogger "start event" call for the compile-time constant
// |textId|. Emits nothing when the id is disabled, unless |force| is set.
// Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStartId(Register logger, uint32_t textId, bool force)
{
    if (!force && !TraceLogTextIdEnabled(textId))
        return;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
}
2143 :
// Emit a TraceLogger "start event" call with a runtime text id held in
// register |textId|. Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStartId(Register logger, Register textId)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStartEventPrivate), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
}
2163 :
// Emit a TraceLogger "start event" call with a TraceLoggerEvent* held in
// register |event|. The local function-reference binding pins the intended
// TraceLogStartEvent overload for JS_FUNC_TO_DATA_PTR.
void
MacroAssembler::tracelogStartEvent(Register logger, Register event)
{
    void (&TraceLogFunc)(TraceLoggerThread*, const TraceLoggerEvent&) = TraceLogStartEvent;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(event);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(event);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogFunc), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
}
2185 :
// Emit a TraceLogger "stop event" call for the compile-time constant
// |textId|. Emits nothing when the id is disabled, unless |force| is set.
// Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStopId(Register logger, uint32_t textId, bool force)
{
    if (!force && !TraceLogTextIdEnabled(textId))
        return;

    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    move32(Imm32(textId), temp);
    passABIArg(temp);

    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
}
2209 :
// Emit a TraceLogger "stop event" call with a runtime text id held in
// register |textId|. Volatile registers are preserved around the ABI call.
void
MacroAssembler::tracelogStopId(Register logger, Register textId)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    PushRegsInMask(save);
    regs.takeUnchecked(logger);
    regs.takeUnchecked(textId);

    Register temp = regs.takeAnyGeneral();

    setupUnalignedABICall(temp);
    passABIArg(logger);
    passABIArg(textId);
    callWithABI(JS_FUNC_TO_DATA_PTR(void*, TraceLogStopEventPrivate), MoveOp::GENERAL,
                CheckUnsafeCallWithABI::DontCheckOther);

    PopRegsInMask(save);
}
2229 : #endif
2230 :
// If the Value at |address| is an Int32, convert it in place to a boxed
// Double; otherwise branch to |done|. |scratch| and ScratchDoubleReg are
// clobbered. Note: the caller binds |done| — every path through here jumps
// to or falls toward it.
void
MacroAssembler::convertInt32ValueToDouble(const Address& address, Register scratch, Label* done)
{
    branchTestInt32(Assembler::NotEqual, address, done);
    unboxInt32(address, scratch);
    convertInt32ToDouble(scratch, ScratchDoubleReg);
    storeDouble(ScratchDoubleReg, address);
}
2239 :
// If |val| holds an Int32, rebox it in place as a Double; non-Int32 values
// are left untouched. Uses val's scratch register and ScratchDoubleReg.
void
MacroAssembler::convertInt32ValueToDouble(ValueOperand val)
{
    Label done;
    branchTestInt32(Assembler::NotEqual, val, &done);
    unboxInt32(val, val.scratchReg());
    convertInt32ToDouble(val.scratchReg(), ScratchDoubleReg);
    boxDouble(ScratchDoubleReg, val, ScratchDoubleReg);
    bind(&done);
}
2250 :
// Convert the boxed Value in |value| to a double or float32 in |output|
// (per |outputType|). Double, Int32, Boolean, Null and Undefined are
// handled inline (null -> +0, undefined -> NaN); any other tag jumps to
// |fail|.
void
MacroAssembler::convertValueToFloatingPoint(ValueOperand value, FloatRegister output,
                                            Label* fail, MIRType outputType)
{
    Label isDouble, isInt32, isBool, isNull, done;

    {
        ScratchTagScope tag(*this, value);
        splitTagForTest(value, tag);

        branchTestDouble(Assembler::Equal, tag, &isDouble);
        branchTestInt32(Assembler::Equal, tag, &isInt32);
        branchTestBoolean(Assembler::Equal, tag, &isBool);
        branchTestNull(Assembler::Equal, tag, &isNull);
        branchTestUndefined(Assembler::NotEqual, tag, fail);
    }

    // fall-through: undefined
    loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
    jump(&done);

    bind(&isNull);
    loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
    jump(&done);

    bind(&isBool);
    boolValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isInt32);
    int32ValueToFloatingPoint(value, output, outputType);
    jump(&done);

    bind(&isDouble);
    // When narrowing to float32 on hardware where the single register
    // aliases the double, unbox into the scratch double first.
    FloatRegister tmp = output.asDouble();
    if (outputType == MIRType::Float32 && hasMultiAlias())
        tmp = ScratchDoubleReg;

    unboxDouble(value, tmp);
    if (outputType == MIRType::Float32)
        convertDoubleToFloat32(tmp, output);

    bind(&done);
}
2295 :
// Compile-time counterpart of the ValueOperand overload: |v| is a constant,
// so emit a single constant load (or a jump to |fail| for objects/symbols).
// Returns false only on OOM-style failure from StringToNumber.
bool
MacroAssembler::convertValueToFloatingPoint(JSContext* cx, const Value& v, FloatRegister output,
                                            Label* fail, MIRType outputType)
{
    if (v.isNumber() || v.isString()) {
        double d;
        if (v.isNumber())
            d = v.toNumber();
        else if (!StringToNumber(cx, v.toString(), &d))
            return false;

        loadConstantFloatingPoint(d, (float)d, output, outputType);
        return true;
    }

    if (v.isBoolean()) {
        if (v.toBoolean())
            loadConstantFloatingPoint(1.0, 1.0f, output, outputType);
        else
            loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        return true;
    }

    if (v.isNull()) {
        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        return true;
    }

    if (v.isUndefined()) {
        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
        return true;
    }

    // Objects and symbols cannot be converted here; defer to the fail path.
    MOZ_ASSERT(v.isObject() || v.isSymbol());
    jump(fail);
    return true;
}
2333 :
2334 : bool
2335 0 : MacroAssembler::convertConstantOrRegisterToFloatingPoint(JSContext* cx,
2336 : const ConstantOrRegister& src,
2337 : FloatRegister output, Label* fail,
2338 : MIRType outputType)
2339 : {
2340 0 : if (src.constant())
2341 0 : return convertValueToFloatingPoint(cx, src.value(), output, fail, outputType);
2342 :
2343 0 : convertTypedOrValueToFloatingPoint(src.reg(), output, fail, outputType);
2344 0 : return true;
2345 : }
2346 :
// Convert |src| to a floating-point value in |output| (per |outputType|).
// Boxed values delegate to convertValueToFloatingPoint; typed registers are
// handled per-type, with object/string/symbol jumping to |fail|.
void
MacroAssembler::convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
                                                   Label* fail, MIRType outputType)
{
    MOZ_ASSERT(IsFloatingPointType(outputType));

    if (src.hasValue()) {
        convertValueToFloatingPoint(src.valueReg(), output, fail, outputType);
        return;
    }

    bool outputIsDouble = outputType == MIRType::Double;
    switch (src.type()) {
      case MIRType::Null:
        loadConstantFloatingPoint(0.0, 0.0f, output, outputType);
        break;
      case MIRType::Boolean:
      case MIRType::Int32:
        convertInt32ToFloatingPoint(src.typedReg().gpr(), output, outputType);
        break;
      case MIRType::Float32:
        if (outputIsDouble) {
            convertFloat32ToDouble(src.typedReg().fpu(), output);
        } else {
            if (src.typedReg().fpu() != output)
                moveFloat32(src.typedReg().fpu(), output);
        }
        break;
      case MIRType::Double:
        if (outputIsDouble) {
            if (src.typedReg().fpu() != output)
                moveDouble(src.typedReg().fpu(), output);
        } else {
            convertDoubleToFloat32(src.typedReg().fpu(), output);
        }
        break;
      case MIRType::Object:
      case MIRType::String:
      case MIRType::Symbol:
        jump(fail);
        break;
      case MIRType::Undefined:
        loadConstantFloatingPoint(GenericNaN(), float(GenericNaN()), output, outputType);
        break;
      default:
        MOZ_CRASH("Bad MIRType");
    }
}
2395 :
// Out-of-line slow path for double->int32 truncation: call the runtime's
// ToInt32 via the ABI and store the result in |dest|. When
// |widenFloatToDouble|, |src| is first widened (spilling the single
// register on x86/x64 so it can be restored afterwards). Wasm uses its own
// ABI setup and symbolic callee.
void
MacroAssembler::outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
                                      bool compilingWasm, wasm::BytecodeOffset callOffset)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    if (widenFloatToDouble) {
        convertFloat32ToDouble(src, ScratchDoubleReg);
        src = ScratchDoubleReg;
    }
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    FloatRegister srcSingle;
    if (widenFloatToDouble) {
        MOZ_ASSERT(src.isSingle());
        srcSingle = src;
        src = src.asDouble();
        Push(srcSingle);
        convertFloat32ToDouble(srcSingle, src);
    }
#else
    // Also see below
    MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif

    MOZ_ASSERT(src.isDouble());

    if (compilingWasm) {
        setupWasmABICall();
        passABIArg(src, MoveOp::DOUBLE);
        callWithABI(callOffset, wasm::SymbolicAddress::ToInt32);
    } else {
        setupUnalignedABICall(dest);
        passABIArg(src, MoveOp::DOUBLE);
        callWithABI(mozilla::BitwiseCast<void*, int32_t(*)(double)>(JS::ToInt32),
                    MoveOp::GENERAL, CheckUnsafeCallWithABI::DontCheckOther);
    }
    storeCallInt32Result(dest);

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
    defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    // Nothing
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    if (widenFloatToDouble)
        Pop(srcSingle);
#else
    MOZ_CRASH("MacroAssembler platform hook: outOfLineTruncateSlow");
#endif
}
2444 :
// Convert the double in |src| to an int32 in |output| according to
// |behavior|: exact conversion (optionally rejecting -0), modular
// truncation (slow path via |truncateFail| when provided, else |fail|), or
// clamping to the uint8 range.
void
MacroAssembler::convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
                                   Label* truncateFail, Label* fail,
                                   IntConversionBehavior behavior)
{
    switch (behavior) {
      case IntConversionBehavior::Normal:
      case IntConversionBehavior::NegativeZeroCheck:
        convertDoubleToInt32(src, output, fail, behavior == IntConversionBehavior::NegativeZeroCheck);
        break;
      case IntConversionBehavior::Truncate:
        branchTruncateDoubleMaybeModUint32(src, output, truncateFail ? truncateFail : fail);
        break;
      case IntConversionBehavior::ClampToUint8:
        // Clamping clobbers the input register, so use a temp.
        moveDouble(src, temp);
        clampDoubleToUint8(temp, output);
        break;
    }
}
2465 :
// Convert the boxed Value in |value| to an int32 in |output|.
//
// |behavior| selects exact conversion, truncation, or uint8 clamping;
// |conversion| restricts which input tags are accepted. Strings are only
// handled when truncating/clamping AND both out-of-line labels are given:
// the string is unboxed into |stringReg| and control is handed to
// |handleStringEntry|, which re-enters at |handleStringRejoin| with the
// numeric result in |temp|. |maybeInput|, when non-null, lets
// maybeBranchTestType skip tag tests the MIR type rules out.
void
MacroAssembler::convertValueToInt(ValueOperand value, MDefinition* maybeInput,
                                  Label* handleStringEntry, Label* handleStringRejoin,
                                  Label* truncateDoubleSlow,
                                  Register stringReg, FloatRegister temp, Register output,
                                  Label* fail, IntConversionBehavior behavior,
                                  IntConversionInputKind conversion)
{
    Label done, isInt32, isBool, isDouble, isNull, isString;

    bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                          behavior == IntConversionBehavior::ClampToUint8) &&
                         handleStringEntry &&
                         handleStringRejoin;

    MOZ_ASSERT_IF(handleStrings, conversion == IntConversionInputKind::Any);

    {
        ScratchTagScope tag(*this, value);
        splitTagForTest(value, tag);

        maybeBranchTestType(MIRType::Int32, maybeInput, tag, &isInt32);
        if (conversion == IntConversionInputKind::Any || conversion == IntConversionInputKind::NumbersOrBoolsOnly)
            maybeBranchTestType(MIRType::Boolean, maybeInput, tag, &isBool);
        maybeBranchTestType(MIRType::Double, maybeInput, tag, &isDouble);

        if (conversion == IntConversionInputKind::Any) {
            // If we are not truncating, we fail for anything that's not
            // null. Otherwise we might be able to handle strings and objects.
            switch (behavior) {
              case IntConversionBehavior::Normal:
              case IntConversionBehavior::NegativeZeroCheck:
                branchTestNull(Assembler::NotEqual, tag, fail);
                break;

              case IntConversionBehavior::Truncate:
              case IntConversionBehavior::ClampToUint8:
                maybeBranchTestType(MIRType::Null, maybeInput, tag, &isNull);
                if (handleStrings)
                    maybeBranchTestType(MIRType::String, maybeInput, tag, &isString);
                maybeBranchTestType(MIRType::Object, maybeInput, tag, fail);
                branchTestUndefined(Assembler::NotEqual, tag, fail);
                break;
            }
        } else {
            // Only numbers (and possibly booleans) accepted: anything else
            // fails.
            jump(fail);
        }
    }

    // The value is null or undefined in truncation contexts - just emit 0.
    if (isNull.used())
        bind(&isNull);
    mov(ImmWord(0), output);
    jump(&done);

    // Try converting a string into a double, then jump to the double case.
    if (handleStrings) {
        bind(&isString);
        unboxString(value, stringReg);
        jump(handleStringEntry);
    }

    // Try converting double into integer.
    if (isDouble.used() || handleStrings) {
        if (isDouble.used()) {
            bind(&isDouble);
            unboxDouble(value, temp);
        }

        if (handleStrings)
            bind(handleStringRejoin);

        convertDoubleToInt(temp, output, temp, truncateDoubleSlow, fail, behavior);
        jump(&done);
    }

    // Just unbox a bool, the result is 0 or 1.
    if (isBool.used()) {
        bind(&isBool);
        unboxBoolean(value, output);
        jump(&done);
    }

    // Integers can be unboxed.
    if (isInt32.used()) {
        bind(&isInt32);
        unboxInt32(value, output);
        if (behavior == IntConversionBehavior::ClampToUint8)
            clampIntToUint8(output);
    }

    bind(&done);
}
2559 :
// Compile-time counterpart of the ValueOperand overload: |v| is a constant,
// so emit a single move32 (or a jump to |fail|). Strings are folded only
// under truncate/clamp behaviors, matching the runtime path. Returns false
// only when StringToNumber fails.
bool
MacroAssembler::convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
                                  IntConversionBehavior behavior)
{
    bool handleStrings = (behavior == IntConversionBehavior::Truncate ||
                          behavior == IntConversionBehavior::ClampToUint8);

    if (v.isNumber() || (handleStrings && v.isString())) {
        double d;
        if (v.isNumber())
            d = v.toNumber();
        else if (!StringToNumber(cx, v.toString(), &d))
            return false;

        switch (behavior) {
          case IntConversionBehavior::Normal:
          case IntConversionBehavior::NegativeZeroCheck: {
            // -0 is checked anyways if we have a constant value.
            int i;
            if (mozilla::NumberIsInt32(d, &i))
                move32(Imm32(i), output);
            else
                jump(fail);
            break;
          }
          case IntConversionBehavior::Truncate:
            move32(Imm32(ToInt32(d)), output);
            break;
          case IntConversionBehavior::ClampToUint8:
            move32(Imm32(ClampDoubleToUint8(d)), output);
            break;
        }

        return true;
    }

    if (v.isBoolean()) {
        move32(Imm32(v.toBoolean() ? 1 : 0), output);
        return true;
    }

    if (v.isNull() || v.isUndefined()) {
        move32(Imm32(0), output);
        return true;
    }

    // Objects and symbols always take the fail path.
    MOZ_ASSERT(v.isObject() || v.isSymbol());

    jump(fail);
    return true;
}
2611 :
2612 : bool
2613 0 : MacroAssembler::convertConstantOrRegisterToInt(JSContext* cx,
2614 : const ConstantOrRegister& src,
2615 : FloatRegister temp, Register output,
2616 : Label* fail, IntConversionBehavior behavior)
2617 : {
2618 0 : if (src.constant())
2619 0 : return convertValueToInt(cx, src.value(), output, fail, behavior);
2620 :
2621 0 : convertTypedOrValueToInt(src.reg(), temp, output, fail, behavior);
2622 0 : return true;
2623 : }
2624 :
// Convert a typed register (or boxed Value) to an int32 in |output|,
// dispatching on the statically-known MIRType. |temp| is only needed for
// the floating-point paths; String/Symbol/Object always branch to |fail|.
void
MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp,
                                         Register output, Label* fail,
                                         IntConversionBehavior behavior)
{
    // Boxed values take the fully dynamic path.
    if (src.hasValue()) {
        convertValueToInt(src.valueReg(), temp, output, fail, behavior);
        return;
    }

    switch (src.type()) {
      case MIRType::Undefined:
      case MIRType::Null:
        move32(Imm32(0), output);
        break;
      case MIRType::Boolean:
      case MIRType::Int32:
        if (src.typedReg().gpr() != output)
            move32(src.typedReg().gpr(), output);
        // Booleans are already 0/1 and never need clamping; only a true
        // Int32 can exceed the uint8 range.
        if (src.type() == MIRType::Int32 && behavior == IntConversionBehavior::ClampToUint8)
            clampIntToUint8(output);
        break;
      case MIRType::Double:
        convertDoubleToInt(src.typedReg().fpu(), output, temp, nullptr, fail, behavior);
        break;
      case MIRType::Float32:
        // Conversion to Double simplifies implementation at the expense of performance.
        convertFloat32ToDouble(src.typedReg().fpu(), temp);
        convertDoubleToInt(temp, output, temp, nullptr, fail, behavior);
        break;
      case MIRType::String:
      case MIRType::Symbol:
      case MIRType::Object:
        jump(fail);
        break;
      default:
        MOZ_CRASH("Bad MIRType");
    }
}
2664 :
// Finalize code generation: emit the shared failure path (if any code
// jumped to it) and run the platform-specific finish step.
void
MacroAssembler::finish()
{
    if (failureLabel_.used()) {
        bind(&failureLabel_);
        handleFailure();
    }

    MacroAssemblerSpecific::finish();

    MOZ_RELEASE_ASSERT(size() <= MaxCodeBytesPerProcess,
                       "AssemblerBuffer should ensure we don't exceed MaxCodeBytesPerProcess");

    // bytesNeeded() can exceed size() (extra pools/padding), so this check is
    // not redundant with the release assert above.
    if (bytesNeeded() > MaxCodeBytesPerProcess)
        setOOM();
}
2681 :
// Resolve code-address-dependent patches now that the final JitCode
// allocation is known. Must not be called on an OOM'd assembler.
void
MacroAssembler::link(JitCode* code)
{
    MOZ_ASSERT(!oom());
    linkProfilerCallSites(code);
}
2688 :
// RAII emitter: records the current code address as the "last profiling call
// site" in the active JitActivation, so the profiler can attribute the
// upcoming call. No-op unless profiling instrumentation is enabled.
MacroAssembler::AutoProfilerCallInstrumentation::AutoProfilerCallInstrumentation(
    MacroAssembler& masm
    MOZ_GUARD_OBJECT_NOTIFIER_PARAM_IN_IMPL)
{
    MOZ_GUARD_OBJECT_NOTIFIER_INIT;
    if (!masm.emitProfilingInstrumentation_)
        return;

    // Preserve the two scratch registers around the instrumentation.
    Register reg = CallTempReg0;
    Register reg2 = CallTempReg1;
    masm.push(reg);
    masm.push(reg2);

    // Load a placeholder (-1) that linkProfilerCallSites later patches to the
    // real code address of this site.
    CodeOffset label = masm.movWithPatch(ImmWord(uintptr_t(-1)), reg);
    masm.loadJSContext(reg2);
    masm.loadPtr(Address(reg2, offsetof(JSContext, profilingActivation_)), reg2);
    masm.storePtr(reg, Address(reg2, JitActivation::offsetOfLastProfilingCallSite()));

    masm.appendProfilerCallSite(label);

    masm.pop(reg2);
    masm.pop(reg);
}
2712 :
2713 : void
2714 0 : MacroAssembler::linkProfilerCallSites(JitCode* code)
2715 : {
2716 0 : for (size_t i = 0; i < profilerCallSites_.length(); i++) {
2717 0 : CodeOffset offset = profilerCallSites_[i];
2718 0 : CodeLocationLabel location(code, offset);
2719 0 : PatchDataWithValueCheck(location, ImmPtr(location.raw()), ImmPtr((void*)-1));
2720 : }
2721 0 : }
2722 :
// Dynamically align the stack pointer for a JIT call whose argument count is
// only known at runtime (in |nargs|). See the static-count overload below
// for the compile-time variant of the same computation.
void
MacroAssembler::alignJitStackBasedOnNArgs(Register nargs)
{
    if (JitStackValueAlignment == 1)
        return;

    // A JitFrameLayout is composed of the following:
    // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
    //
    // We want to ensure that the |raddr| address is aligned.
    // Which implies that we want to ensure that |this| is aligned.
    static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                  "No need to consider the JitFrameLayout for aligning the stack");

    // Which implies that |argN| is aligned if |nargs| is even, and offset by
    // |sizeof(Value)| if |nargs| is odd.
    MOZ_ASSERT(JitStackValueAlignment == 2);

    // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
    // aligned if |nargs| is odd.

    // if (nargs % 2 == 0) {
    //     if (sp % JitStackAlignment == 0)
    //         sp -= sizeof(Value);
    //     MOZ_ASSERT(sp % JitStackAlignment == JitStackAlignment - sizeof(Value));
    // } else {
    //     sp = sp & ~(JitStackAlignment - 1);
    // }
    Label odd, end;
    Label* maybeAssert = &end;
#ifdef DEBUG
    // In debug builds, route the already-misaligned even case through an
    // extra label so the alignment assertion below covers it too.
    Label assert;
    maybeAssert = &assert;
#endif
    assertStackAlignment(sizeof(Value), 0);
    branchTestPtr(Assembler::NonZero, nargs, Imm32(1), &odd);
    branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), maybeAssert);
    subFromStackPtr(Imm32(sizeof(Value)));
#ifdef DEBUG
    bind(&assert);
#endif
    assertStackAlignment(JitStackAlignment, sizeof(Value));
    jump(&end);
    bind(&odd);
    andToStackPtr(Imm32(~(JitStackAlignment - 1)));
    bind(&end);
}
2770 :
// Statically align the stack pointer for a JIT call with a compile-time
// argument count; only the parity of |nargs| matters, so one of the two
// branches is resolved here rather than emitted.
void
MacroAssembler::alignJitStackBasedOnNArgs(uint32_t nargs)
{
    if (JitStackValueAlignment == 1)
        return;

    // A JitFrameLayout is composed of the following:
    // [padding?] [argN] .. [arg1] [this] [[argc] [callee] [descr] [raddr]]
    //
    // We want to ensure that the |raddr| address is aligned.
    // Which implies that we want to ensure that |this| is aligned.
    static_assert(sizeof(JitFrameLayout) % JitStackAlignment == 0,
                  "No need to consider the JitFrameLayout for aligning the stack");

    // Which implies that |argN| is aligned if |nargs| is even, and offset by
    // |sizeof(Value)| if |nargs| is odd.
    MOZ_ASSERT(JitStackValueAlignment == 2);

    // Thus the |padding| is offset by |sizeof(Value)| if |nargs| is even, and
    // aligned if |nargs| is odd.

    assertStackAlignment(sizeof(Value), 0);
    if (nargs % 2 == 0) {
        // Even arg count: skip the padding only if sp is already misaligned
        // by one Value slot.
        Label end;
        branchTestStackPtr(Assembler::NonZero, Imm32(JitStackAlignment - 1), &end);
        subFromStackPtr(Imm32(sizeof(Value)));
        bind(&end);
        assertStackAlignment(JitStackAlignment, sizeof(Value));
    } else {
        // Odd arg count: round sp down to the alignment boundary.
        andToStackPtr(Imm32(~(JitStackAlignment - 1)));
    }
}
2803 :
2804 : // ===============================================================
2805 :
// Construct a MacroAssembler that sets up its own JitContext and allocator
// from |cx| (used outside of an active compilation).
MacroAssembler::MacroAssembler(JSContext* cx)
  : framePushed_(0),
#ifdef DEBUG
    inCall_(false),
#endif
    emitProfilingInstrumentation_(false)
{
    // Install a fresh JitContext with no TempAllocator; alloc_ then provides
    // the allocator which the emplaced context's |temp| refers to.
    jitContext_.emplace(cx, (js::jit::TempAllocator*)nullptr);
    alloc_.emplace(cx);
    moveResolver_.setAllocator(*jitContext_->temp);
#if defined(JS_CODEGEN_ARM)
    initWithAllocator();
    m_buffer.id = GetJitContext()->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
    initWithAllocator();
    armbuffer_.id = GetJitContext()->getNextAssemblerId();
#endif
}
2824 :
// Construct a MacroAssembler within an already-active JitContext, creating a
// backing allocator only if the context does not supply one.
MacroAssembler::MacroAssembler()
  : framePushed_(0),
#ifdef DEBUG
    inCall_(false),
#endif
    emitProfilingInstrumentation_(false)
{
    JitContext* jcx = GetJitContext();

    // Without a context-provided TempAllocator, emplace our own; jcx->temp is
    // expected to be valid by the time it is used below.
    if (!jcx->temp) {
        JSContext* cx = jcx->cx;
        MOZ_ASSERT(cx);
        alloc_.emplace(cx);
    }

    moveResolver_.setAllocator(*jcx->temp);

#if defined(JS_CODEGEN_ARM)
    initWithAllocator();
    m_buffer.id = jcx->getNextAssemblerId();
#elif defined(JS_CODEGEN_ARM64)
    initWithAllocator();
    armbuffer_.id = jcx->getNextAssemblerId();
#endif
}
2850 :
// Construct a MacroAssembler for wasm compilation: no JitContext is set up
// and the caller-supplied TempAllocator backs the move resolver.
MacroAssembler::MacroAssembler(WasmToken, TempAllocator& alloc)
  : framePushed_(0),
#ifdef DEBUG
    inCall_(false),
#endif
    emitProfilingInstrumentation_(false)
{
    moveResolver_.setAllocator(alloc);

#if defined(JS_CODEGEN_ARM)
    initWithAllocator();
    m_buffer.id = 0;
#elif defined(JS_CODEGEN_ARM64)
    initWithAllocator();
    // Stubs + builtins + the baseline compiler all require the native SP,
    // not the PSP.
    SetStackPointer64(sp);
    armbuffer_.id = 0;
#endif
}
2871 :
// IC-codegen hook: build an out-of-line fake exit frame. The live-register
// bookkeeping in |save| is unused on this platform's implementation.
bool
MacroAssembler::icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save)
{
    return buildOOLFakeExitFrame(fakeReturnAddr);
}
2877 :
#ifndef JS_CODEGEN_ARM64
// Shrink the stack by a dynamic byte count. ARM64 provides its own
// pseudo-stack-pointer-aware definition elsewhere.
void
MacroAssembler::subFromStackPtr(Register reg)
{
    subPtr(reg, getStackPointer());
}
#endif // JS_CODEGEN_ARM64
2885 :
2886 : //{{{ check_macroassembler_style
2887 : // ===============================================================
2888 : // Stack manipulation functions.
2889 :
// Convenience overload: push a GPR-only set (no float registers).
void
MacroAssembler::PushRegsInMask(LiveGeneralRegisterSet set)
{
    PushRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2895 :
// Pop a register set with no ignored registers.
void
MacroAssembler::PopRegsInMask(LiveRegisterSet set)
{
    PopRegsInMaskIgnore(set, LiveRegisterSet());
}
2901 :
// Convenience overload: pop a GPR-only set (no float registers).
void
MacroAssembler::PopRegsInMask(LiveGeneralRegisterSet set)
{
    PopRegsInMask(LiveRegisterSet(set.set(), FloatRegisterSet()));
}
2907 :
// Push a jsid. GC-thing ids must be pushed via ImmGCPtr so the GC can trace
// the embedded pointer; |scratchReg| is only clobbered for symbol ids, which
// need their type tag re-applied at runtime.
void
MacroAssembler::Push(jsid id, Register scratchReg)
{
    if (JSID_IS_GCTHING(id)) {
        // If we're pushing a gcthing, then we can't just push the tagged jsid
        // value since the GC won't have any idea that the push instruction
        // carries a reference to a gcthing. Need to unpack the pointer,
        // push it using ImmGCPtr, and then rematerialize the id at runtime.

        if (JSID_IS_STRING(id)) {
            JSString* str = JSID_TO_STRING(id);
            MOZ_ASSERT(((size_t)str & JSID_TYPE_MASK) == 0);
            static_assert(JSID_TYPE_STRING == 0,
                          "need to orPtr JSID_TYPE_STRING tag if it's not 0");
            Push(ImmGCPtr(str));
        } else {
            MOZ_ASSERT(JSID_IS_SYMBOL(id));
            JS::Symbol* sym = JSID_TO_SYMBOL(id);
            movePtr(ImmGCPtr(sym), scratchReg);
            orPtr(Imm32(JSID_TYPE_SYMBOL), scratchReg);
            Push(scratchReg);
        }
    } else {
        // Non-gcthing ids (ints, void) are plain bit patterns.
        Push(ImmWord(JSID_BITS(id)));
    }
}
2934 :
// Push a typed-or-value register as a boxed Value. Float32 is widened to
// double first (via the scratch double register) so the boxed form is
// canonical.
void
MacroAssembler::Push(TypedOrValueRegister v)
{
    if (v.hasValue()) {
        Push(v.valueReg());
    } else if (IsFloatingPointType(v.type())) {
        FloatRegister reg = v.typedReg().fpu();
        if (v.type() == MIRType::Float32) {
            convertFloat32ToDouble(reg, ScratchDoubleReg);
            reg = ScratchDoubleReg;
        }
        Push(reg);
    } else {
        // Non-float typed registers are boxed with the matching JSValueType tag.
        Push(ValueTypeFromMIRType(v.type()), v.typedReg().gpr());
    }
}
2951 :
2952 : void
2953 0 : MacroAssembler::Push(const ConstantOrRegister& v)
2954 : {
2955 0 : if (v.constant())
2956 0 : Push(v.value());
2957 : else
2958 0 : Push(v.reg());
2959 0 : }
2960 :
// Push a boxed Value from register(s), tracking the frame size.
void
MacroAssembler::Push(const ValueOperand& val)
{
    pushValue(val);
    framePushed_ += sizeof(Value);
}
2967 :
// Push a constant Value, tracking the frame size.
void
MacroAssembler::Push(const Value& val)
{
    pushValue(val);
    framePushed_ += sizeof(Value);
}
2974 :
// Push a payload register boxed with the given type tag, tracking the frame
// size.
void
MacroAssembler::Push(JSValueType type, Register reg)
{
    pushValue(type, reg);
    framePushed_ += sizeof(Value);
}
2981 :
// Push a Value loaded from memory. The address must not be stack-pointer
// relative, since the push itself moves the stack pointer.
void
MacroAssembler::PushValue(const Address& addr)
{
    MOZ_ASSERT(addr.base != getStackPointer());
    pushValue(addr);
    framePushed_ += sizeof(Value);
}
2989 :
// Push a safely-traceable "empty" slot for an out-param Handle of the given
// root kind: null for GC-pointer roots, undefined for Values, JSID_VOID for
// ids. Paired with popRooted below.
void
MacroAssembler::PushEmptyRooted(VMFunction::RootType rootType)
{
    switch (rootType) {
      case VMFunction::RootNone:
        MOZ_CRASH("Handle must have root type");
      case VMFunction::RootObject:
      case VMFunction::RootString:
      case VMFunction::RootFunction:
      case VMFunction::RootCell:
        Push(ImmPtr(nullptr));
        break;
      case VMFunction::RootValue:
        Push(UndefinedValue());
        break;
      case VMFunction::RootId:
        Push(ImmWord(JSID_BITS(JSID_VOID)));
        break;
    }
}
3010 :
// Pop the slot pushed by PushEmptyRooted into the appropriate register:
// |cellReg| for pointer-sized roots (including ids), |valueReg| for Values.
void
MacroAssembler::popRooted(VMFunction::RootType rootType, Register cellReg,
                          const ValueOperand& valueReg)
{
    switch (rootType) {
      case VMFunction::RootNone:
        MOZ_CRASH("Handle must have root type");
      case VMFunction::RootObject:
      case VMFunction::RootString:
      case VMFunction::RootFunction:
      case VMFunction::RootCell:
      case VMFunction::RootId:
        Pop(cellReg);
        break;
      case VMFunction::RootValue:
        Pop(valueReg);
        break;
    }
}
3030 :
3031 : void
3032 0 : MacroAssembler::adjustStack(int amount)
3033 : {
3034 0 : if (amount > 0)
3035 0 : freeStack(amount);
3036 0 : else if (amount < 0)
3037 0 : reserveStack(-amount);
3038 0 : }
3039 :
// Release |amount| bytes of previously-reserved stack and update the frame
// bookkeeping; cannot free more than was pushed.
void
MacroAssembler::freeStack(uint32_t amount)
{
    MOZ_ASSERT(amount <= framePushed_);
    if (amount)
        addToStackPtr(Imm32(amount));
    framePushed_ -= amount;
}
3048 :
// Release a dynamic number of bytes. Note: unlike the immediate overload,
// this does NOT update framePushed_ — the amount is unknown at compile time,
// so callers are responsible for any frame-size bookkeeping.
void
MacroAssembler::freeStack(Register amount)
{
    addToStackPtr(amount);
}
3054 :
3055 : // ===============================================================
3056 : // ABI function calls.
3057 :
// Common setup shared by setupAlignedABICall/setupWasmABICall: reset the
// per-call state (argument generator, simulator signature, debug in-call
// flag) and apply platform-specific ABI configuration.
void
MacroAssembler::setupABICall()
{
#ifdef DEBUG
    MOZ_ASSERT(!inCall_);
    inCall_ = true;
#endif

#ifdef JS_SIMULATOR
    signature_ = 0;
#endif

    // Reinitialize the ABIArg generator.
    abiArgs_ = ABIArgGenerator();

#if defined(JS_CODEGEN_ARM)
    // On ARM, we need to know what ABI we are using, either in the
    // simulator, or based on the configure flags.
#if defined(JS_SIMULATOR_ARM)
    abiArgs_.setUseHardFp(UseHardFpABI());
#elif defined(JS_CODEGEN_ARM_HARDFP)
    abiArgs_.setUseHardFp(true);
#else
    abiArgs_.setUseHardFp(false);
#endif
#endif

#if defined(JS_CODEGEN_MIPS32)
    // On MIPS, the system ABI use general registers pairs to encode double
    // arguments, after one or 2 integer-like arguments. Unfortunately, the
    // Lowering phase is not capable to express it at the moment. So we enforce
    // the system ABI here.
    abiArgs_.enforceO32ABI();
#endif
}
3093 :
// Begin an ABI call from wasm code. Wasm frames are kept aligned by
// construction, so no dynamic alignment is needed.
void
MacroAssembler::setupWasmABICall()
{
    MOZ_ASSERT(IsCompilingWasm(), "non-wasm should use setupAlignedABICall");
    setupABICall();

#if defined(JS_CODEGEN_ARM)
    // The builtin thunk does the FP -> GPR moving on soft-FP, so
    // use hard fp unconditionally.
    abiArgs_.setUseHardFp(true);
#endif
    dynamicAlignment_ = false;
}
3107 :
// Begin an ABI call from JIT code whose stack is already known to be
// aligned (no dynamic re-alignment emitted).
void
MacroAssembler::setupAlignedABICall()
{
    MOZ_ASSERT(!IsCompilingWasm(), "wasm should use setupWasmABICall");
    setupABICall();
    dynamicAlignment_ = false;

#if defined(JS_CODEGEN_ARM64)
    MOZ_CRASH("Not supported on arm64");
#endif
}
3119 :
// Queue one ABI argument: ask the platform ABIArg generator where |from|
// must end up (register or stack slot) and record the move for the resolver.
// Must be bracketed by setup*ABICall()/callWithABI*().
void
MacroAssembler::passABIArg(const MoveOperand& from, MoveOp::Type type)
{
    MOZ_ASSERT(inCall_);
    appendSignatureType(type);

    ABIArg arg;
    switch (type) {
      case MoveOp::FLOAT32:
        arg = abiArgs_.next(MIRType::Float32);
        break;
      case MoveOp::DOUBLE:
        arg = abiArgs_.next(MIRType::Double);
        break;
      case MoveOp::GENERAL:
        arg = abiArgs_.next(MIRType::Pointer);
        break;
      default:
        MOZ_CRASH("Unexpected argument type");
    }

    // If the value is already in its destination, no move is needed.
    MoveOperand to(*this, arg);
    if (from == to)
        return;

    if (oom())
        return;
    propagateOOM(moveResolver_.addMove(from, to, type));
}
3149 :
// Emit the actual ABI call to a native function pointer. In debug builds,
// when |check| requests it, set cx->inUnsafeCallWithABI around the call and
// assert afterwards that the callee cleared it (i.e. the callee used
// AutoUnsafeCallWithABI).
void
MacroAssembler::callWithABINoProfiler(void* fun, MoveOp::Type result, CheckUnsafeCallWithABI check)
{
    appendSignatureType(result);
#ifdef JS_SIMULATOR
    fun = Simulator::RedirectNativeFunction(fun, signature());
#endif

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust);

#ifdef DEBUG
    if (check == CheckUnsafeCallWithABI::Check) {
        // ReturnReg is dead before the call; preserve it around the flag write.
        push(ReturnReg);
        loadJSContext(ReturnReg);
        Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
        store32(Imm32(1), flagAddr);
        pop(ReturnReg);
    }
#endif

    call(ImmPtr(fun));

    callWithABIPost(stackAdjust, result);

#ifdef DEBUG
    if (check == CheckUnsafeCallWithABI::Check) {
        Label ok;
        push(ReturnReg);
        loadJSContext(ReturnReg);
        Address flagAddr(ReturnReg, JSContext::offsetOfInUnsafeCallWithABI());
        branch32(Assembler::Equal, flagAddr, Imm32(0), &ok);
        assumeUnreachable("callWithABI: callee did not use AutoUnsafeCallWithABI");
        bind(&ok);
        pop(ReturnReg);
    }
#endif
}
3188 :
// Emit an ABI call from wasm to a builtin thunk at a symbolic address,
// preserving and re-establishing the TLS register across the call.
void
MacroAssembler::callWithABI(wasm::BytecodeOffset bytecode, wasm::SymbolicAddress imm,
                            MoveOp::Type result)
{
    MOZ_ASSERT(wasm::NeedsBuiltinThunk(imm));

    // We clobber WasmTlsReg below in the loadWasmTlsRegFromFrame(), but Ion
    // assumes it is non-volatile, so preserve it manually.
    Push(WasmTlsReg);

    uint32_t stackAdjust;
    callWithABIPre(&stackAdjust, /* callFromWasm = */ true);

    // The TLS register is used in builtin thunks and must be set, by ABI:
    // reload it after passing arguments, which might have used it at spill
    // points when placing arguments.
    loadWasmTlsRegFromFrame();

    call(wasm::CallSiteDesc(bytecode.offset(), wasm::CallSite::Symbolic), imm);
    callWithABIPost(stackAdjust, result, /* callFromWasm = */ true);

    Pop(WasmTlsReg);
}
3212 :
3213 : // ===============================================================
3214 : // Exit frame footer.
3215 :
// Record the current stack pointer as the active JitActivation's packed exit
// frame pointer, making the exit frame visible to stack walkers.
void
MacroAssembler::linkExitFrame(Register cxreg, Register scratch)
{
    loadPtr(Address(cxreg, JSContext::offsetOfActivation()), scratch);
    storeStackPtr(Address(scratch, JitActivation::offsetOfPackedExitFP()));
}
3222 :
3223 : // ===============================================================
3224 : // Branch functions
3225 :
// Branch to |label| unless |fun| is an interpreted function with the
// CONSTRUCTOR flag. Clobbers |scratch|.
void
MacroAssembler::branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label)
{
    // 16-bit loads are slow and unaligned 32-bit loads may be too so
    // perform an aligned 32-bit load and adjust the bitmask accordingly.
    MOZ_ASSERT(JSFunction::offsetOfNargs() % sizeof(uint32_t) == 0);
    MOZ_ASSERT(JSFunction::offsetOfFlags() == JSFunction::offsetOfNargs() + 2);

    // First, ensure it's a scripted function.
    load32(Address(fun, JSFunction::offsetOfNargs()), scratch);
    int32_t bits = IMM32_16ADJ(JSFunction::INTERPRETED);
    branchTest32(Assembler::Zero, scratch, Imm32(bits), label);

    // Check if the CONSTRUCTOR bit is set.
    bits = IMM32_16ADJ(JSFunction::CONSTRUCTOR);
    branchTest32(Assembler::Zero, scratch, Imm32(bits), label);
}
3243 :
// Compare |obj|'s group against a group loaded from memory, without any
// Spectre mitigation. Clobbers |scratch| (which may alias |obj|).
void
MacroAssembler::branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
                                                       const Address& group, Register scratch,
                                                       Label* label)
{
    // Note: obj and scratch registers may alias.
    MOZ_ASSERT(group.base != scratch);
    MOZ_ASSERT(group.base != obj);

    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    branchPtr(cond, group, scratch, label);
}
3256 :
// Compare |obj|'s group against a group loaded from memory. If Spectre
// mitigations are enabled, |spectreRegToZero| is zeroed on the
// mispredictable fall-through path. Clobbers |scratch| (may alias |obj|).
void
MacroAssembler::branchTestObjGroup(Condition cond, Register obj, const Address& group,
                                   Register scratch, Register spectreRegToZero, Label* label)
{
    // Note: obj and scratch registers may alias.
    MOZ_ASSERT(group.base != scratch);
    MOZ_ASSERT(group.base != obj);
    MOZ_ASSERT(scratch != spectreRegToZero);

    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    branchPtr(cond, group, scratch, label);

    if (JitOptions.spectreObjectMitigationsMisc)
        spectreZeroRegister(cond, scratch, spectreRegToZero);
}
3272 :
// Branch on a comparison of |obj|'s compartment (obj->group->realm->
// compartment) against a compartment pointer loaded from memory.
// Clobbers |scratch|.
void
MacroAssembler::branchTestObjCompartment(Condition cond, Register obj, const Address& compartment,
                                         Register scratch, Label* label)
{
    MOZ_ASSERT(obj != scratch);
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
    loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
    branchPtr(cond, compartment, scratch, label);
}
3283 :
// Branch on a comparison of |obj|'s compartment against a compile-time
// compartment pointer. Clobbers |scratch|.
void
MacroAssembler::branchTestObjCompartment(Condition cond, Register obj,
                                         const JS::Compartment* compartment, Register scratch,
                                         Label* label)
{
    MOZ_ASSERT(obj != scratch);
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    loadPtr(Address(scratch, ObjectGroup::offsetOfRealm()), scratch);
    loadPtr(Address(scratch, Realm::offsetOfCompartment()), scratch);
    branchPtr(cond, scratch, ImmPtr(compartment), label);
}
3295 :
// Branch to |label| if |obj|'s group has a null addendum pointer.
// Clobbers |scratch|.
void
MacroAssembler::branchIfObjGroupHasNoAddendum(Register obj, Register scratch, Label* label)
{
    MOZ_ASSERT(obj != scratch);
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);
    branchPtr(Assembler::Equal,
              Address(scratch, ObjectGroup::offsetOfAddendum()),
              ImmWord(0),
              label);
}
3306 :
// Branch to |label| if the (compile-time known) group has the PRE_TENURE
// flag set. Clobbers |scratch|.
void
MacroAssembler::branchIfPretenuredGroup(const ObjectGroup* group, Register scratch, Label* label)
{
    movePtr(ImmGCPtr(group), scratch);
    branchTest32(Assembler::NonZero, Address(scratch, ObjectGroup::offsetOfFlags()),
                 Imm32(OBJECT_FLAG_PRE_TENURE), label);
}
3314 :
// Branch to |label| if |obj|'s class has the NON_NATIVE flag.
// Clobbers |scratch|.
void
MacroAssembler::branchIfNonNativeObj(Register obj, Register scratch, Label* label)
{
    loadObjClassUnsafe(obj, scratch);
    branchTest32(Assembler::NonZero, Address(scratch, Class::offsetOfFlags()),
                 Imm32(Class::NON_NATIVE), label);
}
3322 :
// Branch to |label| if |obj| is an inline typed object (opaque or
// transparent variant). Clobbers |scratch|.
void
MacroAssembler::branchIfInlineTypedObject(Register obj, Register scratch, Label* label)
{
    loadObjClassUnsafe(obj, scratch);
    branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineOpaqueTypedObject::class_), label);
    branchPtr(Assembler::Equal, scratch, ImmPtr(&InlineTransparentTypedObject::class_), label);
}
3330 :
// Branch to |label| unless |obj| is a SIMD typed object of exactly
// |simdType|: checks the class, then the type descriptor's Kind and Type
// reserved slots. Clobbers |scratch|.
void
MacroAssembler::branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType,
                                      Label* label)
{
    loadPtr(Address(obj, JSObject::offsetOfGroup()), scratch);

    // Guard that the object has the same representation as the one produced for
    // SIMD value-type.
    Address clasp(scratch, ObjectGroup::offsetOfClasp());
    static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
    branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
              label);

    // obj->type()->typeDescr()
    // The previous class pointer comparison implies that the addendumKind is
    // Addendum_TypeDescr.
    loadPtr(Address(scratch, ObjectGroup::offsetOfAddendum()), scratch);

    // Check for the /Kind/ reserved slot of the TypeDescr. This is an Int32
    // Value which is equivalent to the object class check.
    static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
    Address typeDescrKind(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
    assertTestInt32(Assembler::Equal, typeDescrKind,
      "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
    branch32(Assembler::NotEqual, ToPayload(typeDescrKind), Imm32(js::type::Simd), label);

    // Check if the SimdTypeDescr /Type/ matches the specialization of this
    // MSimdUnbox instruction.
    static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
    Address typeDescrType(scratch, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE));
    assertTestInt32(Assembler::Equal, typeDescrType,
      "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())");
    branch32(Assembler::NotEqual, ToPayload(typeDescrType), Imm32(int32_t(simdType)), label);
}
3365 :
// Copy the group pointer from |sourceObj| to |destObj| without emitting a
// pre-write barrier; the caller is responsible for barrier correctness.
// Clobbers |scratch|.
void
MacroAssembler::copyObjGroupNoPreBarrier(Register sourceObj, Register destObj, Register scratch)
{
    loadPtr(Address(sourceObj, JSObject::offsetOfGroup()), scratch);
    storePtr(scratch, Address(destObj, JSObject::offsetOfGroup()));
}
3372 :
// Load the typed object's descriptor (stored as its group's addendum)
// into |dest|. |obj| and |dest| may alias.
void
MacroAssembler::loadTypedObjectDescr(Register obj, Register dest)
{
    loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
    loadPtr(Address(dest, ObjectGroup::offsetOfAddendum()), dest);
}
3379 :
// Load the length of an array-typed object (from its ArrayTypeDescr) as an
// unboxed int32 into |dest|.
void
MacroAssembler::loadTypedObjectLength(Register obj, Register dest)
{
    loadTypedObjectDescr(obj, dest);
    unboxInt32(Address(dest, ArrayTypeDescr::offsetOfLength()), dest);
}
3386 :
// Emit a type-tag test branching to |label| on match — but skip the test
// entirely when MIR analysis (|maybeDef|) proves the value can never have
// |type|.
void
MacroAssembler::maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label)
{
    if (!maybeDef || maybeDef->mightBeType(type)) {
        switch (type) {
          case MIRType::Null:
            branchTestNull(Equal, tag, label);
            break;
          case MIRType::Boolean:
            branchTestBoolean(Equal, tag, label);
            break;
          case MIRType::Int32:
            branchTestInt32(Equal, tag, label);
            break;
          case MIRType::Double:
            branchTestDouble(Equal, tag, label);
            break;
          case MIRType::String:
            branchTestString(Equal, tag, label);
            break;
          case MIRType::Symbol:
            branchTestSymbol(Equal, tag, label);
            break;
          case MIRType::Object:
            branchTestObject(Equal, tag, label);
            break;
          default:
            MOZ_CRASH("Unsupported type");
        }
    }
}
3418 :
// Emit a wasm trap instruction and record its offset (with the bytecode
// offset) so the trap handler can map the fault back to wasm source.
void
MacroAssembler::wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset)
{
    uint32_t trapOffset = wasmTrapInstruction().offset();
    // The handler relies on the trap instruction having a fixed length.
    MOZ_ASSERT_IF(!oom(), currentOffset() - trapOffset == WasmTrapInstructionLength);

    append(trap, wasm::TrapSite(trapOffset, bytecodeOffset));
}
3427 :
// Emit an interrupt check: trap with CheckInterrupt if the TLS interrupt
// flag is set, otherwise fall through.
void
MacroAssembler::wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset)
{
    Label ok;
    branch32(Assembler::Equal, Address(tls, offsetof(wasm::TlsData, interrupt)), Imm32(0), &ok);
    wasmTrap(wasm::Trap::CheckInterrupt, bytecodeOffset);
    bind(&ok);
}
3436 :
// Reserve |amount| bytes of stack for a wasm frame with an explicit limit
// check, trapping with StackOverflow on exhaustion. The check is placed
// before or after the sp adjustment depending on the frame size (see below).
void
MacroAssembler::wasmReserveStackChecked(uint32_t amount, wasm::BytecodeOffset trapOffset)
{
    if (!amount)
        return;

    // If the frame is large, don't bump sp until after the stack limit check so
    // that the trap handler isn't called with a wild sp.

    if (amount > MAX_UNCHECKED_LEAF_FRAME_SIZE) {
        // Large frame: check sp - stackLimit > amount before moving sp.
        Label ok;
        Register scratch = ABINonArgReg0;
        moveStackPtrTo(scratch);
        subPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)), scratch);
        branchPtr(Assembler::GreaterThan, scratch, Imm32(amount), &ok);
        wasmTrap(wasm::Trap::StackOverflow, trapOffset);
        bind(&ok);
    }

    reserveStack(amount);

    if (amount <= MAX_UNCHECKED_LEAF_FRAME_SIZE) {
        // Small frame: safe to adjust sp first, then compare it to the limit.
        Label ok;
        branchStackPtrRhs(Assembler::Below,
                          Address(WasmTlsReg, offsetof(wasm::TlsData, stackLimit)),
                          &ok);
        wasmTrap(wasm::Trap::StackOverflow, trapOffset);
        bind(&ok);
    }
}
3467 :
// Emit a call to an imported wasm function: load the callee code pointer
// from the FuncImportTls in global data, switch to the callee's TLS and
// pinned registers, then call.
void
MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
{
    // Load the callee, before the caller's registers are clobbered.
    uint32_t globalDataOffset = callee.importGlobalDataOffset();
    loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, code), ABINonArgReg0);

#ifndef JS_CODEGEN_NONE
    static_assert(ABINonArgReg0 != WasmTlsReg, "by constraint");
#endif

    // Switch to the callee's TLS and pinned registers and make the call.
    loadWasmGlobalPtr(globalDataOffset + offsetof(wasm::FuncImportTls, tls), WasmTlsReg);
    loadWasmPinnedRegsFromTls();

    call(desc, ABINonArgReg0);
}
3485 :
// Emit a call to a builtin instance method: materialize the Instance*
// (loaded from TLS) into the first ABI argument slot — register or stack —
// then call the builtin at its symbolic address.
void
MacroAssembler::wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc,
                                              const ABIArg& instanceArg,
                                              wasm::SymbolicAddress builtin)
{
    MOZ_ASSERT(instanceArg != ABIArg());

    if (instanceArg.kind() == ABIArg::GPR) {
        loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), instanceArg.gpr());
    } else if (instanceArg.kind() == ABIArg::Stack) {
        // Safe to use ABINonArgReg0 since it's the last thing before the call.
        Register scratch = ABINonArgReg0;
        loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
        storePtr(scratch, Address(getStackPointer(), instanceArg.offsetFromArgBase()));
    } else {
        MOZ_CRASH("Unknown abi passing style for pointer");
    }

    call(desc, builtin);
}
3506 :
// Emit an indirect call through a table. asm.js tables are pre-masked and
// homogeneous, so they take a fast unchecked path; wasm tables get a
// signature-id setup, an optional bounds check, a null-entry check, and —
// for external tables — a TLS switch to the callee instance.
void
MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee,
                                 bool needsBoundsCheck)
{
    Register scratch = WasmTableCallScratchReg;
    Register index = WasmTableCallIndexReg;

    if (callee.which() == wasm::CalleeDesc::AsmJSTable) {
        // asm.js tables require no signature check, have had their index masked
        // into range and thus need no bounds check and cannot be external.
        loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);
        loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);
        call(desc, scratch);
        return;
    }

    MOZ_ASSERT(callee.which() == wasm::CalleeDesc::WasmTable);

    // Write the sig-id into the ABI sig-id register.
    wasm::SigIdDesc sigId = callee.wasmTableSigId();
    switch (sigId.kind()) {
      case wasm::SigIdDesc::Kind::Global:
        loadWasmGlobalPtr(sigId.globalDataOffset(), WasmTableCallSigReg);
        break;
      case wasm::SigIdDesc::Kind::Immediate:
        move32(Imm32(sigId.immediate()), WasmTableCallSigReg);
        break;
      case wasm::SigIdDesc::Kind::None:
        break;
    }

    wasm::BytecodeOffset trapOffset(desc.lineOrBytecode());

    // WebAssembly throws if the index is out-of-bounds.
    if (needsBoundsCheck) {
        loadWasmGlobalPtr(callee.tableLengthGlobalDataOffset(), scratch);

        Label ok;
        branch32(Assembler::Condition::Below, index, scratch, &ok);
        wasmTrap(wasm::Trap::OutOfBounds, trapOffset);
        bind(&ok);
    }

    // Load the base pointer of the table.
    loadWasmGlobalPtr(callee.tableBaseGlobalDataOffset(), scratch);

    // Load the callee from the table.
    if (callee.wasmTableIsExternal()) {
        static_assert(sizeof(wasm::ExternalTableElem) == 8 || sizeof(wasm::ExternalTableElem) == 16,
                      "elements of external tables are two words");
        if (sizeof(wasm::ExternalTableElem) == 8) {
            computeEffectiveAddress(BaseIndex(scratch, index, TimesEight), scratch);
        } else {
            // No 16-byte scale exists, so shift the index manually.
            lshift32(Imm32(4), index);
            addPtr(index, scratch);
        }

        loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, tls)), WasmTlsReg);

        // A null TLS pointer marks an uninitialized table entry.
        Label nonNull;
        branchTest32(Assembler::NonZero, WasmTlsReg, WasmTlsReg, &nonNull);
        wasmTrap(wasm::Trap::IndirectCallToNull, trapOffset);
        bind(&nonNull);

        loadWasmPinnedRegsFromTls();

        loadPtr(Address(scratch, offsetof(wasm::ExternalTableElem, code)), scratch);
    } else {
        loadPtr(BaseIndex(scratch, index, ScalePointer), scratch);

        // A null code pointer marks an uninitialized table entry.
        Label nonNull;
        branchTest32(Assembler::NonZero, scratch, scratch, &nonNull);
        wasmTrap(wasm::Trap::IndirectCallToNull, trapOffset);
        bind(&nonNull);
    }

    call(desc, scratch);
}
3585 :
// Emit the inline fast path of the GC pre-write barrier: jump to |noBarrier|
// when the GC thing pointed at by PreBarrierReg does not need to be marked
// (nursery thing, foreign runtime, or mark bit already set). Falls through
// when the slow-path barrier must run. Clobbers temp1-temp3.
void
MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1, Register temp2,
                                       Register temp3, Label* noBarrier)
{
    // PreBarrierReg holds the address of the slot being overwritten; the
    // temps must not alias it.
    MOZ_ASSERT(temp1 != PreBarrierReg);
    MOZ_ASSERT(temp2 != PreBarrierReg);
    MOZ_ASSERT(temp3 != PreBarrierReg);

    // Load the GC thing in temp1.
    if (type == MIRType::Value) {
        unboxGCThingForPreBarrierTrampoline(Address(PreBarrierReg, 0), temp1);
    } else {
        MOZ_ASSERT(type == MIRType::Object ||
                   type == MIRType::String ||
                   type == MIRType::Shape ||
                   type == MIRType::ObjectGroup);
        loadPtr(Address(PreBarrierReg, 0), temp1);
    }

#ifdef DEBUG
    // The caller should have checked for null pointers.
    Label nonZero;
    branchTestPtr(Assembler::NonZero, temp1, temp1, &nonZero);
    assumeUnreachable("JIT pre-barrier: unexpected nullptr");
    bind(&nonZero);
#endif

    // Load the chunk address in temp2 by masking off the low ChunkMask bits.
    movePtr(ImmWord(~gc::ChunkMask), temp2);
    andPtr(temp1, temp2);

    // If the GC thing is in the nursery, we don't need to barrier it.
    if (type == MIRType::Value || type == MIRType::Object || type == MIRType::String) {
        branch32(Assembler::Equal, Address(temp2, gc::ChunkLocationOffset),
                 Imm32(int32_t(gc::ChunkLocation::Nursery)), noBarrier);
    } else {
#ifdef DEBUG
        // Shapes and ObjectGroups are always tenured; assert rather than test.
        Label isTenured;
        branch32(Assembler::NotEqual, Address(temp2, gc::ChunkLocationOffset),
                 Imm32(int32_t(gc::ChunkLocation::Nursery)), &isTenured);
        assumeUnreachable("JIT pre-barrier: unexpected nursery pointer");
        bind(&isTenured);
#endif
    }

    // If it's a permanent atom or symbol from a parent runtime we don't
    // need to barrier it.
    if (type == MIRType::Value || type == MIRType::String) {
        branchPtr(Assembler::NotEqual, Address(temp2, gc::ChunkRuntimeOffset),
                  ImmPtr(rt), noBarrier);
    } else {
#ifdef DEBUG
        // Non-string GC things always belong to this runtime; assert it.
        Label thisRuntime;
        branchPtr(Assembler::Equal, Address(temp2, gc::ChunkRuntimeOffset),
                  ImmPtr(rt), &thisRuntime);
        assumeUnreachable("JIT pre-barrier: unexpected runtime");
        bind(&thisRuntime);
#endif
    }

    // Determine the bit index and store in temp1.
    //
    // bit = (addr & js::gc::ChunkMask) / js::gc::CellBytesPerMarkBit +
    //        static_cast<uint32_t>(colorBit);
    static_assert(gc::CellBytesPerMarkBit == 8,
                  "Calculation below relies on this");
    static_assert(size_t(gc::ColorBit::BlackBit) == 0,
                  "Calculation below relies on this");
    // Divide the chunk offset by CellBytesPerMarkBit (8) via a shift; the
    // BlackBit addend is zero per the static_assert above.
    andPtr(Imm32(gc::ChunkMask), temp1);
    rshiftPtr(Imm32(3), temp1);

    static const size_t nbits = sizeof(uintptr_t) * CHAR_BIT;
    static_assert(nbits == JS_BITS_PER_WORD,
                  "Calculation below relies on this");

    // Load the bitmap word in temp2.
    //
    // word = chunk.bitmap[bit / nbits];
    // Keep the full bit index in temp3 for the mask computation below.
    movePtr(temp1, temp3);
#if JS_BITS_PER_WORD == 64
    rshiftPtr(Imm32(6), temp1);
    loadPtr(BaseIndex(temp2, temp1, TimesEight, gc::ChunkMarkBitmapOffset), temp2);
#else
    rshiftPtr(Imm32(5), temp1);
    loadPtr(BaseIndex(temp2, temp1, TimesFour, gc::ChunkMarkBitmapOffset), temp2);
#endif

    // Load the mask in temp1.
    //
    // mask = uintptr_t(1) << (bit % nbits);
    andPtr(Imm32(nbits - 1), temp3);
    move32(Imm32(1), temp1);
#ifdef JS_CODEGEN_X64
    // x86/x64 variable shifts require the count in cl, hence the register
    // constraint on temp3.
    MOZ_ASSERT(temp3 == rcx);
    shlq_cl(temp1);
#elif JS_CODEGEN_X86
    MOZ_ASSERT(temp3 == ecx);
    shll_cl(temp1);
#elif JS_CODEGEN_ARM
    ma_lsl(temp3, temp1, temp1);
#elif JS_CODEGEN_ARM64
    Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
#elif JS_CODEGEN_MIPS32
    ma_sll(temp1, temp1, temp3);
#elif JS_CODEGEN_MIPS64
    ma_dsll(temp1, temp1, temp3);
#elif JS_CODEGEN_NONE
    MOZ_CRASH();
#else
# error "Unknown architecture"
#endif

    // No barrier is needed if the bit is set, |word & mask != 0|.
    branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
}
3701 :
3702 : // ========================================================================
3703 : // Spectre Mitigations.
3704 :
3705 : void
3706 0 : MacroAssembler::spectreMaskIndex(Register index, Register length, Register output)
3707 : {
3708 0 : MOZ_ASSERT(JitOptions.spectreIndexMasking);
3709 51 : MOZ_ASSERT(length != output);
3710 0 : MOZ_ASSERT(index != output);
3711 :
3712 0 : move32(Imm32(0), output);
3713 51 : cmp32Move32(Assembler::Below, index, length, index, output);
3714 51 : }
3715 :
3716 : void
3717 0 : MacroAssembler::spectreMaskIndex(Register index, const Address& length, Register output)
3718 : {
3719 0 : MOZ_ASSERT(JitOptions.spectreIndexMasking);
3720 0 : MOZ_ASSERT(index != length.base);
3721 2 : MOZ_ASSERT(length.base != output);
3722 0 : MOZ_ASSERT(index != output);
3723 :
3724 0 : move32(Imm32(0), output);
3725 2 : cmp32Move32(Assembler::Below, index, length, index, output);
3726 2 : }
3727 :
3728 : void
3729 0 : MacroAssembler::boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure)
3730 : {
3731 46 : MOZ_ASSERT(mozilla::IsPowerOfTwo(length));
3732 92 : branch32(Assembler::AboveOrEqual, index, Imm32(length), failure);
3733 :
3734 : // Note: it's fine to clobber the input register, as this is a no-op: it
3735 : // only affects speculative execution.
3736 0 : if (JitOptions.spectreIndexMasking)
3737 92 : and32(Imm32(length - 1), index);
3738 46 : }
3739 :
3740 : //}}} check_macroassembler_style
3741 :
// Emit the memory fence (if any) that |sync| requires before the access.
void
MacroAssembler::memoryBarrierBefore(const Synchronization& sync) {
    memoryBarrier(sync.barrierBefore);
}
3746 :
// Emit the memory fence (if any) that |sync| requires after the access.
void
MacroAssembler::memoryBarrierAfter(const Synchronization& sync) {
    memoryBarrier(sync.barrierAfter);
}
3751 :
// Load the wasm tls pointer out of the current wasm::Frame into |dest|.
// The frame's tls slot is addressed relative to the stack pointer using the
// masm's tracked framePushed() value.
void
MacroAssembler::loadWasmTlsRegFromFrame(Register dest)
{
    loadPtr(Address(getStackPointer(), framePushed() + offsetof(wasm::Frame, tls)), dest);
}
3757 :
// Emit the deferred GC-pointer comparison branch recorded in this object.
void
MacroAssembler::BranchGCPtr::emit(MacroAssembler& masm)
{
    MOZ_ASSERT(isInitialized());
    masm.branchPtr(cond(), reg(), ptr_, jump());
}
3764 :
3765 : void
3766 1 : MacroAssembler::debugAssertIsObject(const ValueOperand& val)
3767 : {
3768 : #ifdef DEBUG
3769 0 : Label ok;
3770 0 : branchTestObject(Assembler::Equal, val, &ok);
3771 1 : assumeUnreachable("Expected an object!");
3772 0 : bind(&ok);
3773 : #endif
3774 1 : }
3775 :
3776 : void
3777 0 : MacroAssembler::debugAssertObjHasFixedSlots(Register obj, Register scratch)
3778 : {
3779 : #ifdef DEBUG
3780 0 : Label hasFixedSlots;
3781 0 : loadPtr(Address(obj, ShapedObject::offsetOfShape()), scratch);
3782 0 : branchTest32(Assembler::NonZero,
3783 0 : Address(scratch, Shape::offsetOfImmutableFlags()),
3784 : Imm32(Shape::fixedSlotsMask()),
3785 0 : &hasFixedSlots);
3786 0 : assumeUnreachable("Expected a fixed slot");
3787 0 : bind(&hasFixedSlots);
3788 : #endif
3789 0 : }
3790 :
3791 : void
3792 3 : MacroAssembler::branchIfNativeIteratorNotReusable(Register ni, Label* notReusable)
3793 : {
3794 : // See NativeIterator::isReusable.
3795 3 : Address flagsAddr(ni, NativeIterator::offsetOfFlags());
3796 :
3797 : #ifdef DEBUG
3798 6 : Label niIsInitialized;
3799 6 : branchTest32(Assembler::NonZero,
3800 : flagsAddr,
3801 : Imm32(NativeIterator::Flags::Initialized),
3802 0 : &niIsInitialized);
3803 : assumeUnreachable("Expected a NativeIterator that's been completely "
3804 3 : "initialized");
3805 3 : bind(&niIsInitialized);
3806 : #endif
3807 :
3808 6 : branchTest32(Assembler::NonZero,
3809 : flagsAddr,
3810 : Imm32(NativeIterator::Flags::NotReusable),
3811 3 : notReusable);
3812 3 : }
3813 :
3814 : template <typename T, size_t N, typename P>
3815 : static bool
3816 1896 : AddPendingReadBarrier(Vector<T*, N, P>& list, T* value)
3817 : {
3818 : // Check if value is already present in tail of list.
3819 : // TODO: Consider using a hash table here.
3820 0 : const size_t TailWindow = 4;
3821 :
3822 0 : size_t len = list.length();
3823 9172 : for (size_t i = 0; i < Min(len, TailWindow); i++) {
3824 3877 : if (list[len - i - 1] == value)
3825 : return true;
3826 : }
3827 :
3828 709 : return list.append(value);
3829 : }
3830 :
3831 : JSObject*
3832 0 : MacroAssembler::getSingletonAndDelayBarrier(const TypeSet* types, size_t i)
3833 : {
3834 1086 : JSObject* object = types->getSingletonNoBarrier(i);
3835 1086 : if (!object)
3836 : return nullptr;
3837 :
3838 499 : if (!AddPendingReadBarrier(pendingObjectReadBarriers_, object))
3839 0 : setOOM();
3840 :
3841 : return object;
3842 : }
3843 :
3844 : ObjectGroup*
3845 0 : MacroAssembler::getGroupAndDelayBarrier(const TypeSet* types, size_t i)
3846 : {
3847 1413 : ObjectGroup* group = types->getGroupNoBarrier(i);
3848 1413 : if (!group)
3849 : return nullptr;
3850 :
3851 1397 : if (!AddPendingReadBarrier(pendingObjectGroupReadBarriers_, group))
3852 0 : setOOM();
3853 :
3854 : return group;
3855 : }
3856 :
3857 : void
3858 0 : MacroAssembler::performPendingReadBarriers()
3859 : {
3860 0 : for (JSObject* object : pendingObjectReadBarriers_)
3861 0 : JSObject::readBarrier(object);
3862 0 : for (ObjectGroup* group : pendingObjectGroupReadBarriers_)
3863 1308 : ObjectGroup::readBarrier(group);
3864 2811 : }
3865 :
3866 : namespace js {
3867 : namespace jit {
3868 :
#ifdef DEBUG
// Debug-only RAII scope: registers |reg| in the masm's tracked-register set
// so conflicting concurrent uses of the same register can be asserted.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::AutoGenericRegisterScope(MacroAssembler& masm, RegisterType reg)
  : RegisterType(reg), masm_(masm), released_(false)
{
    masm.debugTrackedRegisters_.add(reg);
}

template AutoGenericRegisterScope<Register>::AutoGenericRegisterScope(MacroAssembler& masm, Register reg);
template AutoGenericRegisterScope<FloatRegister>::AutoGenericRegisterScope(MacroAssembler& masm, FloatRegister reg);
#endif // DEBUG
3880 :
#ifdef DEBUG
// On destruction, release the register unless release() was already called
// explicitly.
template <class RegisterType>
AutoGenericRegisterScope<RegisterType>::~AutoGenericRegisterScope()
{
    if (!released_)
        release();
}

template AutoGenericRegisterScope<Register>::~AutoGenericRegisterScope();
template AutoGenericRegisterScope<FloatRegister>::~AutoGenericRegisterScope();

// Remove the register from the masm's tracked set before the scope ends.
// Must not be called twice without an intervening reacquire().
template <class RegisterType>
void
AutoGenericRegisterScope<RegisterType>::release()
{
    MOZ_ASSERT(!released_);
    released_ = true;
    // The scope object derives from RegisterType, so this cast recovers the
    // wrapped register value.
    const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
    masm_.debugTrackedRegisters_.take(reg);
}

template void AutoGenericRegisterScope<Register>::release();
template void AutoGenericRegisterScope<FloatRegister>::release();

// Re-register a previously release()d register with the masm's tracked set.
template <class RegisterType>
void
AutoGenericRegisterScope<RegisterType>::reacquire()
{
    MOZ_ASSERT(released_);
    released_ = false;
    // See release() for the cast rationale.
    const RegisterType& reg = *dynamic_cast<RegisterType*>(this);
    masm_.debugTrackedRegisters_.add(reg);
}

template void AutoGenericRegisterScope<Register>::reacquire();
template void AutoGenericRegisterScope<FloatRegister>::reacquire();

#endif // DEBUG
3919 :
3920 : } // namespace jit
3921 : } // namespace js
|