Line data Source code
1 : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
2 : * vim: set ts=8 sts=4 et sw=4 tw=99:
3 : * This Source Code Form is subject to the terms of the Mozilla Public
4 : * License, v. 2.0. If a copy of the MPL was not distributed with this
5 : * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
6 :
7 : #ifndef jit_MacroAssembler_h
8 : #define jit_MacroAssembler_h
9 :
10 : #include "mozilla/EndianUtils.h"
11 : #include "mozilla/MacroForEach.h"
12 : #include "mozilla/MathAlgorithms.h"
13 :
14 : #include "vm/Realm.h"
15 :
16 : #if defined(JS_CODEGEN_X86)
17 : # include "jit/x86/MacroAssembler-x86.h"
18 : #elif defined(JS_CODEGEN_X64)
19 : # include "jit/x64/MacroAssembler-x64.h"
20 : #elif defined(JS_CODEGEN_ARM)
21 : # include "jit/arm/MacroAssembler-arm.h"
22 : #elif defined(JS_CODEGEN_ARM64)
23 : # include "jit/arm64/MacroAssembler-arm64.h"
24 : #elif defined(JS_CODEGEN_MIPS32)
25 : # include "jit/mips32/MacroAssembler-mips32.h"
26 : #elif defined(JS_CODEGEN_MIPS64)
27 : # include "jit/mips64/MacroAssembler-mips64.h"
28 : #elif defined(JS_CODEGEN_NONE)
29 : # include "jit/none/MacroAssembler-none.h"
30 : #else
31 : # error "Unknown architecture!"
32 : #endif
33 : #include "jit/AtomicOp.h"
34 : #include "jit/IonInstrumentation.h"
35 : #include "jit/IonTypes.h"
36 : #include "jit/JitRealm.h"
37 : #include "jit/TemplateObject.h"
38 : #include "jit/VMFunctions.h"
39 : #include "vm/ProxyObject.h"
40 : #include "vm/Shape.h"
41 : #include "vm/TypedArrayObject.h"
42 : #include "vm/UnboxedObject.h"
43 :
44 : // * How to read/write MacroAssembler method declarations:
45 : //
46 : // The following macros are made to avoid #ifdef around each method declaration
47 : // of the Macro Assembler, and they are also used as a hint on the location of
48 : // the implementation of each method. For example, the following declaration
49 : //
50 : // void Pop(FloatRegister t) DEFINED_ON(x86_shared, arm);
51 : //
52 : // suggests the MacroAssembler::Pop(FloatRegister) method is implemented in
53 : // x86-shared/MacroAssembler-x86-shared.h, and also in arm/MacroAssembler-arm.h.
54 : //
55 : // - If there is no annotation, then there is only one generic definition in
56 : // MacroAssembler.cpp.
57 : //
58 : // - If the declaration is "inline", then the method definition(s) would be in
59 : // the "-inl.h" variant of the same file(s).
60 : //
61 : // The script check_macroassembler_style.py (check-masm target of the Makefile)
62 : // is used to verify that method definitions are matching the annotation added
63 : // to the method declarations. If there is any difference, then you either
64 : // forgot to define the method in one of the macro assemblers, or you forgot to
65 : // update the annotation of the macro assembler declaration.
66 : //
67 : // Some convenient short-cuts are used to avoid repeating the same list of
68 : // architectures on each method declaration, such as PER_ARCH and
69 : // PER_SHARED_ARCH.
70 : //
71 : // Functions that are architecture-agnostic and the same for all
72 : // architectures, but that must be defined inline *in this header* to
73 : // avoid used-before-defined warnings/errors that would occur if the
74 : // definitions were in MacroAssembler-inl.h, should use the OOL_IN_HEADER
75 : // marker at the end of the declaration:
76 : //
77 : // inline uint32_t framePushed() const OOL_IN_HEADER;
78 : //
79 : // Such functions should then be defined immediately after MacroAssembler's
80 : // definition, for example like so:
81 : //
82 : // //{{{ check_macroassembler_style
83 : // inline uint32_t
84 : // MacroAssembler::framePushed() const
85 : // {
86 : // return framePushed_;
87 : // }
88 : // //}}} check_macroassembler_style
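: //
: // As a worked illustration (editor's note, not part of the original file),
: // the Pop declaration above becomes, per target:
: //
: //     on x64:    void Pop(FloatRegister t);                  // x86-shared impl
: //     on arm64:  void Pop(FloatRegister t) = delete;         // not available
: //     on none:   void Pop(FloatRegister t) { MOZ_CRASH(); }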
89 :
90 :
91 : # define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
92 : # define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
93 :
94 : // * How this macro works:
95 : //
96 : // DEFINED_ON is a macro which checks whether, for the current architecture, the
97 : // method is defined on the macro assembler or not.
98 : //
99 : // For each architecture, we have a macro named DEFINED_ON_arch. This macro is
100 : // empty if this is not the current architecture. Otherwise it must be either
101 : // set to "define" or "crash" (only use for the none target so-far).
102 : //
103 : // The DEFINED_ON macro maps the list of architecture names given as arguments to
104 : // a list of macro names. For example,
105 : //
106 : // DEFINED_ON(arm, x86_shared)
107 : //
108 : // is expanded to
109 : //
110 : // DEFINED_ON_none DEFINED_ON_arm DEFINED_ON_x86_shared
111 : //
112 : // which are later expanded on ARM, x86, x64 by DEFINED_ON_EXPAND_ARCH_RESULTS
113 : // to
114 : //
115 : // define
116 : //
117 : // or if the JIT is disabled or set to no architecture to
118 : //
119 : // crash
120 : //
121 : // or to nothing, if the current architecture is not listed in the list of
122 : // arguments of DEFINED_ON. Note that at most one of the DEFINED_ON_arch macros
123 : // contributes to the non-empty result, which is the macro of the current
124 : // architecture if it is listed in the arguments of DEFINED_ON.
125 : //
126 : // This result is appended to DEFINED_ON_RESULT_ before the macro is expanded;
127 : // the expansion is either no annotation, a MOZ_CRASH() body, or a "= delete"
128 : // annotation on the method declaration.
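: //
: // Worked example (editor's illustration): expanding DEFINED_ON(arm, x86_shared)
: // while compiling for x86 proceeds roughly as
: //
: //   DEFINED_ON(arm, x86_shared)
: //   --> DEFINED_ON_MAP_ON_ARCHS((none, arm, x86_shared))
: //   --> DEFINED_ON_DISPATCH_RESULT(DEFINED_ON_none DEFINED_ON_arm
: //                                  DEFINED_ON_x86_shared)
: //   --> DEFINED_ON_DISPATCH_RESULT(define)   // only x86_shared is non-empty
: //   --> DEFINED_ON_RESULT_define
: //   --> (no annotation)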
129 :
130 : # define DEFINED_ON_x86
131 : # define DEFINED_ON_x64
132 : # define DEFINED_ON_x86_shared
133 : # define DEFINED_ON_arm
134 : # define DEFINED_ON_arm64
135 : # define DEFINED_ON_mips32
136 : # define DEFINED_ON_mips64
137 : # define DEFINED_ON_mips_shared
138 : # define DEFINED_ON_none
139 :
140 : // Specialize for each architecture.
141 : #if defined(JS_CODEGEN_X86)
142 : # undef DEFINED_ON_x86
143 : # define DEFINED_ON_x86 define
144 : # undef DEFINED_ON_x86_shared
145 : # define DEFINED_ON_x86_shared define
146 : #elif defined(JS_CODEGEN_X64)
147 : # undef DEFINED_ON_x64
148 : # define DEFINED_ON_x64 define
149 : # undef DEFINED_ON_x86_shared
150 : # define DEFINED_ON_x86_shared define
151 : #elif defined(JS_CODEGEN_ARM)
152 : # undef DEFINED_ON_arm
153 : # define DEFINED_ON_arm define
154 : #elif defined(JS_CODEGEN_ARM64)
155 : # undef DEFINED_ON_arm64
156 : # define DEFINED_ON_arm64 define
157 : #elif defined(JS_CODEGEN_MIPS32)
158 : # undef DEFINED_ON_mips32
159 : # define DEFINED_ON_mips32 define
160 : # undef DEFINED_ON_mips_shared
161 : # define DEFINED_ON_mips_shared define
162 : #elif defined(JS_CODEGEN_MIPS64)
163 : # undef DEFINED_ON_mips64
164 : # define DEFINED_ON_mips64 define
165 : # undef DEFINED_ON_mips_shared
166 : # define DEFINED_ON_mips_shared define
167 : #elif defined(JS_CODEGEN_NONE)
168 : # undef DEFINED_ON_none
169 : # define DEFINED_ON_none crash
170 : #else
171 : # error "Unknown architecture!"
172 : #endif
173 :
174 : # define DEFINED_ON_RESULT_crash { MOZ_CRASH(); }
175 : # define DEFINED_ON_RESULT_define
176 : # define DEFINED_ON_RESULT_ = delete
177 :
178 : # define DEFINED_ON_DISPATCH_RESULT_2(Macro, Result) \
179 : Macro ## Result
180 : # define DEFINED_ON_DISPATCH_RESULT(...) \
181 : DEFINED_ON_DISPATCH_RESULT_2(DEFINED_ON_RESULT_, __VA_ARGS__)
182 :
183 : // We need to let the evaluation of MOZ_FOR_EACH terminate.
184 : # define DEFINED_ON_EXPAND_ARCH_RESULTS_3(ParenResult) \
185 : DEFINED_ON_DISPATCH_RESULT ParenResult
186 : # define DEFINED_ON_EXPAND_ARCH_RESULTS_2(ParenResult) \
187 : DEFINED_ON_EXPAND_ARCH_RESULTS_3 (ParenResult)
188 : # define DEFINED_ON_EXPAND_ARCH_RESULTS(ParenResult) \
189 : DEFINED_ON_EXPAND_ARCH_RESULTS_2 (ParenResult)
190 :
191 : # define DEFINED_ON_FWDARCH(Arch) DEFINED_ON_ ## Arch
192 : # define DEFINED_ON_MAP_ON_ARCHS(ArchList) \
193 : DEFINED_ON_EXPAND_ARCH_RESULTS( \
194 : (MOZ_FOR_EACH(DEFINED_ON_FWDARCH, (), ArchList)))
195 :
196 : # define DEFINED_ON(...) \
197 : DEFINED_ON_MAP_ON_ARCHS((none, __VA_ARGS__))
198 :
199 : # define PER_ARCH DEFINED_ON(ALL_ARCH)
200 : # define PER_SHARED_ARCH DEFINED_ON(ALL_SHARED_ARCH)
201 : # define OOL_IN_HEADER
202 :
203 : #if MOZ_LITTLE_ENDIAN
204 : #define IMM32_16ADJ(X) (X) << 16
205 : #else
206 : #define IMM32_16ADJ(X) (X)
207 : #endif
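: // For example (editor's note), IMM32_16ADJ(0x1234) evaluates to 0x12340000 on
: // little-endian targets and stays 0x1234 on big-endian ones.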
208 :
209 : namespace js {
210 : namespace jit {
211 :
212 : // Defined in JitFrames.h
213 : enum class ExitFrameType : uint8_t;
214 :
215 : class AutoSaveLiveRegisters;
216 :
217 : enum class CheckUnsafeCallWithABI {
218 : // Require the callee to use AutoUnsafeCallWithABI.
219 : Check,
220 :
221 : // We pushed an exit frame so this callWithABI can safely GC and walk the
222 : // stack.
223 : DontCheckHasExitFrame,
224 :
225 : // Don't check this callWithABI uses AutoUnsafeCallWithABI, for instance
226 : // because we're calling a simple helper function (like malloc or js_free)
227 : // that we can't change and/or that we know won't GC.
228 : DontCheckOther,
229 : };
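: //
: // For instance (editor's sketch; callWithABI is declared further below), a
: // call to a helper such as js_free can opt out of the assertion like so:
: //
: //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, js_free), MoveOp::GENERAL,
: //                    CheckUnsafeCallWithABI::DontCheckOther);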
230 :
231 : enum class CharEncoding { Latin1, TwoByte };
232 :
233 : // The public entrypoint for emitting assembly. Note that a MacroAssembler can
234 : // use cx->lifoAlloc, so take care not to interleave masm use with other
235 : // lifoAlloc use if one will be destroyed before the other.
236 14100 : class MacroAssembler : public MacroAssemblerSpecific
237 : {
238 : MacroAssembler* thisFromCtor() {
239 : return this;
240 : }
241 :
242 : public:
243 : /*
244 : * Base class for creating a branch.
245 : */
246 : class Branch
247 : {
248 : bool init_;
249 : Condition cond_;
250 : Label* jump_;
251 : Register reg_;
252 :
253 : public:
254 : Branch()
255 0 : : init_(false),
256 : cond_(Equal),
257 : jump_(nullptr),
258 0 : reg_(Register::FromCode(0)) // Quell compiler warnings.
259 : { }
260 :
261 : Branch(Condition cond, Register reg, Label* jump)
262 : : init_(true),
263 : cond_(cond),
264 : jump_(jump),
265 : reg_(reg)
266 : { }
267 :
268 : bool isInitialized() const {
269 : return init_;
270 : }
271 :
272 : Condition cond() const {
273 : return cond_;
274 : }
275 :
276 : Label* jump() const {
277 : return jump_;
278 : }
279 :
280 : Register reg() const {
281 : return reg_;
282 : }
283 :
284 : void invertCondition() {
285 0 : cond_ = InvertCondition(cond_);
286 : }
287 :
288 : void relink(Label* jump) {
289 0 : jump_ = jump;
290 : }
291 : };
292 :
293 : /*
294 : * Creates a branch based on a GCPtr.
295 : */
296 : class BranchGCPtr : public Branch
297 : {
298 : ImmGCPtr ptr_;
299 :
300 : public:
301 : BranchGCPtr()
302 0 : : Branch(),
303 0 : ptr_(ImmGCPtr(nullptr))
304 : { }
305 :
306 : BranchGCPtr(Condition cond, Register reg, ImmGCPtr ptr, Label* jump)
307 : : Branch(cond, reg, jump),
308 0 : ptr_(ptr)
309 : { }
310 :
311 : void emit(MacroAssembler& masm);
312 : };
313 :
314 : mozilla::Maybe<JitContext> jitContext_;
315 : mozilla::Maybe<AutoJitContextAlloc> alloc_;
316 :
317 : private:
318 : // Labels for handling exceptions and failures.
319 : NonAssertingLabel failureLabel_;
320 :
321 : protected:
322 : // Constructors are protected. Use one of the derived classes!
323 : MacroAssembler();
324 :
325 : // This constructor should only be used when there is no JitContext active
326 : // (for example, Trampoline-$(ARCH).cpp and IonCaches.cpp).
327 : explicit MacroAssembler(JSContext* cx);
328 :
329 : // wasm compilation handles its own JitContext-pushing
330 : struct WasmToken {};
331 : explicit MacroAssembler(WasmToken, TempAllocator& alloc);
332 :
333 : public:
334 1911 : MoveResolver& moveResolver() {
335 : // As an optimization, the MoveResolver is a persistent data structure
336 : // shared between visitors in the CodeGenerator. This assertion
337 : // checks that state is not leaking from visitor to visitor
338 : // via an unresolved addMove().
339 3822 : MOZ_ASSERT(moveResolver_.hasNoPendingMoves());
340 1911 : return moveResolver_;
341 : }
342 :
343 : size_t instructionsSize() const {
344 5622 : return size();
345 : }
346 :
347 : #ifdef JS_HAS_HIDDEN_SP
348 : void Push(RegisterOrSP reg);
349 : #endif
350 :
351 : //{{{ check_macroassembler_decl_style
352 : public:
353 : // ===============================================================
354 : // MacroAssembler high-level usage.
355 :
356 : // Flushes the assembly buffer, on platforms that need it.
357 : void flush() PER_SHARED_ARCH;
358 :
359 : // Add a comment that is visible in the pretty printed assembly code.
360 : void comment(const char* msg) PER_SHARED_ARCH;
361 :
362 : // ===============================================================
363 : // Frame manipulation functions.
364 :
365 : inline uint32_t framePushed() const OOL_IN_HEADER;
366 : inline void setFramePushed(uint32_t framePushed) OOL_IN_HEADER;
367 : inline void adjustFrame(int32_t value) OOL_IN_HEADER;
368 :
369 : // Adjust the frame, to account for implicit modification of the stack
370 : // pointer, such that the callee can remove arguments on behalf of the
371 : // caller.
372 : inline void implicitPop(uint32_t bytes) OOL_IN_HEADER;
373 :
374 : private:
375 : // This field is used to statically (at compilation time) emulate a frame
376 : // pointer by keeping track of stack manipulations.
377 : //
378 : // It is maintained by all stack manipulation functions below.
379 : uint32_t framePushed_;
380 :
381 : public:
382 : // ===============================================================
383 : // Stack manipulation functions.
384 :
385 : void PushRegsInMask(LiveRegisterSet set)
386 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
387 : void PushRegsInMask(LiveGeneralRegisterSet set);
388 :
389 : // Like PushRegsInMask, but instead of pushing the registers, store them to
390 : // |dest|. |dest| should point to the end of the reserved space, so the
391 : // first register will be stored at |dest.offset - sizeof(register)|.
392 : void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
393 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
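: //
: // Sketch (editor's illustration; |bytes| stands for the pre-computed size of
: // |set| and is an assumption here):
: //
: //   masm.reserveStack(bytes);
: //   masm.storeRegsInMask(set, Address(masm.getStackPointer(), bytes), scratch);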
394 :
395 : void PopRegsInMask(LiveRegisterSet set);
396 : void PopRegsInMask(LiveGeneralRegisterSet set);
397 : void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
398 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
399 :
400 : void Push(const Operand op) DEFINED_ON(x86_shared);
401 : void Push(Register reg) PER_SHARED_ARCH;
402 : void Push(Register reg1, Register reg2, Register reg3, Register reg4) DEFINED_ON(arm64);
403 : void Push(const Imm32 imm) PER_SHARED_ARCH;
404 : void Push(const ImmWord imm) PER_SHARED_ARCH;
405 : void Push(const ImmPtr imm) PER_SHARED_ARCH;
406 : void Push(const ImmGCPtr ptr) PER_SHARED_ARCH;
407 : void Push(FloatRegister reg) PER_SHARED_ARCH;
408 : void PushFlags() DEFINED_ON(x86_shared);
409 : void Push(jsid id, Register scratchReg);
410 : void Push(TypedOrValueRegister v);
411 : void Push(const ConstantOrRegister& v);
412 : void Push(const ValueOperand& val);
413 : void Push(const Value& val);
414 : void Push(JSValueType type, Register reg);
415 : void PushValue(const Address& addr);
416 : void PushEmptyRooted(VMFunction::RootType rootType);
417 : inline CodeOffset PushWithPatch(ImmWord word);
418 : inline CodeOffset PushWithPatch(ImmPtr imm);
419 :
420 : void Pop(const Operand op) DEFINED_ON(x86_shared);
421 : void Pop(Register reg) PER_SHARED_ARCH;
422 : void Pop(FloatRegister t) PER_SHARED_ARCH;
423 : void Pop(const ValueOperand& val) PER_SHARED_ARCH;
424 : void PopFlags() DEFINED_ON(x86_shared);
425 : void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared);
426 : void popRooted(VMFunction::RootType rootType, Register cellReg, const ValueOperand& valueReg);
427 :
428 : // Move the stack pointer based on the requested amount.
429 : void adjustStack(int amount);
430 : void freeStack(uint32_t amount);
431 :
432 : // Warning: This method does not update the framePushed() counter.
433 : void freeStack(Register amount);
434 :
435 : private:
436 : // ===============================================================
437 : // Register allocation fields.
438 : #ifdef DEBUG
439 : friend AutoRegisterScope;
440 : friend AutoFloatRegisterScope;
441 : // Used to track register scopes for debug builds.
442 : // Manipulated by the AutoGenericRegisterScope class.
443 : AllocatableRegisterSet debugTrackedRegisters_;
444 : #endif // DEBUG
445 :
446 : public:
447 : // ===============================================================
448 : // Simple call functions.
449 :
450 : CodeOffset call(Register reg) PER_SHARED_ARCH;
451 : CodeOffset call(Label* label) PER_SHARED_ARCH;
452 : void call(const Address& addr) PER_SHARED_ARCH;
453 : void call(ImmWord imm) PER_SHARED_ARCH;
454 : // Call a target native function, which is neither traceable nor movable.
455 : void call(ImmPtr imm) PER_SHARED_ARCH;
456 : void call(wasm::SymbolicAddress imm) PER_SHARED_ARCH;
457 : inline void call(const wasm::CallSiteDesc& desc, wasm::SymbolicAddress imm);
458 :
459 : // Call a target JitCode, which must be traceable, and may be movable.
460 : void call(JitCode* c) PER_SHARED_ARCH;
461 :
462 : inline void call(TrampolinePtr code);
463 :
464 : inline void call(const wasm::CallSiteDesc& desc, const Register reg);
465 : inline void call(const wasm::CallSiteDesc& desc, uint32_t funcDefIndex);
466 : inline void call(const wasm::CallSiteDesc& desc, wasm::Trap trap);
467 :
468 : CodeOffset callWithPatch() PER_SHARED_ARCH;
469 : void patchCall(uint32_t callerOffset, uint32_t calleeOffset) PER_SHARED_ARCH;
470 :
471 : // Push the return address and make a call. On platforms where this function
472 : // is not defined, push the link register (pushReturnAddress) at the entry
473 : // point of the callee.
474 : void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
475 : void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);
476 :
477 : // These do not adjust framePushed().
478 : void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
479 : void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
480 :
481 : public:
482 : // ===============================================================
483 : // Patchable near/far jumps.
484 :
485 : // "Far jumps" provide the ability to jump to any uint32_t offset from any
486 : // other uint32_t offset without using a constant pool (thus returning a
487 : // simple CodeOffset instead of a CodeOffsetJump).
488 : CodeOffset farJumpWithPatch() PER_SHARED_ARCH;
489 : void patchFarJump(CodeOffset farJump, uint32_t targetOffset) PER_SHARED_ARCH;
490 :
491 : // Emit a nop that can be patched into a call with an int32 relative
492 : // displacement, and patched back into a nop.
493 : CodeOffset nopPatchableToCall(const wasm::CallSiteDesc& desc) PER_SHARED_ARCH;
494 : static void patchNopToCall(uint8_t* callsite, uint8_t* target) PER_SHARED_ARCH;
495 : static void patchCallToNop(uint8_t* callsite) PER_SHARED_ARCH;
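: //
: // Patching flow sketch (editor's illustration; |code| is the final address of
: // the emitted code):
: //
: //   CodeOffset offset = masm.nopPatchableToCall(desc);
: //   ...
: //   MacroAssembler::patchNopToCall(code + offset.offset(), target);
: //   // ... and, to disarm it again:
: //   MacroAssembler::patchCallToNop(code + offset.offset());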
496 :
497 : public:
498 : // ===============================================================
499 : // ABI function calls.
500 :
501 : // Set up a call to C/C++ code, given the assumption that framePushed
502 : // accurately defines the state of the stack, and that the top of the stack
503 : // was properly aligned. Note that this only supports cdecl.
504 : void setupAlignedABICall(); // CRASH_ON(arm64)
505 :
506 : // As setupAlignedABICall, but for WebAssembly native ABI calls, which pass
507 : // through a builtin thunk that uses the wasm ABI. All the wasm ABI calls
508 : // can be native, since we always know the stack alignment a priori.
509 : void setupWasmABICall(); // CRASH_ON(arm64)
510 :
511 : // Set up an ABI call for when the alignment is not known. This may need a
512 : // scratch register.
513 : void setupUnalignedABICall(Register scratch) PER_ARCH;
514 :
515 : // Arguments must be assigned to a C/C++ call in order. They are moved
516 : // in parallel immediately before performing the call. This process may
517 : // temporarily use more stack, in which case esp-relative addresses will be
518 : // automatically adjusted. It is extremely important that esp-relative
519 : // addresses are computed *after* setupABICall(). Furthermore, no
520 : // operations should be emitted while setting arguments.
521 : void passABIArg(const MoveOperand& from, MoveOp::Type type);
522 : inline void passABIArg(Register reg);
523 : inline void passABIArg(FloatRegister reg, MoveOp::Type type);
524 :
525 : inline void callWithABI(void* fun, MoveOp::Type result = MoveOp::GENERAL,
526 : CheckUnsafeCallWithABI check = CheckUnsafeCallWithABI::Check);
527 : inline void callWithABI(Register fun, MoveOp::Type result = MoveOp::GENERAL);
528 : inline void callWithABI(const Address& fun, MoveOp::Type result = MoveOp::GENERAL);
529 :
530 : void callWithABI(wasm::BytecodeOffset offset, wasm::SymbolicAddress fun,
531 : MoveOp::Type result = MoveOp::GENERAL);
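: //
: // Minimal end-to-end sketch (editor's illustration; |HelperFn| is a
: // hypothetical int32_t(int32_t) C function, |scratch| and |argReg| are
: // caller-chosen registers):
: //
: //   masm.setupUnalignedABICall(scratch);
: //   masm.passABIArg(argReg);
: //   masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, HelperFn));
: //   // The int32_t result is now in ReturnReg.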
532 :
533 : private:
534 : // Reinitialize the variables which have to be cleared before making a call
535 : // with callWithABI.
536 : void setupABICall();
537 :
538 : // Reserve the stack and resolve the argument moves.
539 : void callWithABIPre(uint32_t* stackAdjust, bool callFromWasm = false) PER_ARCH;
540 :
541 : // Emits a call to a C/C++ function, resolving all argument moves.
542 : void callWithABINoProfiler(void* fun, MoveOp::Type result, CheckUnsafeCallWithABI check);
543 : void callWithABINoProfiler(Register fun, MoveOp::Type result) PER_ARCH;
544 : void callWithABINoProfiler(const Address& fun, MoveOp::Type result) PER_ARCH;
545 :
546 : // Restore the stack to its state before the setup function call.
547 : void callWithABIPost(uint32_t stackAdjust, MoveOp::Type result, bool callFromWasm = false) PER_ARCH;
548 :
549 : // Create the signature to be able to decode the arguments of a native
550 : // function, when calling a function within the simulator.
551 : inline void appendSignatureType(MoveOp::Type type);
552 : inline ABIFunctionType signature() const;
553 :
554 : // Private variables used to handle moves between registers given as
555 : // arguments to passABIArg and the list of ABI registers expected for the
556 : // signature of the function.
557 : MoveResolver moveResolver_;
558 :
559 : // Architecture-specific implementation which specifies how registers & stack
560 : // offsets are used for calling a function.
561 : ABIArgGenerator abiArgs_;
562 :
563 : #ifdef DEBUG
564 : // Flag used to assert that we use ABI functions in the right context.
565 : bool inCall_;
566 : #endif
567 :
568 : // If set by setupUnalignedABICall then callWithABI will pop the stack
569 : // register which is on the stack.
570 : bool dynamicAlignment_;
571 :
572 : #ifdef JS_SIMULATOR
573 : // The signature is used to accumulate all types of arguments which are used
574 : // by the caller. This is used by the simulators to decode the arguments
575 : // properly, and cast the function pointer to the right type.
576 : uint32_t signature_;
577 : #endif
578 :
579 : public:
580 : // ===============================================================
581 : // Jit Frames.
582 : //
583 : // These functions are used to build the content of the Jit frames. See
584 : // CommonFrameLayout class, and all its derivatives. The content should be
585 : // pushed in the opposite order of the fields of the structures, such that
586 : // the structures can be used to interpret the content of the stack.
587 :
588 : // Call the Jit function, and push the return address (or let the callee
589 : // push the return address).
590 : //
591 : // These functions return the offset of the return address, in order to use
592 : // the return address to index the safepoints, which are used to list all
593 : // live registers.
594 : inline uint32_t callJitNoProfiler(Register callee);
595 : inline uint32_t callJit(Register callee);
596 : inline uint32_t callJit(JitCode* code);
597 : inline uint32_t callJit(TrampolinePtr code);
598 :
599 : // The frame descriptor is the second field of all Jit frames, pushed before
600 : // calling the Jit function. It is a composite value defined in JitFrames.h
601 : inline void makeFrameDescriptor(Register frameSizeReg, FrameType type, uint32_t headerSize);
602 :
603 : // Push the frame descriptor, based on the statically known framePushed.
604 : inline void pushStaticFrameDescriptor(FrameType type, uint32_t headerSize);
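: // For example (editor's sketch; the JitFrame_IonJS enumerator and the header
: // size are assumptions):
: //
: //   masm.pushStaticFrameDescriptor(JitFrame_IonJS, JitFrameLayout::Size());
: //   uint32_t returnOffset = masm.callJit(calleeReg);
: //   // |returnOffset| indexes the safepoint for this call.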
605 :
606 : // Push the callee token of a JSFunction whose pointer is stored in the
607 : // |callee| register. The callee token is packed with a |constructing| flag
608 : // which indicates whether the JS function is being called with "new" or
609 : // not.
610 : inline void PushCalleeToken(Register callee, bool constructing);
611 :
612 : // Unpack a callee token located at the |token| address, and return the
613 : // JSFunction pointer in the |dest| register.
614 : inline void loadFunctionFromCalleeToken(Address token, Register dest);
615 :
616 : // This function emulates a call by pushing an exit frame on the stack,
617 : // except that the fake-function is inlined within the body of the caller.
618 : //
619 : // This function assumes that the current frame is an IonJS frame.
620 : //
621 : // This function returns the offset of the /fake/ return address, in order to use
622 : // the return address to index the safepoints, which are used to list all
623 : // live registers.
624 : //
625 : // This function should be balanced with a call to adjustStack, to pop the
626 : // exit frame and emulate the return statement of the inlined function.
627 : inline uint32_t buildFakeExitFrame(Register scratch);
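: //
: // Balancing sketch (editor's illustration; the popped size expression is an
: // assumption):
: //
: //   uint32_t safepointOffset = masm.buildFakeExitFrame(scratch);
: //   // ... body of the inlined fake callee ...
: //   masm.adjustStack(ExitFrameLayout::SizeWithFooter());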
628 :
629 : private:
630 : // This function is used by buildFakeExitFrame to push a fake return address
631 : // on the stack. This fake return address should never be used for resuming
632 : // any execution, and can even be an invalid pointer into the instruction
633 : // stream, as long as it does not alias any other return address.
634 : uint32_t pushFakeReturnAddress(Register scratch) PER_SHARED_ARCH;
635 :
636 : public:
637 : // ===============================================================
638 : // Exit frame footer.
639 : //
640 : // When calling outside the Jit we push an exit frame. To mark the stack
641 : // correctly, we have to push additional information, called the Exit frame
642 : // footer, which is used to identify how the stack is marked.
643 : //
644 : // See JitFrames.h, and MarkJitExitFrame in JitFrames.cpp.
645 :
646 : // Push stub code and the VMFunction pointer.
647 : inline void enterExitFrame(Register cxreg, Register scratch, const VMFunction* f);
648 :
649 : // Push an exit frame token to identify which fake exit frame this footer
650 : // corresponds to.
651 : inline void enterFakeExitFrame(Register cxreg, Register scratch, ExitFrameType type);
652 :
653 : // Push an exit frame token for a native call.
654 : inline void enterFakeExitFrameForNative(Register cxreg, Register scratch, bool isConstructing);
655 :
656 : // Pop ExitFrame footer in addition to the extra frame.
657 : inline void leaveExitFrame(size_t extraFrame = 0);
658 :
659 : private:
660 : // Save the top of the stack into JitActivation::packedExitFP of the
661 : // current thread, which should be the location of the latest exit frame.
662 : void linkExitFrame(Register cxreg, Register scratch);
663 :
664 : public:
665 : // ===============================================================
666 : // Move instructions
667 :
668 : inline void move64(Imm64 imm, Register64 dest) PER_ARCH;
669 : inline void move64(Register64 src, Register64 dest) PER_ARCH;
670 :
671 : inline void moveFloat32ToGPR(FloatRegister src, Register dest) PER_SHARED_ARCH;
672 : inline void moveGPRToFloat32(Register src, FloatRegister dest) PER_SHARED_ARCH;
673 :
674 : inline void moveDoubleToGPR64(FloatRegister src, Register64 dest) PER_ARCH;
675 : inline void moveGPR64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
676 :
677 : inline void move8SignExtend(Register src, Register dest) PER_SHARED_ARCH;
678 : inline void move16SignExtend(Register src, Register dest) PER_SHARED_ARCH;
679 :
680 : // move64To32 will clear the high bits of `dest` on 64-bit systems.
681 : inline void move64To32(Register64 src, Register dest) PER_ARCH;
682 :
683 : inline void move32To64ZeroExtend(Register src, Register64 dest) PER_ARCH;
684 :
685 : // On x86, `dest` must be edx:eax for the sign extend operations.
686 : inline void move8To64SignExtend(Register src, Register64 dest) PER_ARCH;
687 : inline void move16To64SignExtend(Register src, Register64 dest) PER_ARCH;
688 : inline void move32To64SignExtend(Register src, Register64 dest) PER_ARCH;
689 :
690 : // Copy a constant, typed-register, or a ValueOperand into a ValueOperand
691 : // destination.
692 : inline void moveValue(const ConstantOrRegister& src, const ValueOperand& dest);
693 : void moveValue(const TypedOrValueRegister& src, const ValueOperand& dest) PER_ARCH;
694 : void moveValue(const ValueOperand& src, const ValueOperand& dest) PER_ARCH;
695 : void moveValue(const Value& src, const ValueOperand& dest) PER_ARCH;
696 :
697 : public:
698 : // ===============================================================
699 : // Logical instructions
700 :
701 : inline void not32(Register reg) PER_SHARED_ARCH;
702 :
703 : inline void and32(Register src, Register dest) PER_SHARED_ARCH;
704 : inline void and32(Imm32 imm, Register dest) PER_SHARED_ARCH;
705 : inline void and32(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
706 : inline void and32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
707 : inline void and32(const Address& src, Register dest) PER_SHARED_ARCH;
708 :
709 : inline void andPtr(Register src, Register dest) PER_ARCH;
710 : inline void andPtr(Imm32 imm, Register dest) PER_ARCH;
711 :
712 : inline void and64(Imm64 imm, Register64 dest) PER_ARCH;
713 : inline void or64(Imm64 imm, Register64 dest) PER_ARCH;
714 : inline void xor64(Imm64 imm, Register64 dest) PER_ARCH;
715 :
716 : inline void or32(Register src, Register dest) PER_SHARED_ARCH;
717 : inline void or32(Imm32 imm, Register dest) PER_SHARED_ARCH;
718 : inline void or32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
719 :
720 : inline void orPtr(Register src, Register dest) PER_ARCH;
721 : inline void orPtr(Imm32 imm, Register dest) PER_ARCH;
722 :
723 : inline void and64(Register64 src, Register64 dest) PER_ARCH;
724 : inline void or64(Register64 src, Register64 dest) PER_ARCH;
725 : inline void xor64(Register64 src, Register64 dest) PER_ARCH;
726 :
727 : inline void xor32(Register src, Register dest) PER_SHARED_ARCH;
728 : inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
729 :
730 : inline void xorPtr(Register src, Register dest) PER_ARCH;
731 : inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;
732 :
733 : inline void and64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
734 : inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
735 : inline void xor64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
736 :
737 : // ===============================================================
738 : // Arithmetic functions
739 :
740 : inline void add32(Register src, Register dest) PER_SHARED_ARCH;
741 : inline void add32(Imm32 imm, Register dest) PER_SHARED_ARCH;
742 : inline void add32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
743 : inline void add32(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86_shared);
744 :
745 : inline void addPtr(Register src, Register dest) PER_ARCH;
746 : inline void addPtr(Register src1, Register src2, Register dest) DEFINED_ON(arm64);
747 : inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
748 : inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
749 : inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
750 : inline void addPtr(ImmPtr imm, Register dest);
751 : inline void addPtr(Imm32 imm, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
752 : inline void addPtr(Imm32 imm, const AbsoluteAddress& dest) DEFINED_ON(x86, x64);
753 : inline void addPtr(const Address& src, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
754 :
755 : inline void add64(Register64 src, Register64 dest) PER_ARCH;
756 : inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
757 : inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
758 : inline void add64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
759 :
760 : inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
761 :
762 : // Compute dest=SP-imm where dest is a pointer register and not SP. The
763 : // offset returned from sub32FromStackPtrWithPatch() must be passed to
764 : // patchSub32FromStackPtr().
765 : inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
766 : inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;
767 :
768 : inline void addDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
769 : inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);
770 :
771 : inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
772 : inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
773 : inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;
774 :
775 : inline void subPtr(Register src, Register dest) PER_ARCH;
776 : inline void subPtr(Register src, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
777 : inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
778 : inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
779 : inline void subPtr(const Address& addr, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
780 :
781 : inline void sub64(Register64 src, Register64 dest) PER_ARCH;
782 : inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
783 : inline void sub64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
784 :
785 : inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
786 :
787 : inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
788 :
789 : // On x86-shared, srcDest must be eax and edx will be clobbered.
790 : inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;
791 :
792 : inline void mul32(Register src1, Register src2, Register dest, Label* onOver, Label* onZero) DEFINED_ON(arm64);
793 :
794 : inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
795 : inline void mul64(const Operand& src, const Register64& dest, const Register temp)
796 : DEFINED_ON(x64, mips64);
797 : inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
798 : inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
799 : DEFINED_ON(x86, x64, arm, mips32, mips64);
800 : inline void mul64(const Register64& src, const Register64& dest, const Register temp)
801 : PER_ARCH;
802 :
803 : inline void mulBy3(Register src, Register dest) PER_ARCH;
804 :
805 : inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
806 : inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
807 :
808 : inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
809 :
810 : // Perform an integer division, returning the integer part rounded toward zero.
811 : // rhs must not be zero, and the division must not overflow.
812 : //
813 : // On x86_shared, srcDest must be eax and edx will be clobbered.
814 : // On ARM, the chip must have hardware division instructions.
815 : inline void quotient32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;
816 :
817 : // Perform an integer division, returning the remainder part.
818 : // rhs must not be zero, and the division must not overflow.
819 : //
820 : // On x86_shared, srcDest must be eax and edx will be clobbered.
821 : // On ARM, the chip must have hardware division instructions.
822 : inline void remainder32(Register rhs, Register srcDest, bool isUnsigned) PER_SHARED_ARCH;
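: //
: // For example, on x86_shared (editor's sketch), with the dividend in eax:
: //
: //   masm.quotient32(rhs, eax, /* isUnsigned = */ false);  // eax <- eax / rhs
: //   // edx has been clobbered by the division.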
823 :
824 : inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
825 : inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
826 :
827 : inline void inc64(AbsoluteAddress dest) PER_ARCH;
828 :
829 : inline void neg32(Register reg) PER_SHARED_ARCH;
830 : inline void neg64(Register64 reg) DEFINED_ON(x86, x64, arm, mips32, mips64);
831 :
832 : inline void negateFloat(FloatRegister reg) PER_SHARED_ARCH;
833 :
834 : inline void negateDouble(FloatRegister reg) PER_SHARED_ARCH;
835 :
836 : inline void absFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
837 : inline void absDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
838 :
839 : inline void sqrtFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
840 : inline void sqrtDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
841 :
842 : // srcDest = {min,max}{Float32,Double}(srcDest, other)
843 : // For min and max, handle NaN specially if handleNaN is true.
844 :
845 : inline void minFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
846 : inline void minDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
847 :
848 : inline void maxFloat32(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
849 : inline void maxDouble(FloatRegister other, FloatRegister srcDest, bool handleNaN) PER_SHARED_ARCH;
850 :
851 : // ===============================================================
852 : // Shift functions
853 :
854 : // For shift-by-register there may be platform-specific
855 : // variations, for example, x86 will perform the shift mod 32 but
856 : // ARM will perform the shift mod 256.
857 : //
858 : // For shift-by-immediate the platform assembler may restrict the
859 : // immediate, for example, the ARM assembler requires the count
860 : // for 32-bit shifts to be in the range [0,31].
861 :
862 : inline void lshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
863 : inline void rshift32(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
864 : inline void rshift32Arithmetic(Imm32 shift, Register srcDest) PER_SHARED_ARCH;
865 :
866 : inline void lshiftPtr(Imm32 imm, Register dest) PER_ARCH;
867 : inline void rshiftPtr(Imm32 imm, Register dest) PER_ARCH;
868 : inline void rshiftPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
869 : inline void rshiftPtrArithmetic(Imm32 imm, Register dest) PER_ARCH;
870 :
871 : inline void lshift64(Imm32 imm, Register64 dest) PER_ARCH;
872 : inline void rshift64(Imm32 imm, Register64 dest) PER_ARCH;
873 : inline void rshift64Arithmetic(Imm32 imm, Register64 dest) PER_ARCH;
874 :
875 : // On x86_shared these have the constraint that shift must be in CL.
876 : inline void lshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
877 : inline void rshift32(Register shift, Register srcDest) PER_SHARED_ARCH;
878 : inline void rshift32Arithmetic(Register shift, Register srcDest) PER_SHARED_ARCH;
879 :
880 : inline void lshift64(Register shift, Register64 srcDest) PER_ARCH;
881 : inline void rshift64(Register shift, Register64 srcDest) PER_ARCH;
882 : inline void rshift64Arithmetic(Register shift, Register64 srcDest) PER_ARCH;
883 :
884 : // ===============================================================
885 : // Rotation functions
886 : // Note: - on x86 and x64 the count register must be in CL.
887 : // - on x64 the temp register should be InvalidReg.
888 :
889 : inline void rotateLeft(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
890 : inline void rotateLeft(Register count, Register input, Register dest) PER_SHARED_ARCH;
891 : inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
892 : inline void rotateLeft64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
893 : inline void rotateLeft64(Imm32 count, Register64 input, Register64 dest, Register temp)
894 : PER_ARCH;
895 : inline void rotateLeft64(Register count, Register64 input, Register64 dest, Register temp)
896 : PER_ARCH;
897 :
898 : inline void rotateRight(Imm32 count, Register input, Register dest) PER_SHARED_ARCH;
899 : inline void rotateRight(Register count, Register input, Register dest) PER_SHARED_ARCH;
900 : inline void rotateRight64(Imm32 count, Register64 input, Register64 dest) DEFINED_ON(x64);
901 : inline void rotateRight64(Register count, Register64 input, Register64 dest) DEFINED_ON(x64);
902 : inline void rotateRight64(Imm32 count, Register64 input, Register64 dest, Register temp)
903 : PER_ARCH;
904 : inline void rotateRight64(Register count, Register64 input, Register64 dest, Register temp)
905 : PER_ARCH;
906 :
907 : // ===============================================================
908 : // Bit counting functions
909 :
910 : // knownNotZero may be true only if the src is known not to be zero.
911 : inline void clz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;
912 : inline void ctz32(Register src, Register dest, bool knownNotZero) PER_SHARED_ARCH;
913 :
914 : inline void clz64(Register64 src, Register dest) PER_ARCH;
915 : inline void ctz64(Register64 src, Register dest) PER_ARCH;
916 :
917 : // On x86_shared, temp may be Invalid only if the chip has the POPCNT instruction.
918 : // On ARM, temp may never be Invalid.
919 : inline void popcnt32(Register src, Register dest, Register temp) PER_SHARED_ARCH;
920 :
921 : // temp may be invalid only if the chip has the POPCNT instruction.
922 : inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;
923 :
924 : // ===============================================================
925 : // Condition functions
926 :
927 : template <typename T1, typename T2>
928 : inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
929 : DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
930 :
931 : template <typename T1, typename T2>
932 : inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest)
933 : PER_ARCH;
934 :
935 : // ===============================================================
936 : // Branch functions
937 :
938 : template <class L>
939 : inline void branch32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
940 : template <class L>
941 : inline void branch32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
942 :
943 : inline void branch32(Condition cond, const Address& lhs, Register rhs, Label* label) PER_SHARED_ARCH;
944 : inline void branch32(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
945 :
946 : inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
947 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
948 : inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
949 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
950 :
951 : inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs, Label* label)
952 : DEFINED_ON(x86_shared);
953 : inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
954 :
955 : inline void branch32(Condition cond, const Operand& lhs, Register rhs, Label* label) DEFINED_ON(x86_shared);
956 : inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs, Label* label) DEFINED_ON(x86_shared);
957 :
958 : inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, Label* label)
959 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
960 :
961 : // The supported conditions are Equal, NotEqual, LessThan(orEqual),
962 : // GreaterThan(orEqual), Below(orEqual) and Above(orEqual).
963 : // When a fail label is not defined, the not-taken case falls through to the
964 : // next instruction; otherwise it jumps to the fail label.
965 : inline void branch64(Condition cond, Register64 lhs, Imm64 val, Label* success,
966 : Label* fail = nullptr) PER_ARCH;
967 : inline void branch64(Condition cond, Register64 lhs, Register64 rhs, Label* success,
968 : Label* fail = nullptr) PER_ARCH;
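: // For instance (editor's sketch), with no fail label the not-taken case simply
: // falls through:
: //
: //   masm.branch64(Assembler::LessThan, lhs64, Imm64(0), &isNegative);
: //   // reached when lhs64 >= 0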
969 : // On x86 and x64 NotEqual and Equal conditions are allowed for the branch64 variants
970 : // with Address as lhs. On other platforms, only NotEqual is supported.
971 : inline void branch64(Condition cond, const Address& lhs, Imm64 val, Label* label) PER_ARCH;
972 :
973 : // Compare the value at |lhs| with the value at |rhs|. The scratch
974 : // register *must not* be the base of |lhs| or |rhs|.
975 : inline void branch64(Condition cond, const Address& lhs, const Address& rhs, Register scratch,
976 : Label* label) PER_ARCH;
977 :
978 : template <class L>
979 : inline void branchPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
980 : inline void branchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
981 : inline void branchPtr(Condition cond, Register lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
982 : inline void branchPtr(Condition cond, Register lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
983 : inline void branchPtr(Condition cond, Register lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;
984 :
985 : template <class L>
986 : inline void branchPtr(Condition cond, const Address& lhs, Register rhs, L label) PER_SHARED_ARCH;
987 : inline void branchPtr(Condition cond, const Address& lhs, ImmPtr rhs, Label* label) PER_SHARED_ARCH;
988 : inline void branchPtr(Condition cond, const Address& lhs, ImmGCPtr rhs, Label* label) PER_SHARED_ARCH;
989 : inline void branchPtr(Condition cond, const Address& lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;
990 :
991 : inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs, Label* label) PER_SHARED_ARCH;
992 :
993 : inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, Register rhs, Label* label)
994 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
995 : inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, Label* label)
996 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
997 :
998 : inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, Label* label)
999 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1000 :
1001 : // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
1002 : // chunk trailer, or nullptr if it is in the tenured heap.
1003 : void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;
1004 :
1005 : void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, Label* label)
1006 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1007 : void branchPtrInNurseryChunk(Condition cond, const Address& address, Register temp, Label* label)
1008 : DEFINED_ON(x86);
1009 : void branchValueIsNurseryObject(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;
1010 : void branchValueIsNurseryCell(Condition cond, const Address& address, Register temp, Label* label) PER_ARCH;
1011 : void branchValueIsNurseryCell(Condition cond, ValueOperand value, Register temp, Label* label) PER_ARCH;
1012 :
1013 : // This function compares a Value (lhs), which holds a private pointer
1014 : // boxed inside a js::Value, with a raw pointer (rhs).
1015 : inline void branchPrivatePtr(Condition cond, const Address& lhs, Register rhs, Label* label) PER_ARCH;
1016 :
1017 : inline void branchFloat(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
1018 : Label* label) PER_SHARED_ARCH;
1019 :
1020 : // Truncate a double/float32 to int32, jumping to the failure label when the
1021 : // value doesn't fit in an int32. This variant may return the value modulo 2**32,
1022 : // which isn't implemented on all architectures.
1023 : // E.g. the x64 variants will do this only in the int64_t range.
1024 : inline void branchTruncateFloat32MaybeModUint32(FloatRegister src, Register dest, Label* fail)
1025 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1026 : inline void branchTruncateDoubleMaybeModUint32(FloatRegister src, Register dest, Label* fail)
1027 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1028 :
1029 : // Truncate a double/float32 to intptr, jumping to the failure label when it doesn't fit.
1030 : inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest, Label* fail)
1031 : DEFINED_ON(x86, x64);
1032 : inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest, Label* fail)
1033 : DEFINED_ON(x86, x64);
1034 :
1035 : // Truncate a double/float32 to int32, jumping to the failure label when it doesn't fit.
1036 : inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest, Label* fail)
1037 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1038 : inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest, Label* fail)
1039 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1040 :
1041 : inline void branchDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs,
1042 : Label* label) PER_SHARED_ARCH;
1043 :
1044 : inline void branchDoubleNotInInt64Range(Address src, Register temp, Label* fail);
1045 : inline void branchDoubleNotInUInt64Range(Address src, Register temp, Label* fail);
1046 : inline void branchFloat32NotInInt64Range(Address src, Register temp, Label* fail);
1047 : inline void branchFloat32NotInUInt64Range(Address src, Register temp, Label* fail);
1048 :
1049 : template <typename T>
1050 : inline void branchAdd32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
1051 : template <typename T>
1052 : inline void branchSub32(Condition cond, T src, Register dest, Label* label) PER_SHARED_ARCH;
1053 :
1054 : inline void decBranchPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
1055 :
1056 : template <class L>
1057 : inline void branchTest32(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
1058 : template <class L>
1059 : inline void branchTest32(Condition cond, Register lhs, Imm32 rhs, L label) PER_SHARED_ARCH;
1060 : inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhh, Label* label) PER_SHARED_ARCH;
1061 : inline void branchTest32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, Label* label)
1062 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1063 :
1064 : template <class L>
1065 : inline void branchTestPtr(Condition cond, Register lhs, Register rhs, L label) PER_SHARED_ARCH;
1066 : inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
1067 : inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, Label* label) PER_SHARED_ARCH;
1068 :
1069 : template <class L>
1070 : inline void branchTest64(Condition cond, Register64 lhs, Register64 rhs, Register temp,
1071 : L label) PER_ARCH;
1072 :
1073 : // Branches to |label| if |reg| is false. |reg| should be a C++ bool.
1074 : template <class L>
1075 : inline void branchIfFalseBool(Register reg, L label);
1076 :
1077 : // Branches to |label| if |reg| is true. |reg| should be a C++ bool.
1078 : inline void branchIfTrueBool(Register reg, Label* label);
1079 :
1080 : inline void branchIfRope(Register str, Label* label);
1081 : inline void branchIfRopeOrExternal(Register str, Register temp, Label* label);
1082 :
1083 : inline void branchIfNotRope(Register str, Label* label);
1084 :
1085 : inline void branchLatin1String(Register string, Label* label);
1086 : inline void branchTwoByteString(Register string, Label* label);
1087 :
1088 : inline void branchIfFunctionHasNoJitEntry(Register fun, bool isConstructing, Label* label);
1089 : inline void branchIfInterpreted(Register fun, Label* label);
1090 :
1091 : inline void branchFunctionKind(Condition cond, JSFunction::FunctionKind kind, Register fun,
1092 : Register scratch, Label* label);
1093 :
1094 : void branchIfNotInterpretedConstructor(Register fun, Register scratch, Label* label);
1095 :
1096 : inline void branchIfObjectEmulatesUndefined(Register objReg, Register scratch, Label* slowCheck,
1097 : Label* label);
1098 :
1099 : // For all methods below: spectreRegToZero is a register that will be zeroed
1100 : // on speculatively executed code paths (when the branch should be taken but
1101 : // branch prediction speculates it isn't). Usually this will be the object
1102 : // register but the caller may pass a different register.
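: //
: // For example (editor's sketch), guarding on a plain-object class with the
: // object register doubling as spectreRegToZero:
: //
: //   masm.branchTestObjClass(Assembler::NotEqual, obj, &PlainObject::class_,
: //                           scratch, obj, &failure);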
1103 :
1104 : inline void branchTestObjClass(Condition cond, Register obj, const js::Class* clasp,
1105 : Register scratch, Register spectreRegToZero, Label* label);
1106 : inline void branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
1107 : const js::Class* clasp, Register scratch,
1108 : Label* label);
1109 :
1110 : inline void branchTestObjClass(Condition cond, Register obj, const Address& clasp,
1111 : Register scratch, Register spectreRegToZero, Label* label);
1112 : inline void branchTestObjClassNoSpectreMitigations(Condition cond, Register obj,
1113 : const Address& clasp, Register scratch,
1114 : Label* label);
1115 :
1116 : inline void branchTestObjShape(Condition cond, Register obj, const Shape* shape,
1117 : Register scratch, Register spectreRegToZero, Label* label);
1118 : inline void branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj,
1119 : const Shape* shape, Label* label);
1120 :
1121 : inline void branchTestObjShape(Condition cond, Register obj, Register shape, Register scratch,
1122 : Register spectreRegToZero, Label* label);
1123 : inline void branchTestObjShapeNoSpectreMitigations(Condition cond, Register obj,
1124 : Register shape, Label* label);
1125 :
1126 : inline void branchTestObjGroup(Condition cond, Register obj, const ObjectGroup* group,
1127 : Register scratch, Register spectreRegToZero, Label* label);
1128 : inline void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
1129 : const ObjectGroup* group, Label* label);
1130 :
1131 : inline void branchTestObjGroup(Condition cond, Register obj, Register group, Register scratch,
1132 : Register spectreRegToZero, Label* label);
1133 : inline void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj,
1134 : Register group, Label* label);
1135 :
1136 : void branchTestObjGroup(Condition cond, Register obj, const Address& group, Register scratch,
1137 : Register spectreRegToZero, Label* label);
1138 : void branchTestObjGroupNoSpectreMitigations(Condition cond, Register obj, const Address& group,
1139 : Register scratch, Label* label);
1140 :
1141 : // TODO: audit/fix callers to be Spectre safe.
1142 : inline void branchTestObjShapeUnsafe(Condition cond, Register obj, Register shape, Label* label);
1143 : inline void branchTestObjGroupUnsafe(Condition cond, Register obj, const ObjectGroup* group,
1144 : Label* label);
1145 :
1146 : void branchTestObjCompartment(Condition cond, Register obj, const Address& compartment,
1147 : Register scratch, Label* label);
1148 : void branchTestObjCompartment(Condition cond, Register obj, const JS::Compartment* compartment,
1149 : Register scratch, Label* label);
1150 : void branchIfObjGroupHasNoAddendum(Register obj, Register scratch, Label* label);
1151 : void branchIfPretenuredGroup(const ObjectGroup* group, Register scratch, Label* label);
1152 :
1153 : void branchIfNonNativeObj(Register obj, Register scratch, Label* label);
1154 :
1155 : void branchIfInlineTypedObject(Register obj, Register scratch, Label* label);
1156 :
1157 : void branchIfNotSimdObject(Register obj, Register scratch, SimdType simdType, Label* label);
1158 :
1159 : inline void branchTestClassIsProxy(bool proxy, Register clasp, Label* label);
1160 :
1161 : inline void branchTestObjectIsProxy(bool proxy, Register object, Register scratch, Label* label);
1162 :
1163 : inline void branchTestProxyHandlerFamily(Condition cond, Register proxy, Register scratch,
1164 : const void* handlerp, Label* label);
1165 :
1166 : void copyObjGroupNoPreBarrier(Register sourceObj, Register destObj, Register scratch);
1167 :
1168 : void loadTypedObjectDescr(Register obj, Register dest);
1169 : void loadTypedObjectLength(Register obj, Register dest);
1170 :
1171 : // Emit type case branch on tag matching if the type tag in the definition
1172 : // might actually be that type.
1173 : void maybeBranchTestType(MIRType type, MDefinition* maybeDef, Register tag, Label* label);
1174 :
1175 : inline void branchTestNeedsIncrementalBarrier(Condition cond, Label* label);
1176 :
1177 : // Perform a type-test on a tag of a Value (32bits boxing), or the tagged
1178 : // value (64bits boxing).
1179 : inline void branchTestUndefined(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1180 : inline void branchTestInt32(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1181 : inline void branchTestDouble(Condition cond, Register tag, Label* label)
1182 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1183 : inline void branchTestNumber(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1184 : inline void branchTestBoolean(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1185 : inline void branchTestString(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1186 : inline void branchTestSymbol(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1187 : inline void branchTestNull(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1188 : inline void branchTestObject(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1189 : inline void branchTestPrimitive(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1190 : inline void branchTestMagic(Condition cond, Register tag, Label* label) PER_SHARED_ARCH;
1191 :
1192 : // Perform a type-test on a Value, addressed by Address or BaseIndex, or
1193 : // loaded into ValueOperand.
1194 : // BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
1195 : // All variants clobber the ScratchReg on arm64.
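 : //
 : // For example, a minimal sketch of a fallible int32 unbox (register and
 : // label names are hypothetical):
 : //
 : //   masm.branchTestInt32(Assembler::NotEqual, value, &fail);
 : //   masm.unboxInt32(value, output);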
1196 : inline void branchTestUndefined(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1197 : inline void branchTestUndefined(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1198 : inline void branchTestUndefined(Condition cond, const ValueOperand& value, Label* label)
1199 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1200 :
1201 : inline void branchTestInt32(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1202 : inline void branchTestInt32(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1203 : inline void branchTestInt32(Condition cond, const ValueOperand& value, Label* label)
1204 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1205 :
1206 : inline void branchTestDouble(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1207 : inline void branchTestDouble(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1208 : inline void branchTestDouble(Condition cond, const ValueOperand& value, Label* label)
1209 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1210 :
1211 : inline void branchTestNumber(Condition cond, const ValueOperand& value, Label* label)
1212 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1213 :
1214 : inline void branchTestBoolean(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1215 : inline void branchTestBoolean(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1216 : inline void branchTestBoolean(Condition cond, const ValueOperand& value, Label* label)
1217 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1218 :
1219 : inline void branchTestString(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1220 : inline void branchTestString(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1221 : inline void branchTestString(Condition cond, const ValueOperand& value, Label* label)
1222 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1223 :
1224 : inline void branchTestSymbol(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1225 : inline void branchTestSymbol(Condition cond, const ValueOperand& value, Label* label)
1226 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1227 :
1228 : inline void branchTestNull(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1229 : inline void branchTestNull(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1230 : inline void branchTestNull(Condition cond, const ValueOperand& value, Label* label)
1231 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1232 :
1233 : // Clobbers the ScratchReg on x64.
1234 : inline void branchTestObject(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1235 : inline void branchTestObject(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1236 : inline void branchTestObject(Condition cond, const ValueOperand& value, Label* label)
1237 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1238 :
1239 : inline void branchTestGCThing(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1240 : inline void branchTestGCThing(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1241 :
1242 : inline void branchTestPrimitive(Condition cond, const ValueOperand& value, Label* label)
1243 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1244 :
1245 : inline void branchTestMagic(Condition cond, const Address& address, Label* label) PER_SHARED_ARCH;
1246 : inline void branchTestMagic(Condition cond, const BaseIndex& address, Label* label) PER_SHARED_ARCH;
1247 : template <class L>
1248 : inline void branchTestMagic(Condition cond, const ValueOperand& value, L label)
1249 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1250 :
1251 : inline void branchTestMagic(Condition cond, const Address& valaddr, JSWhyMagic why, Label* label) PER_ARCH;
1252 :
1253 : inline void branchTestMagicValue(Condition cond, const ValueOperand& val, JSWhyMagic why,
1254 : Label* label);
1255 :
1256 : void branchTestValue(Condition cond, const ValueOperand& lhs,
1257 : const Value& rhs, Label* label) PER_ARCH;
1258 :
1259 : // Checks whether the given Value evaluates to true or false in a condition.
1260 : // The type of the value must match the type tested by the method.
1261 : inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value, Label* label)
1262 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1263 : inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg, Label* label) PER_SHARED_ARCH;
1264 : inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value, Label* label) PER_ARCH;
1265 : inline void branchTestStringTruthy(bool truthy, const ValueOperand& value, Label* label)
1266 : DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
1267 :
1268 : // Create an unconditional branch to the address given as argument.
1269 : inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;
1270 :
1271 : private:
1272 :
1273 : template <typename T, typename S, typename L>
1274 : inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
1275 : DEFINED_ON(x86_shared);
1276 :
1277 : void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
1278 : DEFINED_ON(x86);
1279 : template <typename T>
1280 : void branchValueIsNurseryCellImpl(Condition cond, const T& value, Register temp, Label* label)
1281 : DEFINED_ON(arm64, x64);
1282 :
1283 : template <typename T>
1284 : inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
1285 : DEFINED_ON(arm, arm64, x86_shared);
1286 : template <typename T>
1287 : inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
1288 : DEFINED_ON(arm, arm64, x86_shared);
1289 : template <typename T>
1290 : inline void branchTestDoubleImpl(Condition cond, const T& t, Label* label)
1291 : DEFINED_ON(arm, arm64, x86_shared);
1292 : template <typename T>
1293 : inline void branchTestNumberImpl(Condition cond, const T& t, Label* label)
1294 : DEFINED_ON(arm, arm64, x86_shared);
1295 : template <typename T>
1296 : inline void branchTestBooleanImpl(Condition cond, const T& t, Label* label)
1297 : DEFINED_ON(arm, arm64, x86_shared);
1298 : template <typename T>
1299 : inline void branchTestStringImpl(Condition cond, const T& t, Label* label)
1300 : DEFINED_ON(arm, arm64, x86_shared);
1301 : template <typename T>
1302 : inline void branchTestSymbolImpl(Condition cond, const T& t, Label* label)
1303 : DEFINED_ON(arm, arm64, x86_shared);
1304 : template <typename T>
1305 : inline void branchTestNullImpl(Condition cond, const T& t, Label* label)
1306 : DEFINED_ON(arm, arm64, x86_shared);
1307 : template <typename T>
1308 : inline void branchTestObjectImpl(Condition cond, const T& t, Label* label)
1309 : DEFINED_ON(arm, arm64, x86_shared);
1310 : template <typename T>
1311 : inline void branchTestGCThingImpl(Condition cond, const T& t, Label* label)
1312 : DEFINED_ON(arm, arm64, x86_shared);
1313 : template <typename T>
1314 : inline void branchTestPrimitiveImpl(Condition cond, const T& t, Label* label)
1315 : DEFINED_ON(arm, arm64, x86_shared);
1316 : template <typename T, class L>
1317 : inline void branchTestMagicImpl(Condition cond, const T& t, L label)
1318 : DEFINED_ON(arm, arm64, x86_shared);
1319 :
1320 : public:
1321 :
1322 : inline void cmp32Move32(Condition cond, Register lhs, Register rhs, Register src,
1323 : Register dest)
1324 : DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1325 :
1326 : inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs, Register src,
1327 : Register dest)
1328 : DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1329 :
1330 : inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs, Register src,
1331 : Register dest)
1332 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1333 :
1334 : inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, const Address& src,
1335 : Register dest)
1336 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1337 :
1338 : inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask, Register src,
1339 : Register dest)
1340 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1341 :
1342 : // Conditional move for Spectre mitigations.
1343 : inline void spectreMovePtr(Condition cond, Register src, Register dest)
1344 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1345 :
1346 : // Zeroes dest if the condition is true.
1347 : inline void spectreZeroRegister(Condition cond, Register scratch, Register dest)
1348 : DEFINED_ON(arm, arm64, mips_shared, x86_shared);
1349 :
1350 : // Performs a bounds check and zeroes the index register if out-of-bounds
1351 : // (to mitigate Spectre).
1352 : private:
1353 :
1354 : inline void spectreBoundsCheck32(Register index, const Operand& length, Register maybeScratch,
1355 : Label* failure)
1356 : DEFINED_ON(x86);
1357 :
1358 : public:
1359 :
1360 : inline void spectreBoundsCheck32(Register index, Register length, Register maybeScratch,
1361 : Label* failure)
1362 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
1363 : inline void spectreBoundsCheck32(Register index, const Address& length, Register maybeScratch,
1364 : Label* failure)
1365 : DEFINED_ON(arm, arm64, mips_shared, x86, x64);
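 :
 : // For example, a minimal sketch of a Spectre-safe element read (register
 : // and label names are hypothetical):
 : //
 : //   masm.spectreBoundsCheck32(index, length, scratch, &fail);
 : //   // Past this point `index` is in bounds, and under misspeculation it
 : //   // has been zeroed, so a subsequent indexed load cannot leak data.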
1366 :
1367 : // ========================================================================
1368 : // Canonicalization primitives.
1369 : inline void canonicalizeDouble(FloatRegister reg);
1370 : inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);
1371 :
1372 : inline void canonicalizeFloat(FloatRegister reg);
1373 : inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
1374 :
1375 : inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
1376 : DEFINED_ON(x86_shared);
1377 :
1378 : public:
1379 : // ========================================================================
1380 : // Memory access primitives.
1381 : inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
1382 : DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1383 : inline void storeUncanonicalizedDouble(FloatRegister src, const BaseIndex& dest)
1384 : DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1385 : inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
1386 : DEFINED_ON(x86_shared);
1387 :
1388 : template<class T>
1389 : inline void storeDouble(FloatRegister src, const T& dest);
1390 :
1391 : inline void boxDouble(FloatRegister src, const Address& dest);
1392 : using MacroAssemblerSpecific::boxDouble;
1393 :
1394 : inline void storeUncanonicalizedFloat32(FloatRegister src, const Address& dest)
1395 : DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1396 : inline void storeUncanonicalizedFloat32(FloatRegister src, const BaseIndex& dest)
1397 : DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
1398 : inline void storeUncanonicalizedFloat32(FloatRegister src, const Operand& dest)
1399 : DEFINED_ON(x86_shared);
1400 :
1401 : template<class T>
1402 : inline void storeFloat32(FloatRegister src, const T& dest);
1403 :
1404 : inline void storeFloat32x3(FloatRegister src, const Address& dest) PER_SHARED_ARCH;
1405 : inline void storeFloat32x3(FloatRegister src, const BaseIndex& dest) PER_SHARED_ARCH;
1406 :
1407 : template <typename T>
1408 : void storeUnboxedValue(const ConstantOrRegister& value, MIRType valueType, const T& dest,
1409 : MIRType slotType) PER_ARCH;
1410 :
1411 : inline void memoryBarrier(MemoryBarrierBits barrier) PER_SHARED_ARCH;
1412 :
1413 : public:
1414 : // ========================================================================
1415 : // Truncate floating point.
1416 :
1417 : // Behaviour is undefined when the truncated value is outside the Int64 range.
1418 : // Needs a temp register if SSE3 is not present.
1419 : inline void truncateFloat32ToInt64(Address src, Address dest, Register temp)
1420 : DEFINED_ON(x86_shared);
1421 : inline void truncateFloat32ToUInt64(Address src, Address dest, Register temp,
1422 : FloatRegister floatTemp)
1423 : DEFINED_ON(x86, x64);
1424 : inline void truncateDoubleToInt64(Address src, Address dest, Register temp)
1425 : DEFINED_ON(x86_shared);
1426 : inline void truncateDoubleToUInt64(Address src, Address dest, Register temp,
1427 : FloatRegister floatTemp)
1428 : DEFINED_ON(x86, x64);
1429 :
1430 : public:
1431 : // ========================================================================
1432 : // Convert floating point.
1433 :
1434 : // A temp register is required on x86 and x64; it must be Invalid on mips64.
1435 : void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp)
1436 : DEFINED_ON(arm64, mips64, x64, x86);
1437 :
1438 : void convertInt64ToFloat32(Register64 src, FloatRegister dest)
1439 : DEFINED_ON(arm64, mips64, x64, x86);
1440 :
1441 : bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;
1442 :
1443 : // A temp register is required when convertUInt64ToDoubleNeedsTemp() returns true.
1444 : void convertUInt64ToDouble(Register64 src, FloatRegister dest, Register temp) PER_ARCH;
1445 :
1446 : void convertInt64ToDouble(Register64 src, FloatRegister dest)
1447 : DEFINED_ON(arm64, mips64, x64, x86);
1448 :
1449 : public:
1450 : // ========================================================================
1451 : // Wasm support.
1452 :
1453 : CodeOffset wasmTrapInstruction() PER_SHARED_ARCH;
1454 :
1455 : void wasmTrap(wasm::Trap trap, wasm::BytecodeOffset bytecodeOffset);
1456 : void wasmInterruptCheck(Register tls, wasm::BytecodeOffset bytecodeOffset);
1457 : void wasmReserveStackChecked(uint32_t amount, wasm::BytecodeOffset trapOffset);
1458 :
1459 : // Emit a bounds check against the wasm heap limit, jumping to 'label' if
1460 : // 'cond' holds. Required when WASM_HUGE_MEMORY is not defined. If
1461 : // JitOptions.spectreMaskIndex is true, in speculative executions 'index' is
1462 : // saturated in-place to 'boundsCheckLimit'.
1463 : void wasmBoundsCheck(Condition cond, Register index, Register boundsCheckLimit, Label* label)
1464 : DEFINED_ON(arm, arm64, mips32, mips64, x86);
1465 :
1466 : void wasmBoundsCheck(Condition cond, Register index, Address boundsCheckLimit, Label* label)
1467 : DEFINED_ON(arm, arm64, mips32, mips64, x86);
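 :
 : // For example (a sketch; `index`, `boundsCheckLimit` and the trap label
 : // are hypothetical):
 : //
 : //   masm.wasmBoundsCheck(Assembler::AboveOrEqual, index, boundsCheckLimit,
 : //                        &outOfBoundsTrap);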
1468 :
1469 : // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
1470 : void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out) DEFINED_ON(x86, x64);
1471 : void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out) DEFINED_ON(x86, x64);
1472 : void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr) DEFINED_ON(x86, x64);
1473 : void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr) DEFINED_ON(x86);
1474 :
1475 : // For all the ARM and ARM64 wasmLoad and wasmStore functions, `ptr` MUST
1476 : // equal `ptrScratch`, and that register will be updated based on conditions
1477 : // listed below (where it is only mentioned as `ptr`).
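 : //
 : // For example, callers pass the same register twice (a sketch):
 : //
 : //   masm.wasmLoad(access, memoryBase, ptr, ptr, output);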
1478 :
1479 : // `ptr` will be updated if access.offset() != 0 or access.type() == Scalar::Int64.
1480 : void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1481 : Register ptrScratch, AnyRegister output)
1482 : DEFINED_ON(arm, arm64, mips_shared);
1483 : void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1484 : Register ptrScratch, Register64 output)
1485 : DEFINED_ON(arm, arm64, mips32, mips64);
1486 : void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Register memoryBase,
1487 : Register ptr, Register ptrScratch)
1488 : DEFINED_ON(arm, arm64, mips_shared);
1489 : void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Register memoryBase,
1490 : Register ptr, Register ptrScratch)
1491 : DEFINED_ON(arm, arm64, mips32, mips64);
1492 :
1493 : // `ptr` will always be updated.
1494 : void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1495 : Register ptrScratch, Register output, Register tmp)
1496 : DEFINED_ON(arm, mips32, mips64);
1497 :
1498 : // ARM: `ptr` will always be updated and `tmp1` is always needed. `tmp2` is
1499 : // needed for Float32; `tmp2` and `tmp3` are needed for Float64. Temps must
1500 : // be Invalid when they are not needed.
1501 : // MIPS: `ptr` will always be updated.
1502 : void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1503 : Register ptrScratch, FloatRegister output, Register tmp1, Register tmp2,
1504 : Register tmp3)
1505 : DEFINED_ON(arm, mips32, mips64);
1506 :
1507 : // `ptr` will always be updated.
1508 : void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, Register ptr,
1509 : Register ptrScratch, Register64 output, Register tmp)
1510 : DEFINED_ON(arm, mips32, mips64);
1511 :
1512 : // ARM: `ptr` and `value` will always be updated. `tmp` must be Invalid.
1513 : // MIPS: `ptr` will always be updated.
1514 : void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, Register memoryBase,
1515 : Register ptr, Register ptrScratch, Register tmp)
1516 : DEFINED_ON(arm, mips32, mips64);
1517 :
1518 : // `ptr` will always be updated.
1519 : void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, FloatRegister floatValue,
1520 : Register memoryBase, Register ptr, Register ptrScratch, Register tmp)
1521 : DEFINED_ON(arm, mips32, mips64);
1522 :
1523 : // `ptr` will always be updated.
1524 : void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
1525 : Register memoryBase, Register ptr, Register ptrScratch,
1526 : Register tmp)
1527 : DEFINED_ON(arm, mips32, mips64);
1528 :
1529 : // Wasm-specific methods, used in both the wasm baseline compiler and Ion.
1530 :
1531 : // The truncate-to-int32 methods do not bind the rejoin label; clients must
1532 : // do so if oolWasmTruncateCheckF64ToI32() can jump to it.
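 : //
 : // For example (a sketch; `ool` is a hypothetical out-of-line path):
 : //
 : //   masm.wasmTruncateDoubleToInt32(input, output, isSaturating, ool->entry());
 : //   masm.bind(ool->rejoin());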
1533 : void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, bool isSaturating,
1534 : Label* oolEntry) PER_ARCH;
1535 : void wasmTruncateDoubleToInt32(FloatRegister input, Register output, bool isSaturating,
1536 : Label* oolEntry) PER_SHARED_ARCH;
1537 : void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output, TruncFlags flags,
1538 : wasm::BytecodeOffset off, Label* rejoin)
1539 : DEFINED_ON(arm, arm64, x86_shared, mips_shared);
1540 :
1541 : void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, bool isSaturating,
1542 : Label* oolEntry) PER_ARCH;
1543 : void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, bool isSaturating,
1544 : Label* oolEntry) PER_SHARED_ARCH;
1545 : void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output, TruncFlags flags,
1546 : wasm::BytecodeOffset off, Label* rejoin)
1547 : DEFINED_ON(arm, arm64, x86_shared, mips_shared);
1548 :
1549 : // The truncate-to-int64 methods will always bind the `oolRejoin` label
1550 : // after the last emitted instruction.
1551 : void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, bool isSaturating,
1552 : Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble)
1553 : DEFINED_ON(arm64, x86, x64, mips64);
1554 : void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, bool isSaturating,
1555 : Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble)
1556 : DEFINED_ON(arm64, x86, x64, mips64);
1557 : void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output, TruncFlags flags,
1558 : wasm::BytecodeOffset off, Label* rejoin)
1559 : DEFINED_ON(arm, arm64, x86_shared, mips_shared);
1560 :
1561 : void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, bool isSaturating,
1562 : Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble)
1563 : DEFINED_ON(arm64, x86, x64, mips64);
1564 : void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, bool isSaturating,
1565 : Label* oolEntry, Label* oolRejoin, FloatRegister tempDouble)
1566 : DEFINED_ON(arm64, x86, x64, mips64);
1567 : void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output, TruncFlags flags,
1568 : wasm::BytecodeOffset off, Label* rejoin)
1569 : DEFINED_ON(arm, arm64, x86_shared, mips_shared);
1570 :
1571 : // This function takes care of loading the callee's TLS and pinned regs, but
1572 : // it is the caller's responsibility to save/restore its own TLS and pinned regs.
1573 : void wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
1574 :
1575 : // WasmTableCallIndexReg must contain the index of the indirect call.
1576 : void wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, bool needsBoundsCheck);
1577 :
1578 : // This function takes care of loading the pointer to the current instance
1579 : // as the implicit first argument. It preserves TLS and pinned registers.
1580 : // (TLS & pinned regs are non-volatile registers in the system ABI).
1581 : void wasmCallBuiltinInstanceMethod(const wasm::CallSiteDesc& desc, const ABIArg& instanceArg,
1582 : wasm::SymbolicAddress builtin);
1583 :
1584 : // As enterFakeExitFrame(), but using register conventions appropriate for
1585 : // wasm stubs.
1586 : void enterFakeExitFrameForWasm(Register cxreg, Register scratch, ExitFrameType type)
1587 : PER_SHARED_ARCH;
1588 :
1589 : public:
1590 : // ========================================================================
1591 : // Barrier functions.
1592 :
1593 : void emitPreBarrierFastPath(JSRuntime* rt, MIRType type, Register temp1, Register temp2,
1594 : Register temp3, Label* noBarrier);
1595 :
1596 : public:
1597 : // ========================================================================
1598 : // Clamping functions.
1599 :
1600 : inline void clampIntToUint8(Register reg) PER_SHARED_ARCH;
1601 :
1602 : public:
1603 : // ========================================================================
1604 : // Primitive atomic operations.
1605 : //
1606 : // If the access is from JS and the eventual destination of the result is a
1607 : // js::Value, it's probably best to use the JS-specific versions of these,
1608 : // see further below.
1609 : //
1610 : // Temp registers must be defined unless otherwise noted in the per-function
1611 : // constraints.
1612 :
1613 : // 8-bit, 16-bit, and 32-bit wide operations.
1614 : //
1615 : // The 8-bit and 16-bit operations zero-extend or sign-extend the result to
1616 : // 32 bits, according to `type`. On 64-bit systems, the upper 32 bits of the
1617 : // result will be zero on some platforms (eg, on x64) and will be the sign
1618 : // extension of the lower bits on other platforms (eg, MIPS).
1619 :
1620 : // CompareExchange with memory. Return the value that was in memory,
1621 : // whether we wrote or not.
1622 : //
1623 : // x86-shared: `output` must be eax.
1624 : // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
1625 : // and 16-bit wide operations.
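 : //
 : // For example, a full-barrier 32-bit CAS (a sketch; on x86-shared
 : // `output` must be eax as noted above):
 : //
 : //   masm.compareExchange(Scalar::Int32, Synchronization::Full(), mem,
 : //                        expected, replacement, output);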
1626 :
1627 : void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
1628 : Register expected, Register replacement, Register output)
1629 : DEFINED_ON(arm, arm64, x86_shared);
1630 :
1631 : void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
1632 : Register expected, Register replacement, Register output)
1633 : DEFINED_ON(arm, arm64, x86_shared);
1634 :
1635 :
1636 : void compareExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
1637 : Register expected, Register replacement, Register valueTemp,
1638 : Register offsetTemp, Register maskTemp, Register output)
1639 : DEFINED_ON(mips_shared);
1640 :
1641 : void compareExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
1642 : Register expected, Register replacement, Register valueTemp,
1643 : Register offsetTemp, Register maskTemp, Register output)
1644 : DEFINED_ON(mips_shared);
1645 :
1646 : // Exchange with memory. Return the value initially in memory.
1647 : // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
1648 : // and 16-bit wide operations.
1649 :
1650 : void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
1651 : Register value, Register output)
1652 : DEFINED_ON(arm, arm64, x86_shared);
1653 :
1654 : void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
1655 : Register value, Register output)
1656 : DEFINED_ON(arm, arm64, x86_shared);
1657 :
1658 : void atomicExchange(Scalar::Type type, const Synchronization& sync, const Address& mem,
1659 : Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
1660 : Register output)
1661 : DEFINED_ON(mips_shared);
1662 :
1663 : void atomicExchange(Scalar::Type type, const Synchronization& sync, const BaseIndex& mem,
1664 : Register value, Register valueTemp, Register offsetTemp, Register maskTemp,
1665 : Register output)
1666 : DEFINED_ON(mips_shared);
1667 :
1668 : // Read-modify-write with memory. Return the value in memory before the
1669 : // operation.
1670 : //
1671 : // x86-shared:
1672 : // For 8-bit operations, `value` and `output` must have a byte subregister.
1673 : // For Add and Sub, `temp` must be invalid.
1674 : // For And, Or, and Xor, `output` must be eax and `temp` must have a byte subregister.
1675 : //
1676 : // ARM: Registers `value` and `output` must differ.
1677 : // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
1678 : // and 16-bit wide operations; `value` and `output` must differ.
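 : //
 : // For example, a fetch-add that returns the old value (a sketch; register
 : // choices must respect the per-platform constraints above):
 : //
 : //   masm.atomicFetchOp(Scalar::Uint32, Synchronization::Full(),
 : //                      AtomicFetchAddOp, value, mem, temp, output);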
1679 :
1680 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1681 : Register value, const Address& mem, Register temp, Register output)
1682 : DEFINED_ON(arm, arm64, x86_shared);
1683 :
1684 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1685 : Imm32 value, const Address& mem, Register temp, Register output)
1686 : DEFINED_ON(x86_shared);
1687 :
1688 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1689 : Register value, const BaseIndex& mem, Register temp, Register output)
1690 : DEFINED_ON(arm, arm64, x86_shared);
1691 :
1692 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1693 : Imm32 value, const BaseIndex& mem, Register temp, Register output)
1694 : DEFINED_ON(x86_shared);
1695 :
1696 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1697 : Register value, const Address& mem, Register valueTemp,
1698 : Register offsetTemp, Register maskTemp, Register output)
1699 : DEFINED_ON(mips_shared);
1700 :
1701 : void atomicFetchOp(Scalar::Type type, const Synchronization& sync, AtomicOp op,
1702 : Register value, const BaseIndex& mem, Register valueTemp,
1703 : Register offsetTemp, Register maskTemp, Register output)
1704 : DEFINED_ON(mips_shared);
1705 :
1706 : // Read-modify-write with memory. Return no value.
1707 : // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
1708 : // and 16-bit wide operations.
1709 :
1710 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
1711 : const Address& mem, Register temp)
1712 : DEFINED_ON(arm, arm64, x86_shared);
1713 :
1714 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
1715 : const Address& mem, Register temp)
1716 : DEFINED_ON(x86_shared);
1717 :
1718 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
1719 : const BaseIndex& mem, Register temp)
1720 : DEFINED_ON(arm, arm64, x86_shared);
1721 :
1722 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Imm32 value,
1723 : const BaseIndex& mem, Register temp)
1724 : DEFINED_ON(x86_shared);
1725 :
1726 :
1727 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
1728 : const Address& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
1729 : DEFINED_ON(mips_shared);
1730 :
1731 : void atomicEffectOp(Scalar::Type type, const Synchronization& sync, AtomicOp op, Register value,
1732 : const BaseIndex& mem, Register valueTemp, Register offsetTemp, Register maskTemp)
1733 : DEFINED_ON(mips_shared);
1734 :
1735 : // 64-bit wide operations.
1736 :
1737 : // 64-bit atomic load. On 64-bit systems, use regular wasm load with
1738 : // Synchronization::Load, not this method.
1739 : //
1740 : // x86: `temp` must be ecx:ebx; `output` must be edx:eax.
1741 : // ARM: `temp` should be invalid; `output` must be an (even,odd) pair.
1742 : // MIPS32: `temp` should be invalid.
1743 :
1744 : void atomicLoad64(const Synchronization& sync, const Address& mem, Register64 temp,
1745 : Register64 output)
1746 : DEFINED_ON(arm, mips32, x86);
1747 :
1748 : void atomicLoad64(const Synchronization& sync, const BaseIndex& mem, Register64 temp,
1749 : Register64 output)
1750 : DEFINED_ON(arm, mips32, x86);
1751 :
1752 : // x86: `expected` must be the same as `output`, and must be edx:eax
1753 : // x86: `replacement` must be ecx:ebx
1754 : // x64: `output` must be rax.
1755 : // ARM: Registers must be distinct; `replacement` and `output` must be (even,odd) pairs.
1756 : // MIPS: Registers must be distinct.
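 : //
 : // For example, a 64-bit CAS on x86 uses the fixed register pairs noted
 : // above (a sketch):
 : //
 : //   masm.compareExchange64(Synchronization::Full(), mem,
 : //                          Register64(edx, eax),  // expected == output
 : //                          Register64(ecx, ebx),  // replacement
 : //                          Register64(edx, eax)); // output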
1757 :
1758 : void compareExchange64(const Synchronization& sync, const Address& mem, Register64 expected,
1759 : Register64 replacement, Register64 output) PER_ARCH;
1760 :
1761 : void compareExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 expected,
1762 : Register64 replacement, Register64 output) PER_ARCH;
1763 :
1764 : // x86: `value` must be ecx:ebx; `output` must be edx:eax.
1765 : // ARM: Registers must be distinct; `value` and `output` must be (even,odd) pairs.
1766 : // MIPS: Registers must be distinct.
1767 :
1768 : void atomicExchange64(const Synchronization& sync, const Address& mem, Register64 value,
1769 : Register64 output) PER_ARCH;
1770 :
1771 : void atomicExchange64(const Synchronization& sync, const BaseIndex& mem, Register64 value,
1772 : Register64 output) PER_ARCH;
1773 :
1774 : // x86: `output` must be edx:eax, `temp` must be ecx:ebx.
1775 : // x64: For And, Or, and Xor `output` must be rax.
1776 : // ARM: Registers must be distinct; `temp` and `output` must be (even,odd) pairs.
1777 : // MIPS: Registers must be distinct.
1778 : // MIPS32: `temp` should be invalid.
1779 :
1780 : void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
1781 : const Address& mem, Register64 temp, Register64 output)
1782 : DEFINED_ON(arm, arm64, mips32, mips64, x64);
1783 :
1784 : void atomicFetchOp64(const Synchronization& sync, AtomicOp op, Register64 value,
1785 : const BaseIndex& mem, Register64 temp, Register64 output)
1786 : DEFINED_ON(arm, arm64, mips32, mips64, x64);
1787 :
1788 : void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
1789 : const Address& mem, Register64 temp, Register64 output)
1790 : DEFINED_ON(x86);
1791 :
1792 : void atomicFetchOp64(const Synchronization& sync, AtomicOp op, const Address& value,
1793 : const BaseIndex& mem, Register64 temp, Register64 output)
1794 : DEFINED_ON(x86);
1795 :
1796 : void atomicEffectOp64(const Synchronization& sync, AtomicOp op, Register64 value,
1797 : const BaseIndex& mem)
1798 : DEFINED_ON(x64);
1799 :
1800 : // ========================================================================
1801 : // JS atomic operations.
1802 : //
1803 : // Here the arrayType must be a type that is valid for JS. As of 2017 that
1804 : // is an 8-bit, 16-bit, or 32-bit integer type.
1805 : //
1806 : // If arrayType is Scalar::Uint32 then:
1807 : //
1808 : // - `output` must be a float register (this is bug 1077305)
1809 : // - if the operation takes one temp register then `temp` must be defined
1810 : // - if the operation takes two temp registers then `temp2` must be defined.
1811 : //
1812 : // Otherwise `output` must be a GPR and `temp`/`temp2` should be InvalidReg.
1813 : // (`temp1` must always be valid.)
1814 : //
1815 : // For additional register constraints, see the primitive 32-bit operations
1816 : // above.
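 : //
 : // For example, a fetch-add on a Uint32 array routes the result through a
 : // float register (a sketch; register names are hypothetical):
 : //
 : //   masm.atomicFetchOpJS(Scalar::Uint32, Synchronization::Full(),
 : //                        AtomicFetchAddOp, value, mem, temp1, temp2,
 : //                        AnyRegister(floatOut));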
1817 :
1818 : void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
1819 : Register expected, Register replacement, Register temp,
1820 : AnyRegister output)
1821 : DEFINED_ON(arm, arm64, x86_shared);
1822 :
1823 : void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
1824 : const BaseIndex& mem, Register expected, Register replacement,
1825 : Register temp, AnyRegister output)
1826 : DEFINED_ON(arm, arm64, x86_shared);
1827 :
1828 : void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
1829 : Register expected, Register replacement, Register valueTemp,
1830 : Register offsetTemp, Register maskTemp, Register temp,
1831 : AnyRegister output)
1832 : DEFINED_ON(mips_shared);
1833 :
1834 : void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
1835 : Register expected, Register replacement, Register valueTemp,
1836 : Register offsetTemp, Register maskTemp, Register temp,
1837 : AnyRegister output)
1838 : DEFINED_ON(mips_shared);
1839 :
1840 : void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
1841 : Register value, Register temp, AnyRegister output)
1842 : DEFINED_ON(arm, arm64, x86_shared);
1843 :
1844 : void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
1845 : Register value, Register temp, AnyRegister output)
1846 : DEFINED_ON(arm, arm64, x86_shared);
1847 :
1848 : void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const Address& mem,
1849 : Register value, Register valueTemp, Register offsetTemp,
1850 : Register maskTemp, Register temp, AnyRegister output)
1851 : DEFINED_ON(mips_shared);
1852 :
1853 : void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, const BaseIndex& mem,
1854 : Register value, Register valueTemp, Register offsetTemp,
1855 : Register maskTemp, Register temp, AnyRegister output)
1856 : DEFINED_ON(mips_shared);
1857 :
1858 :
1859 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1860 : Register value, const Address& mem, Register temp1, Register temp2,
1861 : AnyRegister output)
1862 : DEFINED_ON(arm, arm64, x86_shared);
1863 :
1864 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1865 : Register value, const BaseIndex& mem, Register temp1, Register temp2,
1866 : AnyRegister output)
1867 : DEFINED_ON(arm, arm64, x86_shared);
1868 :
1869 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1870 : Imm32 value, const Address& mem, Register temp1, Register temp2,
1871 : AnyRegister output)
1872 : DEFINED_ON(x86_shared);
1873 :
1874 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1875 : Imm32 value, const BaseIndex& mem, Register temp1, Register temp2,
1876 : AnyRegister output)
1877 : DEFINED_ON(x86_shared);
1878 :
1879 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1880 : Register value, const Address& mem, Register valueTemp,
1881 : Register offsetTemp, Register maskTemp, Register temp,
1882 : AnyRegister output)
1883 : DEFINED_ON(mips_shared);
1884 :
1885 : void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1886 : Register value, const BaseIndex& mem, Register valueTemp,
1887 : Register offsetTemp, Register maskTemp, Register temp,
1888 : AnyRegister output)
1889 : DEFINED_ON(mips_shared);
1890 :
1891 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1892 : Register value, const Address& mem, Register temp)
1893 : DEFINED_ON(arm, arm64, x86_shared);
1894 :
1895 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1896 : Register value, const BaseIndex& mem, Register temp)
1897 : DEFINED_ON(arm, arm64, x86_shared);
1898 :
1899 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1900 : Imm32 value, const Address& mem, Register temp)
1901 : DEFINED_ON(x86_shared);
1902 :
1903 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1904 : Imm32 value, const BaseIndex& mem, Register temp)
1905 : DEFINED_ON(x86_shared);
1906 :
1907 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1908 : Register value, const Address& mem, Register valueTemp,
1909 : Register offsetTemp, Register maskTemp)
1910 : DEFINED_ON(mips_shared);
1911 :
1912 : void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, AtomicOp op,
1913 : Register value, const BaseIndex& mem, Register valueTemp,
1914 : Register offsetTemp, Register maskTemp)
1915 : DEFINED_ON(mips_shared);
1916 :
1917 : // ========================================================================
1918 : // Spectre Mitigations.
1919 : //
1920 : // Spectre attacks are side-channel attacks based on cache pollution or
1921 : // the slow execution of some instructions. Multiple Spectre mitigations are
1922 : // possible:
1923 : //
1924 : // - Stop speculative execution with memory barriers. Memory barriers
1925 : //   force all branches depending on loads to be resolved, and thus
1926 : //   resolve all mis-speculated paths.
1927 : //
1928 : // - Use conditional move instructions. Some CPUs have a branch predictor,
1929 : //   but not a flag predictor. In such cases, using a conditional move
1930 : //   instruction to zero some pointer/index is enough to add a
1931 : //   data dependency which prevents any further execution until the load is
1932 : //   resolved.
1933 :
1934 : void spectreMaskIndex(Register index, Register length, Register output);
1935 : void spectreMaskIndex(Register index, const Address& length, Register output);
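 :
 : // For example (a sketch): after a regular bounds check has passed,
 : //
 : //   masm.spectreMaskIndex(index, length, maskedIndex);
 : //
 : // leaves `index` in `maskedIndex`, zeroed under misspeculation, so a
 : // following indexed load cannot read out of bounds.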
1936 :
1937 : // The length must be a power of two. Performs a bounds check and Spectre index
1938 : // masking.
1939 : void boundsCheck32PowerOfTwo(Register index, uint32_t length, Label* failure);
1940 :
1941 : void speculationBarrier() PER_SHARED_ARCH;
1942 :
1943 : //}}} check_macroassembler_decl_style
1944 : public:
1945 :
1946 : // Emits a test of a value against all types in a TypeSet. A scratch
1947 : // register is required.
1948 : template <typename Source>
1949 : void guardTypeSet(const Source& address, const TypeSet* types, BarrierKind kind,
1950 : Register unboxScratch, Register objScratch, Register spectreRegToZero,
1951 : Label* miss);
1952 :
1953 : void guardObjectType(Register obj, const TypeSet* types, Register scratch,
1954 : Register spectreRegToZero, Label* miss);
1955 :
1956 : #ifdef DEBUG
1957 : void guardTypeSetMightBeIncomplete(const TypeSet* types, Register obj, Register scratch,
1958 : Label* label);
1959 : #endif
1960 :
1961 : // Unsafe here means the caller is responsible for Spectre mitigations if
1962 : // needed. Prefer branchTestObjGroup or one of the other masm helpers!
1963 : void loadObjGroupUnsafe(Register obj, Register dest) {
1964 0 : loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
1965 : }
1966 0 : void loadObjClassUnsafe(Register obj, Register dest) {
1967 181 : loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
1968 0 : loadPtr(Address(dest, ObjectGroup::offsetOfClasp()), dest);
1969 0 : }
1970 :
1971 : template <typename EmitPreBarrier>
1972 : inline void storeObjGroup(Register group, Register obj, EmitPreBarrier emitPreBarrier);
1973 : template <typename EmitPreBarrier>
1974 : inline void storeObjGroup(ObjectGroup* group, Register obj, EmitPreBarrier emitPreBarrier);
1975 : template <typename EmitPreBarrier>
1976 : inline void storeObjShape(Register shape, Register obj, EmitPreBarrier emitPreBarrier);
1977 : template <typename EmitPreBarrier>
1978 : inline void storeObjShape(Shape* shape, Register obj, EmitPreBarrier emitPreBarrier);
1979 :
1980 : void loadObjPrivate(Register obj, uint32_t nfixed, Register dest) {
1981 21 : loadPtr(Address(obj, NativeObject::getPrivateDataOffset(nfixed)), dest);
1982 : }
1983 :
1984 357 : void loadObjProto(Register obj, Register dest) {
1985 357 : loadPtr(Address(obj, JSObject::offsetOfGroup()), dest);
1986 0 : loadPtr(Address(dest, ObjectGroup::offsetOfProto()), dest);
1987 0 : }
1988 :
1989 : void loadStringLength(Register str, Register dest) {
1990 232 : load32(Address(str, JSString::offsetOfLength()), dest);
1991 : }
1992 :
1993 : void loadStringChars(Register str, Register dest, CharEncoding encoding);
1994 :
1995 : void loadNonInlineStringChars(Register str, Register dest, CharEncoding encoding);
1996 : void loadNonInlineStringCharsForStore(Register str, Register dest);
1997 : void storeNonInlineStringChars(Register chars, Register str);
1998 :
1999 : void loadInlineStringChars(Register str, Register dest, CharEncoding encoding);
2000 : void loadInlineStringCharsForStore(Register str, Register dest);
2001 :
2002 : void loadStringChar(Register str, Register index, Register output, Register scratch,
2003 : Label* fail);
2004 :
2005 : void loadRopeLeftChild(Register str, Register dest);
2006 : void storeRopeChildren(Register left, Register right, Register str);
2007 :
2008 : void loadDependentStringBase(Register str, Register dest);
2009 : void storeDependentStringBase(Register base, Register str);
2010 : void leaNewDependentStringBase(Register str, Register dest);
2011 :
2012 : void loadStringIndexValue(Register str, Register dest, Label* fail);
2013 :
2014 : void loadJSContext(Register dest);
2015 0 : void loadJitActivation(Register dest) {
2016 0 : loadJSContext(dest);
2017 0 : loadPtr(Address(dest, offsetof(JSContext, activation_)), dest);
2018 0 : }
2019 :
2020 : void guardGroupHasUnanalyzedNewScript(Register group, Register scratch, Label* fail);
2021 :
2022 : void loadWasmTlsRegFromFrame(Register dest = WasmTlsReg);
2023 :
2024 : template<typename T>
2025 271 : void loadTypedOrValue(const T& src, TypedOrValueRegister dest) {
2026 271 : if (dest.hasValue())
2027 0 : loadValue(src, dest.valueReg());
2028 : else
2029 0 : loadUnboxedValue(src, dest.type(), dest.typedReg());
2030 271 : }
2031 :
2032 : template<typename T>
2033 0 : void loadElementTypedOrValue(const T& src, TypedOrValueRegister dest, bool holeCheck,
2034 : Label* hole) {
2035 0 : if (dest.hasValue()) {
2036 0 : loadValue(src, dest.valueReg());
2037 0 : if (holeCheck)
2038 0 : branchTestMagic(Assembler::Equal, dest.valueReg(), hole);
2039 : } else {
2040 0 : if (holeCheck)
2041 : branchTestMagic(Assembler::Equal, src, hole);
2042 0 : loadUnboxedValue(src, dest.type(), dest.typedReg());
2043 : }
2044 0 : }
2045 :
2046 : template <typename T>
2047 172 : void storeTypedOrValue(TypedOrValueRegister src, const T& dest) {
2048 172 : if (src.hasValue()) {
2049 0 : storeValue(src.valueReg(), dest);
2050 0 : } else if (IsFloatingPointType(src.type())) {
2051 0 : FloatRegister reg = src.typedReg().fpu();
2052 0 : if (src.type() == MIRType::Float32) {
2053 0 : convertFloat32ToDouble(reg, ScratchDoubleReg);
2054 0 : reg = ScratchDoubleReg;
2055 : }
2056 0 : storeDouble(reg, dest);
2057 : } else {
2058 0 : storeValue(ValueTypeFromMIRType(src.type()), src.typedReg().gpr(), dest);
2059 : }
2060 0 : }
2061 :
2062 : template <typename T>
2063 : inline void storeObjectOrNull(Register src, const T& dest);
2064 :
2065 : template <typename T>
2066 311 : void storeConstantOrRegister(const ConstantOrRegister& src, const T& dest) {
2067 311 : if (src.constant())
2068 0 : storeValue(src.value(), dest);
2069 : else
2070 0 : storeTypedOrValue(src.reg(), dest);
2071 311 : }
2072 :
2073 : void storeCallPointerResult(Register reg) {
2074 211 : if (reg != ReturnReg)
2075 185 : mov(ReturnReg, reg);
2076 : }
2077 :
2078 : inline void storeCallBoolResult(Register reg);
2079 : inline void storeCallInt32Result(Register reg);
2080 :
2081 0 : void storeCallFloatResult(FloatRegister reg) {
2082 0 : if (reg != ReturnDoubleReg)
2083 0 : moveDouble(ReturnDoubleReg, reg);
2084 0 : }
2085 :
2086 : inline void storeCallResultValue(AnyRegister dest, JSValueType type);
2087 :
2088 : void storeCallResultValue(ValueOperand dest) {
2089 : #if defined(JS_NUNBOX32)
2090 : // Reshuffle the return registers used for a call result to store into
2091 : // dest, using ReturnReg as a scratch register if necessary. This must
2092 : // only be called after returning from a call, at a point when the
2093 : // return register is not live. XXX: it would be better to allow wrappers
2094 : // to store the return value in different places.
2095 : if (dest.typeReg() == JSReturnReg_Data) {
2096 : if (dest.payloadReg() == JSReturnReg_Type) {
2097 : // swap the two registers.
2098 : mov(JSReturnReg_Type, ReturnReg);
2099 : mov(JSReturnReg_Data, JSReturnReg_Type);
2100 : mov(ReturnReg, JSReturnReg_Data);
2101 : } else {
2102 : mov(JSReturnReg_Data, dest.payloadReg());
2103 : mov(JSReturnReg_Type, dest.typeReg());
2104 : }
2105 : } else {
2106 : mov(JSReturnReg_Type, dest.typeReg());
2107 : mov(JSReturnReg_Data, dest.payloadReg());
2108 : }
2109 : #elif defined(JS_PUNBOX64)
2110 347 : if (dest.valueReg() != JSReturnReg)
2111 181 : mov(JSReturnReg, dest.valueReg());
2112 : #else
2113 : #error "Bad architecture"
2114 : #endif
2115 : }
2116 :
2117 : inline void storeCallResultValue(TypedOrValueRegister dest);
2118 :
2119 : template <typename T>
2120 943 : void guardedCallPreBarrier(const T& address, MIRType type) {
2121 1886 : Label done;
2122 :
2123 0 : branchTestNeedsIncrementalBarrier(Assembler::Zero, &done);
2124 :
2125 0 : if (type == MIRType::Value)
2126 20 : branchTestGCThing(Assembler::NotEqual, address, &done);
2127 0 : else if (type == MIRType::Object || type == MIRType::String)
2128 0 : branchPtr(Assembler::Equal, address, ImmWord(0), &done);
2129 :
2130 0 : Push(PreBarrierReg);
2131 1008 : computeEffectiveAddress(address, PreBarrierReg);
2132 :
2133 0 : const JitRuntime* rt = GetJitContext()->runtime->jitRuntime();
2134 943 : TrampolinePtr preBarrier = rt->preBarrier(type);
2135 :
2136 0 : call(preBarrier);
2137 943 : Pop(PreBarrierReg);
2138 :
2139 0 : bind(&done);
2140 943 : }
2141 :
2142 : template<typename T>
2143 : void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
2144 : bool canonicalizeDoubles = true, unsigned numElems = 0);
2145 :
2146 : template<typename T>
2147 : void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
2148 : Register temp, Label* fail);
2149 :
2150 : template<typename S, typename T>
2151 0 : void storeToTypedIntArray(Scalar::Type arrayType, const S& value, const T& dest) {
2152 : switch (arrayType) {
2153 : case Scalar::Int8:
2154 : case Scalar::Uint8:
2155 : case Scalar::Uint8Clamped:
2156 0 : store8(value, dest);
2157 0 : break;
2158 : case Scalar::Int16:
2159 : case Scalar::Uint16:
2160 0 : store16(value, dest);
2161 0 : break;
2162 : case Scalar::Int32:
2163 : case Scalar::Uint32:
2164 0 : store32(value, dest);
2165 0 : break;
2166 : default:
2167 0 : MOZ_CRASH("Invalid typed array type");
2168 : }
2169 0 : }
2170 :
2171 : void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
2172 : unsigned numElems = 0);
2173 : void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
2174 : unsigned numElems = 0);
2175 :
2176 : void memoryBarrierBefore(const Synchronization& sync);
2177 : void memoryBarrierAfter(const Synchronization& sync);
2178 :
2179 : // Load a property from an UnboxedPlainObject or UnboxedArrayObject.
2180 : template <typename T>
2181 : void loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output);
2182 :
2183 : // Store a property to an UnboxedPlainObject, without triggering barriers.
2184 : // If failure is null, the value definitely has a type suitable for storing
2185 : // in the property.
2186 : template <typename T>
2187 : void storeUnboxedProperty(T address, JSValueType type,
2188 : const ConstantOrRegister& value, Label* failure);
2189 :
2190 : void debugAssertIsObject(const ValueOperand& val);
2191 : void debugAssertObjHasFixedSlots(Register obj, Register scratch);
2192 :
2193 : void branchIfNativeIteratorNotReusable(Register ni, Label* notReusable);
2194 :
2195 : using MacroAssemblerSpecific::extractTag;
2196 50 : Register extractTag(const TypedOrValueRegister& reg, Register scratch) {
2197 50 : if (reg.hasValue())
2198 0 : return extractTag(reg.valueReg(), scratch);
2199 0 : mov(ImmWord(MIRTypeToTag(reg.type())), scratch);
2200 0 : return scratch;
2201 : }
2202 :
2203 : using MacroAssemblerSpecific::extractObject;
2204 25 : Register extractObject(const TypedOrValueRegister& reg, Register scratch) {
2205 25 : if (reg.hasValue())
2206 0 : return extractObject(reg.valueReg(), scratch);
2207 0 : MOZ_ASSERT(reg.type() == MIRType::Object);
2208 0 : return reg.typedReg().gpr();
2209 : }
2210 :
2211 : // Inline version of js_TypedArray_uint8_clamp_double.
2212 : // This function clobbers the input register.
2213 : void clampDoubleToUint8(FloatRegister input, Register output) PER_ARCH;
2214 :
2215 : using MacroAssemblerSpecific::ensureDouble;
2216 :
2217 : template <typename S>
2218 0 : void ensureDouble(const S& source, FloatRegister dest, Label* failure) {
2219 0 : Label isDouble, done;
2220 0 : branchTestDouble(Assembler::Equal, source, &isDouble);
2221 0 : branchTestInt32(Assembler::NotEqual, source, failure);
2222 :
2223 0 : convertInt32ToDouble(source, dest);
2224 0 : jump(&done);
2225 :
2226 0 : bind(&isDouble);
2227 0 : unboxDouble(source, dest);
2228 :
2229 0 : bind(&done);
2230 0 : }
2231 :
2232 : // Inline allocation.
2233 : private:
2234 : void checkAllocatorState(Label* fail);
2235 : bool shouldNurseryAllocate(gc::AllocKind allocKind, gc::InitialHeap initialHeap);
2236 : void nurseryAllocateObject(Register result, Register temp, gc::AllocKind allocKind,
2237 : size_t nDynamicSlots, Label* fail);
2238 : void freeListAllocate(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
2239 : void allocateObject(Register result, Register temp, gc::AllocKind allocKind,
2240 : uint32_t nDynamicSlots, gc::InitialHeap initialHeap, Label* fail);
2241 : void nurseryAllocateString(Register result, Register temp, gc::AllocKind allocKind,
2242 : Label* fail);
2243 : void allocateString(Register result, Register temp, gc::AllocKind allocKind,
2244 : gc::InitialHeap initialHeap, Label* fail);
2245 : void allocateNonObject(Register result, Register temp, gc::AllocKind allocKind, Label* fail);
2246 : void copySlotsFromTemplate(Register obj, const NativeTemplateObject& templateObj,
2247 : uint32_t start, uint32_t end);
2248 : void fillSlotsWithConstantValue(Address addr, Register temp, uint32_t start, uint32_t end,
2249 : const Value& v);
2250 : void fillSlotsWithUndefined(Address addr, Register temp, uint32_t start, uint32_t end);
2251 : void fillSlotsWithUninitialized(Address addr, Register temp, uint32_t start, uint32_t end);
2252 :
2253 : void initGCSlots(Register obj, Register temp, const NativeTemplateObject& templateObj,
2254 : bool initContents);
2255 :
2256 : public:
2257 : void callMallocStub(size_t nbytes, Register result, Label* fail);
2258 : void callFreeStub(Register slots);
2259 : void createGCObject(Register result, Register temp, const TemplateObject& templateObj,
2260 : gc::InitialHeap initialHeap, Label* fail, bool initContents = true);
2261 :
2262 : void initGCThing(Register obj, Register temp, const TemplateObject& templateObj,
2263 : bool initContents = true);
2264 :
2265 : enum class TypedArrayLength { Fixed, Dynamic };
2266 :
2267 : void initTypedArraySlots(Register obj, Register temp, Register lengthReg,
2268 : LiveRegisterSet liveRegs, Label* fail,
2269 : TypedArrayObject* templateObj, TypedArrayLength lengthKind);
2270 :
2271 : void initUnboxedObjectContents(Register object, const UnboxedLayout& layout);
2272 :
2273 : void newGCString(Register result, Register temp, Label* fail, bool attemptNursery);
2274 : void newGCFatInlineString(Register result, Register temp, Label* fail, bool attemptNursery);
2275 :
2276 : // Compares two strings for equality based on the JSOp.
2277 : // This checks for identical pointers, atoms, and lengths, and fails for everything else.
2278 : void compareStrings(JSOp op, Register left, Register right, Register result,
2279 : Label* fail);
2280 :
2281 : // Computes the result of the typeof operation. Falls back to the slow path for proxies.
2282 : void typeOfObject(Register objReg, Register scratch, Label* slow,
2283 : Label* isObject, Label* isCallable, Label* isUndefined);
2284 :
2285 : public:
2286 : // Generates code used to complete a bailout.
2287 : void generateBailoutTail(Register scratch, Register bailoutInfo);
2288 :
2289 : void assertRectifierFrameParentType(Register frameType);
2290 :
2291 : public:
2292 : #ifndef JS_CODEGEN_ARM64
2293 : // StackPointer manipulation functions.
2294 : // On ARM64, the StackPointer is implemented as two synchronized registers.
2295 : // Code shared across platforms must use these functions to be valid.
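 : //
 : // For example (a sketch; `scratch` is hypothetical):
 : //
 : //   masm.moveStackPtrTo(scratch);  // save
 : //   masm.moveToStackPtr(scratch);  // restore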
2296 : template <typename T> inline void addToStackPtr(T t);
2297 : template <typename T> inline void addStackPtrTo(T t);
2298 :
2299 : void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64);
2300 : void subFromStackPtr(Register reg);
2301 :
2302 : template <typename T>
2303 12 : void subStackPtrFrom(T t) { subPtr(getStackPointer(), t); }
2304 :
2305 : template <typename T>
2306 29 : void andToStackPtr(T t) { andPtr(t, getStackPointer()); }
2307 : template <typename T>
2308 : void andStackPtrTo(T t) { andPtr(getStackPointer(), t); }
2309 :
2310 : template <typename T>
2311 1648 : void moveToStackPtr(T t) { movePtr(t, getStackPointer()); }
2312 : template <typename T>
2313 0 : void moveStackPtrTo(T t) { movePtr(getStackPointer(), t); }
2314 :
2315 : template <typename T>
2316 4 : void loadStackPtr(T t) { loadPtr(t, getStackPointer()); }
2317 : template <typename T>
2318 0 : void storeStackPtr(T t) { storePtr(getStackPointer(), t); }
2319 :
2320 : // StackPointer testing functions.
2321 : // On ARM64, sp can function as the zero register depending on context.
2322 : // Code shared across platforms must use these functions to be valid.
2323 : template <typename T>
2324 : inline void branchTestStackPtr(Condition cond, T t, Label* label);
2325 : template <typename T>
2326 : inline void branchStackPtr(Condition cond, T rhs, Label* label);
2327 : template <typename T>
2328 : inline void branchStackPtrRhs(Condition cond, T lhs, Label* label);
2329 :
2330 : // Move the stack pointer based on the requested amount.
2331 : inline void reserveStack(uint32_t amount);
2332 : #else // !JS_CODEGEN_ARM64
2333 : void reserveStack(uint32_t amount);
2334 : #endif
2335 :
2336 : public:
2337 : void enableProfilingInstrumentation() {
2338 0 : emitProfilingInstrumentation_ = true;
2339 : }
2340 :
2341 : private:
2342 : // This class is used to surround call sites throughout the assembler. It
2343 : // is used by the callWithABI and callJit functions, except those suffixed
2344 : // with NoProfiler.
2345 : class AutoProfilerCallInstrumentation {
2346 : MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER;
2347 :
2348 : public:
2349 : explicit AutoProfilerCallInstrumentation(MacroAssembler& masm
2350 : MOZ_GUARD_OBJECT_NOTIFIER_PARAM);
2351 26460 : ~AutoProfilerCallInstrumentation() {}
2352 : };
2353 : friend class AutoProfilerCallInstrumentation;
2354 :
2355 : void appendProfilerCallSite(CodeOffset label) {
2356 0 : propagateOOM(profilerCallSites_.append(label));
2357 : }
2358 :
2359 : // Fix up the code pointers to be written for locations where profilerCallSite
2360 : // emitted moves of RIP to a register.
2361 : void linkProfilerCallSites(JitCode* code);
2362 :
2363 : // This field controls the profiling instrumentation output. If it is
2364 : // enabled, then instrumentation will be emitted around call
2365 : // sites.
2366 : bool emitProfilingInstrumentation_;
2367 :
2368 : // Record locations of the call sites.
2369 : Vector<CodeOffset, 0, SystemAllocPolicy> profilerCallSites_;
2370 :
2371 : public:
2372 : void loadJitCodeRaw(Register callee, Register dest);
2373 : void loadJitCodeNoArgCheck(Register callee, Register dest);
2374 :
2375 : void loadBaselineFramePtr(Register framePtr, Register dest);
2376 :
2377 305 : void pushBaselineFramePtr(Register framePtr, Register scratch) {
2378 305 : loadBaselineFramePtr(framePtr, scratch);
2379 0 : push(scratch);
2380 0 : }
2381 :
2382 : void PushBaselineFramePtr(Register framePtr, Register scratch) {
2383 778 : loadBaselineFramePtr(framePtr, scratch);
2384 778 : Push(scratch);
2385 : }
2386 :
2387 : using MacroAssemblerSpecific::movePtr;
2388 :
2389 152 : void movePtr(TrampolinePtr ptr, Register dest) {
2390 304 : movePtr(ImmPtr(ptr.value), dest);
2391 0 : }
2392 :
2393 : private:
2394 : void handleFailure();
2395 :
2396 : public:
2397 : Label* exceptionLabel() {
2398 : // Exceptions are currently handled the same way as sequential failures.
2399 68 : return &failureLabel_;
2400 : }
2401 :
2402 : Label* failureLabel() {
2403 982 : return &failureLabel_;
2404 : }
2405 :
2406 : void finish();
2407 : void link(JitCode* code);
2408 :
2409 : void assumeUnreachable(const char* output);
2410 :
2411 : template <typename T>
2412 : void assertTestInt32(Condition cond, const T& value, const char* output);
2413 :
2414 : void printf(const char* output);
2415 : void printf(const char* output, Register value);
2416 :
2417 : #ifdef JS_TRACE_LOGGING
2418 1630 : void loadTraceLogger(Register logger) {
2419 1630 : loadJSContext(logger);
2420 0 : loadPtr(Address(logger, offsetof(JSContext, traceLogger)), logger);
2421 0 : }
2422 : void tracelogStartId(Register logger, uint32_t textId, bool force = false);
2423 : void tracelogStartId(Register logger, Register textId);
2424 : void tracelogStartEvent(Register logger, Register event);
2425 : void tracelogStopId(Register logger, uint32_t textId, bool force = false);
2426 : void tracelogStopId(Register logger, Register textId);
2427 : #endif
2428 :
2429 : #define DISPATCH_FLOATING_POINT_OP(method, type, arg1d, arg1f, arg2) \
2430 : MOZ_ASSERT(IsFloatingPointType(type)); \
2431 : if (type == MIRType::Double) \
2432 : method##Double(arg1d, arg2); \
2433 : else \
2434 : method##Float32(arg1f, arg2);
2435 :
2436 0 : void loadConstantFloatingPoint(double d, float f, FloatRegister dest, MIRType destType) {
2437 0 : DISPATCH_FLOATING_POINT_OP(loadConstant, destType, d, f, dest);
2438 0 : }
2439 0 : void boolValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
2440 0 : DISPATCH_FLOATING_POINT_OP(boolValueTo, destType, value, value, dest);
2441 0 : }
2442 0 : void int32ValueToFloatingPoint(ValueOperand value, FloatRegister dest, MIRType destType) {
2443 0 : DISPATCH_FLOATING_POINT_OP(int32ValueTo, destType, value, value, dest);
2444 0 : }
2445 0 : void convertInt32ToFloatingPoint(Register src, FloatRegister dest, MIRType destType) {
2446 0 : DISPATCH_FLOATING_POINT_OP(convertInt32To, destType, src, src, dest);
2447 0 : }
2448 :
2449 : #undef DISPATCH_FLOATING_POINT_OP
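// For example, loadConstantFloatingPoint(1.0, 1.0f, fpReg, MIRType::Double)
// dispatches through the (now-#undef'd) macro above to:
//
//   MOZ_ASSERT(IsFloatingPointType(MIRType::Double));
//   loadConstantDouble(1.0, fpReg);  // the Float32 branch would instead
//                                    // call loadConstantFloat32(1.0f, fpReg)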
2450 :
2451 : void convertValueToFloatingPoint(ValueOperand value, FloatRegister output, Label* fail,
2452 : MIRType outputType);
2453 : MOZ_MUST_USE bool convertValueToFloatingPoint(JSContext* cx, const Value& v,
2454 : FloatRegister output, Label* fail,
2455 : MIRType outputType);
2456 : MOZ_MUST_USE bool convertConstantOrRegisterToFloatingPoint(JSContext* cx,
2457 : const ConstantOrRegister& src,
2458 : FloatRegister output, Label* fail,
2459 : MIRType outputType);
2460 : void convertTypedOrValueToFloatingPoint(TypedOrValueRegister src, FloatRegister output,
2461 : Label* fail, MIRType outputType);
2462 :
2463 : void outOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble,
2464 : bool compilingWasm, wasm::BytecodeOffset callOffset);
2465 :
2466 : void convertInt32ValueToDouble(const Address& address, Register scratch, Label* done);
2467 : void convertInt32ValueToDouble(ValueOperand val);
2468 :
2469 : void convertValueToDouble(ValueOperand value, FloatRegister output, Label* fail) {
2470 0 : convertValueToFloatingPoint(value, output, fail, MIRType::Double);
2471 : }
2472 : MOZ_MUST_USE bool convertValueToDouble(JSContext* cx, const Value& v, FloatRegister output,
2473 : Label* fail) {
2474 : return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Double);
2475 : }
2476 : MOZ_MUST_USE bool convertConstantOrRegisterToDouble(JSContext* cx,
2477 : const ConstantOrRegister& src,
2478 : FloatRegister output, Label* fail)
2479 : {
2480 0 : return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Double);
2481 : }
2482 : void convertTypedOrValueToDouble(TypedOrValueRegister src, FloatRegister output, Label* fail) {
2483 : convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Double);
2484 : }
2485 :
2486 : void convertValueToFloat(ValueOperand value, FloatRegister output, Label* fail) {
2487 0 : convertValueToFloatingPoint(value, output, fail, MIRType::Float32);
2488 : }
2489 : MOZ_MUST_USE bool convertValueToFloat(JSContext* cx, const Value& v, FloatRegister output,
2490 : Label* fail) {
2491 : return convertValueToFloatingPoint(cx, v, output, fail, MIRType::Float32);
2492 : }
2493 : MOZ_MUST_USE bool convertConstantOrRegisterToFloat(JSContext* cx,
2494 : const ConstantOrRegister& src,
2495 : FloatRegister output, Label* fail)
2496 : {
2497 0 : return convertConstantOrRegisterToFloatingPoint(cx, src, output, fail, MIRType::Float32);
2498 : }
2499 : void convertTypedOrValueToFloat(TypedOrValueRegister src, FloatRegister output, Label* fail) {
2500 : convertTypedOrValueToFloatingPoint(src, output, fail, MIRType::Float32);
2501 : }
2502 : //
2503 : // Functions for converting values to int.
2504 : //
2505 : void convertDoubleToInt(FloatRegister src, Register output, FloatRegister temp,
2506 : Label* truncateFail, Label* fail, IntConversionBehavior behavior);
2507 :
2508 : // Strings may be handled by providing labels to jump to when the behavior
2509 : // is truncation or clamping. The subroutine, usually an OOL call, is
2510 : // passed the unboxed string in |stringReg| and should convert it to a
2511 : // double and store it into |temp|.
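// For example (a sketch; the OOL stub and its labels are illustrative):
//
//   masm.convertValueToInt(value, mir, oolStub->entry(), oolStub->rejoin(),
//                          nullptr, strReg, fpTemp, output, &fail,
//                          IntConversionBehavior::Truncate);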
2512 : void convertValueToInt(ValueOperand value, MDefinition* input,
2513 : Label* handleStringEntry, Label* handleStringRejoin,
2514 : Label* truncateDoubleSlow,
2515 : Register stringReg, FloatRegister temp, Register output,
2516 : Label* fail, IntConversionBehavior behavior,
2517 : IntConversionInputKind conversion = IntConversionInputKind::Any);
2518 : void convertValueToInt(ValueOperand value, FloatRegister temp, Register output, Label* fail,
2519 : IntConversionBehavior behavior)
2520 : {
2521 : convertValueToInt(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
2522 0 : fail, behavior);
2523 : }
2524 : MOZ_MUST_USE bool convertValueToInt(JSContext* cx, const Value& v, Register output, Label* fail,
2525 : IntConversionBehavior behavior);
2526 : MOZ_MUST_USE bool convertConstantOrRegisterToInt(JSContext* cx,
2527 : const ConstantOrRegister& src,
2528 : FloatRegister temp, Register output,
2529 : Label* fail, IntConversionBehavior behavior);
2530 : void convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister temp, Register output,
2531 : Label* fail, IntConversionBehavior behavior);
2532 :
2533 : // This carries over the MToNumberInt32 operation on the ValueOperand
2534 : // input; see comment at the top of this class.
2535 : void convertValueToInt32(ValueOperand value, MDefinition* input,
2536 : FloatRegister temp, Register output, Label* fail,
2537 : bool negativeZeroCheck,
2538 : IntConversionInputKind conversion = IntConversionInputKind::Any)
2539 : {
2540 0 : convertValueToInt(value, input, nullptr, nullptr, nullptr, InvalidReg, temp, output, fail,
2541 : negativeZeroCheck
2542 : ? IntConversionBehavior::NegativeZeroCheck
2543 : : IntConversionBehavior::Normal,
2544 0 : conversion);
2545 : }
2546 :
2547 : // This carries over the MTruncateToInt32 operation on the ValueOperand
2548 : // input; see the comment at the top of this class.
2549 : void truncateValueToInt32(ValueOperand value, MDefinition* input,
2550 : Label* handleStringEntry, Label* handleStringRejoin,
2551 : Label* truncateDoubleSlow,
2552 : Register stringReg, FloatRegister temp, Register output, Label* fail)
2553 : {
2554 : convertValueToInt(value, input, handleStringEntry, handleStringRejoin, truncateDoubleSlow,
2555 0 : stringReg, temp, output, fail, IntConversionBehavior::Truncate);
2556 : }
2557 :
2558 0 : void truncateValueToInt32(ValueOperand value, FloatRegister temp, Register output, Label* fail)
2559 : {
2560 : truncateValueToInt32(value, nullptr, nullptr, nullptr, nullptr, InvalidReg, temp, output,
2561 0 : fail);
2562 0 : }
2563 :
2564 : MOZ_MUST_USE bool truncateConstantOrRegisterToInt32(JSContext* cx,
2565 : const ConstantOrRegister& src,
2566 : FloatRegister temp, Register output,
2567 : Label* fail)
2568 : {
2569 0 : return convertConstantOrRegisterToInt(cx, src, temp, output, fail, IntConversionBehavior::Truncate);
2570 : }
2571 :
2572 : // Convenience functions for clamping values to uint8.
2573 : void clampValueToUint8(ValueOperand value, MDefinition* input,
2574 : Label* handleStringEntry, Label* handleStringRejoin,
2575 : Register stringReg, FloatRegister temp, Register output, Label* fail)
2576 : {
2577 : convertValueToInt(value, input, handleStringEntry, handleStringRejoin, nullptr,
2578 0 : stringReg, temp, output, fail, IntConversionBehavior::ClampToUint8);
2579 : }
2580 :
2581 : MOZ_MUST_USE bool clampConstantOrRegisterToUint8(JSContext* cx,
2582 : const ConstantOrRegister& src,
2583 : FloatRegister temp, Register output,
2584 : Label* fail)
2585 : {
2586 : return convertConstantOrRegisterToInt(cx, src, temp, output, fail,
2587 0 : IntConversionBehavior::ClampToUint8);
2588 : }
2589 :
2590 : MOZ_MUST_USE bool icBuildOOLFakeExitFrame(void* fakeReturnAddr, AutoSaveLiveRegisters& save);
2591 :
2592 : // Align the stack pointer based on the number of arguments to be pushed
2593 : // on the stack, such that the JitFrameLayout is correctly aligned to
2594 : // the JitStackAlignment.
2595 : void alignJitStackBasedOnNArgs(Register nargs);
2596 : void alignJitStackBasedOnNArgs(uint32_t nargs);
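// For example, with a 16-byte JitStackAlignment and 8-byte Values, pushing
// an odd number of arguments would leave the frame misaligned, so these
// helpers first pad the stack (a sketch):
//
//   masm.alignJitStackBasedOnNArgs(3);  // pad so the JitFrameLayout pushed
//                                       // afterwards lands aligned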
2597 :
2598 : inline void assertStackAlignment(uint32_t alignment, int32_t offset = 0);
2599 :
2600 : void performPendingReadBarriers();
2601 :
2602 : private:
2603 : // Methods to get a singleton object or object group from a type set without
2604 : // a read barrier, and record the result so that we can perform the barrier
2605 : // later.
2606 : JSObject* getSingletonAndDelayBarrier(const TypeSet* types, size_t i);
2607 : ObjectGroup* getGroupAndDelayBarrier(const TypeSet* types, size_t i);
2608 :
2609 : Vector<JSObject*, 0, SystemAllocPolicy> pendingObjectReadBarriers_;
2610 : Vector<ObjectGroup*, 0, SystemAllocPolicy> pendingObjectGroupReadBarriers_;
2611 : };
2612 :
2613 : // StackMacroAssembler checks that no GC will happen while it's on the stack.
2614 5546 : class MOZ_RAII StackMacroAssembler : public MacroAssembler
2615 : {
2616 : JS::AutoCheckCannotGC nogc;
2617 :
2618 : public:
2619 : StackMacroAssembler()
2620 5538 : : MacroAssembler()
2621 : {}
2622 : explicit StackMacroAssembler(JSContext* cx)
2623 8 : : MacroAssembler(cx)
2624 : {}
2625 : };
2626 :
2627 : // WasmMacroAssembler does not contain GC pointers, so it doesn't need the
2628 : // no-GC checking that StackMacroAssembler has.
2629 : class MOZ_RAII WasmMacroAssembler : public MacroAssembler
2630 : {
2631 : public:
2632 : explicit WasmMacroAssembler(TempAllocator& alloc)
2633 0 : : MacroAssembler(WasmToken(), alloc)
2634 : {}
2635 0 : ~WasmMacroAssembler() {
2636 0 : assertNoGCThings();
2637 : }
2638 : };
2639 :
2640 : // Heap-allocated MacroAssembler used for Ion off-thread code generation.
2641 : // GC cancels off-thread compilations.
2642 47 : class IonHeapMacroAssembler : public MacroAssembler
2643 : {
2644 : public:
2645 48 : IonHeapMacroAssembler()
2646 48 : : MacroAssembler()
2647 : {
2648 0 : MOZ_ASSERT(CurrentThreadIsIonCompiling());
2649 48 : }
2650 : };
2651 :
2652 : //{{{ check_macroassembler_style
2653 : inline uint32_t
2654 : MacroAssembler::framePushed() const
2655 : {
2656 : return framePushed_;
2657 : }
2658 :
2659 : inline void
2660 : MacroAssembler::setFramePushed(uint32_t framePushed)
2661 : {
2662 460331 : framePushed_ = framePushed;
2663 : }
2664 :
2665 : inline void
2666 239994 : MacroAssembler::adjustFrame(int32_t value)
2667 : {
2668 0 : MOZ_ASSERT_IF(value < 0, framePushed_ >= uint32_t(-value));
2669 910580 : setFramePushed(framePushed_ + value);
2670 0 : }
2671 :
2672 : inline void
2673 189858 : MacroAssembler::implicitPop(uint32_t bytes)
2674 : {
2675 0 : MOZ_ASSERT(bytes % sizeof(intptr_t) == 0);
2676 189858 : MOZ_ASSERT(bytes <= INT32_MAX);
2677 0 : adjustFrame(-int32_t(bytes));
2678 0 : }
2679 : //}}} check_macroassembler_style
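// A sketch of how the bookkeeping above is used: reserveStack() and
// freeStack() keep framePushed() balanced, while implicitPop() accounts for
// stack already popped by a callee.
//
//   uint32_t before = masm.framePushed();
//   masm.reserveStack(16);  // framePushed() == before + 16
//   masm.freeStack(16);     // framePushed() == before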
2680 :
2681 : static inline Assembler::DoubleCondition
2682 13 : JSOpToDoubleCondition(JSOp op)
2683 : {
2684 0 : switch (op) {
2685 : case JSOP_EQ:
2686 : case JSOP_STRICTEQ:
2687 : return Assembler::DoubleEqual;
2688 : case JSOP_NE:
2689 : case JSOP_STRICTNE:
2690 3 : return Assembler::DoubleNotEqualOrUnordered;
2691 : case JSOP_LT:
2692 0 : return Assembler::DoubleLessThan;
2693 : case JSOP_LE:
2694 0 : return Assembler::DoubleLessThanOrEqual;
2695 : case JSOP_GT:
2696 0 : return Assembler::DoubleGreaterThan;
2697 : case JSOP_GE:
2698 0 : return Assembler::DoubleGreaterThanOrEqual;
2699 : default:
2700 0 : MOZ_CRASH("Unexpected comparison operation");
2701 : }
2702 : }
2703 :
2704 : // Note: the op may have been inverted during lowering (to put constants in a
2705 : // position where they can be immediates), so it is important to use
2706 : // lir->jsop() instead of mir->jsop() when it is present.
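// For example, if lowering rewrote |5 < x| as |x > 5| so the constant can be
// an immediate, the LIR op (JSOP_GT here) is the one matching the operand
// order actually emitted.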
2707 : static inline Assembler::Condition
2708 314 : JSOpToCondition(JSOp op, bool isSigned)
2709 : {
2710 0 : if (isSigned) {
2711 314 : switch (op) {
2712 : case JSOP_EQ:
2713 : case JSOP_STRICTEQ:
2714 : return Assembler::Equal;
2715 : case JSOP_NE:
2716 : case JSOP_STRICTNE:
2717 38 : return Assembler::NotEqual;
2718 : case JSOP_LT:
2719 0 : return Assembler::LessThan;
2720 : case JSOP_LE:
2721 0 : return Assembler::LessThanOrEqual;
2722 : case JSOP_GT:
2723 0 : return Assembler::GreaterThan;
2724 : case JSOP_GE:
2725 0 : return Assembler::GreaterThanOrEqual;
2726 : default:
2727 0 : MOZ_CRASH("Unrecognized comparison operation");
2728 : }
2729 : } else {
2730 0 : switch (op) {
2731 : case JSOP_EQ:
2732 : case JSOP_STRICTEQ:
2733 : return Assembler::Equal;
2734 : case JSOP_NE:
2735 : case JSOP_STRICTNE:
2736 0 : return Assembler::NotEqual;
2737 : case JSOP_LT:
2738 0 : return Assembler::Below;
2739 : case JSOP_LE:
2740 0 : return Assembler::BelowOrEqual;
2741 : case JSOP_GT:
2742 0 : return Assembler::Above;
2743 : case JSOP_GE:
2744 0 : return Assembler::AboveOrEqual;
2745 : default:
2746 0 : MOZ_CRASH("Unrecognized comparison operation");
2747 : }
2748 : }
2749 : }
2750 :
2751 : static inline size_t
2752 : StackDecrementForCall(uint32_t alignment, size_t bytesAlreadyPushed, size_t bytesToPush)
2753 : {
2754 : return bytesToPush +
2755 0 : ComputeByteAlignment(bytesAlreadyPushed + bytesToPush, alignment);
2756 : }
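// For example, with |alignment| == 16, |bytesAlreadyPushed| == 8, and
// |bytesToPush| == 20, the result is 20 + ComputeByteAlignment(28, 16)
// == 20 + 4 == 24, so the total pushed (8 + 24 == 32) stays 16-byte aligned.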
2757 :
2758 : static inline MIRType
2759 : ToMIRType(MIRType t)
2760 : {
2761 : return t;
2762 : }
2763 :
2764 : static inline MIRType
2765 0 : ToMIRType(ABIArgType argType)
2766 : {
2767 0 : switch (argType) {
2768 : case ArgType_General: return MIRType::Int32;
2769 0 : case ArgType_Double: return MIRType::Double;
2770 0 : case ArgType_Float32: return MIRType::Float32;
2771 0 : case ArgType_Int64: return MIRType::Int64;
2772 : default: break;
2773 : }
2774 0 : MOZ_CRASH("unexpected argType");
2775 : }
2776 :
2777 : template <class VecT>
2778 : class ABIArgIter
2779 : {
2780 : ABIArgGenerator gen_;
2781 : const VecT& types_;
2782 : unsigned i_;
2783 :
2784 0 : void settle() { if (!done()) gen_.next(ToMIRType(types_[i_])); }
2785 :
2786 : public:
2787 0 : explicit ABIArgIter(const VecT& types) : types_(types), i_(0) { settle(); }
2788 0 : void operator++(int) { MOZ_ASSERT(!done()); i_++; settle(); }
2789 0 : bool done() const { return i_ == types_.length(); }
2790 :
2791 0 : ABIArg* operator->() { MOZ_ASSERT(!done()); return &gen_.current(); }
2792 0 : ABIArg& operator*() { MOZ_ASSERT(!done()); return gen_.current(); }
2793 :
2794 0 : unsigned index() const { MOZ_ASSERT(!done()); return i_; }
2795 0 : MIRType mirType() const { MOZ_ASSERT(!done()); return ToMIRType(types_[i_]); }
2796 0 : uint32_t stackBytesConsumedSoFar() const { return gen_.stackBytesConsumedSoFar(); }
2797 : };
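// A usage sketch (MIRTypeVector and the stores are illustrative):
//
//   for (ABIArgIter<MIRTypeVector> iter(args); !iter.done(); iter++) {
//       if (iter->kind() == ABIArg::GPR)
//           masm.move32(Imm32(0), iter->gpr());
//       else
//           masm.store32(Imm32(0), Address(masm.getStackPointer(),
//                                          iter->offsetFromArgBase()));
//   }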
2798 :
2799 : } // namespace jit
2800 : } // namespace js
2801 :
2802 : #endif /* jit_MacroAssembler_h */