/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jit/BaselineCacheIRCompiler.h"

#include "jit/CacheIR.h"
#include "jit/Linker.h"
#include "jit/SharedICHelpers.h"
#include "jit/VMFunctions.h"
#include "proxy/DeadObjectProxy.h"
#include "proxy/Proxy.h"

#include "jit/MacroAssembler-inl.h"
#include "jit/SharedICHelpers-inl.h"
#include "vm/JSContext-inl.h"
#include "vm/Realm-inl.h"

using namespace js;
using namespace js::jit;

using mozilla::Maybe;

class AutoStubFrame;

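// Computes the stack address of a Baseline frame slot: the bytes this IC has
// already pushed (stackPushed_), plus the fixed offset from the stack pointer
// to the IC's stack values, plus the slot index scaled by sizeof(JS::Value).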
Address
CacheRegisterAllocator::addressOf(MacroAssembler& masm, BaselineFrameSlot slot) const
{
    uint32_t offset = stackPushed_ + ICStackValueOffset + slot.slot() * sizeof(JS::Value);
    return Address(masm.getStackPointer(), offset);
}

// BaselineCacheIRCompiler compiles CacheIR to BaselineIC native code.
class MOZ_RAII BaselineCacheIRCompiler : public CacheIRCompiler
{
#ifdef DEBUG
    // Some Baseline IC stubs can be used in IonMonkey through SharedStubs.
    // Those stubs have different machine code, so we need to track whether
    // we're compiling for Baseline or Ion.
    ICStubEngine engine_;
#endif

    bool inStubFrame_;
    bool makesGCCalls_;

    MOZ_MUST_USE bool callVM(MacroAssembler& masm, const VMFunction& fun);

    MOZ_MUST_USE bool callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
                                       LiveGeneralRegisterSet saveRegs);

    MOZ_MUST_USE bool emitStoreSlotShared(bool isFixed);
    MOZ_MUST_USE bool emitAddAndStoreSlotShared(CacheOp op);

  public:
    friend class AutoStubFrame;

    BaselineCacheIRCompiler(JSContext* cx, const CacheIRWriter& writer, ICStubEngine engine,
                            uint32_t stubDataOffset)
      : CacheIRCompiler(cx, writer, stubDataOffset, Mode::Baseline, StubFieldPolicy::Address),
#ifdef DEBUG
        engine_(engine),
#endif
        inStubFrame_(false),
        makesGCCalls_(false)
    {}

    MOZ_MUST_USE bool init(CacheKind kind);

    JitCode* compile();

    bool makesGCCalls() const { return makesGCCalls_; }

  private:
#define DEFINE_OP(op) MOZ_MUST_USE bool emit##op();
    CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

    Address stubAddress(uint32_t offset) const {
        return Address(ICStubReg, stubDataOffset_ + offset);
    }
};

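// Ops in CACHE_IR_SHARED_OPS generate identical code in Baseline and Ion, so
// their emitters simply forward to the shared CacheIRCompiler implementation.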
#define DEFINE_SHARED_OP(op) \
    bool BaselineCacheIRCompiler::emit##op() { return CacheIRCompiler::emit##op(); }
CACHE_IR_SHARED_OPS(DEFINE_SHARED_OP)
#undef DEFINE_SHARED_OP

enum class CallCanGC { CanGC, CanNotGC };

// Instructions that have to perform a callVM require a stub frame. Call its
// enter() and leave() methods to enter/leave the stub frame.
class MOZ_RAII AutoStubFrame
{
    BaselineCacheIRCompiler& compiler;
#ifdef DEBUG
    uint32_t framePushedAtEnterStubFrame_;
#endif

    AutoStubFrame(const AutoStubFrame&) = delete;
    void operator=(const AutoStubFrame&) = delete;

  public:
    explicit AutoStubFrame(BaselineCacheIRCompiler& compiler)
      : compiler(compiler)
#ifdef DEBUG
      , framePushedAtEnterStubFrame_(0)
#endif
    { }

    void enter(MacroAssembler& masm, Register scratch, CallCanGC canGC = CallCanGC::CanGC) {
        MOZ_ASSERT(compiler.allocator.stackPushed() == 0);
        MOZ_ASSERT(compiler.engine_ == ICStubEngine::Baseline);

        EmitBaselineEnterStubFrame(masm, scratch);

#ifdef DEBUG
        framePushedAtEnterStubFrame_ = masm.framePushed();
#endif

        MOZ_ASSERT(!compiler.inStubFrame_);
        compiler.inStubFrame_ = true;
        if (canGC == CallCanGC::CanGC)
            compiler.makesGCCalls_ = true;
    }
    void leave(MacroAssembler& masm, bool calledIntoIon = false) {
        MOZ_ASSERT(compiler.inStubFrame_);
        compiler.inStubFrame_ = false;

#ifdef DEBUG
        masm.setFramePushed(framePushedAtEnterStubFrame_);
        if (calledIntoIon)
            masm.adjustFrame(sizeof(intptr_t)); // Calls into Ion have this extra.
#endif

        EmitBaselineLeaveStubFrame(masm, calledIntoIon);
    }

#ifdef DEBUG
    ~AutoStubFrame() {
        MOZ_ASSERT(!compiler.inStubFrame_);
    }
#endif
};

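// Calls a VM function through its trampoline. The caller must have entered a
// stub frame (AutoStubFrame) and pushed the function's arguments in reverse
// order, last argument first; see the emitCall* methods below.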
bool
BaselineCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun)
{
    MOZ_ASSERT(inStubFrame_);

    TrampolinePtr code = cx_->runtime()->jitRuntime()->getVMWrapper(fun);
    MOZ_ASSERT(fun.expectTailCall == NonTailCall);
    MOZ_ASSERT(engine_ == ICStubEngine::Baseline);

    EmitBaselineCallVM(code, masm);
    return true;
}

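// Emits the stub: first the main path, one emitter per CacheIR op, then the
// out-of-line failure paths, each of which jumps to the next stub in the
// chain via EmitStubGuardFailure.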
JitCode*
BaselineCacheIRCompiler::compile()
{
#ifndef JS_USE_LINK_REGISTER
    // The first value contains the return address,
    // which we pull into ICTailCallReg for tail calls.
    masm.adjustFrame(sizeof(intptr_t));
#endif
#ifdef JS_CODEGEN_ARM
    masm.setSecondScratchReg(BaselineSecondScratchReg);
#endif

    do {
        switch (reader.readOp()) {
#define DEFINE_OP(op)                   \
          case CacheOp::op:             \
            if (!emit##op())            \
                return nullptr;         \
            break;
        CACHE_IR_OPS(DEFINE_OP)
#undef DEFINE_OP

          default:
            MOZ_CRASH("Invalid op");
        }

        allocator.nextOp();
    } while (reader.more());

    MOZ_ASSERT(!inStubFrame_);
    masm.assumeUnreachable("Should have returned from IC");

    // Done emitting the main IC code. Now emit the failure paths.
    for (size_t i = 0; i < failurePaths.length(); i++) {
        if (!emitFailurePath(i))
            return nullptr;
        EmitStubGuardFailure(masm);
    }

    Linker linker(masm);
    AutoFlushICache afc("getStubCode");
    Rooted<JitCode*> newStubCode(cx_, linker.newCode(cx_, CodeKind::Baseline));
    if (!newStubCode) {
        cx_->recoverFromOutOfMemory();
        return nullptr;
    }

    return newStubCode;
}

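// Note for the guards below: when Spectre mitigations are required, the
// hardened branchTestObjShape/branchTestObjGroup variants need an extra
// scratch register and clobber |obj| on the failure path, so speculatively
// executed code after the guard cannot use the mismatched object.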
bool
BaselineCacheIRCompiler::emitGuardShape()
{
    ObjOperandId objId = reader.objOperandId();
    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch1(allocator, masm);

    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

    Maybe<AutoScratchRegister> maybeScratch2;
    if (needSpectreMitigations)
        maybeScratch2.emplace(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch1);
    if (needSpectreMitigations) {
        masm.branchTestObjShape(Assembler::NotEqual, obj, scratch1, *maybeScratch2, obj,
                                failure->label());
    } else {
        masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, obj, scratch1,
                                                    failure->label());
    }

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardGroup()
{
    ObjOperandId objId = reader.objOperandId();
    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch1(allocator, masm);

    bool needSpectreMitigations = objectGuardNeedsSpectreMitigations(objId);

    Maybe<AutoScratchRegister> maybeScratch2;
    if (needSpectreMitigations)
        maybeScratch2.emplace(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadPtr(addr, scratch1);
    if (needSpectreMitigations) {
        masm.branchTestObjGroup(Assembler::NotEqual, obj, scratch1, *maybeScratch2, obj,
                                failure->label());
    } else {
        masm.branchTestObjGroupNoSpectreMitigations(Assembler::NotEqual, obj, scratch1,
                                                    failure->label());
    }

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardProto()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.loadObjProto(obj, scratch);
    masm.branchPtr(Assembler::NotEqual, addr, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardCompartment()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Verify that the global wrapper is still valid, as it is a
    // prerequisite for the compartment check.
    Address globalWrapper(stubAddress(reader.stubOffset()));
    masm.loadPtr(globalWrapper, scratch);
    Address handlerAddr(scratch, ProxyObject::offsetOfHandler());
    masm.branchPtr(Assembler::Equal, handlerAddr, ImmPtr(&DeadObjectProxy::singleton),
                   failure->label());

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchTestObjCompartment(Assembler::NotEqual, obj, addr, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardAnyClass()
{
    ObjOperandId objId = reader.objOperandId();
    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address testAddr(stubAddress(reader.stubOffset()));
    if (objectGuardNeedsSpectreMitigations(objId)) {
        masm.branchTestObjClass(Assembler::NotEqual, obj, testAddr, scratch, obj,
                                failure->label());
    } else {
        masm.branchTestObjClassNoSpectreMitigations(Assembler::NotEqual, obj, testAddr, scratch,
                                                    failure->label());
    }

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardHasProxyHandler()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address testAddr(stubAddress(reader.stubOffset()));
    masm.loadPtr(testAddr, scratch);

    Address handlerAddr(obj, ProxyObject::offsetOfHandler());
    masm.branchPtr(Assembler::NotEqual, handlerAddr, scratch, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardSpecificObject()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchPtr(Assembler::NotEqual, addr, obj, failure->label());
    return true;
}

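// Guards that a string equals a specific atom. Conceptually:
//
//   if (str == atom) succeed;                    // pointer identity fast path
//   if (str is an atom) fail;                    // distinct atoms never match
//   if (str->length() != atom->length()) fail;
//   succeed iff EqualStringsHelper(atom, str);   // compare the characters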
bool
BaselineCacheIRCompiler::emitGuardSpecificAtom()
{
    Register str = allocator.useRegister(masm, reader.stringOperandId());
    AutoScratchRegister scratch(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address atomAddr(stubAddress(reader.stubOffset()));

    Label done;
    masm.branchPtr(Assembler::Equal, atomAddr, str, &done);

    // The pointers are not equal, so if the input string is also an atom it
    // must be a different string.
    masm.branchTest32(Assembler::Zero, Address(str, JSString::offsetOfFlags()),
                      Imm32(JSString::NON_ATOM_BIT), failure->label());

    // Check the length.
    masm.loadPtr(atomAddr, scratch);
    masm.loadStringLength(scratch, scratch);
    masm.branch32(Assembler::NotEqual, Address(str, JSString::offsetOfLength()),
                  scratch, failure->label());

    // We have a non-atomized string with the same length. Call a helper
    // function to do the comparison.
    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch);
    masm.loadPtr(atomAddr, scratch);
    masm.passABIArg(scratch);
    masm.passABIArg(str);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, EqualStringsHelper));
    masm.mov(ReturnReg, scratch);

    LiveRegisterSet ignore;
    ignore.add(scratch);
    masm.PopRegsInMaskIgnore(volatileRegs, ignore);
    masm.branchIfFalseBool(scratch, failure->label());

    masm.bind(&done);
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardSpecificSymbol()
{
    Register sym = allocator.useRegister(masm, reader.symbolOperandId());

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    Address addr(stubAddress(reader.stubOffset()));
    masm.branchPtr(Assembler::NotEqual, addr, sym, failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardXrayExpandoShapeAndDefaultProto()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    bool hasExpando = reader.readBool();
    Address shapeWrapperAddress(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);
    Maybe<AutoScratchRegister> scratch2, scratch3;
    if (hasExpando) {
        scratch2.emplace(allocator, masm);
        scratch3.emplace(allocator, masm);
    }

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
    Address holderAddress(scratch, sizeof(Value) * GetXrayJitInfo()->xrayHolderSlot);
    Address expandoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->holderExpandoSlot));

    if (hasExpando) {
        masm.branchTestObject(Assembler::NotEqual, holderAddress, failure->label());
        masm.unboxObject(holderAddress, scratch);
        masm.branchTestObject(Assembler::NotEqual, expandoAddress, failure->label());
        masm.unboxObject(expandoAddress, scratch);

        // Unwrap the expando before checking its shape.
        masm.loadPtr(Address(scratch, ProxyObject::offsetOfReservedSlots()), scratch);
        masm.unboxObject(Address(scratch, detail::ProxyReservedSlots::offsetOfPrivateSlot()), scratch);

        masm.loadPtr(shapeWrapperAddress, scratch2.ref());
        LoadShapeWrapperContents(masm, scratch2.ref(), scratch2.ref(), failure->label());
        masm.branchTestObjShape(Assembler::NotEqual, scratch, *scratch2, *scratch3, scratch,
                                failure->label());

        // The reserved slots on the expando should all be in fixed slots.
        Address protoAddress(scratch, NativeObject::getFixedSlotOffset(GetXrayJitInfo()->expandoProtoSlot));
        masm.branchTestUndefined(Assembler::NotEqual, protoAddress, failure->label());
    } else {
        Label done;
        masm.branchTestObject(Assembler::NotEqual, holderAddress, &done);
        masm.unboxObject(holderAddress, scratch);
        masm.branchTestObject(Assembler::Equal, expandoAddress, failure->label());
        masm.bind(&done);
    }

    return true;
}

bool
BaselineCacheIRCompiler::emitGuardFunctionPrototype()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Register prototypeObject = allocator.useRegister(masm, reader.objOperandId());

    // Allocate registers before the failure path to make sure they're registered
    // by addFailurePath.
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Guard on the .prototype object.
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch1);
    masm.load32(Address(stubAddress(reader.stubOffset())), scratch2);
    BaseValueIndex prototypeSlot(scratch1, scratch2);
    masm.branchTestObject(Assembler::NotEqual, prototypeSlot, failure->label());
    masm.unboxObject(prototypeSlot, scratch1);
    masm.branchPtr(Assembler::NotEqual,
                   prototypeObject,
                   scratch1, failure->label());

    return true;
}

bool
BaselineCacheIRCompiler::emitLoadValueResult()
{
    AutoOutputRegister output(*this);
    masm.loadValue(stubAddress(reader.stubOffset()), output.valueReg());
    return true;
}

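// The offsets baked into the stub data below are byte offsets, so they are
// combined with the object (or slots) pointer through a TimesOne BaseIndex
// rather than a scaled slot index.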
bool
BaselineCacheIRCompiler::emitLoadFixedSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadValue(BaseIndex(obj, scratch, TimesOne), output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadDynamicSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);
    AutoScratchRegister scratch2(allocator, masm);

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
    masm.loadValue(BaseIndex(scratch2, scratch, TimesOne), output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardHasGetterSetter()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address shapeAddr = stubAddress(reader.stubOffset());

    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    LiveRegisterSet volatileRegs(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
    volatileRegs.takeUnchecked(scratch1);
    volatileRegs.takeUnchecked(scratch2);
    masm.PushRegsInMask(volatileRegs);

    masm.setupUnalignedABICall(scratch1);
    masm.loadJSContext(scratch1);
    masm.passABIArg(scratch1);
    masm.passABIArg(obj);
    masm.loadPtr(shapeAddr, scratch2);
    masm.passABIArg(scratch2);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, ObjectHasGetterSetter));
    masm.mov(ReturnReg, scratch1);
    masm.PopRegsInMask(volatileRegs);

    masm.branchIfFalseBool(scratch1, failure->label());
    return true;
}

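// Calls a scripted getter: enters a stub frame, pushes |obj| as |this| with
// an ActualArgc of 0, and jumps into the getter's JIT code, routing through
// the arguments rectifier when the callee declares formal arguments.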
bool
BaselineCacheIRCompiler::emitCallScriptedGetterResult()
{
    MOZ_ASSERT(engine_ == ICStubEngine::Baseline);

    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address getterAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister code(allocator, masm);
    AutoScratchRegister callee(allocator, masm);
    AutoScratchRegister scratch(allocator, masm);

    // First, ensure our getter is non-lazy.
    {
        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;

        masm.loadPtr(getterAddr, callee);
        masm.branchIfFunctionHasNoJitEntry(callee, /* constructing */ false, failure->label());
        masm.loadJitCodeRaw(callee, code);
    }

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Align the stack such that the JitFrameLayout is aligned on
    // JitStackAlignment.
    masm.alignJitStackBasedOnNArgs(0);

    // Getter is called with 0 arguments, just |obj| as thisv.
    // Note that we use Push, not push, so that callJit will align the stack
    // properly on ARM.
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));

    EmitBaselineCreateStubFrameDescriptor(masm, scratch, JitFrameLayout::Size());
    masm.Push(Imm32(0)); // ActualArgc is 0
    masm.Push(callee);
    masm.Push(scratch);

    // Handle arguments underflow.
    Label noUnderflow;
    masm.load16ZeroExtend(Address(callee, JSFunction::offsetOfNargs()), callee);
    masm.branch32(Assembler::Equal, callee, Imm32(0), &noUnderflow);
    {
        // Call the arguments rectifier.
        TrampolinePtr argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
        masm.movePtr(argumentsRectifier, code);
    }

    masm.bind(&noUnderflow);
    masm.callJit(code);

    stubFrame.leave(masm, true);
    return true;
}

typedef bool (*CallNativeGetterFn)(JSContext*, HandleFunction, HandleObject, MutableHandleValue);
static const VMFunction CallNativeGetterInfo =
    FunctionInfo<CallNativeGetterFn>(CallNativeGetter, "CallNativeGetter");

bool
BaselineCacheIRCompiler::emitCallNativeGetterResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address getterAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the callee in the scratch register.
    masm.loadPtr(getterAddr, scratch);

    masm.Push(obj);
    masm.Push(scratch);

    if (!callVM(masm, CallNativeGetterInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}

typedef bool (*ProxyGetPropertyFn)(JSContext*, HandleObject, HandleId, MutableHandleValue);
static const VMFunction ProxyGetPropertyInfo =
    FunctionInfo<ProxyGetPropertyFn>(ProxyGetProperty, "ProxyGetProperty");

bool
BaselineCacheIRCompiler::emitCallProxyGetResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address idAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the jsid in the scratch register.
    masm.loadPtr(idAddr, scratch);

    masm.Push(scratch);
    masm.Push(obj);

    if (!callVM(masm, ProxyGetPropertyInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}

typedef bool (*ProxyGetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyGetPropertyByValueInfo =
    FunctionInfo<ProxyGetPropertyByValueFn>(ProxyGetPropertyByValue, "ProxyGetPropertyByValue");

bool
BaselineCacheIRCompiler::emitCallProxyGetByValueResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(idVal);
    masm.Push(obj);

    if (!callVM(masm, ProxyGetPropertyByValueInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}

typedef bool (*ProxyHasFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyHasInfo = FunctionInfo<ProxyHasFn>(ProxyHas, "ProxyHas");

typedef bool (*ProxyHasOwnFn)(JSContext*, HandleObject, HandleValue, MutableHandleValue);
static const VMFunction ProxyHasOwnInfo = FunctionInfo<ProxyHasOwnFn>(ProxyHasOwn, "ProxyHasOwn");

bool
BaselineCacheIRCompiler::emitCallProxyHasPropResult()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
    bool hasOwn = reader.readBool();

    AutoScratchRegister scratch(allocator, masm);

    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    masm.Push(idVal);
    masm.Push(obj);

    if (hasOwn) {
        if (!callVM(masm, ProxyHasOwnInfo))
            return false;
    } else {
        if (!callVM(masm, ProxyHasInfo))
            return false;
    }

    stubFrame.leave(masm);
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadUnboxedPropertyResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    JSValueType fieldType = reader.valueType();
    Address fieldOffset(stubAddress(reader.stubOffset()));
    masm.load32(fieldOffset, scratch);
    masm.loadUnboxedProperty(BaseIndex(obj, scratch, TimesOne), fieldType, output);
    return true;
}

bool
BaselineCacheIRCompiler::emitGuardFrameHasNoArgumentsObject()
{
    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.branchTest32(Assembler::NonZero,
                      Address(BaselineFrameReg, BaselineFrame::reverseOffsetOfFlags()),
                      Imm32(BaselineFrame::HAS_ARGS_OBJ),
                      failure->label());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadFrameCalleeResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    Address callee(BaselineFrameReg, BaselineFrame::offsetOfCalleeToken());
    masm.loadFunctionFromCalleeToken(callee, scratch);
    masm.tagValue(JSVAL_TYPE_OBJECT, scratch, output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadFrameNumActualArgsResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    Address actualArgs(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs());
    masm.loadPtr(actualArgs, scratch);
    masm.tagValue(JSVAL_TYPE_INT32, scratch, output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadTypedObjectResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    TypedThingLayout layout = reader.typedThingLayout();
    uint32_t typeDescr = reader.typeDescrKey();
    Address fieldOffset(stubAddress(reader.stubOffset()));

    // Get the object's data pointer.
    LoadTypedThingData(masm, layout, obj, scratch1);

    // Get the address being read from.
    masm.load32(fieldOffset, scratch2);
    masm.addPtr(scratch2, scratch1);

    Address fieldAddr(scratch1, 0);
    emitLoadTypedObjectResultShared(fieldAddr, scratch2, typeDescr, output);
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadFrameArgumentResult()
{
    AutoOutputRegister output(*this);
    Register index = allocator.useRegister(masm, reader.int32OperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Bounds check.
    masm.loadPtr(Address(BaselineFrameReg, BaselineFrame::offsetOfNumActualArgs()), scratch1);
    masm.spectreBoundsCheck32(index, scratch1, scratch2, failure->label());

    // Load the argument.
    masm.loadValue(BaseValueIndex(BaselineFrameReg, index, BaselineFrame::offsetOfArg(0)),
                   output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadEnvironmentFixedSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    BaseIndex slot(obj, scratch, TimesOne);

    // Check for uninitialized lexicals.
    masm.branchTestMagic(Assembler::Equal, slot, failure->label());

    // Load the value.
    masm.loadValue(slot, output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadEnvironmentDynamicSlotResult()
{
    AutoOutputRegister output(*this);
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    AutoScratchRegister scratch(allocator, masm);
    AutoScratchRegisterMaybeOutput scratch2(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    masm.load32(stubAddress(reader.stubOffset()), scratch);
    masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);

    // Check for uninitialized lexicals.
    BaseIndex slot(scratch2, scratch, TimesOne);
    masm.branchTestMagic(Assembler::Equal, slot, failure->label());

    // Load the value.
    masm.loadValue(slot, output.valueReg());
    return true;
}

bool
BaselineCacheIRCompiler::emitLoadStringResult()
{
    AutoOutputRegister output(*this);
    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    masm.loadPtr(stubAddress(reader.stubOffset()), scratch);
    masm.tagValue(JSVAL_TYPE_STRING, scratch, output.valueReg());
    return true;
}

typedef bool (*StringSplitHelperFn)(JSContext*, HandleString, HandleString, HandleObjectGroup,
                                    uint32_t limit, MutableHandleValue);
static const VMFunction StringSplitHelperInfo =
    FunctionInfo<StringSplitHelperFn>(StringSplitHelper, "StringSplitHelper");

bool
BaselineCacheIRCompiler::emitCallStringSplitResult()
{
    Register str = allocator.useRegister(masm, reader.stringOperandId());
    Register sep = allocator.useRegister(masm, reader.stringOperandId());
    Address groupAddr(stubAddress(reader.stubOffset()));

    AutoScratchRegister scratch(allocator, masm);
    allocator.discardStack(masm);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch);

    // Load the group in the scratch register.
    masm.loadPtr(groupAddr, scratch);

    masm.Push(Imm32(INT32_MAX));
    masm.Push(scratch);
    masm.Push(sep);
    masm.Push(str);

    if (!callVM(masm, StringSplitHelperInfo))
        return false;

    stubFrame.leave(masm);
    return true;
}

bool
BaselineCacheIRCompiler::emitCompareStringResult()
{
    AutoOutputRegister output(*this);

    Register left = allocator.useRegister(masm, reader.stringOperandId());
    Register right = allocator.useRegister(masm, reader.stringOperandId());
    JSOp op = reader.jsop();

    AutoScratchRegisterMaybeOutput scratch(allocator, masm, output);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    allocator.discardStack(masm);

    Label slow, done;
    masm.compareStrings(op, left, right, scratch, &slow);
    masm.jump(&done);
    masm.bind(&slow);
    {
        AutoStubFrame stubFrame(*this);
        stubFrame.enter(masm, scratch);

        masm.Push(right);
        masm.Push(left);

        if (!callVM(masm, (op == JSOP_EQ || op == JSOP_STRICTEQ) ?
                          StringsEqualInfo :
                          StringsNotEqualInfo))
        {
            return false;
        }
        stubFrame.leave(masm);
        masm.mov(ReturnReg, scratch);
    }
    masm.bind(&done);
    masm.tagValue(JSVAL_TYPE_BOOLEAN, scratch, output.valueReg());
    return true;
}

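// Runs the type update IC chain for a store. The first update stub is called
// directly; it leaves 0 or 1 in R1.scratchReg(). On failure (0) we enter a
// stub frame and call the DoTypeUpdateFallback VM function, preserving
// |saveRegs| across the call.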
bool
BaselineCacheIRCompiler::callTypeUpdateIC(Register obj, ValueOperand val, Register scratch,
                                          LiveGeneralRegisterSet saveRegs)
{
    // Ensure the stack is empty for the VM call below.
    allocator.discardStack(masm);

    // R0 contains the value that needs to be typechecked.
    MOZ_ASSERT(val == R0);
    MOZ_ASSERT(scratch == R1.scratchReg());

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    static const bool CallClobbersTailReg = false;
#else
    static const bool CallClobbersTailReg = true;
#endif

    // Call the first type update stub.
    if (CallClobbersTailReg)
        masm.push(ICTailCallReg);
    masm.push(ICStubReg);
    masm.loadPtr(Address(ICStubReg, ICUpdatedStub::offsetOfFirstUpdateStub()),
                 ICStubReg);
    masm.call(Address(ICStubReg, ICStub::offsetOfStubCode()));
    masm.pop(ICStubReg);
    if (CallClobbersTailReg)
        masm.pop(ICTailCallReg);

    // The update IC will store 0 or 1 in |scratch|, R1.scratchReg(),
    // indicating whether the value in R0 type-checked properly.
    Label done;
    masm.branch32(Assembler::Equal, scratch, Imm32(1), &done);

    AutoStubFrame stubFrame(*this);
    stubFrame.enter(masm, scratch, CallCanGC::CanNotGC);

    masm.PushRegsInMask(saveRegs);

    masm.Push(val);
    masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
    masm.Push(ICStubReg);

    // Load previous frame pointer, push BaselineFrame*.
    masm.loadPtr(Address(BaselineFrameReg, 0), scratch);
    masm.pushBaselineFramePtr(scratch, scratch);

    if (!callVM(masm, DoTypeUpdateFallbackInfo))
        return false;

    masm.PopRegsInMask(saveRegs);

    stubFrame.leave(masm);

    masm.bind(&done);
    return true;
}

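// Shared tail for StoreFixedSlot/StoreDynamicSlot: runs the type update IC,
// then stores |val| into the slot with a pre-barrier and a post-barrier.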
bool
BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Maybe<AutoScratchRegister> scratch2;
    if (!isFixed)
        scratch2.emplace(allocator, masm);

    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
        return false;

    masm.load32(offsetAddr, scratch1);

    if (isFixed) {
        BaseIndex slot(obj, scratch1, TimesOne);
        EmitPreBarrier(masm, slot, MIRType::Value);
        masm.storeValue(val, slot);
    } else {
        masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2.ref());
        BaseIndex slot(scratch2.ref(), scratch1, TimesOne);
        EmitPreBarrier(masm, slot, MIRType::Value);
        masm.storeValue(val, slot);
    }

    emitPostBarrierSlot(obj, val, scratch1);
    return true;
}

bool
BaselineCacheIRCompiler::emitStoreFixedSlot()
{
    return emitStoreSlotShared(true);
}

bool
BaselineCacheIRCompiler::emitStoreDynamicSlot()
{
    return emitStoreSlotShared(false);
}

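// Shared tail for the AddAndStore*Slot ops: grows the dynamic slots if
// requested, runs the type update IC, updates the object's group and shape,
// and stores into the new slot (no pre-barrier: the slot is fresh).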
bool
BaselineCacheIRCompiler::emitAddAndStoreSlotShared(CacheOp op)
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch2(allocator, masm);

    bool changeGroup = reader.readBool();
    Address newGroupAddr = stubAddress(reader.stubOffset());
    Address newShapeAddr = stubAddress(reader.stubOffset());

    if (op == CacheOp::AllocateAndStoreDynamicSlot) {
        // We have to (re)allocate dynamic slots. Do this first, as it's the
        // only fallible operation here. This simplifies the callTypeUpdateIC
        // call below: it does not have to worry about saving registers used by
        // failure paths. Note that growSlotsDontReportOOM is fallible but does
        // not GC.
        Address numNewSlotsAddr = stubAddress(reader.stubOffset());

        FailurePath* failure;
        if (!addFailurePath(&failure))
            return false;

        LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
        masm.PushRegsInMask(save);

        masm.setupUnalignedABICall(scratch1);
        masm.loadJSContext(scratch1);
        masm.passABIArg(scratch1);
        masm.passABIArg(obj);
        masm.load32(numNewSlotsAddr, scratch2);
        masm.passABIArg(scratch2);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::growSlotsDontReportOOM));
        masm.mov(ReturnReg, scratch1);

        LiveRegisterSet ignore;
        ignore.add(scratch1);
        masm.PopRegsInMaskIgnore(save, ignore);

        masm.branchIfFalseBool(scratch1, failure->label());
    }

    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
        return false;

    if (changeGroup) {
        // Changing the object's group from a partially initialized group to
        // its fully initialized group, per the acquired-properties analysis.
        // Only change the group if the old group still has a newScript. This
        // only applies to PlainObjects.
        Label noGroupChange;
        masm.branchIfObjGroupHasNoAddendum(obj, scratch1, &noGroupChange);

        // Update the object's group.
        masm.loadPtr(newGroupAddr, scratch1);
        masm.storeObjGroup(scratch1, obj, [](MacroAssembler& masm, const Address& addr) {
            EmitPreBarrier(masm, addr, MIRType::ObjectGroup);
        });

        masm.bind(&noGroupChange);
    }

    // Update the object's shape.
    masm.loadPtr(newShapeAddr, scratch1);
    masm.storeObjShape(scratch1, obj, [](MacroAssembler& masm, const Address& addr) {
        EmitPreBarrier(masm, addr, MIRType::Shape);
    });

    // Perform the store. No pre-barrier required since this is a new
    // initialization.
    masm.load32(offsetAddr, scratch1);
    if (op == CacheOp::AddAndStoreFixedSlot) {
        BaseIndex slot(obj, scratch1, TimesOne);
        masm.storeValue(val, slot);
    } else {
        MOZ_ASSERT(op == CacheOp::AddAndStoreDynamicSlot ||
                   op == CacheOp::AllocateAndStoreDynamicSlot);
        masm.loadPtr(Address(obj, NativeObject::offsetOfSlots()), scratch2);
        BaseIndex slot(scratch2, scratch1, TimesOne);
        masm.storeValue(val, slot);
    }

    emitPostBarrierSlot(obj, val, scratch1);
    return true;
}

bool
BaselineCacheIRCompiler::emitAddAndStoreFixedSlot()
{
    return emitAddAndStoreSlotShared(CacheOp::AddAndStoreFixedSlot);
}

bool
BaselineCacheIRCompiler::emitAddAndStoreDynamicSlot()
{
    return emitAddAndStoreSlotShared(CacheOp::AddAndStoreDynamicSlot);
}

bool
BaselineCacheIRCompiler::emitAllocateAndStoreDynamicSlot()
{
    return emitAddAndStoreSlotShared(CacheOp::AllocateAndStoreDynamicSlot);
}

1203 12 : BaselineCacheIRCompiler::emitStoreUnboxedProperty()
1204 : {
1205 0 : ObjOperandId objId = reader.objOperandId();
1206 24 : JSValueType fieldType = reader.valueType();
1207 0 : Address offsetAddr = stubAddress(reader.stubOffset());
1208 :
1209 : // Allocate the fixed registers first. These need to be fixed for
1210 : // callTypeUpdateIC.
1211 24 : AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
1212 24 : ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);
1213 :
1214 0 : Register obj = allocator.useRegister(masm, objId);
1215 :
1216 : // We only need the type update IC if we are storing an object.
1217 12 : if (fieldType == JSVAL_TYPE_OBJECT) {
1218 4 : LiveGeneralRegisterSet saveRegs;
1219 0 : saveRegs.add(obj);
1220 0 : saveRegs.add(val);
1221 0 : if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
1222 0 : return false;
1223 : }
1224 :
1225 12 : masm.load32(offsetAddr, scratch);
1226 24 : BaseIndex fieldAddr(obj, scratch, TimesOne);
1227 :
1228 : // Note that the storeUnboxedProperty call here is infallible, as the
1229 : // IR emitter is responsible for guarding on |val|'s type.
1230 12 : EmitICUnboxedPreBarrier(masm, fieldAddr, fieldType);
1231 : masm.storeUnboxedProperty(fieldAddr, fieldType,
1232 0 : ConstantOrRegister(TypedOrValueRegister(val)),
1233 12 : /* failure = */ nullptr);
1234 :
1235 0 : if (UnboxedTypeNeedsPostBarrier(fieldType))
1236 8 : emitPostBarrierSlot(obj, val, scratch);
1237 : return true;
1238 : }
1239 :
bool
BaselineCacheIRCompiler::emitStoreTypedObjectReferenceProperty()
{
    ObjOperandId objId = reader.objOperandId();
    Address offsetAddr = stubAddress(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    ReferenceTypeDescr::Type type = reader.referenceTypeDescrType();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch1(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    AutoScratchRegister scratch2(allocator, masm);

    // We don't need a type update IC if the property is always a string.
    if (type != ReferenceTypeDescr::TYPE_STRING) {
        LiveGeneralRegisterSet saveRegs;
        saveRegs.add(obj);
        saveRegs.add(val);
        if (!callTypeUpdateIC(obj, val, scratch1, saveRegs))
            return false;
    }

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    masm.addPtr(offsetAddr, scratch1);
    Address dest(scratch1, 0);

    emitStoreTypedObjectReferenceProp(val, type, dest, scratch2);
    emitPostBarrierSlot(obj, val, scratch1);

    return true;
}

bool
BaselineCacheIRCompiler::emitStoreTypedObjectScalarProperty()
{
    Register obj = allocator.useRegister(masm, reader.objOperandId());
    Address offsetAddr = stubAddress(reader.stubOffset());
    TypedThingLayout layout = reader.typedThingLayout();
    Scalar::Type type = reader.scalarType();
    ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
    AutoScratchRegister scratch1(allocator, masm);
    AutoScratchRegister scratch2(allocator, masm);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Compute the address being written to.
    LoadTypedThingData(masm, layout, obj, scratch1);
    masm.addPtr(offsetAddr, scratch1);
    Address dest(scratch1, 0);

    StoreToTypedArray(cx_, masm, type, val, dest, scratch2, failure->label());
    return true;
}

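// Stores to an existing dense element. Fails to the next stub on
// out-of-bounds indices, holes, copy-on-write elements, and frozen elements;
// int32 values are converted in place when the array has double elements.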
bool
BaselineCacheIRCompiler::emitStoreDenseElement()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    // Bounds check. Unfortunately we don't have more registers available on
    // x86, so use InvalidReg and emit slightly slower code on x86.
    Register spectreTemp = InvalidReg;
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());

    // Hole check.
    BaseObjectElementIndex element(scratch, index);
    masm.branchTestMagic(Assembler::Equal, element, failure->label());

    // Perform a single test to see whether we need to convert double
    // elements, clone the copy-on-write elements in the object, or fail
    // due to a frozen element.
    Label noSpecialHandling;
    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
    masm.branchTest32(Assembler::Zero, elementsFlags,
                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS |
                            ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      &noSpecialHandling);

    // Fail if we need to clone copy-on-write elements or to throw due
    // to a frozen element.
    masm.branchTest32(Assembler::NonZero, elementsFlags,
                      Imm32(ObjectElements::COPY_ON_WRITE |
                            ObjectElements::FROZEN),
                      failure->label());

    // We need to convert int32 values being stored into doubles. Note that
    // double arrays are only created by IonMonkey, so if we have no FP support
    // Ion is disabled and there should be no double arrays.
    if (cx_->runtime()->jitSupportsFloatingPoint) {
        // It's fine to convert the value in place in Baseline. We can't do
        // this in Ion.
        masm.convertInt32ValueToDouble(val);
    } else {
        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
    }

    masm.bind(&noSpecialHandling);

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(index);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
        return false;

    // Perform the store. Reload obj->elements because callTypeUpdateIC
    // used the scratch register.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
    EmitPreBarrier(masm, element, MIRType::Value);
    masm.storeValue(val, element);

    emitPostBarrierElement(obj, val, scratch, index);
    return true;
}

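// Like emitStoreDenseElement, but can also add an element at
// index == initLength, growing the element storage through
// NativeObject::addDenseElementDontReportOOM when index reaches the capacity.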
bool
BaselineCacheIRCompiler::emitStoreDenseElementHole()
{
    ObjOperandId objId = reader.objOperandId();
    Int32OperandId indexId = reader.int32OperandId();

    // Allocate the fixed registers first. These need to be fixed for
    // callTypeUpdateIC.
    AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
    ValueOperand val = allocator.useFixedValueRegister(masm, reader.valOperandId(), R0);

    Register obj = allocator.useRegister(masm, objId);
    Register index = allocator.useRegister(masm, indexId);

    bool handleAdd = reader.readBool();

    FailurePath* failure;
    if (!addFailurePath(&failure))
        return false;

    // Load obj->elements in scratch.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    BaseObjectElementIndex element(scratch, index);
    Address initLength(scratch, ObjectElements::offsetOfInitializedLength());
    Address elementsFlags(scratch, ObjectElements::offsetOfFlags());

    // Check for copy-on-write elements. Note that this stub is not attached for
    // non-extensible objects, so the shape guard ensures there are no sealed or
    // frozen elements.
    masm.branchTest32(Assembler::NonZero, elementsFlags,
                      Imm32(ObjectElements::COPY_ON_WRITE),
                      failure->label());

    // We don't have enough registers on x86 so use InvalidReg. This will emit
    // slightly less efficient code on x86.
    Register spectreTemp = InvalidReg;

    if (handleAdd) {
        // Bounds check.
        Label capacityOk, outOfBounds;
        masm.spectreBoundsCheck32(index, initLength, spectreTemp, &outOfBounds);
        masm.jump(&capacityOk);

        // If we're out-of-bounds, only handle the index == initLength case.
        masm.bind(&outOfBounds);
        masm.branch32(Assembler::NotEqual, initLength, index, failure->label());

        // If index < capacity, we can add a dense element inline. If not we
        // need to allocate more elements.
        Label allocElement;
        Address capacity(scratch, ObjectElements::offsetOfCapacity());
        masm.spectreBoundsCheck32(index, capacity, spectreTemp, &allocElement);
        masm.jump(&capacityOk);

        // Check for non-writable array length. We only have to do this if
        // index >= capacity.
        masm.bind(&allocElement);
        masm.branchTest32(Assembler::NonZero, elementsFlags,
                          Imm32(ObjectElements::NONWRITABLE_ARRAY_LENGTH),
                          failure->label());

        LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
        save.takeUnchecked(scratch);
        masm.PushRegsInMask(save);

        masm.setupUnalignedABICall(scratch);
        masm.loadJSContext(scratch);
        masm.passABIArg(scratch);
        masm.passABIArg(obj);
        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::addDenseElementDontReportOOM));
        masm.mov(ReturnReg, scratch);

        masm.PopRegsInMask(save);
        masm.branchIfFalseBool(scratch, failure->label());

        // Load the reallocated elements pointer.
        masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

        masm.bind(&capacityOk);

        // We increment initLength after the callTypeUpdateIC call, to ensure
        // the type update code doesn't read uninitialized memory.
    } else {
        // Fail if index >= initLength.
        masm.spectreBoundsCheck32(index, initLength, spectreTemp, failure->label());
    }

    // Check if we have to convert a double element.
    Label noConversion;
    masm.branchTest32(Assembler::Zero, elementsFlags,
                      Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
                      &noConversion);

    // We need to convert int32 values being stored into doubles. Note that
    // double arrays are only created by IonMonkey, so if we have no FP support
    // Ion is disabled and there should be no double arrays.
    if (cx_->runtime()->jitSupportsFloatingPoint) {
        // It's fine to convert the value in place in Baseline. We can't do
        // this in Ion.
        masm.convertInt32ValueToDouble(val);
    } else {
        masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
    }

    masm.bind(&noConversion);

    // Call the type update IC. After this everything must be infallible as we
    // don't save all registers here.
    LiveGeneralRegisterSet saveRegs;
    saveRegs.add(obj);
    saveRegs.add(index);
    saveRegs.add(val);
    if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
        return false;

    // Reload obj->elements as callTypeUpdateIC used the scratch register.
    masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);

    Label doStore;
    if (handleAdd) {
        // If index == initLength, increment initLength.
        Label inBounds;
        masm.branch32(Assembler::NotEqual, initLength, index, &inBounds);

        // Increment initLength.
        masm.add32(Imm32(1), initLength);

        // If length is now <= index, increment length too.
        Label skipIncrementLength;
        Address length(scratch, ObjectElements::offsetOfLength());
        masm.branch32(Assembler::Above, length, index, &skipIncrementLength);
        masm.add32(Imm32(1), length);
        masm.bind(&skipIncrementLength);

        // Skip EmitPreBarrier as the memory is uninitialized.
        masm.jump(&doStore);

        masm.bind(&inBounds);
    }

    EmitPreBarrier(masm, element, MIRType::Value);

    masm.bind(&doStore);
    masm.storeValue(val, element);

    emitPostBarrierElement(obj, val, scratch, index);
    return true;
}

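// Dense-array fast path for Array.prototype.push: appends |val| at the
// current length (growing the elements if needed) and boxes the new length
// back into |val| as the IC's result.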
1531 : bool
1532 3 : BaselineCacheIRCompiler::emitArrayPush()
1533 : {
1534 0 : ObjOperandId objId = reader.objOperandId();
1535 6 : ValOperandId rhsId = reader.valOperandId();
1536 :
1537 : // Allocate the fixed registers first. These need to be fixed for
1538 : // callTypeUpdateIC.
1539 6 : AutoScratchRegister scratch(allocator, masm, R1.scratchReg());
1540 3 : ValueOperand val = allocator.useFixedValueRegister(masm, rhsId, R0);
1541 :
1542 0 : Register obj = allocator.useRegister(masm, objId);
1543 6 : AutoScratchRegister scratchLength(allocator, masm);
1544 :
1545 : FailurePath* failure;
1546 3 : if (!addFailurePath(&failure))
1547 : return false;
1548 :
1549 : // Load obj->elements in scratch.
1550 6 : masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
1551 :
1552 0 : Address elementsInitLength(scratch, ObjectElements::offsetOfInitializedLength());
1553 6 : Address elementsLength(scratch, ObjectElements::offsetOfLength());
1554 0 : Address elementsFlags(scratch, ObjectElements::offsetOfFlags());
1555 :
1556 : // Check for copy-on-write elements. Note that this stub is not attached for
1557 : // non-extensible objects, so the shape guard ensures there are no sealed or
1558 : // frozen elements.
1559 12 : masm.branchTest32(Assembler::NonZero, elementsFlags,
1560 : Imm32(ObjectElements::COPY_ON_WRITE),
1561 0 : failure->label());
1562 :
1563 : // Fail if length != initLength.
1564 3 : masm.load32(elementsInitLength, scratchLength);
1565 6 : masm.branch32(Assembler::NotEqual, elementsLength, scratchLength, failure->label());
1566 :
1567 : // If scratchLength < capacity, we can add a dense element inline. If not,
1568 : // we need to allocate more elements first.
1569 9 : Label capacityOk, allocElement;
1570 6 : Address capacity(scratch, ObjectElements::offsetOfCapacity());
1571 0 : masm.spectreBoundsCheck32(scratchLength, capacity, InvalidReg, &allocElement);
1572 0 : masm.jump(&capacityOk);
1573 :
1574 : // Check for non-writable array length. We only have to do this if
1575 : // index >= capacity.
1576 3 : masm.bind(&allocElement);
1577 12 : masm.branchTest32(Assembler::NonZero, elementsFlags,
1578 : Imm32(ObjectElements::NONWRITABLE_ARRAY_LENGTH),
1579 0 : failure->label());
1580 :
1581 0 : LiveRegisterSet save(GeneralRegisterSet::Volatile(), liveVolatileFloatRegs());
1582 6 : save.takeUnchecked(scratch);
1583 0 : masm.PushRegsInMask(save);
1584 :
1585 0 : masm.setupUnalignedABICall(scratch);
1586 3 : masm.loadJSContext(scratch);
1587 0 : masm.passABIArg(scratch);
1588 0 : masm.passABIArg(obj);
1589 0 : masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, NativeObject::addDenseElementDontReportOOM));
1590 0 : masm.mov(ReturnReg, scratch);
1591 :
1592 0 : masm.PopRegsInMask(save);
1593 9 : masm.branchIfFalseBool(scratch, failure->label());
1594 :
1595 : // Load the reallocated elements pointer.
1596 6 : masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
1597 :
1598 0 : masm.bind(&capacityOk);
1599 :
1600 : // Check if we have to convert a double element.
1601 6 : Label noConversion;
1602 6 : masm.branchTest32(Assembler::Zero, elementsFlags,
1603 : Imm32(ObjectElements::CONVERT_DOUBLE_ELEMENTS),
1604 0 : &noConversion);
1605 :
1606 : // We need to convert int32 values being stored into doubles. Note that
1607 : // double arrays are only created by IonMonkey, so if we have no FP support,
1608 : // Ion is disabled and there should be no double arrays.
1609 9 : if (cx_->runtime()->jitSupportsFloatingPoint) {
1610 : // It's fine to convert the value in place in Baseline. We can't do
1611 : // this in Ion.
1612 3 : masm.convertInt32ValueToDouble(val);
1613 : } else {
1614 0 : masm.assumeUnreachable("There shouldn't be double arrays when there is no FP support.");
1615 : }
1616 :
1617 3 : masm.bind(&noConversion);
1618 :
1619 : // Call the type update IC. After this, everything must be infallible, as
1620 : // we don't save all registers here.
1621 3 : LiveGeneralRegisterSet saveRegs;
1622 3 : saveRegs.add(obj);
1623 0 : saveRegs.add(val);
1624 0 : if (!callTypeUpdateIC(obj, val, scratch, saveRegs))
1625 : return false;
1626 :
1627 : // Reload obj->elements as callTypeUpdateIC used the scratch register.
1628 6 : masm.loadPtr(Address(obj, NativeObject::offsetOfElements()), scratch);
1629 :
1630 : // Increment initLength and length.
1631 3 : masm.add32(Imm32(1), elementsInitLength);
1632 3 : masm.load32(elementsLength, scratchLength);
1633 0 : masm.add32(Imm32(1), elementsLength);
1634 :
1635 : // Store the value.
1636 6 : BaseObjectElementIndex element(scratch, scratchLength);
1637 3 : masm.storeValue(val, element);
1638 0 : emitPostBarrierElement(obj, val, scratch, scratchLength);
1639 :
1640 : // Return value is new length.
1641 6 : masm.add32(Imm32(1), scratchLength);
1642 3 : masm.tagValue(JSVAL_TYPE_INT32, scratchLength, val);
1643 :
1644 0 : return true;
1645 : }
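// A compact C++ rendering of the push fast path compiled above, with
// invented names; it is a sketch, not the real implementation. Where the
// jitted code jumps to the failure path or calls
// NativeObject::addDenseElementDontReportOOM to grow the capacity, the
// sketch simply returns false.
static bool
ArrayPushFastPathSketch(uint32_t& length, uint32_t& initLength,
                        uint32_t capacity, bool copyOnWrite,
                        JS::Value* elements, const JS::Value& val,
                        uint32_t* newLength)
{
    if (copyOnWrite)
        return false;       // COPY_ON_WRITE flag set: take the fallback
    if (length != initLength)
        return false;       // length != initializedLength: take the fallback
    if (length >= capacity)
        return false;       // the jitted code grows the capacity out of line
    elements[length] = val; // store (the jitted code adds GC barriers)
    initLength++;
    *newLength = ++length;  // push returns the new length as an int32
    return true;
}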
1646 :
1647 : bool
1648 0 : BaselineCacheIRCompiler::emitStoreTypedElement()
1649 : {
1650 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1651 0 : Register index = allocator.useRegister(masm, reader.int32OperandId());
1652 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1653 :
1654 0 : TypedThingLayout layout = reader.typedThingLayout();
1655 0 : Scalar::Type type = reader.scalarType();
1656 0 : bool handleOOB = reader.readBool();
1657 :
1658 0 : AutoScratchRegister scratch1(allocator, masm);
1659 :
1660 : FailurePath* failure;
1661 0 : if (!addFailurePath(&failure))
1662 : return false;
1663 :
1664 : // Bounds check.
1665 0 : Label done;
1666 0 : LoadTypedThingLength(masm, layout, obj, scratch1);
1667 :
1668 : // Unfortunately we don't have any more registers available on x86, so
1669 : // use InvalidReg there and emit slightly slower code.
1670 0 : Register spectreTemp = InvalidReg;
1671 0 : masm.spectreBoundsCheck32(index, scratch1, spectreTemp, handleOOB ? &done : failure->label());
1672 :
1673 : // Load the elements vector.
1674 0 : LoadTypedThingData(masm, layout, obj, scratch1);
1675 :
1676 0 : BaseIndex dest(scratch1, index, ScaleFromElemWidth(Scalar::byteSize(type)));
1677 :
1678 : // Use ICStubReg as a second scratch register. TODO: consider doing the
1679 : // RHS type check/conversion as a separate IR instruction so we can
1680 : // simplify this.
1681 0 : Register scratch2 = ICStubReg;
1682 0 : masm.push(scratch2);
1683 :
1684 0 : Label fail;
1685 0 : StoreToTypedArray(cx_, masm, type, val, dest, scratch2, &fail);
1686 0 : masm.pop(scratch2);
1687 0 : masm.jump(&done);
1688 :
1689 0 : masm.bind(&fail);
1690 0 : masm.pop(scratch2);
1691 0 : masm.jump(failure->label());
1692 :
1693 0 : masm.bind(&done);
1694 : return true;
1695 : }
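// The BaseIndex operand above encodes the usual typed-element addressing:
// element address = data pointer + index scaled by the element width. A
// trivial C++ equivalent (illustrative only):
static uint8_t*
TypedElementAddressSketch(uint8_t* data, uint32_t index, size_t bytesPerElement)
{
    // bytesPerElement corresponds to Scalar::byteSize(type) above.
    return data + size_t(index) * bytesPerElement;
}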
1696 :
1697 : typedef bool (*CallNativeSetterFn)(JSContext*, HandleFunction, HandleObject, HandleValue);
1698 1 : static const VMFunction CallNativeSetterInfo =
1699 3 : FunctionInfo<CallNativeSetterFn>(CallNativeSetter, "CallNativeSetter");
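// The typedef/FunctionInfo pair above is the standard VMFunction pattern
// used throughout this file: the typedef records the C++ signature so that
// callVM can marshal the arguments the stub pushed (as the call sites below
// show, arguments are pushed in reverse order, last argument first, with
// the JSContext* supplied by the VM wrapper), and the string names the
// function for profiling and debugging output.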
1700 :
1701 : bool
1702 3 : BaselineCacheIRCompiler::emitCallNativeSetter()
1703 : {
1704 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1705 9 : Address setterAddr(stubAddress(reader.stubOffset()));
1706 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1707 :
1708 0 : AutoScratchRegister scratch(allocator, masm);
1709 :
1710 0 : allocator.discardStack(masm);
1711 :
1712 0 : AutoStubFrame stubFrame(*this);
1713 3 : stubFrame.enter(masm, scratch);
1714 :
1715 : // Load the callee in the scratch register.
1716 3 : masm.loadPtr(setterAddr, scratch);
1717 :
1718 0 : masm.Push(val);
1719 3 : masm.Push(obj);
1720 0 : masm.Push(scratch);
1721 :
1722 0 : if (!callVM(masm, CallNativeSetterInfo))
1723 : return false;
1724 :
1725 3 : stubFrame.leave(masm);
1726 3 : return true;
1727 : }
1728 :
1729 : bool
1730 2 : BaselineCacheIRCompiler::emitCallScriptedSetter()
1731 : {
1732 0 : AutoScratchRegister scratch1(allocator, masm);
1733 4 : AutoScratchRegister scratch2(allocator, masm);
1734 :
1735 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1736 6 : Address setterAddr(stubAddress(reader.stubOffset()));
1737 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1738 :
1739 : // First, ensure our setter is non-lazy. This also loads the callee in
1740 : // scratch1.
1741 : {
1742 : FailurePath* failure;
1743 2 : if (!addFailurePath(&failure))
1744 0 : return false;
1745 :
1746 1 : masm.loadPtr(setterAddr, scratch1);
1747 6 : masm.branchIfFunctionHasNoJitEntry(scratch1, /* constructing */ false, failure->label());
1748 : }
1749 :
1750 2 : allocator.discardStack(masm);
1751 :
1752 0 : AutoStubFrame stubFrame(*this);
1753 2 : stubFrame.enter(masm, scratch2);
1754 :
1755 : // Align the stack such that the JitFrameLayout is aligned on
1756 : // JitStackAlignment.
1757 2 : masm.alignJitStackBasedOnNArgs(1);
1758 :
1759 : // Setter is called with 1 argument, and |obj| as thisv. Note that we use
1760 : // Push, not push, so that callJit will align the stack properly on ARM.
1761 2 : masm.Push(val);
1762 4 : masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
1763 :
1764 : // The object register is no longer needed past this point, and scratch2
1765 : // is free again after entering the stub frame; use it for the descriptor.
1766 2 : EmitBaselineCreateStubFrameDescriptor(masm, scratch2, JitFrameLayout::Size());
1767 2 : masm.Push(Imm32(1)); // ActualArgc
1768 :
1769 : // Push callee.
1770 2 : masm.Push(scratch1);
1771 :
1772 : // Push frame descriptor.
1773 2 : masm.Push(scratch2);
1774 :
1775 : // Load callee->nargs in scratch2 and the JIT code in scratch.
1776 4 : Label noUnderflow;
1777 4 : masm.load16ZeroExtend(Address(scratch1, JSFunction::offsetOfNargs()), scratch2);
1778 0 : masm.loadJitCodeRaw(scratch1, scratch1);
1779 :
1780 : // Handle arguments underflow.
1781 4 : masm.branch32(Assembler::BelowOrEqual, scratch2, Imm32(1), &noUnderflow);
1782 : {
1783 : // Call the arguments rectifier.
1784 8 : TrampolinePtr argumentsRectifier = cx_->runtime()->jitRuntime()->getArgumentsRectifier();
1785 2 : masm.movePtr(argumentsRectifier, scratch1);
1786 : }
1787 :
1788 2 : masm.bind(&noUnderflow);
1789 2 : masm.callJit(scratch1);
1790 :
1791 0 : stubFrame.leave(masm, true);
1792 : return true;
1793 : }
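// Order of the pushes above, first to last: arg0 (val), |this| (obj),
// actual argc (1), callee, frame descriptor. The descriptor therefore ends
// up on top of the stack, which is the layout callJit expects; when
// callee->nargs > 1 the arguments rectifier runs first and pads the missing
// formal arguments with |undefined|.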
1794 :
1795 : typedef bool (*SetArrayLengthFn)(JSContext*, HandleObject, HandleValue, bool);
1796 1 : static const VMFunction SetArrayLengthInfo =
1797 3 : FunctionInfo<SetArrayLengthFn>(SetArrayLength, "SetArrayLength");
1798 :
1799 : bool
1800 2 : BaselineCacheIRCompiler::emitCallSetArrayLength()
1801 : {
1802 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1803 2 : bool strict = reader.readBool();
1804 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1805 :
1806 0 : AutoScratchRegister scratch(allocator, masm);
1807 :
1808 0 : allocator.discardStack(masm);
1809 :
1810 0 : AutoStubFrame stubFrame(*this);
1811 2 : stubFrame.enter(masm, scratch);
1812 :
1813 0 : masm.Push(Imm32(strict));
1814 2 : masm.Push(val);
1815 0 : masm.Push(obj);
1816 :
1817 0 : if (!callVM(masm, SetArrayLengthInfo))
1818 : return false;
1819 :
1820 2 : stubFrame.leave(masm);
1821 2 : return true;
1822 : }
1823 :
1824 : typedef bool (*ProxySetPropertyFn)(JSContext*, HandleObject, HandleId, HandleValue, bool);
1825 1 : static const VMFunction ProxySetPropertyInfo =
1826 3 : FunctionInfo<ProxySetPropertyFn>(ProxySetProperty, "ProxySetProperty");
1827 :
1828 : bool
1829 2 : BaselineCacheIRCompiler::emitCallProxySet()
1830 : {
1831 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1832 4 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1833 0 : Address idAddr(stubAddress(reader.stubOffset()));
1834 0 : bool strict = reader.readBool();
1835 :
1836 0 : AutoScratchRegister scratch(allocator, masm);
1837 :
1838 0 : allocator.discardStack(masm);
1839 :
1840 0 : AutoStubFrame stubFrame(*this);
1841 2 : stubFrame.enter(masm, scratch);
1842 :
1843 : // Load the jsid in the scratch register.
1844 2 : masm.loadPtr(idAddr, scratch);
1845 :
1846 0 : masm.Push(Imm32(strict));
1847 2 : masm.Push(val);
1848 0 : masm.Push(scratch);
1849 0 : masm.Push(obj);
1850 :
1851 0 : if (!callVM(masm, ProxySetPropertyInfo))
1852 : return false;
1853 :
1854 2 : stubFrame.leave(masm);
1855 2 : return true;
1856 : }
1857 :
1858 : typedef bool (*ProxySetPropertyByValueFn)(JSContext*, HandleObject, HandleValue, HandleValue, bool);
1859 1 : static const VMFunction ProxySetPropertyByValueInfo =
1860 3 : FunctionInfo<ProxySetPropertyByValueFn>(ProxySetPropertyByValue, "ProxySetPropertyByValue");
1861 :
1862 : bool
1863 0 : BaselineCacheIRCompiler::emitCallProxySetByValue()
1864 : {
1865 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1866 0 : ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
1867 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1868 0 : bool strict = reader.readBool();
1869 :
1870 0 : allocator.discardStack(masm);
1871 :
1872 : // We need a scratch register but we don't have any registers available on
1873 : // x86, so temporarily store |obj| in the frame's scratch slot.
1874 0 : int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
1875 0 : masm.storePtr(obj, Address(BaselineFrameReg, scratchOffset));
1876 :
1877 0 : AutoStubFrame stubFrame(*this);
1878 0 : stubFrame.enter(masm, obj);
1879 :
1880 : // Restore |obj|. Because we entered a stub frame, we first have to load
1881 : // the original frame pointer.
1882 0 : masm.loadPtr(Address(BaselineFrameReg, 0), obj);
1883 0 : masm.loadPtr(Address(obj, scratchOffset), obj);
1884 :
1885 0 : masm.Push(Imm32(strict));
1886 0 : masm.Push(val);
1887 0 : masm.Push(idVal);
1888 0 : masm.Push(obj);
1889 :
1890 0 : if (!callVM(masm, ProxySetPropertyByValueInfo))
1891 : return false;
1892 :
1893 0 : stubFrame.leave(masm);
1894 0 : return true;
1895 : }
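// The spill/reload dance above, spelled out: with no free general-purpose
// registers on x86, |obj| is parked in the BaselineFrame's scratch Value
// slot so its register can serve as the stub frame's temporary. Entering
// the stub frame pushes a new frame pointer, so the reload first fetches
// the saved BaselineFrameReg from offset 0 and then reads the scratch slot
// through it. emitMegamorphicSetElement below uses the same technique.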
1896 :
1897 : bool
1898 3 : BaselineCacheIRCompiler::emitMegamorphicSetElement()
1899 : {
1900 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1901 6 : ValueOperand idVal = allocator.useValueRegister(masm, reader.valOperandId());
1902 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
1903 0 : bool strict = reader.readBool();
1904 :
1905 0 : allocator.discardStack(masm);
1906 :
1907 : // We need a scratch register but we don't have any registers available on
1908 : // x86, so temporarily store |obj| in the frame's scratch slot.
1909 3 : int scratchOffset = BaselineFrame::reverseOffsetOfScratchValue();
1910 3 : masm.storePtr(obj, Address(BaselineFrameReg, scratchOffset));
1911 :
1912 0 : AutoStubFrame stubFrame(*this);
1913 3 : stubFrame.enter(masm, obj);
1914 :
1915 : // Restore |obj|. Because we entered a stub frame, we first have to load
1916 : // the original frame pointer.
1917 3 : masm.loadPtr(Address(BaselineFrameReg, 0), obj);
1918 3 : masm.loadPtr(Address(obj, scratchOffset), obj);
1919 :
1920 0 : masm.Push(Imm32(strict));
1921 6 : masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(obj)));
1922 0 : masm.Push(val);
1923 0 : masm.Push(idVal);
1924 0 : masm.Push(obj);
1925 :
1926 0 : if (!callVM(masm, SetObjectElementInfo))
1927 : return false;
1928 :
1929 3 : stubFrame.leave(masm);
1930 3 : return true;
1931 : }
1932 :
1933 : bool
1934 0 : BaselineCacheIRCompiler::emitTypeMonitorResult()
1935 : {
1936 1 : allocator.discardStack(masm);
1937 146 : EmitEnterTypeMonitorIC(masm);
1938 0 : return true;
1939 : }
1940 :
1941 : bool
1942 233 : BaselineCacheIRCompiler::emitReturnFromIC()
1943 : {
1944 0 : allocator.discardStack(masm);
1945 466 : EmitReturnFromIC(masm);
1946 0 : return true;
1947 : }
1948 :
1949 : bool
1950 15 : BaselineCacheIRCompiler::emitLoadStackValue()
1951 : {
1952 0 : ValueOperand val = allocator.defineValueRegister(masm, reader.valOperandId());
1953 45 : Address addr = allocator.addressOf(masm, BaselineFrameSlot(reader.uint32Immediate()));
1954 0 : masm.loadValue(addr, val);
1955 0 : return true;
1956 : }
1957 :
1958 : bool
1959 3 : BaselineCacheIRCompiler::emitGuardAndGetIterator()
1960 : {
1961 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
1962 :
1963 0 : AutoScratchRegister scratch1(allocator, masm);
1964 6 : AutoScratchRegister scratch2(allocator, masm);
1965 0 : AutoScratchRegister niScratch(allocator, masm);
1966 :
1967 0 : Address iterAddr(stubAddress(reader.stubOffset()));
1968 9 : Address enumeratorsAddr(stubAddress(reader.stubOffset()));
1969 :
1970 0 : Register output = allocator.defineRegister(masm, reader.objOperandId());
1971 :
1972 : FailurePath* failure;
1973 3 : if (!addFailurePath(&failure))
1974 : return false;
1975 :
1976 : // Load our PropertyIteratorObject* and its NativeIterator.
1977 3 : masm.loadPtr(iterAddr, output);
1978 6 : masm.loadObjPrivate(output, JSObject::ITER_CLASS_NFIXED_SLOTS, niScratch);
1979 :
1980 : // Ensure the iterator is reusable: see NativeIterator::isReusable.
1981 6 : masm.branchIfNativeIteratorNotReusable(niScratch, failure->label());
1982 :
1983 : // Pre-write barrier for store to 'objectBeingIterated_'.
1984 6 : Address iterObjAddr(niScratch, NativeIterator::offsetOfObjectBeingIterated());
1985 6 : EmitPreBarrier(masm, iterObjAddr, MIRType::Object);
1986 :
1987 : // Mark iterator as active.
1988 6 : Address iterFlagsAddr(niScratch, NativeIterator::offsetOfFlags());
1989 3 : masm.storePtr(obj, iterObjAddr);
1990 0 : masm.or32(Imm32(NativeIterator::Flags::Active), iterFlagsAddr);
1991 :
1992 : // Post-write barrier for stores to 'objectBeingIterated_'.
1993 12 : emitPostBarrierSlot(output, TypedOrValueRegister(MIRType::Object, AnyRegister(obj)), scratch1);
1994 :
1995 : // Chain onto the active iterator stack. Note that Baseline CacheIR stub
1996 : // code is shared across compartments within a Zone, so we can't bake in
1997 : // compartment->enumerators here.
1998 3 : masm.loadPtr(enumeratorsAddr, scratch1);
1999 6 : masm.loadPtr(Address(scratch1, 0), scratch1);
2000 0 : emitRegisterEnumerator(scratch1, niScratch, scratch2);
2001 :
2002 0 : return true;
2003 : }
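// A reading aid for the sequence above, as illustrative pseudo-C++ (the
// real NativeIterator API may differ in names):
//
//   if (!ni->isReusable())
//       goto failure;                    // branchIfNativeIteratorNotReusable
//   ni->objectBeingIterated_ = obj;      // store with pre/post barriers
//   ni->flags_ |= NativeIterator::Flags::Active;
//   registerEnumerator(enumerators, ni); // chain onto the active list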
2004 :
2005 : bool
2006 1 : BaselineCacheIRCompiler::emitGuardDOMExpandoMissingOrGuardShape()
2007 : {
2008 0 : ValueOperand val = allocator.useValueRegister(masm, reader.valOperandId());
2009 2 : AutoScratchRegister shapeScratch(allocator, masm);
2010 0 : AutoScratchRegister objScratch(allocator, masm);
2011 0 : Address shapeAddr(stubAddress(reader.stubOffset()));
2012 :
2013 : FailurePath* failure;
2014 1 : if (!addFailurePath(&failure))
2015 : return false;
2016 :
2017 2 : Label done;
2018 2 : masm.branchTestUndefined(Assembler::Equal, val, &done);
2019 :
2020 0 : masm.debugAssertIsObject(val);
2021 1 : masm.loadPtr(shapeAddr, shapeScratch);
2022 0 : masm.unboxObject(val, objScratch);
2023 : // The expando object is not used in this case, so we don't need Spectre
2024 : // mitigations.
2025 2 : masm.branchTestObjShapeNoSpectreMitigations(Assembler::NotEqual, objScratch, shapeScratch,
2026 1 : failure->label());
2027 :
2028 0 : masm.bind(&done);
2029 : return true;
2030 : }
2031 :
2032 : bool
2033 1 : BaselineCacheIRCompiler::emitLoadDOMExpandoValueGuardGeneration()
2034 : {
2035 0 : Register obj = allocator.useRegister(masm, reader.objOperandId());
2036 3 : Address expandoAndGenerationAddr(stubAddress(reader.stubOffset()));
2037 0 : Address generationAddr(stubAddress(reader.stubOffset()));
2038 :
2039 0 : AutoScratchRegister scratch(allocator, masm);
2040 2 : ValueOperand output = allocator.defineValueRegister(masm, reader.valOperandId());
2041 :
2042 : FailurePath* failure;
2043 1 : if (!addFailurePath(&failure))
2044 : return false;
2045 :
2046 2 : masm.loadPtr(Address(obj, ProxyObject::offsetOfReservedSlots()), scratch);
2047 2 : Address expandoAddr(scratch, detail::ProxyReservedSlots::offsetOfPrivateSlot());
2048 :
2049 : // Load the ExpandoAndGeneration* into the output scratch register and
2050 : // guard that it matches the proxy's ExpandoAndGeneration.
2051 1 : masm.loadPtr(expandoAndGenerationAddr, output.scratchReg());
2052 3 : masm.branchPrivatePtr(Assembler::NotEqual, expandoAddr, output.scratchReg(), failure->label());
2053 :
2054 : // Guard expandoAndGeneration->generation matches the expected generation.
2055 2 : masm.branch64(Assembler::NotEqual,
2056 4 : Address(output.scratchReg(), ExpandoAndGeneration::offsetOfGeneration()),
2057 : generationAddr,
2058 0 : scratch, failure->label());
2059 :
2060 : // Load expandoAndGeneration->expando into the output Value register.
2061 3 : masm.loadValue(Address(output.scratchReg(), ExpandoAndGeneration::offsetOfExpando()), output);
2062 1 : return true;
2063 : }
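// In C++ terms the guard above amounts to the following (names are
// illustrative placeholders, not the exact API):
//
//   auto* expected = <ExpandoAndGeneration* baked into the stub data>;
//   if (proxy->reservedSlots()->privateSlot() != expected)
//       goto failure;
//   if (expected->generation != <generation baked into the stub data>)
//       goto failure;
//   output = expected->expando;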
2064 :
2065 : bool
2066 379 : BaselineCacheIRCompiler::init(CacheKind kind)
2067 : {
2068 0 : if (!allocator.init())
2069 : return false;
2070 :
2071 : // Baseline ICs monitor values when needed, so returning doubles is fine.
2072 379 : allowDoubleResult_.emplace(true);
2073 :
2074 0 : size_t numInputs = writer_.numInputOperands();
2075 :
2076 : // Baseline passes the first two inputs in R0/R1; any other Values are
2077 : // stored on the stack.
2078 758 : size_t numInputsInRegs = std::min(numInputs, size_t(2));
2079 379 : AllocatableGeneralRegisterSet available(ICStubCompiler::availableGeneralRegs(numInputsInRegs));
2080 :
2081 0 : switch (kind) {
2082 : case CacheKind::GetIntrinsic:
2083 0 : MOZ_ASSERT(numInputs == 0);
2084 : break;
2085 : case CacheKind::GetProp:
2086 : case CacheKind::TypeOf:
2087 : case CacheKind::GetIterator:
2088 : case CacheKind::ToBool:
2089 : case CacheKind::UnaryArith:
2090 132 : MOZ_ASSERT(numInputs == 1);
2091 132 : allocator.initInputLocation(0, R0);
2092 0 : break;
2093 : case CacheKind::Compare:
2094 : case CacheKind::GetElem:
2095 : case CacheKind::GetPropSuper:
2096 : case CacheKind::SetProp:
2097 : case CacheKind::In:
2098 : case CacheKind::HasOwn:
2099 : case CacheKind::InstanceOf:
2100 175 : MOZ_ASSERT(numInputs == 2);
2101 175 : allocator.initInputLocation(0, R0);
2102 0 : allocator.initInputLocation(1, R1);
2103 0 : break;
2104 : case CacheKind::GetElemSuper:
2105 0 : MOZ_ASSERT(numInputs == 3);
2106 0 : allocator.initInputLocation(0, BaselineFrameSlot(0));
2107 0 : allocator.initInputLocation(1, R0);
2108 0 : allocator.initInputLocation(2, R1);
2109 0 : break;
2110 : case CacheKind::SetElem:
2111 0 : MOZ_ASSERT(numInputs == 3);
2112 33 : allocator.initInputLocation(0, R0);
2113 0 : allocator.initInputLocation(1, R1);
2114 0 : allocator.initInputLocation(2, BaselineFrameSlot(0));
2115 0 : break;
2116 : case CacheKind::GetName:
2117 : case CacheKind::BindName:
2118 29 : MOZ_ASSERT(numInputs == 1);
2119 29 : allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_OBJECT);
2120 : #if defined(JS_NUNBOX32)
2121 : // availableGeneralRegs can't know that GetName/BindName is only using
2122 : // the payloadReg and not typeReg on x86.
2123 : available.add(R0.typeReg());
2124 : #endif
2125 29 : break;
2126 : case CacheKind::Call:
2127 0 : MOZ_ASSERT(numInputs == 1);
2128 5 : allocator.initInputLocation(0, R0.scratchReg(), JSVAL_TYPE_INT32);
2129 : #if defined(JS_NUNBOX32)
2130 : // availableGeneralRegs can't know that Call is only using
2131 : // the payloadReg and not typeReg on x86.
2132 : available.add(R0.typeReg());
2133 : #endif
2134 5 : break;
2135 : }
2136 :
2137 : // Baseline doesn't allocate float registers so none of them are live.
2138 758 : liveFloatRegs_ = LiveFloatRegisterSet(FloatRegisterSet());
2139 :
2140 0 : allocator.initAvailableRegs(available);
2141 379 : outputUnchecked_.emplace(R0);
2142 0 : return true;
2143 : }
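// A concrete example of the mapping set up above: for CacheKind::SetElem
// the three inputs land as inputs 0 and 1 in R0 and R1, and input 2 in the
// bottom Baseline stack slot (BaselineFrameSlot(0)), since only the first
// two Values travel in registers.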
2144 :
2145 : static const size_t MaxOptimizedCacheIRStubs = 16;
2146 :
2147 : ICStub*
2148 11144 : js::jit::AttachBaselineCacheIRStub(JSContext* cx, const CacheIRWriter& writer,
2149 : CacheKind kind, BaselineCacheIRStubKind stubKind,
2150 : ICStubEngine engine, JSScript* outerScript,
2151 : ICFallbackStub* stub, bool* attached)
2152 : {
2153 : // We shouldn't GC or report OOM (or any other exception) here.
2154 22288 : AutoAssertNoPendingException aanpe(cx);
2155 22288 : JS::AutoCheckCannotGC nogc;
2156 :
2157 0 : MOZ_ASSERT(!*attached);
2158 :
2159 0 : if (writer.failed())
2160 : return nullptr;
2161 :
2162 : // Just a sanity check: the caller should ensure we don't attach an
2163 : // unlimited number of stubs.
2164 11144 : MOZ_ASSERT(stub->numOptimizedStubs() < MaxOptimizedCacheIRStubs);
2165 :
2166 0 : uint32_t stubDataOffset = 0;
2167 : switch (stubKind) {
2168 : case BaselineCacheIRStubKind::Monitored:
2169 : stubDataOffset = sizeof(ICCacheIR_Monitored);
2170 : break;
2171 : case BaselineCacheIRStubKind::Regular:
2172 : stubDataOffset = sizeof(ICCacheIR_Regular);
2173 : break;
2174 : case BaselineCacheIRStubKind::Updated:
2175 : stubDataOffset = sizeof(ICCacheIR_Updated);
2176 : break;
2177 : }
2178 :
2179 22288 : JitZone* jitZone = cx->zone()->jitZone();
2180 :
2181 : // Check if we already have JitCode for this stub.
2182 : CacheIRStubInfo* stubInfo;
2183 22288 : CacheIRStubKey::Lookup lookup(kind, engine, writer.codeStart(), writer.codeLength());
2184 11144 : JitCode* code = jitZone->getBaselineCacheIRStubCode(lookup, &stubInfo);
2185 0 : if (!code) {
2186 : // We have to generate stub code.
2187 0 : JitContext jctx(cx, nullptr);
2188 758 : BaselineCacheIRCompiler comp(cx, writer, engine, stubDataOffset);
2189 0 : if (!comp.init(kind))
2190 0 : return nullptr;
2191 :
2192 0 : code = comp.compile();
2193 379 : if (!code)
2194 : return nullptr;
2195 :
2196 : // Allocate the shared CacheIRStubInfo. Note that the
2197 : // putBaselineCacheIRStubCode call below will transfer ownership
2198 : // to the stub code HashMap, so we don't have to worry about freeing
2199 : // it here.
2200 379 : MOZ_ASSERT(!stubInfo);
2201 379 : stubInfo = CacheIRStubInfo::New(kind, engine, comp.makesGCCalls(), stubDataOffset, writer);
2202 0 : if (!stubInfo)
2203 : return nullptr;
2204 :
2205 1137 : CacheIRStubKey key(stubInfo);
2206 379 : if (!jitZone->putBaselineCacheIRStubCode(lookup, key, code))
2207 0 : return nullptr;
2208 : }
2209 :
2210 11144 : MOZ_ASSERT(code);
2211 11144 : MOZ_ASSERT(stubInfo);
2212 0 : MOZ_ASSERT(stubInfo->stubDataSize() == writer.stubDataSize());
2213 :
2214 : // Ensure we don't attach duplicate stubs. This can happen if a stub failed
2215 : // for some reason and the IR generator doesn't check for exactly the same
2216 : // conditions.
2217 72068 : for (ICStubConstIterator iter = stub->beginChainConst(); !iter.atEnd(); iter++) {
2218 19397 : bool updated = false;
2219 0 : switch (stubKind) {
2220 : case BaselineCacheIRStubKind::Regular: {
2221 0 : if (!iter->isCacheIR_Regular())
2222 19318 : continue;
2223 0 : auto otherStub = iter->toCacheIR_Regular();
2224 0 : if (otherStub->stubInfo() != stubInfo)
2225 : continue;
2226 0 : if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
2227 : continue;
2228 : break;
2229 : }
2230 : case BaselineCacheIRStubKind::Monitored: {
2231 26166 : if (!iter->isCacheIR_Monitored())
2232 : continue;
2233 0 : auto otherStub = iter->toCacheIR_Monitored();
2234 6082 : if (otherStub->stubInfo() != stubInfo)
2235 : continue;
2236 0 : if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
2237 : continue;
2238 : break;
2239 : }
2240 : case BaselineCacheIRStubKind::Updated: {
2241 5508 : if (!iter->isCacheIR_Updated())
2242 : continue;
2243 0 : auto otherStub = iter->toCacheIR_Updated();
2244 1056 : if (otherStub->stubInfo() != stubInfo)
2245 : continue;
2246 0 : if (!writer.stubDataEqualsMaybeUpdate(otherStub->stubDataStart(), &updated))
2247 : continue;
2248 : break;
2249 : }
2250 : }
2251 :
2252 : // We found a stub that's exactly the same as the stub we're about to
2253 : // attach. Just return nullptr; the caller should do nothing in this
2254 : // case.
2255 79 : if (updated)
2256 0 : *attached = true;
2257 0 : return nullptr;
2258 : }
2259 :
2260 : // Time to allocate and attach a new stub.
2261 :
2262 11065 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2263 :
2264 0 : ICStubSpace* stubSpace = ICStubCompiler::StubSpaceForStub(stubInfo->makesGCCalls(),
2265 11065 : outerScript, engine);
2266 0 : void* newStubMem = stubSpace->alloc(bytesNeeded);
2267 0 : if (!newStubMem)
2268 : return nullptr;
2269 :
2270 11065 : switch (stubKind) {
2271 : case BaselineCacheIRStubKind::Regular: {
2272 0 : auto newStub = new(newStubMem) ICCacheIR_Regular(code, stubInfo);
2273 2367 : writer.copyStubData(newStub->stubDataStart());
2274 0 : stub->addNewStub(newStub);
2275 0 : *attached = true;
2276 0 : return newStub;
2277 : }
2278 : case BaselineCacheIRStubKind::Monitored: {
2279 : ICTypeMonitor_Fallback* typeMonitorFallback =
2280 7000 : stub->toMonitoredFallbackStub()->getFallbackMonitorStub(cx, outerScript);
2281 7000 : if (!typeMonitorFallback) {
2282 0 : cx->recoverFromOutOfMemory();
2283 0 : return nullptr;
2284 : }
2285 0 : ICStub* monitorStub = typeMonitorFallback->firstMonitorStub();
2286 14000 : auto newStub = new(newStubMem) ICCacheIR_Monitored(code, monitorStub, stubInfo);
2287 0 : writer.copyStubData(newStub->stubDataStart());
2288 0 : stub->addNewStub(newStub);
2289 0 : *attached = true;
2290 0 : return newStub;
2291 : }
2292 : case BaselineCacheIRStubKind::Updated: {
2293 1698 : auto newStub = new(newStubMem) ICCacheIR_Updated(code, stubInfo);
2294 1698 : if (!newStub->initUpdatingChain(cx, stubSpace)) {
2295 0 : cx->recoverFromOutOfMemory();
2296 0 : return nullptr;
2297 : }
2298 0 : writer.copyStubData(newStub->stubDataStart());
2299 1698 : stub->addNewStub(newStub);
2300 0 : *attached = true;
2301 0 : return newStub;
2302 : }
2303 : }
2304 :
2305 0 : MOZ_CRASH("Invalid kind");
2306 : }
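// The attach path above, summarized:
//   1. Look up shared JitCode + CacheIRStubInfo in the Zone's JitZone,
//      keyed on (kind, engine, CacheIR bytes); compile and cache them on
//      a miss.
//   2. Walk the fallback stub's chain looking for a stub with the same
//      stubInfo and stub data; if one is found, return nullptr (setting
//      *attached only if stubDataEqualsMaybeUpdate updated it in place).
//   3. Otherwise allocate header + stub data from the appropriate
//      ICStubSpace, copy the stub data from the writer, and link the new
//      stub into the chain.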
2307 :
2308 : uint8_t*
2309 0 : ICCacheIR_Regular::stubDataStart()
2310 : {
2311 1 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2312 : }
2313 :
2314 : uint8_t*
2315 0 : ICCacheIR_Monitored::stubDataStart()
2316 : {
2317 1 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2318 : }
2319 :
2320 : uint8_t*
2321 0 : ICCacheIR_Updated::stubDataStart()
2322 : {
2323 1 : return reinterpret_cast<uint8_t*>(this) + stubInfo_->stubDataOffset();
2324 : }
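// Layout implied by the three stubDataStart() definitions above, with
// stubDataOffset == sizeof(ICCacheIR_<Kind>) as set up in
// AttachBaselineCacheIRStub (a sketch):
//
//   +----------------------------+ <- this
//   | ICCacheIR_<Kind> fields    |
//   +----------------------------+ <- this + stubInfo_->stubDataOffset()
//   | stub data: the fields the  |
//   | compiled CacheIR reads via |
//   | stubAddress(offset)        |
//   +----------------------------+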
2325 :
2326 : /* static */ ICCacheIR_Regular*
2327 0 : ICCacheIR_Regular::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
2328 : ICCacheIR_Regular& other)
2329 : {
2330 0 : const CacheIRStubInfo* stubInfo = other.stubInfo();
2331 0 : MOZ_ASSERT(stubInfo->makesGCCalls());
2332 :
2333 0 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2334 0 : void* newStub = space->alloc(bytesNeeded);
2335 0 : if (!newStub)
2336 : return nullptr;
2337 :
2338 0 : ICCacheIR_Regular* res = new(newStub) ICCacheIR_Regular(other.jitCode(), stubInfo);
2339 0 : stubInfo->copyStubData(&other, res);
2340 0 : return res;
2341 : }
2342 :
2343 :
2344 : /* static */ ICCacheIR_Monitored*
2345 0 : ICCacheIR_Monitored::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
2346 : ICCacheIR_Monitored& other)
2347 : {
2348 0 : const CacheIRStubInfo* stubInfo = other.stubInfo();
2349 0 : MOZ_ASSERT(stubInfo->makesGCCalls());
2350 :
2351 0 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2352 0 : void* newStub = space->alloc(bytesNeeded);
2353 0 : if (!newStub)
2354 : return nullptr;
2355 :
2356 0 : ICCacheIR_Monitored* res = new(newStub) ICCacheIR_Monitored(other.jitCode(), firstMonitorStub,
2357 0 : stubInfo);
2358 0 : stubInfo->copyStubData(&other, res);
2359 0 : return res;
2360 : }
2361 :
2362 : /* static */ ICCacheIR_Updated*
2363 0 : ICCacheIR_Updated::Clone(JSContext* cx, ICStubSpace* space, ICStub* firstMonitorStub,
2364 : ICCacheIR_Updated& other)
2365 : {
2366 0 : const CacheIRStubInfo* stubInfo = other.stubInfo();
2367 0 : MOZ_ASSERT(stubInfo->makesGCCalls());
2368 :
2369 0 : size_t bytesNeeded = stubInfo->stubDataOffset() + stubInfo->stubDataSize();
2370 0 : void* newStub = space->alloc(bytesNeeded);
2371 0 : if (!newStub)
2372 : return nullptr;
2373 :
2374 0 : ICCacheIR_Updated* res = new(newStub) ICCacheIR_Updated(other.jitCode(), stubInfo);
2375 0 : res->updateStubGroup() = other.updateStubGroup();
2376 0 : res->updateStubId() = other.updateStubId();
2377 :
2378 0 : stubInfo->copyStubData(&other, res);
2379 0 : return res;
2380 : }