LCOV - code coverage report
Current view: top level - js/src/jit/shared - CodeGenerator-shared.cpp (source / functions)
Test:         output.info
Date:         2018-08-07 16:42:27
Coverage:     Lines:     143 hit / 742 total = 19.3 %
              Functions: 0 hit / 0 total = -
Legend:       Lines: hit | not hit

          Line data    Source code
       1             : /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
       2             :  * vim: set ts=8 sts=4 et sw=4 tw=99:
       3             :  * This Source Code Form is subject to the terms of the Mozilla Public
       4             :  * License, v. 2.0. If a copy of the MPL was not distributed with this
       5             :  * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
       6             : 
       7             : #include "jit/shared/CodeGenerator-shared-inl.h"
       8             : 
       9             : #include "mozilla/DebugOnly.h"
      10             : 
      11             : #include <utility>
      12             : 
      13             : #include "jit/CodeGenerator.h"
      14             : #include "jit/CompactBuffer.h"
      15             : #include "jit/JitcodeMap.h"
      16             : #include "jit/JitSpewer.h"
      17             : #include "jit/MacroAssembler.h"
      18             : #include "jit/MIR.h"
      19             : #include "jit/MIRGenerator.h"
      20             : #include "jit/OptimizationTracking.h"
      21             : #include "js/Conversions.h"
      22             : #include "vm/TraceLogging.h"
      23             : 
      24             : #include "jit/JitFrames-inl.h"
      25             : #include "jit/MacroAssembler-inl.h"
      26             : 
      27             : using namespace js;
      28             : using namespace js::jit;
      29             : 
      30             : using mozilla::BitwiseCast;
      31             : using mozilla::DebugOnly;
      32             : 
      33             : namespace js {
      34             : namespace jit {
      35             : 
      36             : MacroAssembler&
      37           0 : CodeGeneratorShared::ensureMasm(MacroAssembler* masmArg)
      38             : {
      39           0 :     if (masmArg)
      40             :         return *masmArg;
      41          48 :     maybeMasm_.emplace();
      42          48 :     return *maybeMasm_;
      43             : }
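
ensureMasm() lazily constructs the macro assembler only when the caller does not
inject one, parking it in maybeMasm_ so that the masm reference member is always
bound to a valid object. A minimal sketch of the same pattern, with std::optional
standing in for mozilla::Maybe and hypothetical Widget/Holder names:

    #include <optional>

    struct Widget { int id = 0; };

    class Holder {
        std::optional<Widget> maybeWidget_;  // storage, engaged only on demand
        Widget& widget_;                     // always-valid reference

        // Like ensureMasm(): prefer the injected instance, else build our own.
        Widget& ensureWidget(Widget* arg) {
            if (arg)
                return *arg;
            maybeWidget_.emplace();
            return *maybeWidget_;
        }

      public:
        // maybeWidget_ is declared first, so it is initialized before
        // ensureWidget() runs in the initializer list, as in the real class.
        explicit Holder(Widget* arg = nullptr) : widget_(ensureWidget(arg)) {}
        Widget& widget() { return widget_; }
    };

    int main() {
        Widget external{7};
        Holder a(&external);  // uses the caller-supplied instance
        Holder b;             // lazily constructs its own
        return a.widget().id - 7 + b.widget().id;  // 0
    }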
      44             : 
      45           0 : CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, MacroAssembler* masmArg)
      46             :   : maybeMasm_(),
      47          48 :     masm(ensureMasm(masmArg)),
      48             :     gen(gen),
      49             :     graph(*graph),
      50             :     current(nullptr),
      51             :     snapshots_(),
      52             :     recovers_(),
      53             :     deoptTable_(),
      54             : #ifdef DEBUG
      55             :     pushedArgs_(0),
      56             : #endif
      57             :     lastOsiPointOffset_(0),
      58          48 :     safepoints_(graph->totalSlotCount(), (gen->info().nargs() + 1) * sizeof(Value)),
      59             :     returnLabel_(),
      60             :     stubSpace_(),
      61             :     nativeToBytecodeMap_(nullptr),
      62             :     nativeToBytecodeMapSize_(0),
      63             :     nativeToBytecodeTableOffset_(0),
      64             :     nativeToBytecodeNumRegions_(0),
      65             :     nativeToBytecodeScriptList_(nullptr),
      66             :     nativeToBytecodeScriptListLength_(0),
      67             :     trackedOptimizationsMap_(nullptr),
      68             :     trackedOptimizationsMapSize_(0),
      69             :     trackedOptimizationsRegionTableOffset_(0),
      70             :     trackedOptimizationsTypesTableOffset_(0),
      71             :     trackedOptimizationsAttemptsTableOffset_(0),
      72             :     osrEntryOffset_(0),
      73             :     skipArgCheckEntryOffset_(0),
      74             : #ifdef CHECK_OSIPOINT_REGISTERS
      75           0 :     checkOsiPointRegisters(JitOptions.checkOsiPointRegisters),
      76             : #endif
      77          96 :     frameDepth_(graph->paddedLocalSlotsSize() + graph->argumentsSize()),
      78           0 :     frameInitialAdjustment_(0)
      79             : {
      80          48 :     if (gen->isProfilerInstrumentationEnabled())
      81           0 :         masm.enableProfilingInstrumentation();
      82             : 
      83          96 :     if (gen->compilingWasm()) {
      84             :         // Since wasm uses the system ABI, which does not necessarily use a
      85             :         // regular array where all slots are sizeof(Value), it maintains the max
      86             :         // argument stack depth separately.
      87           0 :         MOZ_ASSERT(graph->argumentSlotCount() == 0);
      88           0 :         frameDepth_ += gen->wasmMaxStackArgBytes();
      89             : 
      90           0 :         if (gen->usesSimd()) {
      91             :             // If the function uses any SIMD then we may need to insert padding
      92             :             // so that local slots are aligned for SIMD.
      93           0 :             frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame), WasmStackAlignment);
      94           0 :             frameDepth_ += frameInitialAdjustment_;
      95             : 
      96             :             // Keep the stack aligned. Some SIMD sequences build values on the
      97             :             // stack and need the stack aligned.
      98           0 :             frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
      99           0 :                                                 WasmStackAlignment);
     100           0 :         } else if (gen->needsStaticStackAlignment()) {
     101             :             // An MWasmCall does not align the stack pointer at call sites but
     102             :             // instead relies on the a priori stack adjustment. This must be the
     103             :             // last adjustment of frameDepth_.
     104           0 :             frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
     105           0 :                                                 WasmStackAlignment);
     106             :         }
     107             : 
     108             :         // FrameSizeClass is only used for bailouts, which cannot happen in
     109             :         // wasm code.
     110           0 :         frameClass_ = FrameSizeClass::None();
     111             :     } else {
     112           0 :         frameClass_ = FrameSizeClass::FromDepth(frameDepth_);
     113             :     }
     114          48 : }
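
The wasm branch above pads frameDepth_ so that the frame header plus the frame
body lands on a stack-alignment boundary. A worked sketch of that arithmetic,
assuming the usual definition of ComputeByteAlignment (the number of bytes
needed to round its first argument up to a multiple of the second) and
placeholder values for WasmStackAlignment and sizeof(wasm::Frame):

    #include <cassert>
    #include <cstdint>

    // Assumed definition: padding needed to reach the next multiple of
    // `alignment` (a power of two).
    static uint32_t ComputeByteAlignmentSketch(uint32_t bytes, uint32_t alignment) {
        return (alignment - bytes % alignment) % alignment;
    }

    int main() {
        const uint32_t WasmStackAlignment = 16;  // assumed value
        const uint32_t sizeofWasmFrame = 16;     // hypothetical sizeof(wasm::Frame)

        uint32_t frameDepth = 40;                // locals + outgoing args, say

        // Mirrors the needsStaticStackAlignment branch of the constructor:
        // pad so that (frame header + frame depth) is stack-aligned.
        frameDepth += ComputeByteAlignmentSketch(sizeofWasmFrame + frameDepth,
                                                 WasmStackAlignment);

        // 16 + 40 = 56, so 8 bytes of padding bring the total to 64.
        assert(frameDepth == 48);
        assert((sizeofWasmFrame + frameDepth) % WasmStackAlignment == 0);
        return 0;
    }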
     115             : 
     116             : bool
     117           0 : CodeGeneratorShared::generatePrologue()
     118             : {
     119          96 :     MOZ_ASSERT(masm.framePushed() == 0);
     120         192 :     MOZ_ASSERT(!gen->compilingWasm());
     121             : 
     122             : #ifdef JS_USE_LINK_REGISTER
     123             :     masm.pushReturnAddress();
     124             : #endif
     125             : 
     126             :     // If profiling, save the current frame pointer to a per-thread global field.
     127         192 :     if (isProfilerInstrumentationEnabled())
     128           0 :         masm.profilerEnterFrame(masm.getStackPointer(), CallTempReg0);
     129             : 
     130             :     // Ensure that the Ion frame is properly aligned.
     131          96 :     masm.assertStackAlignment(JitStackAlignment, 0);
     132             : 
     133             :     // Note that this automatically sets MacroAssembler::framePushed().
     134         192 :     masm.reserveStack(frameSize());
     135           0 :     masm.checkStackAlignment();
     136             : 
     137          96 :     emitTracelogIonStart();
     138          96 :     return true;
     139             : }
     140             : 
     141             : bool
     142           0 : CodeGeneratorShared::generateEpilogue()
     143             : {
     144          96 :     MOZ_ASSERT(!gen->compilingWasm());
     145           0 :     masm.bind(&returnLabel_);
     146             : 
     147           0 :     emitTracelogIonStop();
     148             : 
     149          48 :     masm.freeStack(frameSize());
     150          48 :     MOZ_ASSERT(masm.framePushed() == 0);
     151             : 
     152             :     // If profiling, reset the per-thread global lastJitFrame to point to
     153             :     // the previous frame.
     154          96 :     if (isProfilerInstrumentationEnabled())
     155           0 :         masm.profilerExitFrame();
     156             : 
     157          96 :     masm.ret();
     158             : 
     159             :     // On systems that use a constant pool, this is a good time to emit.
     160          48 :     masm.flushBuffer();
     161          48 :     return true;
     162             : }
     163             : 
     164             : bool
     165          48 : CodeGeneratorShared::generateOutOfLineCode()
     166             : {
     167             :     // OOL paths should not attempt to use |current| as it's the last block
     168             :     // instead of the block corresponding to the OOL path.
     169           0 :     current = nullptr;
     170             : 
     171        3331 :     for (size_t i = 0; i < outOfLineCode_.length(); i++) {
     172             :         // Add native => bytecode mapping entries for OOL sites.
     173             :         // Not enabled on wasm yet since it doesn't contain bytecode mappings.
     174        6566 :         if (!gen->compilingWasm()) {
     175        3283 :             if (!addNativeToBytecodeEntry(outOfLineCode_[i]->bytecodeSite()))
     176             :                 return false;
     177             :         }
     178             : 
     179        3283 :         if (!gen->alloc().ensureBallast())
     180             :             return false;
     181             : 
     182           0 :         JitSpew(JitSpew_Codegen, "# Emitting out of line code");
     183             : 
     184           0 :         masm.setFramePushed(outOfLineCode_[i]->framePushed());
     185        6566 :         lastPC_ = outOfLineCode_[i]->pc();
     186           0 :         outOfLineCode_[i]->bind(&masm);
     187             : 
     188        3283 :         outOfLineCode_[i]->generate(this);
     189             :     }
     190             : 
     191          96 :     return !masm.oom();
     192             : }
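
Out-of-line paths are queued while the main (fast) path is being emitted and are
generated afterwards, each under the framePushed state captured at record time,
which is also why current is nulled out above. A self-contained sketch of this
record-then-emit pattern; all names here are hypothetical stand-ins:

    #include <cstdio>
    #include <functional>
    #include <vector>

    struct OutOfLinePath {
        int framePushed;                 // frame state captured at record time
        std::function<void()> generate;  // deferred emission
    };

    class MiniCodegen {
        int framePushed_ = 0;
        std::vector<OutOfLinePath> outOfLine_;

      public:
        void setFramePushed(int n) { framePushed_ = n; }

        // Mirrors addOutOfLineCode(): capture the current frame state.
        void addOutOfLine(std::function<void()> gen) {
            outOfLine_.push_back({framePushed_, std::move(gen)});
        }

        // Mirrors generateOutOfLineCode(): restore state, then emit each path.
        void generateOutOfLine() {
            for (auto& path : outOfLine_) {
                framePushed_ = path.framePushed;  // like masm.setFramePushed()
                path.generate();
            }
        }
    };

    int main() {
        MiniCodegen cg;
        cg.setFramePushed(8);
        cg.addOutOfLine([] { std::puts("emit slow path for guard #1"); });
        cg.setFramePushed(16);
        cg.addOutOfLine([] { std::puts("emit slow path for guard #2"); });
        cg.generateOutOfLine();  // emitted after the main (fast) path
        return 0;
    }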
     193             : 
     194             : void
     195           0 : CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const MInstruction* mir)
     196             : {
     197           0 :     MOZ_ASSERT(mir);
     198         967 :     addOutOfLineCode(code, mir->trackedSite());
     199         967 : }
     200             : 
     201             : void
     202           0 : CodeGeneratorShared::addOutOfLineCode(OutOfLineCode* code, const BytecodeSite* site)
     203             : {
     204           0 :     code->setFramePushed(masm.framePushed());
     205           0 :     code->setBytecodeSite(site);
     206           0 :     MOZ_ASSERT_IF(!gen->compilingWasm(), code->script()->containsPC(code->pc()));
     207        6566 :     masm.propagateOOM(outOfLineCode_.append(code));
     208        3283 : }
     209             : 
     210             : bool
     211        8627 : CodeGeneratorShared::addNativeToBytecodeEntry(const BytecodeSite* site)
     212             : {
     213             :     // Skip the table entirely if profiling is not enabled.
     214       17254 :     if (!isProfilerInstrumentationEnabled())
     215             :         return true;
     216             : 
     217             :     // Fail early if the last added instruction caused the macro assembler to
     218             :     // run out of memory, as the continuity assumptions below do not hold.
     219           0 :     if (masm.oom())
     220             :         return false;
     221             : 
     222           0 :     MOZ_ASSERT(site);
     223           0 :     MOZ_ASSERT(site->tree());
     224           0 :     MOZ_ASSERT(site->pc());
     225             : 
     226           0 :     InlineScriptTree* tree = site->tree();
     227           0 :     jsbytecode* pc = site->pc();
     228           0 :     uint32_t nativeOffset = masm.currentOffset();
     229             : 
     230           0 :     MOZ_ASSERT_IF(nativeToBytecodeList_.empty(), nativeOffset == 0);
     231             : 
     232           0 :     if (!nativeToBytecodeList_.empty()) {
     233           0 :         size_t lastIdx = nativeToBytecodeList_.length() - 1;
     234           0 :         NativeToBytecode& lastEntry = nativeToBytecodeList_[lastIdx];
     235             : 
     236           0 :         MOZ_ASSERT(nativeOffset >= lastEntry.nativeOffset.offset());
     237             : 
     238             :         // If the new entry is for the same inlineScriptTree and same
     239             :         // bytecodeOffset, but the nativeOffset has changed, do nothing.
     240             :         // The same site just generated some more code.
     241           0 :         if (lastEntry.tree == tree && lastEntry.pc == pc) {
     242           0 :             JitSpew(JitSpew_Profiling, " => In-place update [%zu-%" PRIu32 "]",
     243           0 :                     lastEntry.nativeOffset.offset(), nativeOffset);
     244           0 :             return true;
     245             :         }
     246             : 
     247             :         // If the new entry is for the same native offset, then update the
     248             :         // previous entry with the new bytecode site, since the previous
     249             :         // bytecode site did not generate any native code.
     250           0 :         if (lastEntry.nativeOffset.offset() == nativeOffset) {
     251           0 :             lastEntry.tree = tree;
     252           0 :             lastEntry.pc = pc;
     253           0 :             JitSpew(JitSpew_Profiling, " => Overwriting zero-length native region.");
     254             : 
     255             :             // This overwrite might have made the entry merge-able with a
     256             :             // previous one.  If so, merge it.
     257           0 :             if (lastIdx > 0) {
     258           0 :                 NativeToBytecode& nextToLastEntry = nativeToBytecodeList_[lastIdx - 1];
     259           0 :                 if (nextToLastEntry.tree == lastEntry.tree && nextToLastEntry.pc == lastEntry.pc) {
     260           0 :                     JitSpew(JitSpew_Profiling, " => Merging with previous region");
     261           0 :                     nativeToBytecodeList_.erase(&lastEntry);
     262             :                 }
     263             :             }
     264             : 
     265           0 :             dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
     266           0 :             return true;
     267             :         }
     268             :     }
     269             : 
     270             :     // Otherwise, some native code was generated for the previous bytecode site.
     271             :     // Add a new entry for code that is about to be generated.
     272           0 :     NativeToBytecode entry;
     273           0 :     entry.nativeOffset = CodeOffset(nativeOffset);
     274           0 :     entry.tree = tree;
     275           0 :     entry.pc = pc;
     276           0 :     if (!nativeToBytecodeList_.append(entry))
     277             :         return false;
     278             : 
     279           0 :     JitSpew(JitSpew_Profiling, " => Push new entry.");
     280           0 :     dumpNativeToBytecodeEntry(nativeToBytecodeList_.length() - 1);
     281           0 :     return true;
     282             : }
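
addNativeToBytecodeEntry() keeps the list compact with three rules: a repeat of
the last site is skipped (its region just grows), a zero-length last entry is
overwritten in place and merged backwards when possible, and anything else opens
a new region. A standalone simulation of those rules, with Entry standing in for
NativeToBytecode:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Entry {
        uint32_t nativeOffset;
        const void* tree;  // stands in for InlineScriptTree*
        const void* pc;    // stands in for jsbytecode*
    };

    static void addEntry(std::vector<Entry>& list, uint32_t nativeOffset,
                         const void* tree, const void* pc) {
        if (!list.empty()) {
            Entry& last = list.back();
            assert(nativeOffset >= last.nativeOffset);

            // Rule 1: same site, more native code -> existing region grows.
            if (last.tree == tree && last.pc == pc)
                return;

            // Rule 2: previous site emitted no native code -> overwrite it,
            // then merge with its predecessor if both now describe one site.
            if (last.nativeOffset == nativeOffset) {
                last.tree = tree;
                last.pc = pc;
                size_t n = list.size();
                if (n > 1 && list[n - 2].tree == tree && list[n - 2].pc == pc)
                    list.pop_back();
                return;
            }
        }
        // Rule 3: otherwise open a new region.
        list.push_back({nativeOffset, tree, pc});
    }

    int main() {
        int a, b;  // dummy pc identities
        std::vector<Entry> list;
        addEntry(list, 0, &a, &a);  // rule 3: new region
        addEntry(list, 4, &a, &a);  // rule 1: in-place update, still one entry
        addEntry(list, 8, &a, &b);  // rule 3: new region
        addEntry(list, 8, &a, &a);  // rule 2: overwrite, then merge backwards
        assert(list.size() == 1);
        return 0;
    }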
     283             : 
     284             : void
     285          48 : CodeGeneratorShared::dumpNativeToBytecodeEntries()
     286             : {
     287             : #ifdef JS_JITSPEW
     288           0 :     InlineScriptTree* topTree = gen->info().inlineScriptTree();
     289           0 :     JitSpewStart(JitSpew_Profiling, "Native To Bytecode Entries for %s:%u\n",
     290           0 :                  topTree->script()->filename(), topTree->script()->lineno());
     291          48 :     for (unsigned i = 0; i < nativeToBytecodeList_.length(); i++)
     292           0 :         dumpNativeToBytecodeEntry(i);
     293             : #endif
     294          48 : }
     295             : 
     296             : void
     297           0 : CodeGeneratorShared::dumpNativeToBytecodeEntry(uint32_t idx)
     298             : {
     299             : #ifdef JS_JITSPEW
     300           0 :     NativeToBytecode& ref = nativeToBytecodeList_[idx];
     301           0 :     InlineScriptTree* tree = ref.tree;
     302           0 :     JSScript* script = tree->script();
     303           0 :     uint32_t nativeOffset = ref.nativeOffset.offset();
     304           0 :     unsigned nativeDelta = 0;
     305           0 :     unsigned pcDelta = 0;
     306           0 :     if (idx + 1 < nativeToBytecodeList_.length()) {
     307           0 :         NativeToBytecode* nextRef = &ref + 1;
     308           0 :         nativeDelta = nextRef->nativeOffset.offset() - nativeOffset;
     309           0 :         if (nextRef->tree == ref.tree)
     310           0 :             pcDelta = nextRef->pc - ref.pc;
     311             :     }
     312           0 :     JitSpewStart(JitSpew_Profiling, "    %08zx [+%-6d] => %-6ld [%-4d] {%-10s} (%s:%u",
     313             :                  ref.nativeOffset.offset(),
     314             :                  nativeDelta,
     315             :                  (long) (ref.pc - script->code()),
     316             :                  pcDelta,
     317           0 :                  CodeName[JSOp(*ref.pc)],
     318           0 :                  script->filename(), script->lineno());
     319             : 
     320           0 :     for (tree = tree->caller(); tree; tree = tree->caller()) {
     321           0 :         JitSpewCont(JitSpew_Profiling, " <= %s:%u", tree->script()->filename(),
     322           0 :                                                     tree->script()->lineno());
     323             :     }
     324           0 :     JitSpewCont(JitSpew_Profiling, ")");
     325           0 :     JitSpewFin(JitSpew_Profiling);
     326             : #endif
     327           0 : }
     328             : 
     329             : bool
     330           0 : CodeGeneratorShared::addTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
     331             : {
     332           0 :     if (!isOptimizationTrackingEnabled())
     333             :         return true;
     334             : 
     335           0 :     MOZ_ASSERT(optimizations);
     336             : 
     337           0 :     uint32_t nativeOffset = masm.currentOffset();
     338             : 
     339           0 :     if (!trackedOptimizations_.empty()) {
     340           0 :         NativeToTrackedOptimizations& lastEntry = trackedOptimizations_.back();
     341           0 :         MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= lastEntry.endOffset.offset());
     342             : 
     343             :         // If we're still generating code for the same set of optimizations,
     344             :         // we are done.
     345           0 :         if (lastEntry.optimizations == optimizations)
     346             :             return true;
     347             :     }
     348             : 
     349             :     // If we're generating code for a new set of optimizations, add a new
     350             :     // entry.
     351           0 :     NativeToTrackedOptimizations entry;
     352           0 :     entry.startOffset = CodeOffset(nativeOffset);
     353           0 :     entry.endOffset = CodeOffset(nativeOffset);
     354           0 :     entry.optimizations = optimizations;
     355           0 :     return trackedOptimizations_.append(entry);
     356             : }
     357             : 
     358             : void
     359           0 : CodeGeneratorShared::extendTrackedOptimizationsEntry(const TrackedOptimizations* optimizations)
     360             : {
     361           0 :     if (!isOptimizationTrackingEnabled())
     362             :         return;
     363             : 
     364           0 :     uint32_t nativeOffset = masm.currentOffset();
     365           0 :     NativeToTrackedOptimizations& entry = trackedOptimizations_.back();
     366           0 :     MOZ_ASSERT(entry.optimizations == optimizations);
     367           0 :     MOZ_ASSERT_IF(!masm.oom(), nativeOffset >= entry.endOffset.offset());
     368             : 
     369           0 :     entry.endOffset = CodeOffset(nativeOffset);
     370             : 
     371             :     // If we generated no code, remove the last entry.
     372           0 :     if (nativeOffset == entry.startOffset.offset())
     373           0 :         trackedOptimizations_.popBack();
     374             : }
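
addTrackedOptimizationsEntry() and extendTrackedOptimizationsEntry() together
build half-open [start, end) native-code ranges: the first opens a zero-length
range, the second closes it at the current offset and drops it if no code was
emitted. A sketch of that protocol with hypothetical Range/Tracker names:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Range {
        uint32_t start;
        uint32_t end;
        const void* payload;  // stands in for TrackedOptimizations*
    };

    class Tracker {
        std::vector<Range> ranges_;

      public:
        // Like addTrackedOptimizationsEntry(): open a zero-length range,
        // unless we are still emitting code for the same payload.
        void open(uint32_t offset, const void* payload) {
            if (!ranges_.empty() && ranges_.back().payload == payload)
                return;
            ranges_.push_back({offset, offset, payload});
        }

        // Like extendTrackedOptimizationsEntry(): close the range at the
        // current offset, dropping it if no code was generated.
        void extend(uint32_t offset, const void* payload) {
            Range& last = ranges_.back();
            assert(last.payload == payload && offset >= last.end);
            last.end = offset;
            if (last.start == last.end)
                ranges_.pop_back();
        }

        size_t count() const { return ranges_.size(); }
    };

    int main() {
        int optA, optB;
        Tracker t;
        t.open(0, &optA);
        t.extend(12, &optA);  // kept: [0, 12)
        t.open(12, &optB);
        t.extend(12, &optB);  // dropped: zero-length
        assert(t.count() == 1);
        return 0;
    }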
     375             : 
     376             : // see OffsetOfFrameSlot
     377             : static inline int32_t
     378           0 : ToStackIndex(LAllocation* a)
     379             : {
     380           0 :     if (a->isStackSlot()) {
     381       65928 :         MOZ_ASSERT(a->toStackSlot()->slot() >= 1);
     382           0 :         return a->toStackSlot()->slot();
     383             :     }
     384         860 :     return -int32_t(sizeof(JitFrameLayout) + a->toArgument()->index());
     385             : }
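
ToStackIndex() encodes where a value lives relative to the frame: spilled stack
slots become positive indices (slot 0 is reserved, hence the assertion), while
incoming arguments become negative offsets past the JitFrameLayout header. A
worked sketch, with an assumed placeholder for the header size:

    #include <cassert>
    #include <cstdint>

    static int32_t toStackIndexSketch(bool isStackSlot, uint32_t slotOrArgOffset) {
        const uint32_t sizeofJitFrameLayout = 16;  // hypothetical header size
        if (isStackSlot) {
            assert(slotOrArgOffset >= 1);          // slot 0 is never used
            return int32_t(slotOrArgOffset);       // positive: below the frame
        }
        // Negative: above the frame header, in the caller-pushed arguments.
        return -int32_t(sizeofJitFrameLayout + slotOrArgOffset);
    }

    int main() {
        assert(toStackIndexSketch(true, 3) == 3);     // local slot #3
        assert(toStackIndexSketch(false, 8) == -24);  // argument at byte 8
        return 0;
    }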
     386             : 
     387             : void
     388       61232 : CodeGeneratorShared::encodeAllocation(LSnapshot* snapshot, MDefinition* mir,
     389             :                                       uint32_t* allocIndex)
     390             : {
     391       61232 :     if (mir->isBox())
     392         607 :         mir = mir->toBox()->getOperand(0);
     393             : 
     394             :     MIRType type =
     395           0 :         mir->isRecoveredOnBailout() ? MIRType::None :
     396       58742 :         mir->isUnused() ? MIRType::MagicOptimizedOut :
     397           0 :         mir->type();
     398             : 
     399           0 :     RValueAllocation alloc;
     400             : 
     401       61239 :     switch (type) {
     402             :       case MIRType::None:
     403             :       {
     404           0 :         MOZ_ASSERT(mir->isRecoveredOnBailout());
     405           0 :         uint32_t index = 0;
     406           0 :         LRecoverInfo* recoverInfo = snapshot->recoverInfo();
     407           0 :         MNode** it = recoverInfo->begin();
     408           0 :         MNode** end = recoverInfo->end();
     409           0 :         while (it != end && mir != *it) {
     410        1680 :             ++it;
     411        1680 :             ++index;
     412             :         }
     413             : 
     414             :         // This MDefinition is recovered, thus it should be listed in the
     415             :         // LRecoverInfo.
     416        2491 :         MOZ_ASSERT(it != end && mir == *it);
     417             : 
     418             :         // A lambda should have a default value that is readable when iterating
     419             :         // over the inner frames.
     420           0 :         if (mir->isLambda() || mir->isLambdaArrow()) {
     421         864 :             MConstant* constant = mir->isLambda() ? mir->toLambda()->functionOperand()
     422           0 :                                                   : mir->toLambdaArrow()->functionOperand();
     423             :             uint32_t cstIndex;
     424         576 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &cstIndex));
     425         576 :             alloc = RValueAllocation::RecoverInstruction(index, cstIndex);
     426             :             break;
     427             :         }
     428             : 
     429        2203 :         alloc = RValueAllocation::RecoverInstruction(index);
     430        2203 :         break;
     431             :       }
     432             :       case MIRType::Undefined:
     433       13442 :         alloc = RValueAllocation::Undefined();
     434           0 :         break;
     435             :       case MIRType::Null:
     436         189 :         alloc = RValueAllocation::Null();
     437         189 :         break;
     438             :       case MIRType::Int32:
     439             :       case MIRType::String:
     440             :       case MIRType::Symbol:
     441             :       case MIRType::Object:
     442             :       case MIRType::ObjectOrNull:
     443             :       case MIRType::Boolean:
     444             :       case MIRType::Double:
     445             :       {
     446           0 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     447       37108 :         if (payload->isConstant()) {
     448           0 :             MConstant* constant = mir->toConstant();
     449             :             uint32_t index;
     450       10276 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
     451       10276 :             alloc = RValueAllocation::ConstantPool(index);
     452             :             break;
     453             :         }
     454             : 
     455             :         JSValueType valueType =
     456           0 :             (type == MIRType::ObjectOrNull) ? JSVAL_TYPE_OBJECT : ValueTypeFromMIRType(type);
     457             : 
     458           0 :         MOZ_DIAGNOSTIC_ASSERT(payload->isMemory() || payload->isRegister());
     459           0 :         if (payload->isMemory())
     460           0 :             alloc = RValueAllocation::Typed(valueType, ToStackIndex(payload));
     461           0 :         else if (payload->isGeneralReg())
     462           0 :             alloc = RValueAllocation::Typed(valueType, ToRegister(payload));
     463           0 :         else if (payload->isFloatReg())
     464           0 :             alloc = RValueAllocation::Double(ToFloatRegister(payload));
     465             :         else
     466           0 :             MOZ_CRASH("Unexpected payload type.");
     467             :         break;
     468             :       }
     469             :       case MIRType::Float32:
     470             :       case MIRType::Int8x16:
     471             :       case MIRType::Int16x8:
     472             :       case MIRType::Int32x4:
     473             :       case MIRType::Float32x4:
     474             :       case MIRType::Bool8x16:
     475             :       case MIRType::Bool16x8:
     476             :       case MIRType::Bool32x4:
     477             :       {
     478           0 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     479           0 :         if (payload->isConstant()) {
     480           0 :             MConstant* constant = mir->toConstant();
     481             :             uint32_t index;
     482           0 :             masm.propagateOOM(graph.addConstantToPool(constant->toJSValue(), &index));
     483           0 :             alloc = RValueAllocation::ConstantPool(index);
     484             :             break;
     485             :         }
     486             : 
     487           0 :         MOZ_ASSERT(payload->isMemory() || payload->isFloatReg());
     488           0 :         if (payload->isFloatReg())
     489           0 :             alloc = RValueAllocation::AnyFloat(ToFloatRegister(payload));
     490             :         else
     491           0 :             alloc = RValueAllocation::AnyFloat(ToStackIndex(payload));
     492             :         break;
     493             :       }
     494             :       case MIRType::MagicOptimizedArguments:
     495             :       case MIRType::MagicOptimizedOut:
     496             :       case MIRType::MagicUninitializedLexical:
     497             :       case MIRType::MagicIsConstructing:
     498             :       {
     499             :         uint32_t index;
     500        2490 :         JSWhyMagic why = JS_GENERIC_MAGIC;
     501        2490 :         switch (type) {
     502             :           case MIRType::MagicOptimizedArguments:
     503             :             why = JS_OPTIMIZED_ARGUMENTS;
     504             :             break;
     505             :           case MIRType::MagicOptimizedOut:
     506         949 :             why = JS_OPTIMIZED_OUT;
     507           0 :             break;
     508             :           case MIRType::MagicUninitializedLexical:
     509        1190 :             why = JS_UNINITIALIZED_LEXICAL;
     510           0 :             break;
     511             :           case MIRType::MagicIsConstructing:
     512         258 :             why = JS_IS_CONSTRUCTING;
     513           1 :             break;
     514             :           default:
     515           0 :             MOZ_CRASH("Invalid Magic MIRType");
     516             :         }
     517             : 
     518           0 :         Value v = MagicValue(why);
     519        4980 :         masm.propagateOOM(graph.addConstantToPool(v, &index));
     520        4980 :         alloc = RValueAllocation::ConstantPool(index);
     521             :         break;
     522             :       }
     523             :       default:
     524             :       {
     525        5519 :         MOZ_ASSERT(mir->type() == MIRType::Value);
     526        5519 :         LAllocation* payload = snapshot->payloadOfSlot(*allocIndex);
     527             : #ifdef JS_NUNBOX32
     528             :         LAllocation* type = snapshot->typeOfSlot(*allocIndex);
     529             :         if (type->isRegister()) {
     530             :             if (payload->isRegister())
     531             :                 alloc = RValueAllocation::Untyped(ToRegister(type), ToRegister(payload));
     532             :             else
     533             :                 alloc = RValueAllocation::Untyped(ToRegister(type), ToStackIndex(payload));
     534             :         } else {
     535             :             if (payload->isRegister())
     536             :                 alloc = RValueAllocation::Untyped(ToStackIndex(type), ToRegister(payload));
     537             :             else
     538             :                 alloc = RValueAllocation::Untyped(ToStackIndex(type), ToStackIndex(payload));
     539             :         }
     540             : #elif JS_PUNBOX64
     541        5519 :         if (payload->isRegister())
     542           0 :             alloc = RValueAllocation::Untyped(ToRegister(payload));
     543             :         else
     544        7350 :             alloc = RValueAllocation::Untyped(ToStackIndex(payload));
     545             : #endif
     546             :         break;
     547             :       }
     548             :     }
     549       61228 :     MOZ_DIAGNOSTIC_ASSERT(alloc.valid());
     550             : 
     551             :     // This sets an extra bit as part of the RValueAllocation, such that we
     552             :     // know that the recover instruction has to be executed without wrapping
     553             :     // the instruction in a no-op recover instruction.
     554       61228 :     if (mir->isIncompleteObject())
     555           0 :         alloc.setNeedSideEffect();
     556             : 
     557           0 :     masm.propagateOOM(snapshots_.add(alloc));
     558             : 
     559       61229 :     *allocIndex += mir->isRecoveredOnBailout() ? 0 : 1;
     560       61229 : }
     561             : 
     562             : void
     563           0 : CodeGeneratorShared::encode(LRecoverInfo* recover)
     564             : {
     565        2458 :     if (recover->recoverOffset() != INVALID_RECOVER_OFFSET)
     566             :         return;
     567             : 
     568           0 :     uint32_t numInstructions = recover->numInstructions();
     569        1455 :     JitSpew(JitSpew_IonSnapshots, "Encoding LRecoverInfo %p (frameCount %u, instructions %u)",
     570           0 :             (void*)recover, recover->mir()->frameCount(), numInstructions);
     571             : 
     572           0 :     MResumePoint::Mode mode = recover->mir()->mode();
     573        1455 :     MOZ_ASSERT(mode != MResumePoint::Outer);
     574           0 :     bool resumeAfter = (mode == MResumePoint::ResumeAfter);
     575             : 
     576           0 :     RecoverOffset offset = recovers_.startRecover(numInstructions, resumeAfter);
     577             : 
     578        7383 :     for (MNode* insn : *recover)
     579           0 :         recovers_.writeInstruction(insn);
     580             : 
     581           0 :     recovers_.endRecover();
     582        1455 :     recover->setRecoverOffset(offset);
     583        2910 :     masm.propagateOOM(!recovers_.oom());
     584             : }
     585             : 
     586             : void
     587           0 : CodeGeneratorShared::encode(LSnapshot* snapshot)
     588             : {
     589        3592 :     if (snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET)
     590           0 :         return;
     591             : 
     592        2458 :     LRecoverInfo* recoverInfo = snapshot->recoverInfo();
     593           0 :     encode(recoverInfo);
     594             : 
     595        2458 :     RecoverOffset recoverOffset = recoverInfo->recoverOffset();
     596        2458 :     MOZ_ASSERT(recoverOffset != INVALID_RECOVER_OFFSET);
     597             : 
     598             :     JitSpew(JitSpew_IonSnapshots, "Encoding LSnapshot %p (LRecover %p)",
     599           0 :             (void*)snapshot, (void*) recoverInfo);
     600             : 
     601        2458 :     SnapshotOffset offset = snapshots_.startSnapshot(recoverOffset, snapshot->bailoutKind());
     602             : 
     603             : #ifdef TRACK_SNAPSHOTS
     604           0 :     uint32_t pcOpcode = 0;
     605           0 :     uint32_t lirOpcode = 0;
     606           0 :     uint32_t lirId = 0;
     607        2458 :     uint32_t mirOpcode = 0;
     608           0 :     uint32_t mirId = 0;
     609             : 
     610           0 :     if (LNode* ins = instruction()) {
     611           0 :         lirOpcode = uint32_t(ins->op());
     612           0 :         lirId = ins->id();
     613           0 :         if (ins->mirRaw()) {
     614           0 :             mirOpcode = uint32_t(ins->mirRaw()->op());
     615           0 :             mirId = ins->mirRaw()->id();
     616        1134 :             if (ins->mirRaw()->trackedPc())
     617        1134 :                 pcOpcode = *ins->mirRaw()->trackedPc();
     618             :         }
     619             :     }
     620        2458 :     snapshots_.trackSnapshot(pcOpcode, mirOpcode, mirId, lirOpcode, lirId);
     621             : #endif
     622             : 
     623           0 :     uint32_t allocIndex = 0;
     624           0 :     for (LRecoverInfo::OperandIter it(recoverInfo); !it; ++it) {
     625           0 :         DebugOnly<uint32_t> allocWritten = snapshots_.allocWritten();
     626       61231 :         encodeAllocation(snapshot, *it, &allocIndex);
     627      122458 :         MOZ_ASSERT_IF(!snapshots_.oom(), allocWritten + 1 == snapshots_.allocWritten());
     628             :     }
     629             : 
     630           0 :     MOZ_ASSERT(allocIndex == snapshot->numSlots());
     631           0 :     snapshots_.endSnapshot();
     632        2458 :     snapshot->setSnapshotOffset(offset);
     633        7374 :     masm.propagateOOM(!snapshots_.oom());
     634             : }
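
Both encode() overloads are memoized through a sentinel: the first call
serializes the data and caches its offset, so a snapshot or recover shared by
several instructions is written exactly once. A minimal sketch of the pattern,
with kInvalidOffset playing the role of INVALID_SNAPSHOT_OFFSET /
INVALID_RECOVER_OFFSET:

    #include <cassert>
    #include <cstdint>

    static const uint32_t kInvalidOffset = UINT32_MAX;

    struct Snapshot {
        uint32_t offset = kInvalidOffset;
    };

    static int encodeCalls = 0;

    static void encodeOnce(Snapshot& s, uint32_t& nextOffset) {
        if (s.offset != kInvalidOffset)
            return;             // already serialized; reuse the cached offset
        s.offset = nextOffset;  // pretend serialization happens here
        nextOffset += 32;       // hypothetical encoded size
        ++encodeCalls;
    }

    int main() {
        Snapshot shared;
        uint32_t cursor = 0;
        encodeOnce(shared, cursor);  // serializes
        encodeOnce(shared, cursor);  // cached, no work
        assert(encodeCalls == 1 && shared.offset == 0 && cursor == 32);
        return 0;
    }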
     635             : 
     636             : bool
     637           0 : CodeGeneratorShared::assignBailoutId(LSnapshot* snapshot)
     638             : {
     639           0 :     MOZ_ASSERT(snapshot->snapshotOffset() != INVALID_SNAPSHOT_OFFSET);
     640             : 
     641             :     // Without a deoptimization table, bailout tables cannot be used at all.
     642           0 :     if (!deoptTable_)
     643             :         return false;
     644             : 
     645           0 :     MOZ_ASSERT(frameClass_ != FrameSizeClass::None());
     646             : 
     647           0 :     if (snapshot->bailoutId() != INVALID_BAILOUT_ID)
     648             :         return true;
     649             : 
     650             :     // Is the bailout table full?
     651           0 :     if (bailouts_.length() >= BAILOUT_TABLE_SIZE)
     652             :         return false;
     653             : 
     654           0 :     unsigned bailoutId = bailouts_.length();
     655           0 :     snapshot->setBailoutId(bailoutId);
     656           0 :     JitSpew(JitSpew_IonSnapshots, "Assigned snapshot bailout id %u", bailoutId);
     657           0 :     masm.propagateOOM(bailouts_.append(snapshot->snapshotOffset()));
     658           0 :     return true;
     659             : }
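
assignBailoutId() hands out sequential ids into a fixed-capacity table and
reports failure once the table is full, at which point callers fall back to
out-of-line bailouts. A sketch with a hypothetical kTableSize standing in for
BAILOUT_TABLE_SIZE:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const size_t kTableSize = 3;
    static const uint32_t kInvalidId = UINT32_MAX;

    struct Snap {
        uint32_t bailoutId = kInvalidId;
    };

    // Returns false when the caller must emit an out-of-line bailout instead.
    static bool assignId(std::vector<uint32_t>& table, Snap& snap,
                         uint32_t snapshotOffset) {
        if (snap.bailoutId != kInvalidId)
            return true;                  // already has a table slot
        if (table.size() >= kTableSize)
            return false;                 // table full: fall back
        snap.bailoutId = uint32_t(table.size());
        table.push_back(snapshotOffset);  // table maps id -> snapshot offset
        return true;
    }

    int main() {
        std::vector<uint32_t> table;
        Snap a, b, c, d;
        bool okA1 = assignId(table, a, 100);
        bool okA2 = assignId(table, a, 100);  // idempotent for the same snapshot
        bool okB = assignId(table, b, 200);
        bool okC = assignId(table, c, 300);
        bool okD = assignId(table, d, 400);   // capacity reached
        assert(okA1 && okA2 && okB && okC && !okD);
        assert(a.bailoutId == 0 && table.size() == kTableSize);
        return 0;
    }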
     660             : 
     661             : bool
     662           0 : CodeGeneratorShared::encodeSafepoints()
     663             : {
     664        1112 :     for (SafepointIndex& index : safepointIndices_) {
     665           0 :         LSafepoint* safepoint = index.safepoint();
     666             : 
     667        1034 :         if (!safepoint->encoded())
     668           0 :             safepoints_.encode(safepoint);
     669             : 
     670        1034 :         index.resolve();
     671             :     }
     672             : 
     673          78 :     return !safepoints_.oom();
     674             : }
     675             : 
     676             : bool
     677           0 : CodeGeneratorShared::createNativeToBytecodeScriptList(JSContext* cx)
     678             : {
     679           0 :     js::Vector<JSScript*, 0, SystemAllocPolicy> scriptList;
     680           0 :     InlineScriptTree* tree = gen->info().inlineScriptTree();
     681             :     for (;;) {
     682             :         // Add script from current tree.
     683           0 :         bool found = false;
     684           0 :         for (uint32_t i = 0; i < scriptList.length(); i++) {
     685           0 :             if (scriptList[i] == tree->script()) {
     686             :                 found = true;
     687             :                 break;
     688             :             }
     689             :         }
     690           0 :         if (!found) {
     691           0 :             if (!scriptList.append(tree->script()))
     692             :                 return false;
     693             :         }
     694             : 
     695             :         // Process the rest of the tree.
     696             : 
     697             :         // If children exist, visit them first.
     698           0 :         if (tree->hasChildren()) {
     699           0 :             tree = tree->firstChild();
     700           0 :             continue;
     701             :         }
     702             : 
     703             :         // Otherwise, find the first tree up the chain (including this one)
     704             :         // that contains a next sibling.
     705           0 :         while (!tree->hasNextCallee() && tree->hasCaller())
     706           0 :             tree = tree->caller();
     707             : 
     708             :         // If we found a sibling, use it.
     709           0 :         if (tree->hasNextCallee()) {
     710           0 :             tree = tree->nextCallee();
     711           0 :             continue;
     712             :         }
     713             : 
     714             :         // Otherwise, we must have reached the top without finding any siblings.
     715           0 :         MOZ_ASSERT(tree->isOutermostCaller());
     716             :         break;
     717             :     }
     718             : 
     719             :     // Allocate array for list.
     720           0 :     JSScript** data = cx->zone()->pod_malloc<JSScript*>(scriptList.length());
     721           0 :     if (!data)
     722             :         return false;
     723             : 
     724           0 :     for (uint32_t i = 0; i < scriptList.length(); i++)
     725           0 :         data[i] = scriptList[i];
     726             : 
     727             :     // Success.
     728           0 :     nativeToBytecodeScriptListLength_ = scriptList.length();
     729           0 :     nativeToBytecodeScriptList_ = data;
     730           0 :     return true;
     731             : }
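
The list builder above performs an iterative pre-order walk of the inline script
tree: descend to the first child, climb until a next sibling (nextCallee)
exists, and stop back at the outermost caller, collecting each script at most
once. A self-contained sketch of the traversal with a toy Node standing in for
InlineScriptTree:

    #include <cassert>
    #include <vector>

    struct Node {
        int script;                   // stands in for the JSScript pointer
        Node* firstChild = nullptr;
        Node* nextSibling = nullptr;  // plays the role of nextCallee()
        Node* parent = nullptr;       // plays the role of caller()
    };

    static std::vector<int> collectScripts(Node* tree) {
        std::vector<int> scripts;
        for (;;) {
            // Append the script only if unseen (linear scan, as above).
            bool found = false;
            for (int s : scripts)
                found = found || (s == tree->script);
            if (!found)
                scripts.push_back(tree->script);

            if (tree->firstChild) {  // descend first
                tree = tree->firstChild;
                continue;
            }
            while (!tree->nextSibling && tree->parent)
                tree = tree->parent;  // climb until a sibling exists
            if (!tree->nextSibling)
                break;                // back at the outermost caller
            tree = tree->nextSibling;
        }
        return scripts;
    }

    int main() {
        // root(1) -> { childA(2) -> { grand(1) }, childB(3) }; script 1 repeats.
        Node root{1}, childA{2}, childB{3}, grand{1};
        root.firstChild = &childA;
        childA.parent = &root; childA.nextSibling = &childB; childA.firstChild = &grand;
        childB.parent = &root;
        grand.parent = &childA;
        std::vector<int> scripts = collectScripts(&root);
        assert((scripts == std::vector<int>{1, 2, 3}));  // deduplicated pre-order
        return 0;
    }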
     732             : 
     733             : bool
     734           0 : CodeGeneratorShared::generateCompactNativeToBytecodeMap(JSContext* cx, JitCode* code)
     735             : {
     736           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ == 0);
     737           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ == nullptr);
     738           0 :     MOZ_ASSERT(nativeToBytecodeMap_ == nullptr);
     739           0 :     MOZ_ASSERT(nativeToBytecodeMapSize_ == 0);
     740           0 :     MOZ_ASSERT(nativeToBytecodeTableOffset_ == 0);
     741           0 :     MOZ_ASSERT(nativeToBytecodeNumRegions_ == 0);
     742             : 
     743           0 :     if (!createNativeToBytecodeScriptList(cx))
     744             :         return false;
     745             : 
     746           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
     747           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
     748             : 
     749           0 :     CompactBufferWriter writer;
     750           0 :     uint32_t tableOffset = 0;
     751           0 :     uint32_t numRegions = 0;
     752             : 
     753           0 :     if (!JitcodeIonTable::WriteIonTable(
     754             :             writer, nativeToBytecodeScriptList_, nativeToBytecodeScriptListLength_,
     755           0 :             &nativeToBytecodeList_[0],
     756           0 :             &nativeToBytecodeList_[0] + nativeToBytecodeList_.length(),
     757             :             &tableOffset, &numRegions))
     758             :     {
     759           0 :         js_free(nativeToBytecodeScriptList_);
     760           0 :         return false;
     761             :     }
     762             : 
     763           0 :     MOZ_ASSERT(tableOffset > 0);
     764           0 :     MOZ_ASSERT(numRegions > 0);
     765             : 
     766             :     // The writer is done; copy its contents to a sized buffer.
     767           0 :     uint8_t* data = cx->zone()->pod_malloc<uint8_t>(writer.length());
     768           0 :     if (!data) {
     769           0 :         js_free(nativeToBytecodeScriptList_);
     770           0 :         return false;
     771             :     }
     772             : 
     773           0 :     memcpy(data, writer.buffer(), writer.length());
     774           0 :     nativeToBytecodeMap_ = data;
     775           0 :     nativeToBytecodeMapSize_ = writer.length();
     776           0 :     nativeToBytecodeTableOffset_ = tableOffset;
     777           0 :     nativeToBytecodeNumRegions_ = numRegions;
     778             : 
     779           0 :     verifyCompactNativeToBytecodeMap(code);
     780             : 
     781           0 :     JitSpew(JitSpew_Profiling, "Compact Native To Bytecode Map [%p-%p]",
     782           0 :             data, data + nativeToBytecodeMapSize_);
     783             : 
     784           0 :     return true;
     785             : }
     786             : 
     787             : void
     788           0 : CodeGeneratorShared::verifyCompactNativeToBytecodeMap(JitCode* code)
     789             : {
     790             : #ifdef DEBUG
     791           0 :     MOZ_ASSERT(nativeToBytecodeScriptListLength_ > 0);
     792           0 :     MOZ_ASSERT(nativeToBytecodeScriptList_ != nullptr);
     793           0 :     MOZ_ASSERT(nativeToBytecodeMap_ != nullptr);
     794           0 :     MOZ_ASSERT(nativeToBytecodeMapSize_ > 0);
     795           0 :     MOZ_ASSERT(nativeToBytecodeTableOffset_ > 0);
     796           0 :     MOZ_ASSERT(nativeToBytecodeNumRegions_ > 0);
     797             : 
     798             :     // The pointer to the table must be 4-byte aligned
     799           0 :     const uint8_t* tablePtr = nativeToBytecodeMap_ + nativeToBytecodeTableOffset_;
     800           0 :     MOZ_ASSERT(uintptr_t(tablePtr) % sizeof(uint32_t) == 0);
     801             : 
     802             :     // Verify that numRegions was encoded correctly.
     803           0 :     const JitcodeIonTable* ionTable = reinterpret_cast<const JitcodeIonTable*>(tablePtr);
     804           0 :     MOZ_ASSERT(ionTable->numRegions() == nativeToBytecodeNumRegions_);
     805             : 
     806             :     // The region offset for the first region should be at the start of the
     807             :     // payload region. Since the offsets are backward from the start of the
     808             :     // table, the first entry's back-offset should be equal to the forward
     809             :     // table offset from the start of the allocated data.
     810           0 :     MOZ_ASSERT(ionTable->regionOffset(0) == nativeToBytecodeTableOffset_);
     811             : 
     812             :     // Verify each region.
     813           0 :     for (uint32_t i = 0; i < ionTable->numRegions(); i++) {
     814             :         // Back-offset must point into the payload region preceding the table, not before it.
     815           0 :         MOZ_ASSERT(ionTable->regionOffset(i) <= nativeToBytecodeTableOffset_);
     816             : 
     817             :         // Back-offset must point to a later area in the payload region than
     818             :         // the previous back-offset, i.e. back-offsets decrease monotonically.
     819           0 :         MOZ_ASSERT_IF(i > 0, ionTable->regionOffset(i) < ionTable->regionOffset(i - 1));
     820             : 
     821           0 :         JitcodeRegionEntry entry = ionTable->regionEntry(i);
     822             : 
     823             :         // Ensure native code offset for region falls within jitcode.
     824           0 :         MOZ_ASSERT(entry.nativeOffset() <= code->instructionsSize());
     825             : 
     826             :         // Read out script/pc stack and verify.
     827             :         JitcodeRegionEntry::ScriptPcIterator scriptPcIter = entry.scriptPcIterator();
     828           0 :         while (scriptPcIter.hasMore()) {
     829           0 :             uint32_t scriptIdx = 0, pcOffset = 0;
     830           0 :             scriptPcIter.readNext(&scriptIdx, &pcOffset);
     831             : 
     832             :             // Ensure scriptIdx refers to a valid script in the list.
     833           0 :             MOZ_ASSERT(scriptIdx < nativeToBytecodeScriptListLength_);
     834           0 :             JSScript* script = nativeToBytecodeScriptList_[scriptIdx];
     835             : 
     836             :             // Ensure pcOffset falls within the script.
     837           0 :             MOZ_ASSERT(pcOffset < script->length());
     838             :         }
     839             : 
     840             :         // Obtain the original nativeOffset and pcOffset and script.
     841           0 :         uint32_t curNativeOffset = entry.nativeOffset();
     842           0 :         JSScript* script = nullptr;
     843           0 :         uint32_t curPcOffset = 0;
     844             :         {
     845           0 :             uint32_t scriptIdx = 0;
     846           0 :             scriptPcIter.reset();
     847           0 :             scriptPcIter.readNext(&scriptIdx, &curPcOffset);
     848           0 :             script = nativeToBytecodeScriptList_[scriptIdx];
     849             :         }
     850             : 
     851             :         // Read out nativeDeltas and pcDeltas and verify.
     852           0 :         JitcodeRegionEntry::DeltaIterator deltaIter = entry.deltaIterator();
     853           0 :         while (deltaIter.hasMore()) {
     854           0 :             uint32_t nativeDelta = 0;
     855           0 :             int32_t pcDelta = 0;
     856           0 :             deltaIter.readNext(&nativeDelta, &pcDelta);
     857             : 
     858           0 :             curNativeOffset += nativeDelta;
     859           0 :             curPcOffset = uint32_t(int32_t(curPcOffset) + pcDelta);
     860             : 
     861             :             // Ensure that nativeOffset still falls within jitcode after delta.
     862           0 :             MOZ_ASSERT(curNativeOffset <= code->instructionsSize());
     863             : 
     864             :             // Ensure that pcOffset still falls within bytecode after delta.
     865           0 :             MOZ_ASSERT(curPcOffset < script->length());
     866             :         }
     867             :     }
     868             : #endif // DEBUG
     869           0 : }
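
The verification loop above replays the delta compression used by the compact
map: each region records a base (nativeOffset, pcOffset) pair followed by a run
of deltas, where native offsets only move forward while pc deltas may be
negative (for example at loop back-edges). A worked sketch of the decoding
arithmetic:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Delta {
        uint32_t nativeDelta;  // native code only moves forward
        int32_t pcDelta;       // bytecode may move backward
    };

    int main() {
        uint32_t curNativeOffset = 0x40;  // hypothetical region base
        uint32_t curPcOffset = 10;
        const std::vector<Delta> deltas = {{8, +3}, {4, -5}, {16, +2}};

        for (const Delta& d : deltas) {
            curNativeOffset += d.nativeDelta;
            curPcOffset = uint32_t(int32_t(curPcOffset) + d.pcDelta);
        }
        assert(curNativeOffset == 0x40 + 28);  // 8 + 4 + 16
        assert(curPcOffset == 10);             // +3 - 5 + 2 nets out to zero
        return 0;
    }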
     870             : 
     871             : bool
     872           0 : CodeGeneratorShared::generateCompactTrackedOptimizationsMap(JSContext* cx, JitCode* code,
     873             :                                                             IonTrackedTypeVector* allTypes)
     874             : {
     875           0 :     MOZ_ASSERT(trackedOptimizationsMap_ == nullptr);
     876           0 :     MOZ_ASSERT(trackedOptimizationsMapSize_ == 0);
     877           0 :     MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ == 0);
     878           0 :     MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ == 0);
     879           0 :     MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ == 0);
     880             : 
     881           0 :     if (trackedOptimizations_.empty())
     882             :         return true;
     883             : 
     884           0 :     UniqueTrackedOptimizations unique(cx);
     885           0 :     if (!unique.init())
     886             :         return false;
     887             : 
     888             :     // Iterate through all entries to deduplicate their optimization attempts.
     889           0 :     for (size_t i = 0; i < trackedOptimizations_.length(); i++) {
     890           0 :         NativeToTrackedOptimizations& entry = trackedOptimizations_[i];
     891           0 :         if (!unique.add(entry.optimizations))
     892             :             return false;
     893             :     }
     894             : 
     895             :     // Sort the unique optimization attempts by frequency to stabilize the
     896             :     // attempts' indices in the compact table we will write later.
     897           0 :     if (!unique.sortByFrequency(cx))
     898             :         return false;
     899             : 
     900             :     // Write out the ranges and the table.
     901           0 :     CompactBufferWriter writer;
     902             :     uint32_t numRegions;
     903             :     uint32_t regionTableOffset;
     904             :     uint32_t typesTableOffset;
     905             :     uint32_t attemptsTableOffset;
     906           0 :     if (!WriteIonTrackedOptimizationsTable(cx, writer,
     907           0 :                                            trackedOptimizations_.begin(),
     908           0 :                                            trackedOptimizations_.end(),
     909             :                                            unique, &numRegions,
     910             :                                            &regionTableOffset, &typesTableOffset,
     911             :                                            &attemptsTableOffset, allTypes))
     912             :     {
     913             :         return false;
     914             :     }
     915             : 
     916           0 :     MOZ_ASSERT(regionTableOffset > 0);
     917           0 :     MOZ_ASSERT(typesTableOffset > 0);
     918           0 :     MOZ_ASSERT(attemptsTableOffset > 0);
     919           0 :     MOZ_ASSERT(typesTableOffset > regionTableOffset);
     920           0 :     MOZ_ASSERT(attemptsTableOffset > typesTableOffset);
     921             : 
     922             :     // Copy the table out of the writer's buffer.
     923           0 :     uint8_t* data = cx->zone()->pod_malloc<uint8_t>(writer.length());
     924           0 :     if (!data)
     925             :         return false;
     926             : 
     927           0 :     memcpy(data, writer.buffer(), writer.length());
     928           0 :     trackedOptimizationsMap_ = data;
     929           0 :     trackedOptimizationsMapSize_ = writer.length();
     930           0 :     trackedOptimizationsRegionTableOffset_ = regionTableOffset;
     931           0 :     trackedOptimizationsTypesTableOffset_ = typesTableOffset;
     932           0 :     trackedOptimizationsAttemptsTableOffset_ = attemptsTableOffset;
     933             : 
     934           0 :     verifyCompactTrackedOptimizationsMap(code, numRegions, unique, allTypes);
     935             : 
     936           0 :     JitSpew(JitSpew_OptimizationTrackingExtended,
     937             :             "== Compact Native To Optimizations Map [%p-%p] size %u",
     938           0 :             data, data + trackedOptimizationsMapSize_, trackedOptimizationsMapSize_);
     939           0 :     JitSpew(JitSpew_OptimizationTrackingExtended,
     940             :             "     with type list of length %zu, size %zu",
     941           0 :             allTypes->length(), allTypes->length() * sizeof(IonTrackedTypeWithAddendum));
     942             : 
     943           0 :     return true;
     944             : }
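
generateCompactTrackedOptimizationsMap() first deduplicates the tracked
optimization sets and then sorts them by frequency, so the most common sets
receive the smallest, cheapest-to-encode indices. A simplified sketch of that
deduplicate-and-sort step, using pointer identity and hypothetical data in place
of TrackedOptimizations*:

    #include <algorithm>
    #include <cassert>
    #include <map>
    #include <vector>

    int main() {
        const char *hot = "hot", *cold = "cold";
        std::vector<const char*> entries = {hot, cold, hot, hot};

        // Deduplicate while counting occurrences (roughly what
        // UniqueTrackedOptimizations::add() does).
        std::map<const char*, size_t> freq;
        for (const char* e : entries)
            ++freq[e];

        // Sort unique sets by descending frequency to stabilize their indices
        // (roughly sortByFrequency()).
        std::vector<const char*> unique;
        for (auto& kv : freq)
            unique.push_back(kv.first);
        std::sort(unique.begin(), unique.end(),
                  [&](const char* a, const char* b) { return freq[a] > freq[b]; });

        assert(unique.size() == 2 && unique[0] == hot);  // index 0 = most frequent
        return 0;
    }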
     945             : 
     946             : #ifdef DEBUG
     947             : class ReadTempAttemptsVectorOp : public JS::ForEachTrackedOptimizationAttemptOp
     948             : {
     949             :     TempOptimizationAttemptsVector* attempts_;
     950             :     bool oom_;
     951             : 
     952             :   public:
     953             :     explicit ReadTempAttemptsVectorOp(TempOptimizationAttemptsVector* attempts)
     954           0 :       : attempts_(attempts), oom_(false)
     955             :     { }
     956             : 
     957             :     bool oom() {
     958             :         return oom_;
     959             :     }
     960             : 
     961           0 :     void operator()(JS::TrackedStrategy strategy, JS::TrackedOutcome outcome) override {
     962           0 :         if (!attempts_->append(OptimizationAttempt(strategy, outcome)))
     963           0 :             oom_ = true;
     964           0 :     }
     965             : };
     966             : 
     967           0 : struct ReadTempTypeInfoVectorOp : public IonTrackedOptimizationsTypeInfo::ForEachOp
     968             : {
     969             :     TempAllocator& alloc_;
     970             :     TempOptimizationTypeInfoVector* types_;
     971             :     TempTypeList accTypes_;
     972             :     bool oom_;
     973             : 
     974             :   public:
     975             :     ReadTempTypeInfoVectorOp(TempAllocator& alloc, TempOptimizationTypeInfoVector* types)
     976           0 :       : alloc_(alloc),
     977             :         types_(types),
     978             :         accTypes_(alloc),
     979           0 :         oom_(false)
     980             :     { }
     981             : 
     982             :     bool oom() {
     983             :         return oom_;
     984             :     }
     985             : 
     986           0 :     void readType(const IonTrackedTypeWithAddendum& tracked) override {
     987           0 :         if (!accTypes_.append(tracked.type))
     988           0 :             oom_ = true;
     989           0 :     }
     990             : 
     991           0 :     void operator()(JS::TrackedTypeSite site, MIRType mirType) override {
     992           0 :         OptimizationTypeInfo ty(alloc_, site, mirType);
     993           0 :         for (uint32_t i = 0; i < accTypes_.length(); i++) {
     994           0 :             if (!ty.trackType(accTypes_[i]))
     995           0 :                 oom_ = true;
     996             :         }
     997           0 :         if (!types_->append(std::move(ty)))
     998           0 :             oom_ = true;
     999           0 :         accTypes_.clear();
    1000           0 :     }
    1001             : };
    1002             : #endif // DEBUG
    1003             : 
    1004             : void
    1005           0 : CodeGeneratorShared::verifyCompactTrackedOptimizationsMap(JitCode* code, uint32_t numRegions,
    1006             :                                                           const UniqueTrackedOptimizations& unique,
    1007             :                                                           const IonTrackedTypeVector* allTypes)
    1008             : {
    1009             : #ifdef DEBUG
    1010           0 :     MOZ_ASSERT(trackedOptimizationsMap_ != nullptr);
    1011           0 :     MOZ_ASSERT(trackedOptimizationsMapSize_ > 0);
    1012           0 :     MOZ_ASSERT(trackedOptimizationsRegionTableOffset_ > 0);
    1013           0 :     MOZ_ASSERT(trackedOptimizationsTypesTableOffset_ > 0);
    1014           0 :     MOZ_ASSERT(trackedOptimizationsAttemptsTableOffset_ > 0);
    1015             : 
    1016             :     // Table pointers must all be 4-byte aligned.
    1017             :     const uint8_t* regionTableAddr = trackedOptimizationsMap_ +
    1018           0 :                                      trackedOptimizationsRegionTableOffset_;
    1019             :     const uint8_t* typesTableAddr = trackedOptimizationsMap_ +
    1020           0 :                                     trackedOptimizationsTypesTableOffset_;
    1021             :     const uint8_t* attemptsTableAddr = trackedOptimizationsMap_ +
    1022           0 :                                        trackedOptimizationsAttemptsTableOffset_;
    1023           0 :     MOZ_ASSERT(uintptr_t(regionTableAddr) % sizeof(uint32_t) == 0);
    1024           0 :     MOZ_ASSERT(uintptr_t(typesTableAddr) % sizeof(uint32_t) == 0);
    1025           0 :     MOZ_ASSERT(uintptr_t(attemptsTableAddr) % sizeof(uint32_t) == 0);
    1026             : 
    1027             :     // Assert that the number of entries matches up for the tables.
    1028             :     const IonTrackedOptimizationsRegionTable* regionTable =
    1029           0 :         (const IonTrackedOptimizationsRegionTable*) regionTableAddr;
    1030           0 :     MOZ_ASSERT(regionTable->numEntries() == numRegions);
    1031             :     const IonTrackedOptimizationsTypesTable* typesTable =
    1032           0 :         (const IonTrackedOptimizationsTypesTable*) typesTableAddr;
    1033           0 :     MOZ_ASSERT(typesTable->numEntries() == unique.count());
    1034             :     const IonTrackedOptimizationsAttemptsTable* attemptsTable =
    1035           0 :         (const IonTrackedOptimizationsAttemptsTable*) attemptsTableAddr;
    1036           0 :     MOZ_ASSERT(attemptsTable->numEntries() == unique.count());
    1037             : 
    1038             :     // Verify each region.
    1039             :     uint32_t trackedIdx = 0;
    1040           0 :     for (uint32_t regionIdx = 0; regionIdx < regionTable->numEntries(); regionIdx++) {
    1041             :         // Check reverse offsets are within bounds.
    1042           0 :         MOZ_ASSERT(regionTable->entryOffset(regionIdx) <= trackedOptimizationsRegionTableOffset_);
    1043           0 :         MOZ_ASSERT_IF(regionIdx > 0, regionTable->entryOffset(regionIdx) <
    1044             :                                      regionTable->entryOffset(regionIdx - 1));
    1045             : 
    1046           0 :         IonTrackedOptimizationsRegion region = regionTable->entry(regionIdx);
    1047             : 
    1048             :         // Check the region range is covered by jitcode.
    1049           0 :         MOZ_ASSERT(region.startOffset() <= code->instructionsSize());
    1050           0 :         MOZ_ASSERT(region.endOffset() <= code->instructionsSize());
    1051             : 
    1052             :         IonTrackedOptimizationsRegion::RangeIterator iter = region.ranges();
    1053           0 :         while (iter.more()) {
    1054             :             // Assert that the offsets are correctly decoded from the delta.
    1055             :             uint32_t startOffset, endOffset;
    1056             :             uint8_t index;
    1057           0 :             iter.readNext(&startOffset, &endOffset, &index);
    1058           0 :             NativeToTrackedOptimizations& entry = trackedOptimizations_[trackedIdx++];
    1059           0 :             MOZ_ASSERT(startOffset == entry.startOffset.offset());
    1060           0 :             MOZ_ASSERT(endOffset == entry.endOffset.offset());
    1061           0 :             MOZ_ASSERT(index == unique.indexOf(entry.optimizations));
    1062             : 
    1063             :             // Assert that the type info and attempts vectors are correctly
     1064             :             // decoded. The type check is skipped when the types table might
     1065             :             // contain nursery pointers, in which case the types might not
     1066             :             // match; see bug 1175761.
    1067           0 :             JSRuntime* rt = code->runtimeFromMainThread();
    1068           0 :             if (!rt->gc.storeBuffer().cancelIonCompilations()) {
    1069           0 :                 IonTrackedOptimizationsTypeInfo typeInfo = typesTable->entry(index);
    1070           0 :                 TempOptimizationTypeInfoVector tvec(alloc());
    1071           0 :                 ReadTempTypeInfoVectorOp top(alloc(), &tvec);
    1072           0 :                 typeInfo.forEach(top, allTypes);
    1073           0 :                 MOZ_ASSERT_IF(!top.oom(), entry.optimizations->matchTypes(tvec));
    1074             :             }
    1075             : 
    1076           0 :             IonTrackedOptimizationsAttempts attempts = attemptsTable->entry(index);
    1077           0 :             TempOptimizationAttemptsVector avec(alloc());
    1078           0 :             ReadTempAttemptsVectorOp aop(&avec);
    1079           0 :             attempts.forEach(aop);
    1080           0 :             MOZ_ASSERT_IF(!aop.oom(), entry.optimizations->matchAttempts(avec));
    1081             :         }
    1082             :     }
    1083             : #endif
    1084           0 : }
    1085             : 
    1086             : void
    1087           0 : CodeGeneratorShared::markSafepoint(LInstruction* ins)
    1088             : {
    1089           0 :     markSafepointAt(masm.currentOffset(), ins);
    1090           0 : }
    1091             : 
    1092             : void
    1093           0 : CodeGeneratorShared::markSafepointAt(uint32_t offset, LInstruction* ins)
    1094             : {
    1095           0 :     MOZ_ASSERT_IF(!safepointIndices_.empty() && !masm.oom(),
    1096             :                   offset - safepointIndices_.back().displacement() >= sizeof(uint32_t));
    1097        3897 :     masm.propagateOOM(safepointIndices_.append(SafepointIndex(offset, ins->safepoint())));
    1098        1299 : }
    1099             : 
    1100             : void
    1101        1276 : CodeGeneratorShared::ensureOsiSpace()
    1102             : {
    1103             :     // For a refresher, an invalidation point is of the form:
    1104             :     // 1: call <target>
    1105             :     // 2: ...
    1106             :     // 3: <osipoint>
    1107             :     //
    1108             :     // The four bytes *before* instruction 2 are overwritten with an offset.
    1109             :     // Callers must ensure that the instruction itself has enough bytes to
    1110             :     // support this.
    1111             :     //
     1112             :     // The bytes *at* instruction 3 are overwritten with an invalidation
     1113             :     // jump. These bytes may be in a completely different IR sequence, but
    1114             :     // represent the join point of the call out of the function.
    1115             :     //
    1116             :     // At points where we want to ensure that invalidation won't corrupt an
    1117             :     // important instruction, we make sure to pad with nops.
    1118           0 :     if (masm.currentOffset() - lastOsiPointOffset_ < Assembler::PatchWrite_NearCallSize()) {
    1119           0 :         int32_t paddingSize = Assembler::PatchWrite_NearCallSize();
    1120           0 :         paddingSize -= masm.currentOffset() - lastOsiPointOffset_;
    1121           0 :         for (int32_t i = 0; i < paddingSize; ++i)
    1122           0 :             masm.nop();
    1123             :     }
    1124           0 :     MOZ_ASSERT_IF(!masm.oom(),
    1125             :                   masm.currentOffset() - lastOsiPointOffset_ >= Assembler::PatchWrite_NearCallSize());
    1126        2552 :     lastOsiPointOffset_ = masm.currentOffset();
    1127        1276 : }
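                      : 
                      : // A worked example of the padding rule above (figures are illustrative;
                      : // PatchWrite_NearCallSize() is platform dependent, e.g. 5 bytes on
                      : // x86/x64, where a patchable near call is E8 + rel32):
                      : //
                      : //   nearCallSize                          = 5
                      : //   currentOffset() - lastOsiPointOffset_ = 2
                      : //   paddingSize                           = 5 - 2 = 3  => emit 3 nops
                      : //
                      : // After padding, a near call patched over this OSI point can no longer
                      : // clobber bytes that a patch at the previous OSI point may still own.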
    1128             : 
    1129             : uint32_t
    1130           0 : CodeGeneratorShared::markOsiPoint(LOsiPoint* ins)
    1131             : {
    1132        1276 :     encode(ins->snapshot());
    1133           0 :     ensureOsiSpace();
    1134             : 
    1135           0 :     uint32_t offset = masm.currentOffset();
    1136        1276 :     SnapshotOffset so = ins->snapshot()->snapshotOffset();
    1137           0 :     masm.propagateOOM(osiIndices_.append(OsiIndex(offset, so)));
    1138             : 
    1139        1276 :     return offset;
    1140             : }
    1141             : 
    1142             : #ifdef CHECK_OSIPOINT_REGISTERS
    1143             : template <class Op>
    1144             : static void
    1145           0 : HandleRegisterDump(Op op, MacroAssembler& masm, LiveRegisterSet liveRegs, Register activation,
    1146             :                    Register scratch)
    1147             : {
    1148           0 :     const size_t baseOffset = JitActivation::offsetOfRegs();
    1149             : 
    1150             :     // Handle live GPRs.
    1151           0 :     for (GeneralRegisterIterator iter(liveRegs.gprs()); iter.more(); ++iter) {
    1152           0 :         Register reg = *iter;
    1153           0 :         Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    1154             : 
    1155           0 :         if (reg == activation) {
    1156             :             // To use the original value of the activation register (that's
    1157             :             // now on top of the stack), we need the scratch register.
    1158           0 :             masm.push(scratch);
    1159           0 :             masm.loadPtr(Address(masm.getStackPointer(), sizeof(uintptr_t)), scratch);
    1160           0 :             op(scratch, dump);
    1161           0 :             masm.pop(scratch);
    1162             :         } else {
    1163             :             op(reg, dump);
    1164             :         }
    1165             :     }
    1166             : 
    1167             :     // Handle live FPRs.
    1168           0 :     for (FloatRegisterIterator iter(liveRegs.fpus()); iter.more(); ++iter) {
    1169           0 :         FloatRegister reg = *iter;
    1170           0 :         Address dump(activation, baseOffset + RegisterDump::offsetOfRegister(reg));
    1171           0 :         op(reg, dump);
    1172             :     }
    1173           0 : }
    1174             : 
    1175             : class StoreOp
    1176             : {
    1177             :     MacroAssembler& masm;
    1178             : 
    1179             :   public:
    1180             :     explicit StoreOp(MacroAssembler& masm)
    1181           0 :       : masm(masm)
    1182             :     {}
    1183             : 
    1184             :     void operator()(Register reg, Address dump) {
    1185           0 :         masm.storePtr(reg, dump);
    1186             :     }
    1187           0 :     void operator()(FloatRegister reg, Address dump) {
    1188           0 :         if (reg.isDouble())
    1189           0 :             masm.storeDouble(reg, dump);
    1190           0 :         else if (reg.isSingle())
    1191           0 :             masm.storeFloat32(reg, dump);
    1192             : #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    1193           0 :         else if (reg.isSimd128())
    1194           0 :             masm.storeUnalignedSimd128Float(reg, dump);
    1195             : #endif
    1196             :         else
    1197           0 :             MOZ_CRASH("Unexpected register type.");
    1198           0 :     }
    1199             : };
    1200             : 
    1201             : static void
    1202           0 : StoreAllLiveRegs(MacroAssembler& masm, LiveRegisterSet liveRegs)
    1203             : {
    1204             :     // Store a copy of all live registers before performing the call.
     1205             :     // When we reach the OsiPoint, we can use this to check that nothing
     1206             :     // modified them in the meantime.
    1207             : 
    1208             :     // Load pointer to the JitActivation in a scratch register.
    1209           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1210           0 :     Register scratch = allRegs.takeAny();
    1211           0 :     masm.push(scratch);
    1212           0 :     masm.loadJitActivation(scratch);
    1213             : 
    1214           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1215           0 :     masm.add32(Imm32(1), checkRegs);
    1216             : 
    1217           0 :     StoreOp op(masm);
    1218           0 :     HandleRegisterDump<StoreOp>(op, masm, liveRegs, scratch, allRegs.getAny());
    1219             : 
    1220           0 :     masm.pop(scratch);
    1221           0 : }
    1222             : 
    1223             : class VerifyOp
    1224             : {
    1225             :     MacroAssembler& masm;
    1226             :     Label* failure_;
    1227             : 
    1228             :   public:
    1229             :     VerifyOp(MacroAssembler& masm, Label* failure)
    1230           0 :       : masm(masm), failure_(failure)
    1231             :     {}
    1232             : 
    1233             :     void operator()(Register reg, Address dump) {
    1234           0 :         masm.branchPtr(Assembler::NotEqual, dump, reg, failure_);
    1235             :     }
    1236           0 :     void operator()(FloatRegister reg, Address dump) {
    1237           0 :         FloatRegister scratch;
    1238           0 :         if (reg.isDouble()) {
    1239           0 :             scratch = ScratchDoubleReg;
    1240           0 :             masm.loadDouble(dump, scratch);
    1241           0 :             masm.branchDouble(Assembler::DoubleNotEqual, scratch, reg, failure_);
    1242           0 :         } else if (reg.isSingle()) {
    1243           0 :             scratch = ScratchFloat32Reg;
    1244           0 :             masm.loadFloat32(dump, scratch);
    1245           0 :             masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
    1246             :         }
    1247             : 
    1248             :         // :TODO: (Bug 1133745) Add support to verify SIMD registers.
    1249           0 :     }
    1250             : };
    1251             : 
    1252             : void
    1253           0 : CodeGeneratorShared::verifyOsiPointRegs(LSafepoint* safepoint)
    1254             : {
    1255             :     // Ensure the live registers stored by callVM did not change between
    1256             :     // the call and this OsiPoint. Try-catch relies on this invariant.
    1257             : 
    1258             :     // Load pointer to the JitActivation in a scratch register.
    1259           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1260           0 :     Register scratch = allRegs.takeAny();
    1261           0 :     masm.push(scratch);
    1262           0 :     masm.loadJitActivation(scratch);
    1263             : 
    1264             :     // If we should not check registers (because the instruction did not call
    1265             :     // into the VM, or a GC happened), we're done.
    1266           0 :     Label failure, done;
    1267           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1268           0 :     masm.branch32(Assembler::Equal, checkRegs, Imm32(0), &done);
    1269             : 
     1270             :     // Making more than one VM function call in one visit function at
     1271             :     // runtime is a security-critical error: if we conservatively assume that
     1272             :     // one of the calls can re-enter Ion, then the invalidation process will
     1273             :     // potentially add a call at a random location, by patching the code
     1274             :     // before the return address.
    1275           0 :     masm.branch32(Assembler::NotEqual, checkRegs, Imm32(1), &failure);
    1276             : 
    1277             :     // Set checkRegs to 0, so that we don't try to verify registers after we
    1278             :     // return from this script to the caller.
    1279           0 :     masm.store32(Imm32(0), checkRegs);
    1280             : 
    1281             :     // Ignore clobbered registers. Some instructions (like LValueToInt32) modify
    1282             :     // temps after calling into the VM. This is fine because no other
     1283             :     // instructions (including this OsiPoint) will depend on them. The
     1284             :     // backtracking allocator can also use the same register for an input and
     1285             :     // an output. These are marked as clobbered and shouldn't get checked.
    1286           0 :     LiveRegisterSet liveRegs;
    1287           0 :     liveRegs.set() = RegisterSet::Intersect(safepoint->liveRegs().set(),
    1288           0 :                                             RegisterSet::Not(safepoint->clobberedRegs().set()));
    1289             : 
    1290           0 :     VerifyOp op(masm, &failure);
    1291           0 :     HandleRegisterDump<VerifyOp>(op, masm, liveRegs, scratch, allRegs.getAny());
    1292             : 
    1293           0 :     masm.jump(&done);
    1294             : 
    1295             :     // Do not profile the callWithABI that occurs below.  This is to avoid a
    1296             :     // rare corner case that occurs when profiling interacts with itself:
    1297             :     //
    1298             :     // When slow profiling assertions are turned on, FunctionBoundary ops
    1299             :     // (which update the profiler pseudo-stack) may emit a callVM, which
    1300             :     // forces them to have an osi point associated with them.  The
    1301             :     // FunctionBoundary for inline function entry is added to the caller's
    1302             :     // graph with a PC from the caller's code, but during codegen it modifies
    1303             :     // Gecko Profiler instrumentation to add the callee as the current top-most
    1304             :     // script. When codegen gets to the OSIPoint, and the callWithABI below is
    1305             :     // emitted, the codegen thinks that the current frame is the callee, but
    1306             :     // the PC it's using from the OSIPoint refers to the caller.  This causes
    1307             :     // the profiler instrumentation of the callWithABI below to ASSERT, since
    1308             :     // the script and pc are mismatched.  To avoid this, we simply omit
    1309             :     // instrumentation for these callWithABIs.
    1310             : 
    1311             :     // Any live register captured by a safepoint (other than temp registers)
    1312             :     // must remain unchanged between the call and the OsiPoint instruction.
    1313           0 :     masm.bind(&failure);
    1314           0 :     masm.assumeUnreachable("Modified registers between VM call and OsiPoint");
    1315             : 
    1316           0 :     masm.bind(&done);
    1317           0 :     masm.pop(scratch);
    1318           0 : }
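                      : 
                      : // The checkRegs protocol used above, summarized as a reading aid:
                      : // JitActivation::checkRegs acts as a per-activation counter.
                      : //
                      : //   0  - nothing to verify: no VM call stored a dump (or a GC ran);
                      : //   1  - exactly one VM call ran; its register dump is compared above;
                      : //   >1 - more than one VM call inside a single LIR instruction, which is
                      : //        rejected above because invalidation could then patch the code
                      : //        at the wrong return address.
                      : //
                      : // StoreAllLiveRegs() increments the counter and writes the dump before
                      : // the call; this function stores 0 so registers aren't re-checked after
                      : // returning to the caller.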
    1319             : 
    1320             : bool
    1321           0 : CodeGeneratorShared::shouldVerifyOsiPointRegs(LSafepoint* safepoint)
    1322             : {
    1323        3622 :     if (!checkOsiPointRegisters)
    1324             :         return false;
    1325             : 
    1326           0 :     if (safepoint->liveRegs().emptyGeneral() && safepoint->liveRegs().emptyFloat())
    1327             :         return false; // No registers to check.
    1328             : 
    1329           0 :     return true;
    1330             : }
    1331             : 
    1332             : void
    1333           0 : CodeGeneratorShared::resetOsiPointRegs(LSafepoint* safepoint)
    1334             : {
    1335        1276 :     if (!shouldVerifyOsiPointRegs(safepoint))
    1336        1276 :         return;
    1337             : 
    1338             :     // Set checkRegs to 0. If we perform a VM call, the instruction
    1339             :     // will set it to 1.
    1340           0 :     AllocatableGeneralRegisterSet allRegs(GeneralRegisterSet::All());
    1341           0 :     Register scratch = allRegs.takeAny();
    1342           0 :     masm.push(scratch);
    1343           0 :     masm.loadJitActivation(scratch);
    1344           0 :     Address checkRegs(scratch, JitActivation::offsetOfCheckRegs());
    1345           0 :     masm.store32(Imm32(0), checkRegs);
    1346           0 :     masm.pop(scratch);
    1347             : }
    1348             : #endif
    1349             : 
     1350             : // Before making any call into C++ code, you should ensure that volatile
    1351             : // registers are evicted by the register allocator.
    1352             : void
    1353        1070 : CodeGeneratorShared::callVM(const VMFunction& fun, LInstruction* ins, const Register* dynStack)
    1354             : {
    1355             :     // If we're calling a function with an out parameter type of double, make
    1356             :     // sure we have an FPU.
    1357        1070 :     MOZ_ASSERT_IF(fun.outParam == Type_Double, gen->runtime->jitSupportsFloatingPoint());
    1358             : 
    1359             : #ifdef DEBUG
    1360           0 :     if (ins->mirRaw()) {
    1361           0 :         MOZ_ASSERT(ins->mirRaw()->isInstruction());
    1362        1070 :         MInstruction* mir = ins->mirRaw()->toInstruction();
    1363        1070 :         MOZ_ASSERT_IF(mir->needsResumePoint(), mir->resumePoint());
    1364             :     }
    1365             : #endif
    1366             : 
    1367             :     // Stack is:
    1368             :     //    ... frame ...
    1369             :     //    [args]
    1370             : #ifdef DEBUG
    1371        1070 :     MOZ_ASSERT(pushedArgs_ == fun.explicitArgs);
    1372        1070 :     pushedArgs_ = 0;
    1373             : #endif
    1374             : 
    1375             :     // Get the wrapper of the VM function.
    1376        2140 :     TrampolinePtr wrapper = gen->jitRuntime()->getVMWrapper(fun);
    1377             : 
    1378             : #ifdef CHECK_OSIPOINT_REGISTERS
    1379        2140 :     if (shouldVerifyOsiPointRegs(ins->safepoint()))
    1380           0 :         StoreAllLiveRegs(masm, ins->safepoint()->liveRegs());
    1381             : #endif
    1382             : 
    1383             :     // Push an exit frame descriptor. If |dynStack| is a valid pointer to a
     1384             :     // register, then its value is added to |framePushed()| to build the
     1385             :     // frame descriptor.
    1386           0 :     if (dynStack) {
    1387           0 :         masm.addPtr(Imm32(masm.framePushed()), *dynStack);
    1388           0 :         masm.makeFrameDescriptor(*dynStack, JitFrame_IonJS, ExitFrameLayout::Size());
    1389           0 :         masm.Push(*dynStack); // descriptor
    1390             :     } else {
    1391        1070 :         masm.pushStaticFrameDescriptor(JitFrame_IonJS, ExitFrameLayout::Size());
    1392             :     }
    1393             : 
     1394             :     // Call the wrapper function.  The wrapper is in charge of unwinding the
     1395             :     // stack when returning from the call.  Failures are handled with
     1396             :     // exceptions based on the return value of the C functions.  To guard the
     1397             :     // outcome of the returned value, use another LIR instruction.
    1398        1070 :     uint32_t callOffset = masm.callJit(wrapper);
    1399        1070 :     markSafepointAt(callOffset, ins);
    1400             : 
     1401             :     // Remove the rest of the frame left on the stack. We remove the return
     1402             :     // address, which is implicitly popped when returning.
    1403        1070 :     int framePop = sizeof(ExitFrameLayout) - sizeof(void*);
    1404             : 
    1405             :     // Pop arguments from framePushed.
    1406        2140 :     masm.implicitPop(fun.explicitStackSlots() * sizeof(void*) + framePop);
    1407             :     // Stack is:
    1408             :     //    ... frame ...
    1409        1070 : }
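                      : 
                      : // A minimal usage sketch of callVM from a visit function. The VM
                      : // function, LIR opcode and argument names are hypothetical, not part
                      : // of this file:
                      : //
                      : //   typedef bool (*DoThingFn)(JSContext*, HandleValue);
                      : //   static const VMFunction DoThingInfo =
                      : //       FunctionInfo<DoThingFn>(DoThing, "DoThing");
                      : //
                      : //   void CodeGenerator::visitDoThing(LDoThing* lir) {
                      : //       pushArg(ToValue(lir, LDoThing::Input)); // counted by pushedArgs_
                      : //       callVM(DoThingInfo, lir); // descriptor + wrapper call + safepoint
                      : //   }
                      : //
                      : // Arguments are pushed before the call and removed by implicitPop() above.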
    1410             : 
    1411             : class OutOfLineTruncateSlow : public OutOfLineCodeBase<CodeGeneratorShared>
    1412             : {
    1413             :     FloatRegister src_;
    1414             :     Register dest_;
    1415             :     bool widenFloatToDouble_;
    1416             :     wasm::BytecodeOffset bytecodeOffset_;
    1417             : 
    1418             :   public:
    1419             :     OutOfLineTruncateSlow(FloatRegister src, Register dest, bool widenFloatToDouble = false,
    1420             :                           wasm::BytecodeOffset bytecodeOffset = wasm::BytecodeOffset())
    1421           0 :       : src_(src),
    1422             :         dest_(dest),
    1423             :         widenFloatToDouble_(widenFloatToDouble),
    1424           0 :         bytecodeOffset_(bytecodeOffset)
    1425             :     { }
    1426             : 
    1427           0 :     void accept(CodeGeneratorShared* codegen) override {
    1428           0 :         codegen->visitOutOfLineTruncateSlow(this);
    1429           0 :     }
    1430             :     FloatRegister src() const {
    1431           0 :         return src_;
    1432             :     }
    1433             :     Register dest() const {
    1434             :         return dest_;
    1435             :     }
    1436             :     bool widenFloatToDouble() const {
    1437             :         return widenFloatToDouble_;
    1438             :     }
    1439             :     wasm::BytecodeOffset bytecodeOffset() const {
    1440             :         return bytecodeOffset_;
    1441             :     }
    1442             : };
    1443             : 
    1444             : OutOfLineCode*
    1445           0 : CodeGeneratorShared::oolTruncateDouble(FloatRegister src, Register dest, MInstruction* mir,
    1446             :                                        wasm::BytecodeOffset bytecodeOffset)
    1447             : {
    1448           0 :     MOZ_ASSERT_IF(IsCompilingWasm(), bytecodeOffset.isValid());
    1449             : 
    1450             :     OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, /* float32 */ false,
    1451           0 :                                                                     bytecodeOffset);
    1452           0 :     addOutOfLineCode(ool, mir);
    1453           0 :     return ool;
    1454             : }
    1455             : 
    1456             : void
    1457           0 : CodeGeneratorShared::emitTruncateDouble(FloatRegister src, Register dest, MTruncateToInt32* mir)
    1458             : {
    1459           0 :     OutOfLineCode* ool = oolTruncateDouble(src, dest, mir, mir->bytecodeOffset());
    1460             : 
    1461           0 :     masm.branchTruncateDoubleMaybeModUint32(src, dest, ool->entry());
    1462           0 :     masm.bind(ool->rejoin());
    1463           0 : }
    1464             : 
    1465             : void
    1466           0 : CodeGeneratorShared::emitTruncateFloat32(FloatRegister src, Register dest, MTruncateToInt32* mir)
    1467             : {
    1468             :     OutOfLineTruncateSlow* ool = new(alloc()) OutOfLineTruncateSlow(src, dest, /* float32 */ true,
    1469           0 :                                                                     mir->bytecodeOffset());
    1470           0 :     addOutOfLineCode(ool, mir);
    1471             : 
    1472           0 :     masm.branchTruncateFloat32MaybeModUint32(src, dest, ool->entry());
    1473           0 :     masm.bind(ool->rejoin());
    1474           0 : }
    1475             : 
    1476             : void
    1477           0 : CodeGeneratorShared::visitOutOfLineTruncateSlow(OutOfLineTruncateSlow* ool)
    1478             : {
    1479           0 :     FloatRegister src = ool->src();
    1480           0 :     Register dest = ool->dest();
    1481             : 
    1482           0 :     saveVolatile(dest);
    1483           0 :     masm.outOfLineTruncateSlow(src, dest, ool->widenFloatToDouble(), gen->compilingWasm(),
    1484           0 :                                ool->bytecodeOffset());
    1485           0 :     restoreVolatile(dest);
    1486             : 
    1487           0 :     masm.jump(ool->rejoin());
    1488           0 : }
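                      : 
                      : // Shape of the code emitted by the truncate helpers above (a sketch):
                      : //
                      : //   inline:  branchTruncate{Double,Float32}MaybeModUint32(src, dest, ool)
                      : //            ; fast path succeeded, fall through
                      : //   rejoin:  ...
                      : //   ool:     saveVolatile(dest)
                      : //            masm.outOfLineTruncateSlow(...)  ; C++ fallback; may widen
                      : //            restoreVolatile(dest)            ; float32 to double
                      : //            jump rejoin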
    1489             : 
    1490             : bool
    1491          48 : CodeGeneratorShared::omitOverRecursedCheck() const
    1492             : {
    1493             :     // If the current function makes no calls (which means it isn't recursive)
    1494             :     // and it uses only a small amount of stack space, it doesn't need a
    1495             :     // stack overflow check. Note that the actual number here is somewhat
    1496             :     // arbitrary, and codegen actually uses small bounded amounts of
    1497             :     // additional stack space in some cases too.
    1498          48 :     return frameSize() < MAX_UNCHECKED_LEAF_FRAME_SIZE && !gen->needsOverrecursedCheck();
    1499             : }
    1500             : 
    1501             : void
    1502           0 : CodeGeneratorShared::emitPreBarrier(Register base, const LAllocation* index, int32_t offsetAdjustment)
    1503             : {
    1504           0 :     if (index->isConstant()) {
    1505           0 :         Address address(base, ToInt32(index) * sizeof(Value) + offsetAdjustment);
    1506           0 :         masm.guardedCallPreBarrier(address, MIRType::Value);
    1507             :     } else {
    1508          58 :         BaseIndex address(base, ToRegister(index), TimesEight, offsetAdjustment);
    1509           0 :         masm.guardedCallPreBarrier(address, MIRType::Value);
    1510             :     }
    1511          29 : }
    1512             : 
    1513             : void
    1514           0 : CodeGeneratorShared::emitPreBarrier(Address address)
    1515             : {
    1516         199 :     masm.guardedCallPreBarrier(address, MIRType::Value);
    1517         199 : }
    1518             : 
    1519             : void
    1520        1234 : CodeGeneratorShared::jumpToBlock(MBasicBlock* mir)
    1521             : {
    1522             :     // Skip past trivial blocks.
    1523        1234 :     mir = skipTrivialBlocks(mir);
    1524             : 
    1525             :     // No jump necessary if we can fall through to the next block.
    1526        1234 :     if (isNextBlock(mir->lir()))
    1527             :         return;
    1528             : 
    1529         332 :     masm.jump(mir->lir()->label());
    1530             : }
    1531             : 
    1532             : Label*
    1533          46 : CodeGeneratorShared::getJumpLabelForBranch(MBasicBlock* block)
    1534             : {
    1535             :     // Skip past trivial blocks.
    1536          46 :     return skipTrivialBlocks(block)->lir()->label();
    1537             : }
    1538             : 
    1539             : // This function is not used for MIPS/MIPS64. MIPS has branchToBlock.
    1540             : #if !defined(JS_CODEGEN_MIPS32) && !defined(JS_CODEGEN_MIPS64)
    1541             : void
    1542         461 : CodeGeneratorShared::jumpToBlock(MBasicBlock* mir, Assembler::Condition cond)
    1543             : {
    1544             :     // Skip past trivial blocks.
    1545         922 :     masm.j(cond, skipTrivialBlocks(mir)->lir()->label());
    1546         461 : }
    1547             : #endif
    1548             : 
    1549             : ReciprocalMulConstants
    1550           0 : CodeGeneratorShared::computeDivisionConstants(uint32_t d, int maxLog) {
    1551           0 :     MOZ_ASSERT(maxLog >= 2 && maxLog <= 32);
    1552             :     // In what follows, 0 < d < 2^maxLog and d is not a power of 2.
    1553           0 :     MOZ_ASSERT(d < (uint64_t(1) << maxLog) && (d & (d - 1)) != 0);
    1554             : 
     1555             :     // Speeding up division by non-power-of-2 constants is possible by
    1556             :     // calculating, during compilation, a value M such that high-order
    1557             :     // bits of M*n correspond to the result of the division of n by d.
    1558             :     // No value of M can serve this purpose for arbitrarily big values
    1559             :     // of n but, for optimizing integer division, we're just concerned
    1560             :     // with values of n whose absolute value is bounded (by fitting in
    1561             :     // an integer type, say). With this in mind, we'll find a constant
    1562             :     // M as above that works for -2^maxLog <= n < 2^maxLog; maxLog can
    1563             :     // then be 31 for signed division or 32 for unsigned division.
    1564             :     //
    1565             :     // The original presentation of this technique appears in Hacker's
     1566             :     // Delight, a book by Henry S. Warren, Jr. A proof of correctness
    1567             :     // for our version follows; we'll denote maxLog by L in the proof,
    1568             :     // for conciseness.
    1569             :     //
    1570             :     // Formally, for |d| < 2^L, we'll compute two magic values M and s
    1571             :     // in the ranges 0 <= M < 2^(L+1) and 0 <= s <= L such that
    1572             :     //     (M * n) >> (32 + s) = floor(n/d)    if    0 <= n < 2^L
    1573             :     //     (M * n) >> (32 + s) = ceil(n/d) - 1 if -2^L <= n < 0.
    1574             :     //
    1575             :     // Define p = 32 + s, M = ceil(2^p/d), and assume that s satisfies
    1576             :     //                     M - 2^p/d <= 2^(p-L)/d.                 (1)
    1577             :     // (Observe that p = CeilLog32(d) + L satisfies this, as the right
    1578             :     // side of (1) is at least one in this case). Then,
    1579             :     //
    1580             :     // a) If p <= CeilLog32(d) + L, then M < 2^(L+1) - 1.
    1581             :     // Proof: Indeed, M is monotone in p and, for p equal to the above
    1582             :     // value, the bounds 2^L > d >= 2^(p-L-1) + 1 readily imply that
    1583             :     //    2^p / d <  2^p/(d - 1) * (d - 1)/d
    1584             :     //            <= 2^(L+1) * (1 - 1/d) < 2^(L+1) - 2.
    1585             :     // The claim follows by applying the ceiling function.
    1586             :     //
    1587             :     // b) For any 0 <= n < 2^L, floor(Mn/2^p) = floor(n/d).
    1588             :     // Proof: Put x = floor(Mn/2^p); it's the unique integer for which
    1589             :     //                    Mn/2^p - 1 < x <= Mn/2^p.                (2)
    1590             :     // Using M >= 2^p/d on the LHS and (1) on the RHS, we get
    1591             :     //           n/d - 1 < x <= n/d + n/(2^L d) < n/d + 1/d.
    1592             :     // Since x is an integer, it's not in the interval (n/d, (n+1)/d),
    1593             :     // and so n/d - 1 < x <= n/d, which implies x = floor(n/d).
    1594             :     //
    1595             :     // c) For any -2^L <= n < 0, floor(Mn/2^p) + 1 = ceil(n/d).
    1596             :     // Proof: The proof is similar. Equation (2) holds as above. Using
    1597             :     // M > 2^p/d (d isn't a power of 2) on the RHS and (1) on the LHS,
    1598             :     //                 n/d + n/(2^L d) - 1 < x < n/d.
    1599             :     // Using n >= -2^L and summing 1,
    1600             :     //                  n/d - 1/d < x + 1 < n/d + 1.
    1601             :     // Since x + 1 is an integer, this implies n/d <= x + 1 < n/d + 1.
    1602             :     // In other words, x + 1 = ceil(n/d).
    1603             :     //
    1604             :     // Condition (1) isn't necessary for the existence of M and s with
    1605             :     // the properties above. Hacker's Delight provides a slightly less
    1606             :     // restrictive condition when d >= 196611, at the cost of a 3-page
    1607             :     // proof of correctness, for the case L = 31.
    1608             :     //
    1609             :     // Note that, since d*M - 2^p = d - (2^p)%d, (1) can be written as
    1610             :     //                   2^(p-L) >= d - (2^p)%d.
    1611             :     // In order to avoid overflow in the (2^p) % d calculation, we can
    1612             :     // compute it as (2^p-1) % d + 1, where 2^p-1 can then be computed
    1613             :     // without overflow as UINT64_MAX >> (64-p).
    1614             : 
    1615             :     // We now compute the least p >= 32 with the property above...
    1616             :     int32_t p = 32;
    1617           0 :     while ((uint64_t(1) << (p-maxLog)) + (UINT64_MAX >> (64-p)) % d + 1 < d)
    1618           0 :         p++;
    1619             : 
    1620             :     // ...and the corresponding M. For either the signed (L=31) or the
    1621             :     // unsigned (L=32) case, this value can be too large (cf. item a).
    1622             :     // Codegen can still multiply by M by multiplying by (M - 2^L) and
    1623             :     // adjusting the value afterwards, if this is the case.
    1624             :     ReciprocalMulConstants rmc;
    1625           0 :     rmc.multiplier = (UINT64_MAX >> (64-p))/d + 1;
    1626           0 :     rmc.shiftAmount = p - 32;
    1627             : 
    1628           0 :     return rmc;
    1629             : }
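                      : 
                      : // Worked example (a sketch; unsigned case, maxLog = 32, d = 7). The
                      : // loop above tries successive values of p:
                      : //
                      : //   p = 32: 2^0 + (2^32 - 1) % 7 + 1 = 1 + 3 + 1 = 5  < 7
                      : //   p = 33: 2^1 + (2^33 - 1) % 7 + 1 = 2 + 0 + 1 = 3  < 7
                      : //   p = 34: 2^2 + (2^34 - 1) % 7 + 1 = 4 + 1 + 1 = 6  < 7
                      : //   p = 35: 2^3 + (2^35 - 1) % 7 + 1 = 8 + 3 + 1 = 12 >= 7  (stop)
                      : //
                      : // giving multiplier = floor((2^35 - 1) / 7) + 1 = 0x124924925 and
                      : // shiftAmount = 35 - 32 = 3. The multiplier exceeds 2^32, so codegen
                      : // multiplies by 0x24924925 (= M - 2^32) and adjusts afterwards,
                      : // recovering the classic unsigned divide-by-7 magic constant.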
    1630             : 
    1631             : #ifdef JS_TRACE_LOGGING
    1632             : 
    1633             : void
    1634           0 : CodeGeneratorShared::emitTracelogScript(bool isStart)
    1635             : {
    1636         144 :     if (!TraceLogTextIdEnabled(TraceLogger_Scripts))
    1637           1 :         return;
    1638             : 
    1639           0 :     Label done;
    1640             : 
    1641           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1642           0 :     Register logger = regs.takeAnyGeneral();
    1643           0 :     Register script = regs.takeAnyGeneral();
    1644             : 
    1645           0 :     masm.Push(logger);
    1646             : 
    1647           0 :     masm.loadTraceLogger(logger);
    1648           0 :     masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
    1649             : 
    1650           0 :     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
    1651           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1652             : 
    1653           0 :     masm.Push(script);
    1654             : 
    1655           0 :     CodeOffset patchScript = masm.movWithPatch(ImmWord(0), script);
    1656           0 :     masm.propagateOOM(patchableTLScripts_.append(patchScript));
    1657             : 
    1658           0 :     if (isStart)
    1659           0 :         masm.tracelogStartId(logger, script);
    1660             :     else
    1661           0 :         masm.tracelogStopId(logger, script);
    1662             : 
    1663           0 :     masm.Pop(script);
    1664             : 
    1665           0 :     masm.bind(&done);
    1666             : 
    1667           0 :     masm.Pop(logger);
    1668             : }
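                      : 
                      : // This emitter and the two variants below share one shape: load the
                      : // TraceLoggerThread pointer, bail out if it is null or its enabled
                      : // count is zero, materialize the event id (an immediate, or a
                      : // movWithPatch recorded in patchableTLScripts_ / patchableTLEvents_
                      : // for later patching), then emit tracelogStartId or tracelogStopId.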
    1669             : 
    1670             : void
    1671           0 : CodeGeneratorShared::emitTracelogTree(bool isStart, uint32_t textId)
    1672             : {
    1673         308 :     if (!TraceLogTextIdEnabled(textId))
    1674           1 :         return;
    1675             : 
    1676           0 :     Label done;
    1677           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1678           0 :     Register logger = regs.takeAnyGeneral();
    1679             : 
    1680           0 :     masm.Push(logger);
    1681             : 
    1682           0 :     masm.loadTraceLogger(logger);
    1683           0 :     masm.branchTestPtr(Assembler::Zero, logger, logger, &done);
    1684             : 
    1685           0 :     Address enabledAddress(logger, TraceLoggerThread::offsetOfEnabled());
    1686           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1687             : 
    1688           0 :     if (isStart)
    1689           0 :         masm.tracelogStartId(logger, textId);
    1690             :     else
    1691           0 :         masm.tracelogStopId(logger, textId);
    1692             : 
    1693           0 :     masm.bind(&done);
    1694             : 
    1695           0 :     masm.Pop(logger);
    1696             : }
    1697             : 
    1698             : void
    1699           0 : CodeGeneratorShared::emitTracelogTree(bool isStart, const char* text,
    1700             :                                       TraceLoggerTextId enabledTextId)
    1701             : {
    1702           0 :     if (!TraceLogTextIdEnabled(enabledTextId))
    1703           0 :         return;
    1704             : 
    1705           0 :     Label done;
    1706             : 
    1707           0 :     AllocatableRegisterSet regs(RegisterSet::Volatile());
    1708           0 :     Register loggerReg = regs.takeAnyGeneral();
    1709           0 :     Register eventReg = regs.takeAnyGeneral();
    1710             : 
    1711           0 :     masm.Push(loggerReg);
    1712             : 
    1713           0 :     masm.loadTraceLogger(loggerReg);
    1714           0 :     masm.branchTestPtr(Assembler::Zero, loggerReg, loggerReg, &done);
    1715             : 
    1716           0 :     Address enabledAddress(loggerReg, TraceLoggerThread::offsetOfEnabled());
    1717           0 :     masm.branch32(Assembler::Equal, enabledAddress, Imm32(0), &done);
    1718             : 
    1719           0 :     masm.Push(eventReg);
    1720             : 
    1721           0 :     PatchableTLEvent patchEvent(masm.movWithPatch(ImmWord(0), eventReg), text);
    1722           0 :     masm.propagateOOM(patchableTLEvents_.append(std::move(patchEvent)));
    1723             : 
    1724           0 :     if (isStart)
    1725           0 :         masm.tracelogStartId(loggerReg, eventReg);
    1726             :     else
    1727           0 :         masm.tracelogStopId(loggerReg, eventReg);
    1728             : 
    1729           0 :     masm.Pop(eventReg);
    1730             : 
    1731           0 :     masm.bind(&done);
    1732             : 
    1733             :     masm.Pop(loggerReg);
    1734             : }
    1735             : #endif
    1736             : 
    1737             : } // namespace jit
    1738             : } // namespace js

Generated by: LCOV version 1.13-14-ga5dd952