From 8f87fcabbe5e7461c34e8b7f44aa1671c88a2069 Mon Sep 17 00:00:00 2001 From: Will Springer Date: Sun, 26 Dec 2021 16:04:08 -0800 Subject: [PATCH 1/2] firefox-esr: update to 91.4.0, add ppc64le JIT patch More info on the baseline JS/WASM compiler for POWER9 LE at: https://www.talospace.com/2021/12/91esr-with-baseline-compilerbaseline.html --- .../patches/ppc64le-baseline-jit.patch | 3441 +++++++++++++++++ srcpkgs/firefox-esr/template | 11 +- 2 files changed, 3450 insertions(+), 2 deletions(-) create mode 100644 srcpkgs/firefox-esr/patches/ppc64le-baseline-jit.patch diff --git a/srcpkgs/firefox-esr/patches/ppc64le-baseline-jit.patch b/srcpkgs/firefox-esr/patches/ppc64le-baseline-jit.patch new file mode 100644 index 000000000000..cced0058e8e1 --- /dev/null +++ b/srcpkgs/firefox-esr/patches/ppc64le-baseline-jit.patch @@ -0,0 +1,3441 @@ +diff --git a/config/check_macroassembler_style.py b/config/check_macroassembler_style.py +index 0d040a939b..b83e3691dd 100644 +--- a/config/check_macroassembler_style.py ++++ b/config/check_macroassembler_style.py +@@ -24,17 +24,17 @@ from __future__ import absolute_import + from __future__ import print_function + + import difflib + import os + import re + import sys + + architecture_independent = set(["generic"]) +-all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared"]) ++all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared", "ppc64"]) + all_architecture_names = set(["x86", "x64", "arm", "arm64"]) + all_shared_architecture_names = set(["x86_shared", "arm", "arm64"]) + + reBeforeArg = "(?<=[(,\s])" + reArgType = "(?P[\w\s:*&]+)" + reArgName = "(?P\s\w+)" + reArgDefault = "(?P(?:\s=[^,)]+)?)" + reAfterArg = "(?=[,)])" +diff --git a/js/moz.configure b/js/moz.configure +index 3c3d0d4359..b217d0e15c 100644 +--- a/js/moz.configure ++++ b/js/moz.configure +@@ -214,23 +214,25 @@ def jit_codegen(jit_enabled, simulator, target): + return namespace(**{str(target.cpu): True}) + + + set_config("JS_CODEGEN_NONE", jit_codegen.none) + set_config("JS_CODEGEN_ARM", jit_codegen.arm) + set_config("JS_CODEGEN_ARM64", jit_codegen.arm64) + set_config("JS_CODEGEN_MIPS32", jit_codegen.mips32) + set_config("JS_CODEGEN_MIPS64", jit_codegen.mips64) ++set_config("JS_CODEGEN_PPC64", jit_codegen.ppc64) + set_config("JS_CODEGEN_X86", jit_codegen.x86) + set_config("JS_CODEGEN_X64", jit_codegen.x64) + set_define("JS_CODEGEN_NONE", jit_codegen.none) + set_define("JS_CODEGEN_ARM", jit_codegen.arm) + set_define("JS_CODEGEN_ARM64", jit_codegen.arm64) + set_define("JS_CODEGEN_MIPS32", jit_codegen.mips32) + set_define("JS_CODEGEN_MIPS64", jit_codegen.mips64) ++set_define("JS_CODEGEN_PPC64", jit_codegen.ppc64) + set_define("JS_CODEGEN_X86", jit_codegen.x86) + set_define("JS_CODEGEN_X64", jit_codegen.x64) + + # Profiling + # ======================================================= + option( + "--enable-instruments", + env="MOZ_INSTRUMENTS", +diff --git a/js/src/irregexp/RegExpNativeMacroAssembler.cpp b/js/src/irregexp/RegExpNativeMacroAssembler.cpp +index e0ef7e64f5..81d8e2a198 100644 +--- a/js/src/irregexp/RegExpNativeMacroAssembler.cpp ++++ b/js/src/irregexp/RegExpNativeMacroAssembler.cpp +@@ -813,18 +813,33 @@ void SMRegExpMacroAssembler::JumpOrBacktrack(Label* to) { + // If the test fails, call an OOL handler to try growing the stack. 
+ void SMRegExpMacroAssembler::CheckBacktrackStackLimit() { + js::jit::Label no_stack_overflow; + masm_.branchPtr( + Assembler::BelowOrEqual, + AbsoluteAddress(isolate()->regexp_stack()->limit_address_address()), + backtrack_stack_pointer_, &no_stack_overflow); + ++#ifdef JS_CODEGEN_PPC64 ++ // LR on PowerPC isn't a GPR, so we have to explicitly save it here before ++ // we call or we will end up erroneously returning after the call to the ++ // stack overflow handler when we |blr| out and inevitably underflow the ++ // irregexp stack on the next backtrack. ++ masm_.xs_mflr(temp1_); ++ masm_.as_stdu(temp1_, masm_.getStackPointer(), -8); ++#endif ++ + masm_.call(&stack_overflow_label_); + ++#ifdef JS_CODEGEN_PPC64 ++ masm_.as_ld(temp1_, masm_.getStackPointer(), 0); ++ masm_.xs_mtlr(temp1_); ++ masm_.as_addi(masm_.getStackPointer(), masm_.getStackPointer(), 8); ++#endif ++ + // Exit with an exception if the call failed + masm_.branchTest32(Assembler::Zero, temp0_, temp0_, + &exit_with_exception_label_); + + masm_.bind(&no_stack_overflow); + } + + // This is used to sneak an OOM through the V8 layer. +@@ -1127,16 +1142,20 @@ void SMRegExpMacroAssembler::stackOverflowHandler() { + LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile()); + + #ifdef JS_USE_LINK_REGISTER + masm_.pushReturnAddress(); + #endif + + // Adjust for the return address on the stack. + size_t frameOffset = sizeof(void*); ++#ifdef JS_CODEGEN_PPC64 ++ // We have a double return address. ++ frameOffset += sizeof(void*); ++#endif + + volatileRegs.takeUnchecked(temp0_); + volatileRegs.takeUnchecked(temp1_); + masm_.PushRegsInMask(volatileRegs); + + using Fn = bool (*)(RegExpStack * regexp_stack); + masm_.setupUnalignedABICall(temp0_); + masm_.passABIArg(temp1_); +diff --git a/js/src/jit/AtomicOperations.h b/js/src/jit/AtomicOperations.h +index f4a5727d05..138612d53b 100644 +--- a/js/src/jit/AtomicOperations.h ++++ b/js/src/jit/AtomicOperations.h +@@ -373,19 +373,26 @@ constexpr inline bool AtomicOperations::isLockfreeJS(int32_t size) { + # include "jit/shared/AtomicOperations-feeling-lucky.h" + # endif + #elif defined(__mips__) + # if defined(__clang__) || defined(__GNUC__) + # include "jit/mips-shared/AtomicOperations-mips-shared.h" + # else + # error "AtomicOperations on MIPS for an unknown compiler" + # endif ++#elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \ ++ defined(__PPC64LE__) ++# if defined(JS_CODEGEN_PPC64) ++/* XXX: should be # include "jit/shared/AtomicOperations-shared-jit.h" */ ++# include "jit/shared/AtomicOperations-feeling-lucky.h" ++# else ++# include "jit/shared/AtomicOperations-feeling-lucky.h" ++# endif + #elif defined(__ppc__) || defined(__PPC__) || defined(__sparc__) || \ +- defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \ +- defined(__PPC64LE__) || defined(__alpha__) || defined(__hppa__) || \ ++ defined(__alpha__) || defined(__hppa__) || \ + defined(__sh__) || defined(__s390__) || defined(__s390x__) || \ + defined(__m68k__) || defined(__riscv) || defined(__wasi__) + # include "jit/shared/AtomicOperations-feeling-lucky.h" + #else + # error "No AtomicOperations support provided for this platform" + #endif + + #endif // jit_AtomicOperations_h +diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp +index bca1427f93..eb499b34cf 100644 +--- a/js/src/jit/BaselineBailouts.cpp ++++ b/js/src/jit/BaselineBailouts.cpp +@@ -481,16 +481,21 @@ class MOZ_STACK_CLASS BaselineStackBuilder { + // let X = STACK_START_ADDR + 
JitFrameLayout::Size() + PREV_FRAME_SIZE + // X + RectifierFrameLayout::Size() + // + ((RectifierFrameLayout*) X)->prevFrameLocalSize() + // - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr() + size_t extraOffset = + RectifierFrameLayout::Size() + priorFrame->prevFrameLocalSize() + + BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr(); + return virtualPointerAtStackOffset(priorOffset + extraOffset); ++#elif defined(JS_CODEGEN_PPC64) ++ (void)priorOffset; ++// XXX. The above code might work though ++#warning "TODO! BaselineStackBuilder::calculatePrevFramePtr()" ++ MOZ_CRASH(); + #elif defined(JS_CODEGEN_NONE) + (void)priorOffset; + MOZ_CRASH(); + #else + # error "Bad architecture!" + #endif + } + }; +diff --git a/js/src/jit/BaselineCodeGen.cpp b/js/src/jit/BaselineCodeGen.cpp +index 7089f5e300..d67236d2c5 100644 +--- a/js/src/jit/BaselineCodeGen.cpp ++++ b/js/src/jit/BaselineCodeGen.cpp +@@ -520,16 +520,19 @@ bool BaselineCodeGen::emitOutOfLinePostBarrierSlot() { + regs.take(BaselineFrameReg); + Register scratch = regs.takeAny(); + #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) + // On ARM, save the link register before calling. It contains the return + // address. The |masm.ret()| later will pop this into |pc| to return. + masm.push(lr); + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + masm.push(ra); ++#elif defined(JS_CODEGEN_PPC64) ++ masm.xs_mflr(ScratchRegister); ++ masm.push(ScratchRegister); + #endif + masm.pushValue(R0); + + using Fn = void (*)(JSRuntime * rt, js::gc::Cell * cell); + masm.setupUnalignedABICall(scratch); + masm.movePtr(ImmPtr(cx->runtime()), scratch); + masm.passABIArg(scratch); + masm.passABIArg(objReg); +diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp +index 9572394e76..dfe762e5c8 100644 +--- a/js/src/jit/BaselineIC.cpp ++++ b/js/src/jit/BaselineIC.cpp +@@ -127,17 +127,18 @@ class MOZ_RAII FallbackICCodeCompiler final { + }; + + AllocatableGeneralRegisterSet BaselineICAvailableGeneralRegs(size_t numInputs) { + AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All()); + #if defined(JS_CODEGEN_ARM) + MOZ_ASSERT(!regs.has(BaselineStackReg)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); + regs.take(BaselineSecondScratchReg); +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + MOZ_ASSERT(!regs.has(BaselineStackReg)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); + MOZ_ASSERT(!regs.has(BaselineSecondScratchReg)); + #elif defined(JS_CODEGEN_ARM64) + MOZ_ASSERT(!regs.has(PseudoStackPointer)); + MOZ_ASSERT(!regs.has(RealStackPointer)); + MOZ_ASSERT(!regs.has(ICTailCallReg)); + #else +diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h +index 5321978fc2..b2d9a8f5a5 100644 +--- a/js/src/jit/CodeGenerator.h ++++ b/js/src/jit/CodeGenerator.h +@@ -20,16 +20,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/CodeGenerator-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/CodeGenerator-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/CodeGenerator-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/CodeGenerator-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/CodeGenerator-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/CodeGenerator-none.h" + #else + # error "Unknown architecture!" 
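The link-register handling in the irregexp and BaselineCodeGen.cpp hunks above follows one idiom: PowerPC's LR is not a general-purpose register, so unlike ARM's push(lr) or MIPS's push(ra) it must first be copied into a GPR before it can be spilled to the stack. A minimal sketch of that idiom, assuming the xs_mflr/xs_mtlr helpers provided elsewhere in the ppc64 backend (illustration only, not part of the patch):

    // Spill the return address around a call when this code was entered via blr.
    masm.xs_mflr(ScratchRegister);   // LR -> GPR
    masm.push(ScratchRegister);      // now it can live on the stack
    // ... make the call ...
    masm.pop(ScratchRegister);
    masm.xs_mtlr(ScratchRegister);   // GPR -> LR, ready for the eventual blr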
+ #endif + + #include "wasm/WasmGC.h" + +diff --git a/js/src/jit/FlushICache.h b/js/src/jit/FlushICache.h +index fe66080df5..2071563c1e 100644 +--- a/js/src/jit/FlushICache.h ++++ b/js/src/jit/FlushICache.h +@@ -19,17 +19,18 @@ namespace jit { + #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + + inline void FlushICache(void* code, size_t size, + bool codeIsThreadLocal = true) { + // No-op. Code and data caches are coherent on x86 and x64. + } + + #elif (defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)) || \ +- (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) ++ (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) || \ ++ defined(JS_CODEGEN_PPC64) + + extern void FlushICache(void* code, size_t size, bool codeIsThreadLocal = true); + + #elif defined(JS_CODEGEN_NONE) + + inline void FlushICache(void* code, size_t size, + bool codeIsThreadLocal = true) { + MOZ_CRASH(); +diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp +index 77cfe6a9cd..507f1551e6 100644 +--- a/js/src/jit/JitFrames.cpp ++++ b/js/src/jit/JitFrames.cpp +@@ -2220,16 +2220,24 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs, + machine.setRegisterLocation( + FloatRegister(FloatRegisters::Encoding(i), FloatRegisters::Single), + &fpregs[i]); + machine.setRegisterLocation( + FloatRegister(FloatRegisters::Encoding(i), FloatRegisters::Double), + &fpregs[i]); + // No SIMD support in bailouts, SIMD is internal to wasm + } ++#elif defined(JS_CODEGEN_PPC64) ++ for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) { ++ machine.setRegisterLocation(FloatRegister(i), &fpregs[i]); ++# ifdef ENABLE_WASM_SIMD ++ // Needs additional handling if VMX or non-FPR VSX regs are in play. ++# error "SIMD for PPC NYI" ++# endif ++ } + + #elif defined(JS_CODEGEN_NONE) + MOZ_CRASH(); + #else + # error "Unknown architecture!" + #endif + return machine; + } +diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h +index 40c661d146..7b4ea3157d 100644 +--- a/js/src/jit/JitFrames.h ++++ b/js/src/jit/JitFrames.h +@@ -152,16 +152,26 @@ struct ResumeFromException { + static const uint32_t RESUME_ENTRY_FRAME = 0; + static const uint32_t RESUME_CATCH = 1; + static const uint32_t RESUME_FINALLY = 2; + static const uint32_t RESUME_FORCED_RETURN = 3; + static const uint32_t RESUME_BAILOUT = 4; + static const uint32_t RESUME_WASM = 5; + static const uint32_t RESUME_WASM_CATCH = 6; + ++#if defined(JS_CODEGEN_PPC64) ++ // This gets built on the stack as part of exception returns. Because ++ // it goes right on top of the stack, an ABI-compliant routine can wreck ++ // it, so we implement a minimum Power ISA linkage area (four doublewords). ++ void *_ppc_sp_; ++ void *_ppc_cr_; ++ void *_ppc_lr_; ++ void *_ppc_toc_; ++#endif ++ + uint8_t* framePointer; + uint8_t* stackPointer; + uint8_t* target; + uint32_t kind; + + // Value to push when resuming into a |finally| block. + // Also used by Wasm to send the exception object to the throw stub. + JS::Value exception; +diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp +index de13777fc3..795e41bf21 100644 +--- a/js/src/jit/JitOptions.cpp ++++ b/js/src/jit/JitOptions.cpp +@@ -132,17 +132,22 @@ DefaultJitOptions::DefaultJitOptions() { + // Warp compile Generator functions + SET_DEFAULT(warpGenerator, true); + + // Whether the IonMonkey and Baseline JITs are enabled for Trusted Principals. + // (Ignored if ion or baselineJit is set to true.) + SET_DEFAULT(jitForTrustedPrincipals, false); + + // Whether the RegExp JIT is enabled. 
++#if defined(JS_CODEGEN_PPC64) ++ // This may generate ISA 3 instructions. The other JIT tiers gate on it too. ++ SET_DEFAULT(nativeRegExp, MacroAssembler::SupportsFloatingPoint()); ++#else + SET_DEFAULT(nativeRegExp, true); ++#endif + + // Whether Warp should use ICs instead of transpiling Baseline CacheIR. + SET_DEFAULT(forceInlineCaches, false); + + // Whether all ICs should be initialized as megamorphic ICs. + SET_DEFAULT(forceMegamorphicICs, false); + + // Toggles whether large scripts are rejected. +diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h +index 024bd798ca..0cd43c12ab 100644 +--- a/js/src/jit/LIR.h ++++ b/js/src/jit/LIR.h +@@ -1939,16 +1939,18 @@ AnyRegister LAllocation::toRegister() const { + # include "jit/arm64/LIR-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + # if defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/LIR-mips32.h" + # elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/LIR-mips64.h" + # endif + # include "jit/mips-shared/LIR-mips-shared.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/LIR-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/LIR-none.h" + #else + # error "Unknown architecture!" + #endif + + #undef LIR_HEADER + +diff --git a/js/src/jit/Label.h b/js/src/jit/Label.h +index a8f93de378..480b18b251 100644 +--- a/js/src/jit/Label.h ++++ b/js/src/jit/Label.h +@@ -21,17 +21,18 @@ struct LabelBase { + uint32_t bound_ : 1; + + // offset_ < INVALID_OFFSET means that the label is either bound or has + // incoming uses and needs to be bound. + uint32_t offset_ : 31; + + void operator=(const LabelBase& label) = delete; + +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + public: + #endif + static const uint32_t INVALID_OFFSET = 0x7fffffff; // UINT31_MAX. + + public: + LabelBase() : bound_(false), offset_(INVALID_OFFSET) {} + + // If the label is bound, all incoming edges have been patched and any +diff --git a/js/src/jit/Lowering.h b/js/src/jit/Lowering.h +index 979687da85..c064e5d914 100644 +--- a/js/src/jit/Lowering.h ++++ b/js/src/jit/Lowering.h +@@ -18,16 +18,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/Lowering-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/Lowering-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/Lowering-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/Lowering-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/Lowering-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/Lowering-none.h" + #else + # error "Unknown architecture!" + #endif + + namespace js { + namespace jit { +diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h +index cf16cdf0a7..fa39c5f4d2 100644 +--- a/js/src/jit/MacroAssembler-inl.h ++++ b/js/src/jit/MacroAssembler-inl.h +@@ -30,16 +30,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/MacroAssembler-arm-inl.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/MacroAssembler-arm64-inl.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/MacroAssembler-mips32-inl.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/MacroAssembler-mips64-inl.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/MacroAssembler-ppc64-inl.h" + #elif !defined(JS_CODEGEN_NONE) + # error "Unknown architecture!" 
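A note on the nativeRegExp gate near the top of the JitOptions.cpp hunk above, restating its effect in one place (illustration only, not part of the patch):

    // jitOptions.nativeRegExp == MacroAssembler::SupportsFloatingPoint(), so on
    // hardware where the ppc64 backend reports no support (e.g. a pre-ISA-3.0
    // CPU), irregexp falls back to the interpreter, matching how the other JIT
    // tiers are gated on the same check.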
+ #endif + + #include "wasm/WasmBuiltins.h" + + namespace js { + namespace jit { +diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp +index 2a3aeec607..cbe9d14f46 100644 +--- a/js/src/jit/MacroAssembler.cpp ++++ b/js/src/jit/MacroAssembler.cpp +@@ -4044,16 +4044,18 @@ void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type, + #elif JS_CODEGEN_ARM + ma_lsl(temp3, temp1, temp1); + #elif JS_CODEGEN_ARM64 + Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64)); + #elif JS_CODEGEN_MIPS32 + ma_sll(temp1, temp1, temp3); + #elif JS_CODEGEN_MIPS64 + ma_dsll(temp1, temp1, temp3); ++#elif JS_CODEGEN_PPC64 ++ as_sld(temp1, temp1, temp3); + #elif JS_CODEGEN_NONE + MOZ_CRASH(); + #else + # error "Unknown architecture" + #endif + + // No barrier is needed if the bit is set, |word & mask != 0|. + branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier); +diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h +index e2d53d5cef..cb0148b94e 100644 +--- a/js/src/jit/MacroAssembler.h ++++ b/js/src/jit/MacroAssembler.h +@@ -20,16 +20,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/MacroAssembler-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/MacroAssembler-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/MacroAssembler-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/MacroAssembler-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/MacroAssembler-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/MacroAssembler-none.h" + #else + # error "Unknown architecture!" + #endif + #include "jit/ABIFunctions.h" + #include "jit/AtomicOp.h" + #include "jit/AutoJitContextAlloc.h" +@@ -87,18 +89,18 @@ + // //{{{ check_macroassembler_style + // inline uint32_t + // MacroAssembler::framePushed() const + // { + // return framePushed_; + // } + // ////}}} check_macroassembler_style + +-#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64 +-#define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared ++#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64, ppc64 ++#define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared, ppc64 + + // * How this macro works: + // + // DEFINED_ON is a macro which check if, for the current architecture, the + // method is defined on the macro assembler or not. + // + // For each architecture, we have a macro named DEFINED_ON_arch. This macro is + // empty if this is not the current architecture. Otherwise it must be either +@@ -134,16 +136,17 @@ + #define DEFINED_ON_x86 + #define DEFINED_ON_x64 + #define DEFINED_ON_x86_shared + #define DEFINED_ON_arm + #define DEFINED_ON_arm64 + #define DEFINED_ON_mips32 + #define DEFINED_ON_mips64 + #define DEFINED_ON_mips_shared ++#define DEFINED_ON_ppc64 + #define DEFINED_ON_none + + // Specialize for each architecture. 
+ #if defined(JS_CODEGEN_X86) + # undef DEFINED_ON_x86 + # define DEFINED_ON_x86 define + # undef DEFINED_ON_x86_shared + # define DEFINED_ON_x86_shared define +@@ -163,16 +166,19 @@ + # define DEFINED_ON_mips32 define + # undef DEFINED_ON_mips_shared + # define DEFINED_ON_mips_shared define + #elif defined(JS_CODEGEN_MIPS64) + # undef DEFINED_ON_mips64 + # define DEFINED_ON_mips64 define + # undef DEFINED_ON_mips_shared + # define DEFINED_ON_mips_shared define ++#elif defined(JS_CODEGEN_PPC64) ++# undef DEFINED_ON_ppc64 ++# define DEFINED_ON_ppc64 define + #elif defined(JS_CODEGEN_NONE) + # undef DEFINED_ON_none + # define DEFINED_ON_none crash + #else + # error "Unknown architecture!" + #endif + + #define DEFINED_ON_RESULT_crash \ +@@ -479,36 +485,36 @@ class MacroAssembler : public MacroAssemblerSpecific { + // targets roll their own save-code instead. + // + // Nevertheless, because some targets *do* call PushRegsInMask from + // JitRuntime::generateInvalidator, you should check carefully all of the + // ::generateInvalidator methods if you change the PushRegsInMask format. + + // The size of the area used by PushRegsInMask. + size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + void PushRegsInMask(LiveRegisterSet set) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + void PushRegsInMask(LiveGeneralRegisterSet set); + + // Like PushRegsInMask, but instead of pushing the registers, store them to + // |dest|. |dest| should point to the end of the reserved space, so the + // first register will be stored at |dest.offset - sizeof(register)|. It is + // required that |dest.offset| is at least as large as the value computed by + // PushRegsInMaskSizeInBytes for this |set|. In other words, |dest.base| + // must point to either the lowest address in the save area, or some address + // below that. + void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + void PopRegsInMask(LiveRegisterSet set); + void PopRegsInMask(LiveGeneralRegisterSet set); + void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + // =============================================================== + // Stack manipulation functions -- single registers/values. + + void Push(const Operand op) DEFINED_ON(x86_shared); + void Push(Register reg) PER_SHARED_ARCH; + void Push(Register reg1, Register reg2, Register reg3, Register reg4) + DEFINED_ON(arm64); +@@ -531,17 +537,17 @@ class MacroAssembler : public MacroAssemblerSpecific { + inline CodeOffset PushWithPatch(ImmWord word); + inline CodeOffset PushWithPatch(ImmPtr imm); + + void Pop(const Operand op) DEFINED_ON(x86_shared); + void Pop(Register reg) PER_SHARED_ARCH; + void Pop(FloatRegister t) PER_SHARED_ARCH; + void Pop(const ValueOperand& val) PER_SHARED_ARCH; + void PopFlags() DEFINED_ON(x86_shared); +- void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared); ++ void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared, ppc64); + void popRooted(VMFunctionData::RootType rootType, Register cellReg, + const ValueOperand& valueReg); + + // Move the stack pointer based on the requested amount. 
+ void adjustStack(int amount); + void freeStack(uint32_t amount); + + // Warning: This method does not update the framePushed() counter. +@@ -589,18 +595,18 @@ class MacroAssembler : public MacroAssemblerSpecific { + + // Push the return address and make a call. On platforms where this function + // is not defined, push the link register (pushReturnAddress) at the entry + // point of the callee. + void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared); + void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared); + + // These do not adjust framePushed(). +- void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64); +- void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64); ++ void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64, ppc64); ++ void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64, ppc64); + + // Useful for dealing with two-valued returns. + void moveRegPair(Register src0, Register src1, Register dst0, Register dst1, + MoveOp::Type type = MoveOp::GENERAL); + + public: + // =============================================================== + // Patchable near/far jumps. +@@ -621,20 +627,20 @@ class MacroAssembler : public MacroAssemblerSpecific { + + // These methods are like movWithPatch/PatchDataWithValueCheck but allow + // using pc-relative addressing on certain platforms (RIP-relative LEA on x64, + // ADR instruction on arm64). + // + // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is + // release-asserted). + CodeOffset moveNearAddressWithPatch(Register dest) +- DEFINED_ON(x86, x64, arm, arm64, mips_shared); ++ DEFINED_ON(x86, x64, arm, arm64, mips_shared, ppc64); + static void patchNearAddressMove(CodeLocationLabel loc, + CodeLocationLabel target) +- DEFINED_ON(x86, x64, arm, arm64, mips_shared); ++ DEFINED_ON(x86, x64, arm, arm64, mips_shared, ppc64); + + public: + // =============================================================== + // [SMDOC] JIT-to-C++ Function Calls (callWithABI) + // + // callWithABI is used to make a call using the standard C/C++ system ABI. + // + // callWithABI is a low level interface for making calls, as such every call +@@ -983,20 +989,21 @@ class MacroAssembler : public MacroAssemblerSpecific { + inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH; + inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH; + inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH; + + inline void xorPtr(Register src, Register dest) PER_ARCH; + inline void xorPtr(Imm32 imm, Register dest) PER_ARCH; + + inline void and64(const Operand& src, Register64 dest) +- DEFINED_ON(x64, mips64); +- inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64); ++ DEFINED_ON(x64, mips64, ppc64); ++ inline void or64(const Operand& src, Register64 dest) ++ DEFINED_ON(x64, mips64, ppc64); + inline void xor64(const Operand& src, Register64 dest) +- DEFINED_ON(x64, mips64); ++ DEFINED_ON(x64, mips64, ppc64); + + // =============================================================== + // Swap instructions + + // Swap the two lower bytes and sign extend the result to 32-bit. + inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH; + + // Swap the two lower bytes and zero extend the result to 32-bit. 
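For orientation, here is roughly what adding ppc64 to one of these DEFINED_ON lists does, assuming the DEFINED_ON_ppc64 / DEFINED_ON_RESULT_* machinery introduced earlier in this header behaves like the existing per-architecture macros (illustration only, not part of the patch):

    // On a JS_CODEGEN_PPC64 build, DEFINED_ON_ppc64 expands to `define`, so
    //   void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared, ppc64);
    // becomes a plain declaration,
    //   void PopStackPtr();
    // whose body must be supplied by the jit/ppc64 backend. On an architecture
    // that is not in the list the method is instead declared deleted (roughly
    // `void PopStackPtr() = delete;`), so a missing ppc64 entry surfaces as a
    // compile error at the call site rather than a link error.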
+@@ -1020,27 +1027,27 @@ class MacroAssembler : public MacroAssemblerSpecific { + inline void addPtr(Register src, Register dest) PER_ARCH; + inline void addPtr(Register src1, Register src2, Register dest) + DEFINED_ON(arm64); + inline void addPtr(Imm32 imm, Register dest) PER_ARCH; + inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64); + inline void addPtr(ImmWord imm, Register dest) PER_ARCH; + inline void addPtr(ImmPtr imm, Register dest); + inline void addPtr(Imm32 imm, const Address& dest) +- DEFINED_ON(mips_shared, arm, arm64, x86, x64); ++ DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64); + inline void addPtr(Imm32 imm, const AbsoluteAddress& dest) + DEFINED_ON(x86, x64); + inline void addPtr(const Address& src, Register dest) +- DEFINED_ON(mips_shared, arm, arm64, x86, x64); ++ DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64); + + inline void add64(Register64 src, Register64 dest) PER_ARCH; + inline void add64(Imm32 imm, Register64 dest) PER_ARCH; + inline void add64(Imm64 imm, Register64 dest) PER_ARCH; + inline void add64(const Operand& src, Register64 dest) +- DEFINED_ON(x64, mips64); ++ DEFINED_ON(x64, mips64, ppc64); + + inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + + // Compute dest=SP-imm where dest is a pointer registers and not SP. The + // offset returned from sub32FromStackPtrWithPatch() must be passed to + // patchSub32FromStackPtr(). + inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH; + inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH; +@@ -1049,58 +1056,58 @@ class MacroAssembler : public MacroAssemblerSpecific { + inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86); + + inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH; + inline void sub32(Register src, Register dest) PER_SHARED_ARCH; + inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH; + + inline void subPtr(Register src, Register dest) PER_ARCH; + inline void subPtr(Register src, const Address& dest) +- DEFINED_ON(mips_shared, arm, arm64, x86, x64); ++ DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64); + inline void subPtr(Imm32 imm, Register dest) PER_ARCH; + inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64); + inline void subPtr(const Address& addr, Register dest) +- DEFINED_ON(mips_shared, arm, arm64, x86, x64); ++ DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64); + + inline void sub64(Register64 src, Register64 dest) PER_ARCH; + inline void sub64(Imm64 imm, Register64 dest) PER_ARCH; + inline void sub64(const Operand& src, Register64 dest) +- DEFINED_ON(x64, mips64); ++ DEFINED_ON(x64, mips64, ppc64); + + inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + + inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + + inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH; + + inline void mul32(Register src1, Register src2, Register dest, Label* onOver) + DEFINED_ON(arm64); + + inline void mulPtr(Register rhs, Register srcDest) PER_ARCH; + + inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64); + inline void mul64(const Operand& src, const Register64& dest, +- const Register temp) DEFINED_ON(x64, mips64); ++ const Register temp) DEFINED_ON(x64, mips64, ppc64); + inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH; + inline void mul64(Imm64 imm, const Register64& dest, const Register temp) +- DEFINED_ON(x86, x64, arm, mips32, mips64); 
++ DEFINED_ON(x86, x64, arm, mips32, mips64, ppc64); + inline void mul64(const Register64& src, const Register64& dest, + const Register temp) PER_ARCH; + inline void mul64(const Register64& src1, const Register64& src2, + const Register64& dest) DEFINED_ON(arm64); + inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest) + DEFINED_ON(arm64); + + inline void mulBy3(Register src, Register dest) PER_ARCH; + + inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + + inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest) +- DEFINED_ON(mips_shared, arm, arm64, x86, x64); ++ DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64); + + // Perform an integer division, returning the integer part rounded toward + // zero. rhs must not be zero, and the division must not overflow. + // + // On x86_shared, srcDest must be eax and edx will be clobbered. + // On ARM, the chip must have hardware division instructions. + inline void quotient32(Register rhs, Register srcDest, + bool isUnsigned) PER_SHARED_ARCH; +@@ -1117,41 +1124,41 @@ class MacroAssembler : public MacroAssemblerSpecific { + // zero. rhs must not be zero, and the division must not overflow. + // + // This variant preserves registers, and doesn't require hardware division + // instructions on ARM (will call out to a runtime routine). + // + // rhs is preserved, srdDest is clobbered. + void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned, + const LiveRegisterSet& volatileLiveRegs) +- DEFINED_ON(mips_shared, arm, arm64, x86_shared); ++ DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64); + + // Perform an integer division, returning the integer part rounded toward + // zero. rhs must not be zero, and the division must not overflow. + // + // This variant preserves registers, and doesn't require hardware division + // instructions on ARM (will call out to a runtime routine). + // + // rhs is preserved, srdDest is clobbered. + void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned, + const LiveRegisterSet& volatileLiveRegs) +- DEFINED_ON(mips_shared, arm, arm64, x86_shared); ++ DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64); + + // Perform an integer division, returning the integer part rounded toward + // zero. rhs must not be zero, and the division must not overflow. The + // remainder is stored into the third argument register here. + // + // This variant preserves registers, and doesn't require hardware division + // instructions on ARM (will call out to a runtime routine). + // + // rhs is preserved, srdDest and remOutput are clobbered. + void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput, + bool isUnsigned, + const LiveRegisterSet& volatileLiveRegs) +- DEFINED_ON(mips_shared, arm, arm64, x86_shared); ++ DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64); + + inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH; + + inline void inc64(AbsoluteAddress dest) PER_ARCH; + + inline void neg32(Register reg) PER_SHARED_ARCH; + inline void neg64(Register64 reg) PER_ARCH; +@@ -1342,17 +1349,17 @@ class MacroAssembler : public MacroAssemblerSpecific { + // temp may be invalid only if the chip has the POPCNT instruction. 
+ inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH; + + // =============================================================== + // Condition functions + + template + inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest) +- DEFINED_ON(x86_shared, arm, arm64, mips32, mips64); ++ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64); + + template + inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH; + + // =============================================================== + // Branch functions + + template +@@ -1367,34 +1374,34 @@ class MacroAssembler : public MacroAssemblerSpecific { + + inline void branch32(Condition cond, const Address& lhs, Register rhs, + Label* label) PER_SHARED_ARCH; + inline void branch32(Condition cond, const Address& lhs, Imm32 rhs, + Label* label) PER_SHARED_ARCH; + + inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs, + Label* label) DEFINED_ON(arm, x86_shared); + inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs, + Label* label) PER_SHARED_ARCH; + + inline void branch32(Condition cond, const Operand& lhs, Register rhs, + Label* label) DEFINED_ON(x86_shared); + inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs, + Label* label) DEFINED_ON(x86_shared); + + inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // The supported condition are Equal, NotEqual, LessThan(orEqual), + // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When a fail label + // is not defined it will fall through to next instruction, else jump to the + // fail label. + inline void branch64(Condition cond, Register64 lhs, Imm64 val, + Label* success, Label* fail = nullptr) PER_ARCH; + inline void branch64(Condition cond, Register64 lhs, Register64 rhs, +@@ -1433,32 +1440,32 @@ class MacroAssembler : public MacroAssemblerSpecific { + + inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs, + Label* label) PER_SHARED_ARCH; + inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs, + Label* label) PER_SHARED_ARCH; + + inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, + Register rhs, Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its + // chunk header, or nullptr if it is in the tenured heap. 
+ void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH; + + void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp, + Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + void branchPtrInNurseryChunk(Condition cond, const Address& address, + Register temp, Label* label) DEFINED_ON(x86); + void branchValueIsNurseryCell(Condition cond, const Address& address, + Register temp, Label* label) PER_ARCH; + void branchValueIsNurseryCell(Condition cond, ValueOperand value, + Register temp, Label* label) PER_ARCH; + + // This function compares a Value (lhs) which is having a private pointer +@@ -1470,36 +1477,36 @@ class MacroAssembler : public MacroAssemblerSpecific { + FloatRegister rhs, Label* label) PER_SHARED_ARCH; + + // Truncate a double/float32 to int32 and when it doesn't fit an int32 it will + // jump to the failure label. This particular variant is allowed to return the + // value module 2**32, which isn't implemented on all architectures. E.g. the + // x64 variants will do this only in the int64_t range. + inline void branchTruncateFloat32MaybeModUint32(FloatRegister src, + Register dest, Label* fail) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void branchTruncateDoubleMaybeModUint32(FloatRegister src, + Register dest, Label* fail) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // Truncate a double/float32 to intptr and when it doesn't fit jump to the + // failure label. + inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest, + Label* fail) DEFINED_ON(x86, x64); + inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest, + Label* fail) DEFINED_ON(x86, x64); + + // Truncate a double/float32 to int32 and when it doesn't fit jump to the + // failure label. 
+ inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest, + Label* fail) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest, + Label* fail) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void branchDouble(DoubleCondition cond, FloatRegister lhs, + FloatRegister rhs, Label* label) PER_SHARED_ARCH; + + inline void branchDoubleNotInInt64Range(Address src, Register temp, + Label* fail); + inline void branchDoubleNotInUInt64Range(Address src, Register temp, + Label* fail); +@@ -1543,17 +1550,17 @@ class MacroAssembler : public MacroAssemblerSpecific { + L label) PER_SHARED_ARCH; + template + inline void branchTest32(Condition cond, Register lhs, Imm32 rhs, + L label) PER_SHARED_ARCH; + inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhh, + Label* label) PER_SHARED_ARCH; + inline void branchTest32(Condition cond, const AbsoluteAddress& lhs, + Imm32 rhs, Label* label) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + template + inline void branchTestPtr(Condition cond, Register lhs, Register rhs, + L label) PER_SHARED_ARCH; + inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs, + Label* label) PER_SHARED_ARCH; + inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs, + Label* label) PER_SHARED_ARCH; +@@ -1689,17 +1696,17 @@ class MacroAssembler : public MacroAssemblerSpecific { + + // Perform a type-test on a tag of a Value (32bits boxing), or the tagged + // value (64bits boxing). + inline void branchTestUndefined(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; + inline void branchTestInt32(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; + inline void branchTestDouble(Condition cond, Register tag, Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + inline void branchTestNumber(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; + inline void branchTestBoolean(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; + inline void branchTestString(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; + inline void branchTestSymbol(Condition cond, Register tag, + Label* label) PER_SHARED_ARCH; +@@ -1721,106 +1728,106 @@ class MacroAssembler : public MacroAssemblerSpecific { + // BaseIndex and ValueOperand variants clobber the ScratchReg on x64. + // All Variants clobber the ScratchReg on arm64. 
+ inline void branchTestUndefined(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestUndefined(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestUndefined(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestInt32(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestInt32(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestInt32(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestDouble(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestDouble(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestDouble(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestNumber(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestBoolean(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestBoolean(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestBoolean(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestString(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestString(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestString(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestSymbol(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestSymbol(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestSymbol(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestBigInt(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestBigInt(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestBigInt(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestNull(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestNull(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestNull(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, 
ppc64); + + // Clobbers the ScratchReg on x64. + inline void branchTestObject(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestObject(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestObject(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestGCThing(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestGCThing(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestGCThing(Condition cond, const ValueOperand& value, + Label* label) PER_SHARED_ARCH; + + inline void branchTestPrimitive(Condition cond, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestMagic(Condition cond, const Address& address, + Label* label) PER_SHARED_ARCH; + inline void branchTestMagic(Condition cond, const BaseIndex& address, + Label* label) PER_SHARED_ARCH; + template + inline void branchTestMagic(Condition cond, const ValueOperand& value, + L label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + inline void branchTestMagic(Condition cond, const Address& valaddr, + JSWhyMagic why, Label* label) PER_ARCH; + + inline void branchTestMagicValue(Condition cond, const ValueOperand& val, + JSWhyMagic why, Label* label); + + void branchTestValue(Condition cond, const ValueOperand& lhs, +@@ -1828,42 +1835,42 @@ class MacroAssembler : public MacroAssemblerSpecific { + + inline void branchTestValue(Condition cond, const BaseIndex& lhs, + const ValueOperand& rhs, Label* label) PER_ARCH; + + // Checks if given Value is evaluated to true or false in a condition. + // The type of the value should match the type of the method. + inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg, + Label* label) PER_SHARED_ARCH; + inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value, + Label* label) PER_ARCH; + inline void branchTestStringTruthy(bool truthy, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value, + Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + // Create an unconditional branch to the address given as argument. 
+ inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH; + + private: + template + inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label) + DEFINED_ON(x86_shared); + + void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label) + DEFINED_ON(x86); + template + void branchValueIsNurseryCellImpl(Condition cond, const T& value, + Register temp, Label* label) +- DEFINED_ON(arm64, x64, mips64); ++ DEFINED_ON(arm64, x64, mips64, ppc64); + + template + inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label) + DEFINED_ON(arm, arm64, x86_shared); + template + inline void branchTestInt32Impl(Condition cond, const T& t, Label* label) + DEFINED_ON(arm, arm64, x86_shared); + template +@@ -1923,116 +1930,116 @@ class MacroAssembler : public MacroAssemblerSpecific { + inline void fallibleUnboxString(const T& src, Register dest, Label* fail); + template + inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail); + template + inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail); + + inline void cmp32Move32(Condition cond, Register lhs, Register rhs, + Register src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86_shared); ++ DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64); + + inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs, + Register src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86_shared); ++ DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64); + + inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs, + Register src, Register dest) PER_ARCH; + + inline void cmpPtrMovePtr(Condition cond, Register lhs, const Address& rhs, + Register src, Register dest) PER_ARCH; + + inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs, + const Address& src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86_shared); ++ DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64); + + inline void cmp32Load32(Condition cond, Register lhs, Register rhs, + const Address& src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86_shared); ++ DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64); + + inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs, + const Address& src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs, + Register src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask, + const Address& src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask, + Register src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // Conditional move for Spectre mitigations. + inline void spectreMovePtr(Condition cond, Register src, Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // Zeroes dest if the condition is true. 
+ inline void spectreZeroRegister(Condition cond, Register scratch, + Register dest) +- DEFINED_ON(arm, arm64, mips_shared, x86_shared); ++ DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64); + + // Performs a bounds check and zeroes the index register if out-of-bounds + // (to mitigate Spectre). + private: + inline void spectreBoundsCheck32(Register index, const Operand& length, + Register maybeScratch, Label* failure) + DEFINED_ON(x86); + + public: + inline void spectreBoundsCheck32(Register index, Register length, + Register maybeScratch, Label* failure) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void spectreBoundsCheck32(Register index, const Address& length, + Register maybeScratch, Label* failure) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + inline void spectreBoundsCheckPtr(Register index, Register length, + Register maybeScratch, Label* failure) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + inline void spectreBoundsCheckPtr(Register index, const Address& length, + Register maybeScratch, Label* failure) +- DEFINED_ON(arm, arm64, mips_shared, x86, x64); ++ DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64); + + // ======================================================================== + // Canonicalization primitives. + inline void canonicalizeDouble(FloatRegister reg); + inline void canonicalizeDoubleIfDeterministic(FloatRegister reg); + + inline void canonicalizeFloat(FloatRegister reg); + inline void canonicalizeFloatIfDeterministic(FloatRegister reg); + + public: + // ======================================================================== + // Memory access primitives. + inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest) +- DEFINED_ON(x86_shared, arm, arm64, mips32, mips64); ++ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64); + inline void storeUncanonicalizedDouble(FloatRegister src, + const BaseIndex& dest) +- DEFINED_ON(x86_shared, arm, arm64, mips32, mips64); ++ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64); + inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest) + DEFINED_ON(x86_shared); + + template + inline void storeDouble(FloatRegister src, const T& dest); + + template + inline void boxDouble(FloatRegister src, const T& dest); + + using MacroAssemblerSpecific::boxDouble; + + inline void storeUncanonicalizedFloat32(FloatRegister src, + const Address& dest) +- DEFINED_ON(x86_shared, arm, arm64, mips32, mips64); ++ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64); + inline void storeUncanonicalizedFloat32(FloatRegister src, + const BaseIndex& dest) +- DEFINED_ON(x86_shared, arm, arm64, mips32, mips64); ++ DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64); + inline void storeUncanonicalizedFloat32(FloatRegister src, + const Operand& dest) + DEFINED_ON(x86_shared); + + template + inline void storeFloat32(FloatRegister src, const T& dest); + + template +@@ -3470,20 +3477,20 @@ class MacroAssembler : public MacroAssemblerSpecific { + DEFINED_ON(x86, x64); + + public: + // ======================================================================== + // Convert floating point. + + // temp required on x86 and x64; must be undefined on mips64. 
+ void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp) +- DEFINED_ON(arm64, mips64, x64, x86); ++ DEFINED_ON(arm64, mips64, x64, x86, ppc64); + + void convertInt64ToFloat32(Register64 src, FloatRegister dest) +- DEFINED_ON(arm64, mips64, x64, x86); ++ DEFINED_ON(arm64, mips64, x64, x86, ppc64); + + bool convertUInt64ToDoubleNeedsTemp() PER_ARCH; + + // temp required when convertUInt64ToDoubleNeedsTemp() returns true. + void convertUInt64ToDouble(Register64 src, FloatRegister dest, + Register temp) PER_ARCH; + + void convertInt64ToDouble(Register64 src, FloatRegister dest) PER_ARCH; +@@ -3514,29 +3521,29 @@ class MacroAssembler : public MacroAssemblerSpecific { + // + // On 32-bit systems for both wasm and asm.js, and on 64-bit systems for + // asm.js, heap lengths are limited to 2GB. On 64-bit systems for wasm, + // 32-bit heap lengths are limited to 4GB, and 64-bit heap lengths will be + // limited to something much larger. + + void wasmBoundsCheck32(Condition cond, Register index, + Register boundsCheckLimit, Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + void wasmBoundsCheck32(Condition cond, Register index, + Address boundsCheckLimit, Label* label) +- DEFINED_ON(arm, arm64, mips32, mips64, x86_shared); ++ DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64); + + void wasmBoundsCheck64(Condition cond, Register64 index, + Register64 boundsCheckLimit, Label* label) +- DEFINED_ON(arm64, mips64, x64); ++ DEFINED_ON(arm64, mips64, x64, ppc64); + + void wasmBoundsCheck64(Condition cond, Register64 index, + Address boundsCheckLimit, Label* label) +- DEFINED_ON(arm64, mips64, x64); ++ DEFINED_ON(arm64, mips64, x64, ppc64); + + // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds. + void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, + AnyRegister out) DEFINED_ON(x86, x64); + void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, + Register64 out) DEFINED_ON(x86, x64); + void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, + Operand dstAddr) DEFINED_ON(x86, x64); +@@ -3546,26 +3553,26 @@ class MacroAssembler : public MacroAssemblerSpecific { + // For all the ARM/MIPS wasmLoad and wasmStore functions below, `ptr` + // MUST equal `ptrScratch`, and that register will be updated based on + // conditions listed below (where it is only mentioned as `ptr`). + + // `ptr` will be updated if access.offset() != 0 or access.type() == + // Scalar::Int64. + void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, + Register ptr, Register ptrScratch, AnyRegister output) +- DEFINED_ON(arm, mips_shared); ++ DEFINED_ON(arm, mips_shared, ppc64); + void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase, + Register ptr, Register ptrScratch, Register64 output) +- DEFINED_ON(arm, mips32, mips64); ++ DEFINED_ON(arm, mips32, mips64, ppc64); + void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, + Register memoryBase, Register ptr, Register ptrScratch) +- DEFINED_ON(arm, mips_shared); ++ DEFINED_ON(arm, mips_shared, ppc64); + void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, + Register memoryBase, Register ptr, Register ptrScratch) +- DEFINED_ON(arm, mips32, mips64); ++ DEFINED_ON(arm, mips32, mips64, ppc64); + + // These accept general memoryBase + ptr + offset (in `access`); the offset is + // always smaller than the guard region. 
They will insert an additional add + // if the offset is nonzero, and of course that add may require a temporary + // register for the offset if the offset is large, and instructions to set it + // up. + void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase, + Register ptr, AnyRegister output) DEFINED_ON(arm64); +@@ -3575,100 +3582,100 @@ class MacroAssembler : public MacroAssemblerSpecific { + Register memoryBase, Register ptr) DEFINED_ON(arm64); + void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, + Register memoryBase, Register ptr) DEFINED_ON(arm64); + + // `ptr` will always be updated. + void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access, + Register memoryBase, Register ptr, Register ptrScratch, + Register output, Register tmp) +- DEFINED_ON(mips32, mips64); ++ DEFINED_ON(mips32, mips64, ppc64); + + // MIPS: `ptr` will always be updated. + void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access, + Register memoryBase, Register ptr, + Register ptrScratch, FloatRegister output, + Register tmp1, Register tmp2, Register tmp3) +- DEFINED_ON(mips32, mips64); ++ DEFINED_ON(mips32, mips64, ppc64); + + // `ptr` will always be updated. + void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access, + Register memoryBase, Register ptr, + Register ptrScratch, Register64 output, +- Register tmp) DEFINED_ON(mips32, mips64); ++ Register tmp) DEFINED_ON(mips32, mips64, ppc64); + + // MIPS: `ptr` will always be updated. + void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value, + Register memoryBase, Register ptr, + Register ptrScratch, Register tmp) +- DEFINED_ON(mips32, mips64); ++ DEFINED_ON(mips32, mips64, ppc64); + + // `ptr` will always be updated. + void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access, + FloatRegister floatValue, Register memoryBase, + Register ptr, Register ptrScratch, Register tmp) +- DEFINED_ON(mips32, mips64); ++ DEFINED_ON(mips32, mips64, ppc64); + + // `ptr` will always be updated. + void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access, + Register64 value, Register memoryBase, + Register ptr, Register ptrScratch, Register tmp) +- DEFINED_ON(mips32, mips64); ++ DEFINED_ON(mips32, mips64, ppc64); + + // wasm specific methods, used in both the wasm baseline compiler and ion. + + // The truncate-to-int32 methods do not bind the rejoin label; clients must + // do so if oolWasmTruncateCheckF64ToI32() can jump to it. 
+ void wasmTruncateDoubleToUInt32(FloatRegister input, Register output, + bool isSaturating, Label* oolEntry) PER_ARCH; + void wasmTruncateDoubleToInt32(FloatRegister input, Register output, + bool isSaturating, + Label* oolEntry) PER_SHARED_ARCH; + void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output, + TruncFlags flags, wasm::BytecodeOffset off, + Label* rejoin) +- DEFINED_ON(arm, arm64, x86_shared, mips_shared); ++ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64); + + void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output, + bool isSaturating, Label* oolEntry) PER_ARCH; + void wasmTruncateFloat32ToInt32(FloatRegister input, Register output, + bool isSaturating, + Label* oolEntry) PER_SHARED_ARCH; + void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output, + TruncFlags flags, wasm::BytecodeOffset off, + Label* rejoin) +- DEFINED_ON(arm, arm64, x86_shared, mips_shared); ++ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64); + + // The truncate-to-int64 methods will always bind the `oolRejoin` label + // after the last emitted instruction. + void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output, + bool isSaturating, Label* oolEntry, + Label* oolRejoin, FloatRegister tempDouble) +- DEFINED_ON(arm64, x86, x64, mips64); ++ DEFINED_ON(arm64, x86, x64, mips64, ppc64); + void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output, + bool isSaturating, Label* oolEntry, + Label* oolRejoin, FloatRegister tempDouble) +- DEFINED_ON(arm64, x86, x64, mips64); ++ DEFINED_ON(arm64, x86, x64, mips64, ppc64); + void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output, + TruncFlags flags, wasm::BytecodeOffset off, + Label* rejoin) +- DEFINED_ON(arm, arm64, x86_shared, mips_shared); ++ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64); + + void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output, + bool isSaturating, Label* oolEntry, + Label* oolRejoin, FloatRegister tempDouble) +- DEFINED_ON(arm64, x86, x64, mips64); ++ DEFINED_ON(arm64, x86, x64, mips64, ppc64); + void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output, + bool isSaturating, Label* oolEntry, + Label* oolRejoin, FloatRegister tempDouble) +- DEFINED_ON(arm64, x86, x64, mips64); ++ DEFINED_ON(arm64, x86, x64, mips64, ppc64); + void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output, + TruncFlags flags, wasm::BytecodeOffset off, + Label* rejoin) +- DEFINED_ON(arm, arm64, x86_shared, mips_shared); ++ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64); + + // This function takes care of loading the callee's TLS and pinned regs but + // it is the caller's responsibility to save/restore TLS or pinned regs. + CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc, + const wasm::CalleeDesc& callee); + + // WasmTableCallIndexReg must contain the index of the indirect call. 
+ CodeOffset wasmCallIndirect(const wasm::CallSiteDesc& desc, +@@ -3735,72 +3742,72 @@ class MacroAssembler : public MacroAssemblerSpecific { + const BaseIndex& mem, Register expected, + Register replacement, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void compareExchange(Scalar::Type type, const Synchronization& sync, + const Address& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void compareExchange(Scalar::Type type, const Synchronization& sync, + const BaseIndex& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + // x86: `expected` and `output` must be edx:eax; `replacement` is ecx:ebx. + // x64: `output` must be rax. + // ARM: Registers must be distinct; `replacement` and `output` must be + // (even,odd) pairs. + + void compareExchange64(const Synchronization& sync, const Address& mem, + Register64 expected, Register64 replacement, + Register64 output) +- DEFINED_ON(arm, arm64, x64, x86, mips64); ++ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64); + + void compareExchange64(const Synchronization& sync, const BaseIndex& mem, + Register64 expected, Register64 replacement, + Register64 output) +- DEFINED_ON(arm, arm64, x64, x86, mips64); ++ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64); + + // Exchange with memory. Return the value initially in memory. + // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit + // and 16-bit wide operations. + + void atomicExchange(Scalar::Type type, const Synchronization& sync, + const Address& mem, Register value, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void atomicExchange(Scalar::Type type, const Synchronization& sync, + const BaseIndex& mem, Register value, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void atomicExchange(Scalar::Type type, const Synchronization& sync, + const Address& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void atomicExchange(Scalar::Type type, const Synchronization& sync, + const BaseIndex& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + // x86: `value` must be ecx:ebx; `output` must be edx:eax. + // ARM: `value` and `output` must be distinct and (even,odd) pairs. + // ARM64: `value` and `output` must be distinct. + + void atomicExchange64(const Synchronization& sync, const Address& mem, + Register64 value, Register64 output) +- DEFINED_ON(arm, arm64, x64, x86, mips64); ++ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64); + + void atomicExchange64(const Synchronization& sync, const BaseIndex& mem, + Register64 value, Register64 output) +- DEFINED_ON(arm, arm64, x64, x86, mips64); ++ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64); + + // Read-modify-write with memory. Return the value in memory before the + // operation. + // + // x86-shared: + // For 8-bit operations, `value` and `output` must have a byte subregister. + // For Add and Sub, `temp` must be invalid. 
+ // For And, Or, and Xor, `output` must be eax and `temp` must have a byte +@@ -3826,44 +3833,44 @@ class MacroAssembler : public MacroAssemblerSpecific { + + void atomicFetchOp(Scalar::Type type, const Synchronization& sync, + AtomicOp op, Imm32 value, const BaseIndex& mem, + Register temp, Register output) DEFINED_ON(x86_shared); + + void atomicFetchOp(Scalar::Type type, const Synchronization& sync, + AtomicOp op, Register value, const Address& mem, + Register valueTemp, Register offsetTemp, Register maskTemp, +- Register output) DEFINED_ON(mips_shared); ++ Register output) DEFINED_ON(mips_shared, ppc64); + + void atomicFetchOp(Scalar::Type type, const Synchronization& sync, + AtomicOp op, Register value, const BaseIndex& mem, + Register valueTemp, Register offsetTemp, Register maskTemp, +- Register output) DEFINED_ON(mips_shared); ++ Register output) DEFINED_ON(mips_shared, ppc64); + + // x86: + // `temp` must be ecx:ebx; `output` must be edx:eax. + // x64: + // For Add and Sub, `temp` is ignored. + // For And, Or, and Xor, `output` must be rax. + // ARM: + // `temp` and `output` must be (even,odd) pairs and distinct from `value`. + // ARM64: + // Registers `value`, `temp`, and `output` must all differ. + + void atomicFetchOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const Address& mem, Register64 temp, +- Register64 output) DEFINED_ON(arm, arm64, x64, mips64); ++ Register64 output) DEFINED_ON(arm, arm64, x64, mips64, ppc64); + + void atomicFetchOp64(const Synchronization& sync, AtomicOp op, + const Address& value, const Address& mem, + Register64 temp, Register64 output) DEFINED_ON(x86); + + void atomicFetchOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const BaseIndex& mem, Register64 temp, +- Register64 output) DEFINED_ON(arm, arm64, x64, mips64); ++ Register64 output) DEFINED_ON(arm, arm64, x64, mips64, ppc64); + + void atomicFetchOp64(const Synchronization& sync, AtomicOp op, + const Address& value, const BaseIndex& mem, + Register64 temp, Register64 output) DEFINED_ON(x86); + + // x64: + // `value` can be any register. + // ARM: +@@ -3871,24 +3878,24 @@ class MacroAssembler : public MacroAssemblerSpecific { + // ARM64: + // Registers `value` and `temp` must differ. + + void atomicEffectOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const Address& mem) DEFINED_ON(x64); + + void atomicEffectOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const Address& mem, Register64 temp) +- DEFINED_ON(arm, arm64, mips64); ++ DEFINED_ON(arm, arm64, mips64, ppc64); + + void atomicEffectOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const BaseIndex& mem) DEFINED_ON(x64); + + void atomicEffectOp64(const Synchronization& sync, AtomicOp op, + Register64 value, const BaseIndex& mem, Register64 temp) +- DEFINED_ON(arm, arm64, mips64); ++ DEFINED_ON(arm, arm64, mips64, ppc64); + + // 64-bit atomic load. On 64-bit systems, use regular load with + // Synchronization::Load, not this method. + // + // x86: `temp` must be ecx:ebx; `output` must be edx:eax. + // ARM: `output` must be (even,odd) pair. 
+ + void atomicLoad64(const Synchronization& sync, const Address& mem, +@@ -3930,43 +3937,43 @@ class MacroAssembler : public MacroAssemblerSpecific { + const BaseIndex& mem, Register expected, + Register replacement, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void wasmCompareExchange(const wasm::MemoryAccessDesc& access, + const Address& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, +- Register output) DEFINED_ON(mips_shared); ++ Register output) DEFINED_ON(mips_shared, ppc64); + + void wasmCompareExchange(const wasm::MemoryAccessDesc& access, + const BaseIndex& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, +- Register output) DEFINED_ON(mips_shared); ++ Register output) DEFINED_ON(mips_shared, ppc64); + + void wasmAtomicExchange(const wasm::MemoryAccessDesc& access, + const Address& mem, Register value, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void wasmAtomicExchange(const wasm::MemoryAccessDesc& access, + const BaseIndex& mem, Register value, Register output) + DEFINED_ON(arm, arm64, x86_shared); + + void wasmAtomicExchange(const wasm::MemoryAccessDesc& access, + const Address& mem, Register value, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void wasmAtomicExchange(const wasm::MemoryAccessDesc& access, + const BaseIndex& mem, Register value, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const Address& mem, Register temp, + Register output) DEFINED_ON(arm, arm64, x86_shared); + + void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Imm32 value, const Address& mem, Register temp, + Register output) DEFINED_ON(x86_shared); +@@ -3977,23 +3984,23 @@ class MacroAssembler : public MacroAssemblerSpecific { + + void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Imm32 value, const BaseIndex& mem, Register temp, + Register output) DEFINED_ON(x86_shared); + + void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const Address& mem, Register valueTemp, + Register offsetTemp, Register maskTemp, +- Register output) DEFINED_ON(mips_shared); ++ Register output) DEFINED_ON(mips_shared, ppc64); + + void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const BaseIndex& mem, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + // Read-modify-write with memory. Return no value. + // + // MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit + // and 16-bit wide operations. 
+ + void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const Address& mem, Register temp) +@@ -4009,22 +4016,22 @@ class MacroAssembler : public MacroAssemblerSpecific { + + void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Imm32 value, const BaseIndex& mem, Register temp) + DEFINED_ON(x86_shared); + + void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const Address& mem, + Register valueTemp, Register offsetTemp, +- Register maskTemp) DEFINED_ON(mips_shared); ++ Register maskTemp) DEFINED_ON(mips_shared, ppc64); + + void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register value, const BaseIndex& mem, + Register valueTemp, Register offsetTemp, +- Register maskTemp) DEFINED_ON(mips_shared); ++ Register maskTemp) DEFINED_ON(mips_shared, ppc64); + + // 64-bit wide operations. + + // 64-bit atomic load. On 64-bit systems, use regular wasm load with + // Synchronization::Load, not this method. + // + // x86: `temp` must be ecx:ebx; `output` must be edx:eax. + // ARM: `temp` should be invalid; `output` must be (even,odd) pair. +@@ -4074,22 +4081,22 @@ class MacroAssembler : public MacroAssemblerSpecific { + // ARM: Registers must be distinct; `temp` and `output` must be (even,odd) + // pairs. + // MIPS: Registers must be distinct. + // MIPS32: `temp` should be invalid. + + void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register64 value, const Address& mem, + Register64 temp, Register64 output) +- DEFINED_ON(arm, arm64, mips32, mips64, x64); ++ DEFINED_ON(arm, arm64, mips32, mips64, x64, ppc64); + + void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op, + Register64 value, const BaseIndex& mem, + Register64 temp, Register64 output) +- DEFINED_ON(arm, arm64, mips32, mips64, x64); ++ DEFINED_ON(arm, arm64, mips32, mips64, x64, ppc64); + + void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op, + const Address& value, const Address& mem, + Register64 temp, Register64 output) DEFINED_ON(x86); + + void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op, + const Address& value, const BaseIndex& mem, + Register64 temp, Register64 output) DEFINED_ON(x86); +@@ -4131,42 +4138,42 @@ class MacroAssembler : public MacroAssemblerSpecific { + const BaseIndex& mem, Register expected, + Register replacement, Register temp, + AnyRegister output) DEFINED_ON(arm, arm64, x86_shared); + + void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const Address& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, Register temp, +- AnyRegister output) DEFINED_ON(mips_shared); ++ AnyRegister output) DEFINED_ON(mips_shared, ppc64); + + void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const BaseIndex& mem, Register expected, + Register replacement, Register valueTemp, + Register offsetTemp, Register maskTemp, Register temp, +- AnyRegister output) DEFINED_ON(mips_shared); ++ AnyRegister output) DEFINED_ON(mips_shared, ppc64); + + void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const Address& mem, Register value, Register temp, + AnyRegister output) DEFINED_ON(arm, arm64, x86_shared); + + void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const BaseIndex& mem, Register value, Register temp, + AnyRegister output) DEFINED_ON(arm, arm64, 
x86_shared); + + void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const Address& mem, Register value, Register valueTemp, + Register offsetTemp, Register maskTemp, Register temp, +- AnyRegister output) DEFINED_ON(mips_shared); ++ AnyRegister output) DEFINED_ON(mips_shared, ppc64); + + void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync, + const BaseIndex& mem, Register value, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register temp, AnyRegister output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const Address& mem, + Register temp1, Register temp2, AnyRegister output) + DEFINED_ON(arm, arm64, x86_shared); + + void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const BaseIndex& mem, +@@ -4182,23 +4189,23 @@ class MacroAssembler : public MacroAssemblerSpecific { + AtomicOp op, Imm32 value, const BaseIndex& mem, + Register temp1, Register temp2, AnyRegister output) + DEFINED_ON(x86_shared); + + void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const Address& mem, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register temp, AnyRegister output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const BaseIndex& mem, + Register valueTemp, Register offsetTemp, + Register maskTemp, Register temp, AnyRegister output) +- DEFINED_ON(mips_shared); ++ DEFINED_ON(mips_shared, ppc64); + + void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const Address& mem, + Register temp) DEFINED_ON(arm, arm64, x86_shared); + + void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const BaseIndex& mem, + Register temp) DEFINED_ON(arm, arm64, x86_shared); +@@ -4209,22 +4216,22 @@ class MacroAssembler : public MacroAssemblerSpecific { + + void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Imm32 value, const BaseIndex& mem, + Register temp) DEFINED_ON(x86_shared); + + void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const Address& mem, + Register valueTemp, Register offsetTemp, +- Register maskTemp) DEFINED_ON(mips_shared); ++ Register maskTemp) DEFINED_ON(mips_shared, ppc64); + + void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync, + AtomicOp op, Register value, const BaseIndex& mem, + Register valueTemp, Register offsetTemp, +- Register maskTemp) DEFINED_ON(mips_shared); ++ Register maskTemp) DEFINED_ON(mips_shared, ppc64); + + void atomicIsLockFreeJS(Register value, Register output); + + // ======================================================================== + // Spectre Mitigations. + // + // Spectre attacks are side-channel attacks based on cache pollution or + // slow-execution of some instructions. We have multiple spectre mitigations +@@ -4803,17 +4810,17 @@ class MacroAssembler : public MacroAssemblerSpecific { + // StackPointer manipulation functions. + // On ARM64, the StackPointer is implemented as two synchronized registers. + // Code shared across platforms must use these functions to be valid. 
+ template + inline void addToStackPtr(T t); + template + inline void addStackPtrTo(T t); + +- void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64); ++ void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64, ppc64); + void subFromStackPtr(Register reg); + + template + void subStackPtrFrom(T t) { + subPtr(getStackPointer(), t); + } + + template +diff --git a/js/src/jit/MoveEmitter.h b/js/src/jit/MoveEmitter.h +index 6c62c0561a..30ee4b61a5 100644 +--- a/js/src/jit/MoveEmitter.h ++++ b/js/src/jit/MoveEmitter.h +@@ -12,15 +12,17 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/MoveEmitter-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/MoveEmitter-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/MoveEmitter-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/MoveEmitter-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/MoveEmitter-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/MoveEmitter-none.h" + #else + # error "Unknown architecture!" + #endif + + #endif /* jit_MoveEmitter_h */ +diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h +index 67c8661004..ef49df83e5 100644 +--- a/js/src/jit/Registers.h ++++ b/js/src/jit/Registers.h +@@ -15,16 +15,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/Architecture-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/Architecture-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/Architecture-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/Architecture-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/Architecture-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/Architecture-none.h" + #else + # error "Unknown architecture!" + #endif + + namespace js { + namespace jit { +diff --git a/js/src/jit/SharedICHelpers-inl.h b/js/src/jit/SharedICHelpers-inl.h +index 901c80cdd8..fd4a27d8bb 100644 +--- a/js/src/jit/SharedICHelpers-inl.h ++++ b/js/src/jit/SharedICHelpers-inl.h +@@ -12,16 +12,18 @@ + #elif defined(JS_CODEGEN_X64) + # include "jit/x64/SharedICHelpers-x64-inl.h" + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/SharedICHelpers-arm-inl.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/SharedICHelpers-arm64-inl.h" + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + # include "jit/mips-shared/SharedICHelpers-mips-shared-inl.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/SharedICHelpers-ppc64-inl.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/SharedICHelpers-none-inl.h" + #else + # error "Unknown architecture!" + #endif + + namespace js { + namespace jit {} // namespace jit +diff --git a/js/src/jit/SharedICHelpers.h b/js/src/jit/SharedICHelpers.h +index 563cae3ccf..737ca1d5a5 100644 +--- a/js/src/jit/SharedICHelpers.h ++++ b/js/src/jit/SharedICHelpers.h +@@ -12,16 +12,18 @@ + #elif defined(JS_CODEGEN_X64) + # include "jit/x64/SharedICHelpers-x64.h" + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/SharedICHelpers-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/SharedICHelpers-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + # include "jit/mips-shared/SharedICHelpers-mips-shared.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/SharedICHelpers-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/SharedICHelpers-none.h" + #else + # error "Unknown architecture!" 
+ #endif + + namespace js { + namespace jit {} // namespace jit +diff --git a/js/src/jit/SharedICRegisters.h b/js/src/jit/SharedICRegisters.h +index c87e5f8408..76239d5dde 100644 +--- a/js/src/jit/SharedICRegisters.h ++++ b/js/src/jit/SharedICRegisters.h +@@ -14,16 +14,18 @@ + #elif defined(JS_CODEGEN_ARM) + # include "jit/arm/SharedICRegisters-arm.h" + #elif defined(JS_CODEGEN_ARM64) + # include "jit/arm64/SharedICRegisters-arm64.h" + #elif defined(JS_CODEGEN_MIPS32) + # include "jit/mips32/SharedICRegisters-mips32.h" + #elif defined(JS_CODEGEN_MIPS64) + # include "jit/mips64/SharedICRegisters-mips64.h" ++#elif defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/SharedICRegisters-ppc64.h" + #elif defined(JS_CODEGEN_NONE) + # include "jit/none/SharedICRegisters-none.h" + #else + # error "Unknown architecture!" + #endif + + namespace js { + namespace jit {} // namespace jit +diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build +index f50d86fc44..82cddd07af 100644 +--- a/js/src/jit/moz.build ++++ b/js/src/jit/moz.build +@@ -227,17 +227,29 @@ elif CONFIG["JS_CODEGEN_MIPS32"] or CONFIG["JS_CODEGEN_MIPS64"]: + "mips64/CodeGenerator-mips64.cpp", + "mips64/Lowering-mips64.cpp", + "mips64/MacroAssembler-mips64.cpp", + "mips64/MoveEmitter-mips64.cpp", + "mips64/Trampoline-mips64.cpp", + ] + if CONFIG["JS_SIMULATOR_MIPS64"]: + UNIFIED_SOURCES += ["mips64/Simulator-mips64.cpp"] +- ++elif CONFIG["JS_CODEGEN_PPC64"]: ++ lir_inputs += ["ppc64/LIR-ppc64.h"] ++ UNIFIED_SOURCES += [ ++ "ppc64/Architecture-ppc64.cpp", ++ "ppc64/Assembler-ppc64.cpp", ++ "ppc64/Bailouts-ppc64.cpp", ++ "ppc64/CodeGenerator-ppc64.cpp", ++ "ppc64/Lowering-ppc64.cpp", ++ "ppc64/MacroAssembler-ppc64.cpp", ++ "ppc64/MoveEmitter-ppc64.cpp", ++ "ppc64/Trampoline-ppc64.cpp", ++ "shared/AtomicOperations-shared-jit.cpp", ++ ] + + # Generate jit/MIROpsGenerated.h from jit/MIROps.yaml + GeneratedFile( + "MIROpsGenerated.h", + script="GenerateMIRFiles.py", + entry_point="generate_mir_header", + inputs=["MIROps.yaml"], + ) +diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h +index dfb2bcb6b8..69ba759d42 100644 +--- a/js/src/jit/shared/Assembler-shared.h ++++ b/js/src/jit/shared/Assembler-shared.h +@@ -20,23 +20,24 @@ + #include "jit/Registers.h" + #include "jit/RegisterSets.h" + #include "js/ScalarType.h" // js::Scalar::Type + #include "vm/HelperThreads.h" + #include "vm/NativeObject.h" + #include "wasm/WasmTypes.h" + + #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ +- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + // Push return addresses callee-side. + # define JS_USE_LINK_REGISTER + #endif + + #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ +- defined(JS_CODEGEN_ARM64) ++ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_PPC64) + // JS_CODELABEL_LINKMODE gives labels additional metadata + // describing how Bind() should patch them. + # define JS_CODELABEL_LINKMODE + #endif + + namespace js { + namespace jit { + +diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.cpp b/js/src/jit/shared/AtomicOperations-shared-jit.cpp +index 79463f118b..7c8eeaf89e 100644 +--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp ++++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp +@@ -133,16 +133,38 @@ static constexpr Register AtomicTemp = edx; + // 64-bit registers for cmpxchg8b. ValReg/Val2Reg/Temp are not used in this + // case. 
+ + static constexpr Register64 AtomicValReg64(edx, eax); + static constexpr Register64 AtomicVal2Reg64(ecx, ebx); + + // AtomicReturnReg64 is unused on x86. + ++#elif defined(JS_CODEGEN_PPC64) ++ ++// Selected registers match the argument registers, except that the Ptr is not ++// in IntArgReg0 so as not to conflict with the result register. ++ ++static const LiveRegisterSet AtomicNonVolatileRegs; ++ ++static constexpr Register AtomicPtrReg = IntArgReg4; ++static constexpr Register AtomicPtr2Reg = IntArgReg1; ++static constexpr Register AtomicValReg = IntArgReg1; ++static constexpr Register64 AtomicValReg64(IntArgReg1); ++static constexpr Register AtomicVal2Reg = IntArgReg2; ++static constexpr Register64 AtomicVal2Reg64(IntArgReg2); ++static constexpr Register AtomicTemp = IntArgReg3; ++static constexpr Register AtomicTemp2 = IntArgReg5; ++static constexpr Register AtomicTemp3 = IntArgReg6; ++static constexpr Register64 AtomicTemp64(IntArgReg3); ++static constexpr Register64 AtomicTemp64_2(IntArgReg5); ++static constexpr Register64 AtomicTemp64_3(IntArgReg6); ++ ++static constexpr Register64 AtomicReturnReg64 = ReturnReg64; ++ + #else + # error "Unsupported platform" + #endif + + // These are useful shorthands and hide the meaningless uint/int distinction. + + static constexpr Scalar::Type SIZE8 = Scalar::Uint8; + static constexpr Scalar::Type SIZE16 = Scalar::Uint16; +@@ -248,31 +270,37 @@ static uint32_t GenPrologue(MacroAssembler& masm, ArgIterator* iter) { + uint32_t start = masm.currentOffset(); + masm.PushRegsInMask(AtomicNonVolatileRegs); + #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) + // The return address is among the nonvolatile registers, if pushed at all. + iter->argBase = masm.framePushed(); + #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + // The return address is pushed separately. + iter->argBase = sizeof(void*) + masm.framePushed(); ++#elif defined(JS_CODEGEN_PPC64) ++// XXX ++ // The return address is in LR (an SPR); it's not (probably) on the stack. 
++ iter->argBase = masm.framePushed(); + #else + # error "Unsupported platform" + #endif + return start; + } + + static void GenEpilogue(MacroAssembler& masm) { + masm.PopRegsInMask(AtomicNonVolatileRegs); + MOZ_ASSERT(masm.framePushed() == 0); + #if defined(JS_CODEGEN_ARM64) + masm.Ret(); + #elif defined(JS_CODEGEN_ARM) + masm.mov(lr, pc); + #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + masm.ret(); ++#elif defined(JS_CODEGEN_PPC64) ++ masm.as_blr(); + #endif + } + + #ifndef JS_64BIT + static uint32_t GenNop(MacroAssembler& masm) { + ArgIterator iter; + uint32_t start = GenPrologue(masm, &iter); + GenEpilogue(masm); +@@ -414,21 +442,31 @@ static uint32_t GenCmpxchg(MacroAssembler& masm, Scalar::Type size, + ArgIterator iter; + uint32_t start = GenPrologue(masm, &iter); + GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg); + + Address addr(AtomicPtrReg, 0); + switch (size) { + case SIZE8: + case SIZE16: ++#if defined(JS_CODEGEN_PPC64) ++ masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg, ++ AtomicTemp, AtomicTemp2, AtomicTemp3, ReturnReg); ++ break; ++#endif + case SIZE32: + GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg); + GenGprArg(masm, MIRType::Int32, &iter, AtomicVal2Reg); ++#if defined(JS_CODEGEN_PPC64) ++ masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg, ++ InvalidReg, InvalidReg, InvalidReg, ReturnReg); ++#else + masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg, + ReturnReg); ++#endif + break; + case SIZE64: + GenGpr64Arg(masm, &iter, AtomicValReg64); + GenGpr64Arg(masm, &iter, AtomicVal2Reg64); + #if defined(JS_CODEGEN_X86) + static_assert(AtomicValReg64 == Register64(edx, eax)); + static_assert(AtomicVal2Reg64 == Register64(ecx, ebx)); + +@@ -453,19 +491,29 @@ static uint32_t GenExchange(MacroAssembler& masm, Scalar::Type size, + ArgIterator iter; + uint32_t start = GenPrologue(masm, &iter); + GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg); + + Address addr(AtomicPtrReg, 0); + switch (size) { + case SIZE8: + case SIZE16: ++#if defined(JS_CODEGEN_PPC64) ++ masm.atomicExchange(size, sync, addr, AtomicValReg, ++ AtomicTemp, AtomicTemp2, AtomicTemp3, ReturnReg); ++ break; ++#endif + case SIZE32: + GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg); ++#if defined(JS_CODEGEN_PPC64) ++ masm.atomicExchange(size, sync, addr, AtomicValReg, ++ InvalidReg, InvalidReg, InvalidReg, ReturnReg); ++#else + masm.atomicExchange(size, sync, addr, AtomicValReg, ReturnReg); ++#endif + break; + case SIZE64: + #if defined(JS_64BIT) + GenGpr64Arg(masm, &iter, AtomicValReg64); + masm.atomicExchange64(sync, addr, AtomicValReg64, AtomicReturnReg64); + break; + #else + MOZ_CRASH("64-bit atomic exchange not available on this platform"); +@@ -492,17 +540,22 @@ static uint32_t GenFetchOp(MacroAssembler& masm, Scalar::Type size, AtomicOp op, + #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + Register tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp + ? Register::Invalid() + : AtomicTemp; + #else + Register tmp = AtomicTemp; + #endif + GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg); ++#if defined(JS_CODEGEN_PPC64) ++ masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, AtomicTemp2, ++ AtomicTemp3, ReturnReg); ++#else + masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, ReturnReg); ++#endif + break; + } + case SIZE64: { + #if defined(JS_64BIT) + # if defined(JS_CODEGEN_X64) + Register64 tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp + ? 
Register64::Invalid() + : AtomicTemp64; +@@ -636,16 +689,19 @@ static bool UnalignedAccessesAreOK() { + #endif + #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + return true; + #elif defined(JS_CODEGEN_ARM) + return !HasAlignmentFault(); + #elif defined(JS_CODEGEN_ARM64) + // This is not necessarily true but it's the best guess right now. + return true; ++#elif defined(JS_CODEGEN_PPC64) ++ // We'd sure like to avoid it, even though it works. ++ return false; + #else + # error "Unsupported platform" + #endif + } + + void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src, + size_t nbytes) { + const uint8_t* lim = src + nbytes; +diff --git a/js/src/jsapi-tests/testJitABIcalls.cpp b/js/src/jsapi-tests/testJitABIcalls.cpp +index 02b67da3ca..bd45389b21 100644 +--- a/js/src/jsapi-tests/testJitABIcalls.cpp ++++ b/js/src/jsapi-tests/testJitABIcalls.cpp +@@ -653,16 +653,19 @@ class JitABICall final : public JSAPITest, public DefineCheckArgs { + Register base = r8; + regs.take(base); + #elif defined(JS_CODEGEN_MIPS32) + Register base = t1; + regs.take(base); + #elif defined(JS_CODEGEN_MIPS64) + Register base = t1; + regs.take(base); ++#elif defined(JS_CODEGEN_PPC64) ++ Register base = r0; ++ regs.take(base); + #else + # error "Unknown architecture!" + #endif + + Register setup = regs.takeAny(); + + this->generateCalls(masm, base, setup); + +diff --git a/js/src/jsapi-tests/testsJit.cpp b/js/src/jsapi-tests/testsJit.cpp +index 069eef43fe..705609df2c 100644 +--- a/js/src/jsapi-tests/testsJit.cpp ++++ b/js/src/jsapi-tests/testsJit.cpp +@@ -20,16 +20,21 @@ void PrepareJit(js::jit::MacroAssembler& masm) { + #endif + AllocatableRegisterSet regs(RegisterSet::All()); + LiveRegisterSet save(regs.asLiveSet()); + #if defined(JS_CODEGEN_ARM) + save.add(js::jit::d15); + #endif + #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + save.add(js::jit::ra); ++#elif defined(JS_CODEGEN_PPC64) ++ // XXX ++ // Push the link register separately, since it's not a GPR. ++ masm.xs_mflr(ScratchRegister); ++ masm.as_stdu(ScratchRegister, StackPointer, -8); + #elif defined(JS_USE_LINK_REGISTER) + save.add(js::jit::lr); + #endif + masm.PushRegsInMask(save); + } + + // Generate the exit path of the JIT code, which restores every register. Then, + // make it executable and run it. +@@ -37,26 +42,35 @@ bool ExecuteJit(JSContext* cx, js::jit::MacroAssembler& masm) { + using namespace js::jit; + AllocatableRegisterSet regs(RegisterSet::All()); + LiveRegisterSet save(regs.asLiveSet()); + #if defined(JS_CODEGEN_ARM) + save.add(js::jit::d15); + #endif + #if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + save.add(js::jit::ra); ++#elif defined(JS_CODEGEN_PPC64) ++ // We pop after loading the regs. + #elif defined(JS_USE_LINK_REGISTER) + save.add(js::jit::lr); + #endif + masm.PopRegsInMask(save); + #if defined(JS_CODEGEN_ARM64) + // Return using the value popped into x30. + masm.abiret(); + + // Reset stack pointer. + masm.SetStackPointer64(PseudoStackPointer64); ++#elif defined(JS_CODEGEN_PPC64) ++ // XXX ++ // Pop LR and exit. ++ masm.as_ld(ScratchRegister, StackPointer, 0); ++ masm.xs_mtlr(ScratchRegister); ++ masm.as_addi(StackPointer, StackPointer, 8); ++ masm.as_blr(); + #else + // Exit the JIT-ed code using the ABI return style. 
+ masm.abiret(); + #endif + + if (masm.oom()) { + return false; + } +diff --git a/js/src/util/Poison.h b/js/src/util/Poison.h +index 8356ca1f00..5eeb111cf8 100644 +--- a/js/src/util/Poison.h ++++ b/js/src/util/Poison.h +@@ -88,16 +88,18 @@ const uint8_t JS_SCOPE_DATA_TRAILING_NAMES_PATTERN = 0xCC; + */ + #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \ + defined(JS_CODEGEN_NONE) + # define JS_SWEPT_CODE_PATTERN 0xED // IN instruction, crashes in user mode. + #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) + # define JS_SWEPT_CODE_PATTERN 0xA3 // undefined instruction + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + # define JS_SWEPT_CODE_PATTERN 0x01 // undefined instruction ++#elif defined(JS_CODEGEN_PPC64) || defined(JS_CODEGEN_PPC) ++# define JS_SWEPT_CODE_PATTERN 0x00 // architecturally defined as illegal + #else + # error "JS_SWEPT_CODE_PATTERN not defined for this platform" + #endif + + enum class MemCheckKind : uint8_t { + // Marks a region as poisoned. Memory sanitizers like ASan will crash when + // accessing it (both reads and writes). + MakeNoAccess, +diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp +index 156f3cbbba..ab29f44713 100644 +--- a/js/src/wasm/WasmBaselineCompile.cpp ++++ b/js/src/wasm/WasmBaselineCompile.cpp +@@ -138,16 +138,19 @@ + #if defined(JS_CODEGEN_MIPS32) + # include "jit/mips-shared/Assembler-mips-shared.h" + # include "jit/mips32/Assembler-mips32.h" + #endif + #if defined(JS_CODEGEN_MIPS64) + # include "jit/mips-shared/Assembler-mips-shared.h" + # include "jit/mips64/Assembler-mips64.h" + #endif ++#if defined(JS_CODEGEN_PPC64) ++# include "jit/ppc64/Assembler-ppc64.h" ++#endif + #include "js/ScalarType.h" // js::Scalar::Type + #include "util/Memory.h" + #include "wasm/TypedObject.h" + #include "wasm/WasmGC.h" + #include "wasm/WasmGenerator.h" + #include "wasm/WasmInstance.h" + #include "wasm/WasmOpIter.h" + #include "wasm/WasmSignalHandlers.h" +@@ -288,16 +291,23 @@ static constexpr Register RabaldrScratchI32 = CallTempReg2; + #endif + + #ifdef RABALDR_SCRATCH_F32_ALIASES_F64 + # if !defined(RABALDR_SCRATCH_F32) || !defined(RABALDR_SCRATCH_F64) + # error "Bad configuration" + # endif + #endif + ++#ifdef JS_CODEGEN_PPC64 ++# define RABALDR_SCRATCH_I32 ++// We can use all the argregs up, and we don't want the JIT using our own ++// private scratch registers, so this is the best option of what's left. 
++static constexpr Register RabaldrScratchI32 = r19; ++#endif ++ + template + struct RegTypeOf { + #ifdef ENABLE_WASM_SIMD + static_assert(t == MIRType::Float32 || t == MIRType::Double || + t == MIRType::Simd128, + "Float mask type"); + #else + static_assert(t == MIRType::Float32 || t == MIRType::Double, +@@ -550,16 +560,18 @@ struct SpecificRegs {}; + #elif defined(JS_CODEGEN_MIPS32) + struct SpecificRegs { + RegI64 abiReturnRegI64; + + SpecificRegs() : abiReturnRegI64(ReturnReg64) {} + }; + #elif defined(JS_CODEGEN_MIPS64) + struct SpecificRegs {}; ++#elif defined(JS_CODEGEN_PPC64) ++struct SpecificRegs {}; + #else + struct SpecificRegs { + # ifndef JS_64BIT + RegI64 abiReturnRegI64; + # endif + + SpecificRegs() { MOZ_CRASH("BaseCompiler porting interface: SpecificRegs"); } + }; +@@ -6038,16 +6050,25 @@ class BaseCompiler final : public BaseCompilerInterface { + ABIArg argLoc = call->abi.next(MIRType::Int32); + if (argLoc.kind() == ABIArg::Stack) { + ScratchI32 scratch(*this); + loadI32(arg, scratch); + masm.store32(scratch, Address(masm.getStackPointer(), + argLoc.offsetFromArgBase())); + } else { + loadI32(arg, RegI32(argLoc.gpr())); ++#if JS_CODEGEN_PPC64 ++ // If this is a call to compiled C++, we must ensure that the ++ // upper 32 bits are clear: addi can sign-extend, which yields ++ // difficult-to-diagnose bugs when the function expects a uint32_t ++ // but the register it gets has a residual 64-bit value. ++ if (call->usesSystemAbi) { ++ masm.as_rldicl(argLoc.gpr(), argLoc.gpr(), 0, 32); ++ } ++#endif + } + break; + } + case ValType::I64: { + ABIArg argLoc = call->abi.next(MIRType::Int64); + if (argLoc.kind() == ABIArg::Stack) { + ScratchI32 scratch(*this); + #ifdef JS_PUNBOX64 +@@ -6324,17 +6345,18 @@ class BaseCompiler final : public BaseCompilerInterface { + + // Compute the absolute table base pointer into `scratch`, offset by 8 + // to account for the fact that ma_mov read PC+8. + masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch); + + // Jump indirect via table element. 
+ masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc, + Offset, Assembler::Always); +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + ScratchI32 scratch(*this); + CodeLabel tableCl; + + masm.ma_li(scratch, &tableCl); + + tableCl.target()->bind(theTable->offset()); + masm.addCodeLabel(tableCl); + +@@ -6493,16 +6515,22 @@ class BaseCompiler final : public BaseCompilerInterface { + # elif defined(JS_CODEGEN_ARM64) + ARMRegister sd(srcDest.reg, 64); + ARMRegister r(rhs.reg, 64); + if (isUnsigned) { + masm.Udiv(sd, sd, r); + } else { + masm.Sdiv(sd, sd, r); + } ++# elif defined(JS_CODEGEN_PPC64) ++ if (isUnsigned) { ++ masm.as_divdu(srcDest.reg, srcDest.reg, rhs.reg); ++ } else { ++ masm.as_divd(srcDest.reg, srcDest.reg, rhs.reg); ++ } + # else + MOZ_CRASH("BaseCompiler platform hook: quotientI64"); + # endif + masm.bind(&done); + } + + void remainderI64(RegI64 rhs, RegI64 srcDest, RegI64 reserved, + IsUnsigned isUnsigned, bool isConst, int64_t c) { +@@ -6544,29 +6572,46 @@ class BaseCompiler final : public BaseCompilerInterface { + ARMRegister t(temp, 64); + if (isUnsigned) { + masm.Udiv(t, sd, r); + } else { + masm.Sdiv(t, sd, r); + } + masm.Mul(t, t, r); + masm.Sub(sd, sd, t); ++# elif defined(JS_CODEGEN_PPC64) ++ if (js::jit::HasPPCISA3()) { ++ if (isUnsigned) { ++ masm.as_modud(srcDest.reg, srcDest.reg, rhs.reg); ++ } else { ++ masm.as_modsd(srcDest.reg, srcDest.reg, rhs.reg); ++ } ++ } else { ++ ScratchI32 temp(*this); ++ if (isUnsigned) { ++ masm.as_divdu(temp, srcDest.reg, rhs.reg); ++ } else { ++ masm.as_divd(temp, srcDest.reg, rhs.reg); ++ } ++ masm.as_mulld(temp, temp, rhs.reg); ++ masm.as_subf(srcDest.reg, temp, srcDest.reg); // T = B - A ++ } + # else + MOZ_CRASH("BaseCompiler platform hook: remainderI64"); + # endif + masm.bind(&done); + } + #endif // RABALDR_INT_DIV_I64_CALLOUT + + RegI32 needRotate64Temp() { + #if defined(JS_CODEGEN_X86) + return needI32(); + #elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \ + defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + return RegI32::Invalid(); + #else + MOZ_CRASH("BaseCompiler platform hook: needRotate64Temp"); + #endif + } + + class OutOfLineTruncateCheckF32OrF64ToI32 : public OutOfLineCode { + AnyReg src; +@@ -6869,30 +6914,35 @@ class BaseCompiler final : public BaseCompilerInterface { + RegI64 ptr64 = fromI32(ptr); + + // In principle there may be non-zero bits in the upper bits of the + // register; clear them. + # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) + // The canonical value is zero-extended (see comment block "64-bit GPRs + // carrying 32-bit values" in MacroAssembler.h); we already have that. + masm.assertCanonicalInt32(ptr); ++# elif defined(JS_CODEGEN_PPC64) ++ // The canonical value is sign-extended. ++ masm.as_rldicl(ptr, ptr, 0, 32); // "clrldi" + # else + MOZ_CRASH("Platform code needed here"); + # endif + + // Any Spectre mitigation will appear to update the ptr64 register. + masm.wasmBoundsCheck64( + Assembler::Below, ptr64, + Address(tls, offsetof(TlsData, boundsCheckLimit)), &ok); + + // Restore the value to the canonical form for a 32-bit value in a + // 64-bit register and/or the appropriate form for further use in the + // indexing instruction. 
+ # if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) + // The canonical value is zero-extended; we already have that. ++# elif defined(JS_CODEGEN_PPC64) ++ // Leave it zero-extended. + # else + MOZ_CRASH("Platform code needed here"); + # endif + } else { + masm.wasmBoundsCheck32( + Assembler::Below, ptr, + Address(tls, offsetof(TlsData, boundsCheckLimit)), &ok); + } +@@ -6903,17 +6953,17 @@ class BaseCompiler final : public BaseCompilerInterface { + #endif + masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset()); + masm.bind(&ok); + } + } + + #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \ + defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access, + AccessCheck* check, RegI32 tls, + RegI32 ptr) { + MOZ_ASSERT(needTlsForAccess(*check) == tls.isValid()); + prepareMemoryAccess(access, check, tls, ptr); + return BaseIndex(HeapReg, ptr, TimesOne, access->offset()); + } + #elif defined(JS_CODEGEN_X86) +@@ -7001,17 +7051,19 @@ class BaseCompiler final : public BaseCompilerInterface { + if (dest.tag == AnyReg::I64) { + MOZ_ASSERT(dest.i64() == specific_.abiReturnRegI64); + masm.wasmLoadI64(*access, srcAddr, dest.i64()); + } else { + // For 8 bit loads, this will generate movsbl or movzbl, so + // there's no constraint on what the output register may be. + masm.wasmLoad(*access, srcAddr, dest.any()); + } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) ++// XXX: We don't really need this anymore + if (IsUnaligned(*access)) { + switch (dest.tag) { + case AnyReg::I64: + masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(), + temp1); + break; + case AnyReg::F32: + masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(), +@@ -7102,17 +7154,19 @@ class BaseCompiler final : public BaseCompilerInterface { + MOZ_ASSERT(temp.isInvalid()); + if (access->type() == Scalar::Int64) { + masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr); + } else if (src.tag == AnyReg::I64) { + masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr); + } else { + masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr); + } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) ++// XXX: We don't really need this anymore + if (IsUnaligned(*access)) { + switch (src.tag) { + case AnyReg::I64: + masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr, + temp); + break; + case AnyReg::F32: + masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr, +@@ -7160,17 +7214,18 @@ class BaseCompiler final : public BaseCompilerInterface { + } + void maybeFree(BaseCompiler* bc) { + for (size_t i = 0; i < Count; ++i) { + bc->maybeFree(this->operator[](i)); + } + } + }; + +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + using AtomicRMW32Temps = Atomic32Temps<3>; + #else + using AtomicRMW32Temps = Atomic32Temps<1>; + #endif + + template + void atomicRMW32(const MemoryAccessDesc& access, T srcAddr, AtomicOp op, + RegI32 rv, RegI32 rd, const AtomicRMW32Temps& temps) { +@@ -7187,17 +7242,18 @@ class BaseCompiler final : public BaseCompilerInterface { + } + masm.wasmAtomicFetchOp(access, op, rv, 
srcAddr, temp, rd); + break; + } + #endif + case Scalar::Uint16: + case Scalar::Int32: + case Scalar::Uint32: +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], temps[1], + temps[2], rd); + #else + masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], rd); + #endif + break; + default: { + MOZ_CRASH("Bad type for atomic operation"); +@@ -7208,17 +7264,18 @@ class BaseCompiler final : public BaseCompilerInterface { + // On x86, V is Address. On other platforms, it is Register64. + // T is BaseIndex or Address. + template + void atomicRMW64(const MemoryAccessDesc& access, const T& srcAddr, + AtomicOp op, V value, Register64 temp, Register64 rd) { + masm.wasmAtomicFetchOp64(access, op, value, srcAddr, temp, rd); + } + +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + using AtomicCmpXchg32Temps = Atomic32Temps<3>; + #else + using AtomicCmpXchg32Temps = Atomic32Temps<0>; + #endif + + template + void atomicCmpXchg32(const MemoryAccessDesc& access, T srcAddr, + RegI32 rexpect, RegI32 rnew, RegI32 rd, +@@ -7236,29 +7293,31 @@ class BaseCompiler final : public BaseCompilerInterface { + } + masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd); + break; + } + #endif + case Scalar::Uint16: + case Scalar::Int32: + case Scalar::Uint32: +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, temps[0], + temps[1], temps[2], rd); + #else + masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd); + #endif + break; + default: + MOZ_CRASH("Bad type for atomic operation"); + } + } + +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + using AtomicXchg32Temps = Atomic32Temps<3>; + #else + using AtomicXchg32Temps = Atomic32Temps<0>; + #endif + + template + void atomicXchg32(const MemoryAccessDesc& access, T srcAddr, RegI32 rv, + RegI32 rd, const AtomicXchg32Temps& temps) { +@@ -7275,17 +7334,18 @@ class BaseCompiler final : public BaseCompilerInterface { + masm.wasmAtomicExchange(access, srcAddr, rv, rd); + } + break; + } + #endif + case Scalar::Uint16: + case Scalar::Int32: + case Scalar::Uint32: +-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + masm.wasmAtomicExchange(access, srcAddr, rv, temps[0], temps[1], + temps[2], rd); + #else + masm.wasmAtomicExchange(access, srcAddr, rv, rd); + #endif + break; + default: + MOZ_CRASH("Bad type for atomic operation"); +@@ -7342,16 +7402,18 @@ class BaseCompiler final : public BaseCompilerInterface { + #elif defined(JS_CODEGEN_MIPS32) + pop2xI64(r0, r1); + *temp = needI32(); + #elif defined(JS_CODEGEN_ARM) + pop2xI64(r0, r1); + *temp = needI32(); + #elif defined(JS_CODEGEN_ARM64) + pop2xI64(r0, r1); ++#elif defined(JS_CODEGEN_PPC64) ++ pop2xI64(r0, r1); + #else + MOZ_CRASH("BaseCompiler porting interface: pop2xI64ForMulI64"); + #endif + } + + void pop2xI64ForDivI64(RegI64* r0, RegI64* r1, RegI64* reserved) { + #if defined(JS_CODEGEN_X64) + // r0 must be rax, and rdx will be clobbered. 
+@@ -7529,17 +7591,18 @@ class BaseCompiler final : public BaseCompilerInterface { + rexpect = bc->popI32(); + } + setRd(bc->needI32()); + } + ~PopAtomicCmpXchg32Regs() { + bc->freeI32(rnew); + bc->freeI32(rexpect); + } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type, + Scalar::Type viewType) + : Base(bc) { + if (type == ValType::I64) { + rnew = bc->popI64ToI32(); + rexpect = bc->popI64ToI32(); + } else { + rnew = bc->popI32(); +@@ -7606,17 +7669,17 @@ class BaseCompiler final : public BaseCompilerInterface { + rexpect = bc->popI64(); + setRd(bc->needI64Pair()); + } + ~PopAtomicCmpXchg64Regs() { + bc->freeI64(rexpect); + bc->freeI64(rnew); + } + #elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) { + rnew = bc->popI64(); + rexpect = bc->popI64(); + setRd(bc->needI64()); + } + ~PopAtomicCmpXchg64Regs() { + bc->freeI64(rexpect); + bc->freeI64(rnew); +@@ -7658,17 +7721,18 @@ class BaseCompiler final : public BaseCompilerInterface { + bc->needI64(bc->specific_.edx_eax); + setRd(bc->specific_.edx_eax); + } + ~PopAtomicLoad64Regs() { bc->freeI32(bc->specific_.ecx); } + # elif defined(JS_CODEGEN_ARM) + explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) { + setRd(bc->needI64Pair()); + } +-# elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++# elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) { + setRd(bc->needI64()); + } + # else + explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) { + MOZ_CRASH("BaseCompiler porting interface: PopAtomicLoad64Regs"); + } + # endif +@@ -7745,17 +7809,18 @@ class BaseCompiler final : public BaseCompilerInterface { + rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32(); + temps.allocate(bc); + setRd(bc->needI32()); + } + ~PopAtomicRMW32Regs() { + bc->freeI32(rv); + temps.maybeFree(bc); + } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type, + Scalar::Type viewType, AtomicOp op) + : Base(bc) { + rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32(); + if (Scalar::byteSize(viewType) < 4) { + temps.allocate(bc); + } + +@@ -7833,17 +7898,17 @@ class BaseCompiler final : public BaseCompilerInterface { + temp = bc->needI64Pair(); + setRd(bc->needI64Pair()); + } + ~PopAtomicRMW64Regs() { + bc->freeI64(rv); + bc->freeI64(temp); + } + #elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) { + rv = bc->popI64(); + temp = bc->needI64(); + setRd(bc->needI64()); + } + ~PopAtomicRMW64Regs() { + bc->freeI64(rv); + bc->freeI64(temp); +@@ -7888,17 +7953,18 @@ class BaseCompiler final : public BaseCompilerInterface { + #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) + explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, + Scalar::Type viewType) + : Base(bc) { + rv = (type == ValType::I64) ? 
bc->popI64ToI32() : bc->popI32(); + setRd(bc->needI32()); + } + ~PopAtomicXchg32Regs() { bc->freeI32(rv); } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type, + Scalar::Type viewType) + : Base(bc) { + rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32(); + if (Scalar::byteSize(viewType) < 4) { + temps.allocate(bc); + } + setRd(bc->needI32()); +@@ -7954,17 +8020,18 @@ class BaseCompiler final : public BaseCompilerInterface { + ~PopAtomicXchg64Regs() { bc->freeI64(rv); } + #elif defined(JS_CODEGEN_ARM) + // Both rv and rd must be odd/even pairs. + explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) { + rv = bc->popI64ToSpecific(bc->needI64Pair()); + setRd(bc->needI64Pair()); + } + ~PopAtomicXchg64Regs() { bc->freeI64(rv); } +-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) + explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) { + rv = bc->popI64ToSpecific(bc->needI64()); + setRd(bc->needI64()); + } + ~PopAtomicXchg64Regs() { bc->freeI64(rv); } + #else + explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) { + MOZ_CRASH("BaseCompiler porting interface: xchg64"); +@@ -8968,16 +9035,18 @@ static void CtzI32(MacroAssembler& masm, RegI32 rsd) { + + // Currently common to PopcntI32 and PopcntI64 + static RegI32 PopcntTemp(BaseCompiler& bc) { + #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : bc.needI32(); + #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ + defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + return bc.needI32(); ++#elif defined(JS_CODEGEN_PPC64) ++ return RegI32::Invalid(); // We rock. + #else + MOZ_CRASH("BaseCompiler platform hook: PopcntTemp"); + #endif + } + + static void PopcntI32(BaseCompiler& bc, RegI32 rsd, RegI32 temp) { + bc.masm.popcnt32(rsd, rsd, temp); + } +@@ -11982,17 +12051,17 @@ RegI32 BaseCompiler::popMemory32Access(MemoryAccessDesc* access, + bceCheckLocal(access, check, local); + } + + return popI32(); + } + + void BaseCompiler::pushHeapBase() { + #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + RegI64 heapBase = needI64(); + moveI64(RegI64(Register64(HeapReg)), heapBase); + pushI64(heapBase); + #elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) + RegI32 heapBase = needI32(); + moveI32(RegI32(HeapReg), heapBase); + pushI32(heapBase); + #elif defined(JS_CODEGEN_X86) +@@ -17244,17 +17313,19 @@ bool js::wasm::BaselinePlatformSupport() { + // they are definitely implemented on the Cortex-A7 and Cortex-A15 + // and on all ARMv8 systems. + if (!HasIDIV()) { + return false; + } + #endif + #if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \ + defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \ +- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \ ++ defined(JS_CODEGEN_PPC64) ++ // PPC64 gates on other prerequisites not specified here. 
+ return true; + #else + return false; + #endif + } + + bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv, + const CompilerEnvironment& compilerEnv, +diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp +index 0f456aaaa5..f0694f1b9e 100644 +--- a/js/src/wasm/WasmCompile.cpp ++++ b/js/src/wasm/WasmCompile.cpp +@@ -45,16 +45,17 @@ using namespace js::wasm; + uint32_t wasm::ObservedCPUFeatures() { + enum Arch { + X86 = 0x1, + X64 = 0x2, + ARM = 0x3, + MIPS = 0x4, + MIPS64 = 0x5, + ARM64 = 0x6, ++ PPC64 = 0x7, + ARCH_BITS = 3 + }; + + #if defined(JS_CODEGEN_X86) + MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= + (UINT32_MAX >> ARCH_BITS)); + return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS); + #elif defined(JS_CODEGEN_X64) +@@ -68,16 +69,19 @@ uint32_t wasm::ObservedCPUFeatures() { + MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS)); + return ARM64 | (jit::GetARM64Flags() << ARCH_BITS); + #elif defined(JS_CODEGEN_MIPS32) + MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS)); + return MIPS | (jit::GetMIPSFlags() << ARCH_BITS); + #elif defined(JS_CODEGEN_MIPS64) + MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS)); + return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS); ++#elif defined(JS_CODEGEN_PPC64) ++ MOZ_ASSERT(jit::GetPPC64Flags() <= (UINT32_MAX >> ARCH_BITS)); ++ return PPC64 | (jit::GetPPC64Flags() << ARCH_BITS); + #elif defined(JS_CODEGEN_NONE) + return 0; + #else + # error "unknown architecture" + #endif + } + + FeatureArgs FeatureArgs::build(JSContext* cx, const FeatureOptions& options) { +diff --git a/js/src/wasm/WasmFrame.h b/js/src/wasm/WasmFrame.h +index 85f2612d14..9919205739 100644 +--- a/js/src/wasm/WasmFrame.h ++++ b/js/src/wasm/WasmFrame.h +@@ -53,16 +53,25 @@ constexpr uintptr_t ExitOrJitEntryFPTag = 0x1; + // before the function has made its stack reservation, the stack alignment is + // sizeof(Frame) % WasmStackAlignment. + // + // During MacroAssembler code generation, the bytes pushed after the wasm::Frame + // are counted by masm.framePushed. Thus, the stack alignment at any point in + // time is (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment. + + class Frame { ++#if defined(JS_CODEGEN_PPC64) ++ // Since Wasm can call directly to ABI-compliant routines, the Frame must ++ // have an ABI-compliant linkage area. We allocate four doublewords, the ++ // minimum size. ++ void *_ppc_sp_; ++ void *_ppc_cr_; ++ void *_ppc_lr_; ++ void *_ppc_toc_; ++#endif + // See GenerateCallableEpilogue for why this must be + // the first field of wasm::Frame (in a downward-growing stack). + // It's either the caller's Frame*, for wasm callers, or the JIT caller frame + // plus a tag otherwise. + uint8_t* callerFP_; + + // The return address pushed by the call (in the case of ARM/MIPS the return + // address is pushed by the first instruction of the prologue). +@@ -115,18 +124,21 @@ class Frame { + static uint8_t* addExitOrJitEntryFPTag(const Frame* fp) { + MOZ_ASSERT(!isExitOrJitEntryFP(fp)); + return reinterpret_cast(reinterpret_cast(fp) | + ExitOrJitEntryFPTag); + } + }; + + static_assert(!std::is_polymorphic_v, "Frame doesn't need a vtable."); ++#if !defined(JS_CODEGEN_PPC64) ++// Not on PowerPC, it's not. 
+ static_assert(sizeof(Frame) == 2 * sizeof(void*), + "Frame is a two pointer structure"); ++#endif + + class FrameWithTls : public Frame { + TlsData* calleeTls_; + TlsData* callerTls_; + + public: + TlsData* calleeTls() { return calleeTls_; } + TlsData* callerTls() { return callerTls_; } +diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp +index dffab53940..5da8d6c730 100644 +--- a/js/src/wasm/WasmFrameIter.cpp ++++ b/js/src/wasm/WasmFrameIter.cpp +@@ -358,16 +358,21 @@ static const unsigned SetFP = 16; + static const unsigned PoppedFP = 4; + static_assert(BeforePushRetAddr == 0, "Required by StartUnwinding"); + static_assert(PushedFP > PushedRetAddr, "Required by StartUnwinding"); + #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) + static const unsigned PushedRetAddr = 8; + static const unsigned PushedFP = 12; + static const unsigned SetFP = 16; + static const unsigned PoppedFP = 4; ++#elif defined(JS_CODEGEN_PPC64) ++static const unsigned PushedRetAddr = 12; ++static const unsigned PushedFP = 16; ++static const unsigned SetFP = 20; ++static const unsigned PoppedFP = 8; + #elif defined(JS_CODEGEN_NONE) + // Synthetic values to satisfy asserts and avoid compiler warnings. + static const unsigned PushedRetAddr = 0; + static const unsigned PushedFP = 1; + static const unsigned SetFP = 2; + static const unsigned PoppedFP = 3; + #else + # error "Unknown architecture!" +@@ -453,16 +458,38 @@ static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) { + MemOperand(sp, Frame::callerFPOffset())); + MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry); + masm.Mov(ARMRegister(FramePointer, 64), sp); + MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry); + + // And restore the SP-reg setting, per comment above. + masm.SetStackPointer64(stashedSPreg); + } ++#elif defined(JS_CODEGEN_PPC64) ++ { ++ *entry = masm.currentOffset(); ++ ++ // These must be in this precise order. Fortunately we can subsume the ++ // SPR load into the initial "verse" since it is treated atomically. ++ // The linkage area required for ABI compliance is baked into the Frame. ++ masm.xs_mflr(ScratchRegister); ++ masm.as_addi(StackPointer, StackPointer, -(sizeof(Frame))); ++ masm.as_std(ScratchRegister, StackPointer, Frame::returnAddressOffset()); ++ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry); ++ masm.as_std(FramePointer, StackPointer, Frame::callerFPOffset()); ++ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry); ++ masm.xs_mr(FramePointer, StackPointer); ++ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry); ++ ++ // Burn nops because we have to make this a multiple of 16 and the mfspr ++ // just screwed us. ++ masm.as_nop(); // 24 ++ masm.as_nop(); // 28 ++ masm.as_nop(); // 32 // trap point ++ } + #else + { + # if defined(JS_CODEGEN_ARM) + AutoForbidPoolsAndNops afp(&masm, + /* number of instructions in scope = */ 3); + + *entry = masm.currentOffset(); + +@@ -527,16 +554,28 @@ static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed, + // use it. Hence we have to do it "by hand". + masm.Mov(PseudoStackPointer64, vixl::sp); + + masm.Ret(ARMRegister(lr, 64)); + + // See comment at equivalent place in |GenerateCallablePrologue| above. 
+ masm.SetStackPointer64(stashedSPreg); + ++#elif defined(JS_CODEGEN_PPC64) ++ ++ masm.as_ld(FramePointer, StackPointer, Frame::callerFPOffset()); ++ poppedFP = masm.currentOffset(); ++ // This is suboptimal since we get serialized, but has to be in this order. ++ masm.as_ld(ScratchRegister, StackPointer, Frame::returnAddressOffset()); ++ masm.xs_mtlr(ScratchRegister); ++ *ret = masm.currentOffset(); ++ ++ masm.as_addi(StackPointer, StackPointer, sizeof(Frame)); ++ masm.as_blr(); ++ + #else + // Forbid pools for the same reason as described in GenerateCallablePrologue. + # if defined(JS_CODEGEN_ARM) + AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 6); + # endif + + // There is an important ordering constraint here: fp must be repointed to + // the caller's frame before any field of the frame currently pointed to by +@@ -773,16 +812,23 @@ void wasm::GenerateJitEntryPrologue(MacroAssembler& masm, Offsets* offsets) { + AutoForbidPoolsAndNops afp(&masm, + /* number of instructions in scope = */ 3); + offsets->begin = masm.currentOffset(); + static_assert(BeforePushRetAddr == 0); + // Subtract from SP first as SP must be aligned before offsetting. + masm.Sub(sp, sp, 8); + masm.storePtr(lr, Address(masm.getStackPointer(), 0)); + masm.adjustFrame(8); ++#elif defined(JS_CODEGEN_PPC64) ++ offsets->begin = masm.currentOffset(); ++ ++ // We have to burn a nop here to match the other prologue length. ++ masm.xs_mflr(ScratchRegister); ++ masm.as_nop(); // might as well explicitly wait for the mfspr to complete ++ masm.as_stdu(ScratchRegister, StackPointer, -8); + #else + // The x86/x64 call instruction pushes the return address. + offsets->begin = masm.currentOffset(); + #endif + MOZ_ASSERT_IF(!masm.oom(), + PushedRetAddr == masm.currentOffset() - offsets->begin); + + // Save jit frame pointer, so unwinding from wasm to jit frames is trivial. +diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp +index 4eb77a81a2..3f00cbb632 100644 +--- a/js/src/wasm/WasmGC.cpp ++++ b/js/src/wasm/WasmGC.cpp +@@ -284,16 +284,33 @@ bool IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC) { + (insn[-1] & 0xfffffc1f) == 0xd63f0000 || // blr reg + (insn[-1] & 0xfc000000) == 0x94000000 || // bl simm26 + (debugEnabled && insn[-1] == 0xd503201f)); // nop + + # elif defined(JS_CODEGEN_MIPS64) + // TODO (bug 1699696): Implement this. As for the platforms above, we need to + // enumerate all code sequences that can precede the stackmap location. 
+ return true; ++# elif defined(JS_CODEGEN_PPC64) ++// XXX: we should just be able to use inst[0] ++ const uint32_t* insn = (const uint32_t*)nextPC; ++ js::jit::Instruction* inst = (js::jit::Instruction*)nextPC; ++ //fprintf(stderr, "IsValidStackMapKey: 0x%lx 0x%08x\n", (uint64_t)nextPC, insn[0]); ++ return (((uintptr_t(insn) & 3) == 0) && ++ (inst[0].extractOpcode() == js::jit::PPC_addi || // stack allocate ++ inst[0].extractOpcode() == js::jit::PPC_addis || // load immediate ++ inst[0].extractOpcode() == js::jit::PPC_cmpwi || // test after bl ++ inst[0].extractOpcode() == js::jit::PPC_cmpw || // (extsw, same) ++ inst[0].extractOpcode() == js::jit::PPC_lfd || // load FPR ++ inst[0].extractOpcode() == js::jit::PPC_lfs || // load FPR ++ inst[0].extractOpcode() == js::jit::PPC_lwz || // load GPR ++ inst[0].extractOpcode() == js::jit::PPC_ld || // load GPR ++ inst[0].extractOpcode() == js::jit::PPC_b || // branch ++ inst[0].encode() == js::jit::PPC_nop || // GET BACK TO WORK ++ inst[0].encode() == js::jit::PPC_stop)); // designated throw + # else + MOZ_CRASH("IsValidStackMapKey: requires implementation on this platform"); + # endif + } + #endif + + } // namespace wasm + } // namespace js +diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp +index 4ab2a44192..1a51061a12 100644 +--- a/js/src/wasm/WasmSignalHandlers.cpp ++++ b/js/src/wasm/WasmSignalHandlers.cpp +@@ -101,16 +101,17 @@ using mozilla::DebugOnly; + # endif + # if defined(__mips__) + # define EPC_sig(p) ((p)->sc_pc) + # define RFP_sig(p) ((p)->sc_regs[30]) + # endif + # if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \ + defined(__PPC64LE__) + # define R01_sig(p) ((p)->sc_frame.fixreg[1]) ++# define R31_sig(p) ((p)->sc_frame.fixreg[31]) + # define R32_sig(p) ((p)->sc_frame.srr0) + # endif + #elif defined(__linux__) || defined(__sun) + # if defined(__linux__) + # define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP]) + # define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP]) + # define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP]) + # else +@@ -147,16 +148,17 @@ using mozilla::DebugOnly; + # if defined(__linux__) && (defined(__sparc__) && defined(__arch64__)) + # define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC]) + # define FP_sig(p) ((p)->uc_mcontext.mc_fp) + # define SP_sig(p) ((p)->uc_mcontext.mc_i7) + # endif + # if defined(__linux__) && (defined(__ppc64__) || defined(__PPC64__) || \ + defined(__ppc64le__) || defined(__PPC64LE__)) + # define R01_sig(p) ((p)->uc_mcontext.gp_regs[1]) ++# define R31_sig(p) ((p)->uc_mcontext.gp_regs[31]) + # define R32_sig(p) ((p)->uc_mcontext.gp_regs[32]) + # endif + #elif defined(__NetBSD__) + # define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP]) + # define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP]) + # define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP]) + # define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP]) + # define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP]) +@@ -173,16 +175,17 @@ using mozilla::DebugOnly; + # endif + # if defined(__mips__) + # define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC]) + # define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8]) + # endif + # if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \ + defined(__PPC64LE__) + # define R01_sig(p) ((p)->uc_mcontext.__gregs[_REG_R1]) ++# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_R31]) + # define R32_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC]) + # endif + #elif defined(__DragonFly__) || defined(__FreeBSD__) || \ + 
defined(__FreeBSD_kernel__) + # define EIP_sig(p) ((p)->uc_mcontext.mc_eip) + # define EBP_sig(p) ((p)->uc_mcontext.mc_ebp) + # define ESP_sig(p) ((p)->uc_mcontext.mc_esp) + # define RIP_sig(p) ((p)->uc_mcontext.mc_rip) +@@ -207,16 +210,17 @@ using mozilla::DebugOnly; + # endif + # if defined(__FreeBSD__) && defined(__mips__) + # define EPC_sig(p) ((p)->uc_mcontext.mc_pc) + # define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30]) + # endif + # if defined(__FreeBSD__) && (defined(__ppc64__) || defined(__PPC64__) || \ + defined(__ppc64le__) || defined(__PPC64LE__)) + # define R01_sig(p) ((p)->uc_mcontext.mc_gpr[1]) ++# define R31_sig(p) ((p)->uc_mcontext.mc_gpr[31]) + # define R32_sig(p) ((p)->uc_mcontext.mc_srr0) + # endif + #elif defined(XP_DARWIN) + # define EIP_sig(p) ((p)->thread.uts.ts32.__eip) + # define EBP_sig(p) ((p)->thread.uts.ts32.__ebp) + # define ESP_sig(p) ((p)->thread.uts.ts32.__esp) + # define RIP_sig(p) ((p)->thread.__rip) + # define RBP_sig(p) ((p)->thread.__rbp) +@@ -367,17 +371,17 @@ struct macos_aarch64_context { + # define PC_sig(p) EPC_sig(p) + # define FP_sig(p) RFP_sig(p) + # define SP_sig(p) RSP_sig(p) + # define LR_sig(p) R31_sig(p) + #elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \ + defined(__PPC64LE__) + # define PC_sig(p) R32_sig(p) + # define SP_sig(p) R01_sig(p) +-# define FP_sig(p) R01_sig(p) ++# define FP_sig(p) R31_sig(p) + #endif + + static void SetContextPC(CONTEXT* context, uint8_t* pc) { + #ifdef PC_sig + *reinterpret_cast(&PC_sig(context)) = pc; + #else + MOZ_CRASH(); + #endif +diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp +index 59a5cf18bf..dbc10c6e2c 100644 +--- a/js/src/wasm/WasmStubs.cpp ++++ b/js/src/wasm/WasmStubs.cpp +@@ -719,17 +719,17 @@ static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe, + AssertExpectedSP(masm); + masm.haltingAlign(CodeAlignment); + + offsets->begin = masm.currentOffset(); + + // Save the return address if it wasn't already saved by the call insn. + #ifdef JS_USE_LINK_REGISTER + # if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \ +- defined(JS_CODEGEN_MIPS64) ++ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64) + masm.pushReturnAddress(); + # elif defined(JS_CODEGEN_ARM64) + // WasmPush updates framePushed() unlike pushReturnAddress(), but that's + // cancelled by the setFramePushed() below. + WasmPush(masm, lr); + # else + MOZ_CRASH("Implement this"); + # endif +@@ -2111,17 +2111,26 @@ static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, + masm.storePtr(scratch, + Address(masm.getStackPointer(), i->offsetFromArgBase())); + } + i++; + MOZ_ASSERT(i.done()); + + // Make the call, test whether it succeeded, and extract the return value. + AssertStackAlignment(masm, ABIStackAlignment); ++#ifdef JS_CODEGEN_PPC64 ++ // Because this is calling an ABI-compliant function, we have to pull down ++ // a dummy linkage area or the values on the stack will be stomped on. The ++ // minimum size is sufficient. 
++ masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), -32); ++#endif + masm.call(SymbolicAddress::CallImport_General); ++#ifdef JS_CODEGEN_PPC64 ++ masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), 32); ++#endif + masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel); + + ResultType resultType = ResultType::Vector(fi.funcType().results()); + ValType registerResultType; + for (ABIResultIter iter(resultType); !iter.done(); iter.next()) { + if (iter.cur().inRegister()) { + MOZ_ASSERT(!registerResultType.isValid()); + registerResultType = iter.cur().type(); +@@ -2673,16 +2682,21 @@ static const LiveRegisterSet RegsToPreserve( + #elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) + // It's correct to use FloatRegisters::AllMask even when SIMD is not enabled; + // PushRegsInMask strips out the high lanes of the XMM registers in this case, + // while the singles will be stripped as they are aliased by the larger doubles. + static const LiveRegisterSet RegsToPreserve( + GeneralRegisterSet(Registers::AllMask & + ~(Registers::SetType(1) << Registers::StackPointer)), + FloatRegisterSet(FloatRegisters::AllMask)); ++#elif defined(JS_CODEGEN_PPC64) ++// Note that this includes no SPRs, since the JIT is unaware of them. ++static const LiveRegisterSet RegsToPreserve( ++ GeneralRegisterSet(Registers::AllMask), ++ FloatRegisterSet(FloatRegisters::AllMask)); + #else + static const LiveRegisterSet RegsToPreserve( + GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask)); + # ifdef ENABLE_WASM_SIMD + # error "no SIMD support" + # endif + #endif + +diff --git a/modules/libpref/init/StaticPrefList.yaml b/modules/libpref/init/StaticPrefList.yaml +index d81025b282..43b75c6ae0 100644 +--- a/modules/libpref/init/StaticPrefList.yaml ++++ b/modules/libpref/init/StaticPrefList.yaml +@@ -5729,17 +5729,17 @@ + - name: javascript.options.baselinejit + type: bool + value: true + mirror: always # LoadStartupJSPrefs + do_not_use_directly: true + + - name: javascript.options.ion + type: bool +- value: true ++ value: false + mirror: always # LoadStartupJSPrefs + do_not_use_directly: true + + # The irregexp JIT for regex evaluation. + - name: javascript.options.native_regexp + type: bool + value: true + mirror: always # LoadStartupJSPrefs +@@ -5968,17 +5968,17 @@ + value: 6 * 1024 * 1024 + #else + value: 2 * 1024 * 1024 + #endif + mirror: always + + - name: javascript.options.wasm_optimizingjit + type: bool +- value: true ++ value: false + mirror: always + + #if defined(ENABLE_WASM_SIMD) + - name: javascript.options.wasm_simd + type: bool + value: true + mirror: always + #endif // defined(ENABLE_WASM_SIMD) diff --git a/srcpkgs/firefox-esr/template b/srcpkgs/firefox-esr/template index 35e7b0be93c8..3b72e33f9764 100644 --- a/srcpkgs/firefox-esr/template +++ b/srcpkgs/firefox-esr/template @@ -3,7 +3,7 @@ # THIS PKG MUST BE SYNCHRONIZED WITH "srcpkgs/firefox-esr-i18n". 
# pkgname=firefox-esr -version=91.3.0 +version=91.4.0 revision=1 wrksrc="firefox-${version}" build_helper="rust" @@ -12,7 +12,7 @@ maintainer="Orphaned " license="MPL-2.0, GPL-2.0-or-later, LGPL-2.1-or-later" homepage="https://www.mozilla.org/firefox/" distfiles="${MOZILLA_SITE}/firefox/releases/${version}esr/source/firefox-${version}esr.source.tar.xz" -checksum=128b5349f112d8a0fd8698f3645ed43ea29d4b95047b7c4fd770b95d0661e96c +checksum=e722829db490f9332712a81c35996541737bbfb05232d47190fee779c4fcb327 lib32disabled=yes @@ -145,6 +145,13 @@ do_build() { echo "ac_add_options --enable-rust-simd" >>.mozconfig ;; esac + # enable out-of-tree baseline JS/WASM compiler for ppc64le + # more info: https://www.talospace.com/2021/12/91esr-with-baseline-compilerbaseline.html + # POWER9 only for now; runtime detection disables JIT on POWER8 + case "$XBPS_TARGET_MACHINE" in + ppc64le*) echo "ac_add_options --enable-jit" >>.mozconfig ;; + esac + export LDFLAGS+=" -Wl,-rpath=/usr/lib/firefox" if [ "$SOURCE_DATE_EPOCH" ]; then From 07824e848993288073b364635128eb27de4a7722 Mon Sep 17 00:00:00 2001 From: Will Springer Date: Sun, 26 Dec 2021 16:29:48 -0800 Subject: [PATCH 2/2] firefox-esr-i18n: update to 91.4.0 --- srcpkgs/firefox-esr-i18n/template | 188 +++++++++++++++--------------- 1 file changed, 94 insertions(+), 94 deletions(-) diff --git a/srcpkgs/firefox-esr-i18n/template b/srcpkgs/firefox-esr-i18n/template index 51583fb80e9b..d7168a2fd318 100644 --- a/srcpkgs/firefox-esr-i18n/template +++ b/srcpkgs/firefox-esr-i18n/template @@ -1,6 +1,6 @@ # Template file for 'firefox-esr-i18n' pkgname=firefox-esr-i18n -version=91.3.0 +version=91.4.0 revision=1 build_style=meta short_desc="Firefox ESR language packs" @@ -135,96 +135,96 @@ _pkgtmpl() { } } -checksum="0eee1b17b1f5aed676a2a494cf29b5e5d3d2698728a35eba08e66571d8c84c14 - bff092f5e1bc8e01e3d88fad4f6121f71c6df9f3f68a0437ef0a14050c7e3b42 - 71498faef46ff645bbbac526bb66b1cbca34c2b7022ac570a4796c6a39a8975f - bd99f9ada2c8248ee1239aa95f448804ec9686a73a28045bf05689d6919fbfcd - 467e04bd6eecf14f1b565ee9b89161873d205f54076d94d41a8bfc6fa2a58996 - 9881874658e6066b366a0a0f6140141caa165c1830f74947270c52c06cb38fa8 - a9c1c955fb11c7f473ea615c917804cbba1cc1c87b027b581f5c8a738f1a34fb - c61f578136c998d7fcfe157dd85637d536da4593a4f8fa90ffc5fe94bba9a803 - 47ce84a0d744c33049a6e081bd8d65dac515237e5298ff9fc424a1bc4700cc92 - 08f6eb5779f4399fdf0a8132dc92438e974cd3dcb4c5438fb587db876ee889ec - e16ed1906c49edfaaa233338bfd6eaf9e4f2113260a5682d4fdd9e9ce71b017e - ddb25de3a9e4cc3e282ffe270e62c13287a016e3c0d676cf8242b6d93afccaf5 - 9ad51c6346e7d70a94277eaf7146fa11199b2ab40d91653d6abf0d374f7134fd - 64ad0267457678a7b6f2b3e452f3678a3aaf14bfb905f4f78ce4d99e06312c37 - 91df48c9b6ae23e9e94641b98a0ba978f308cfd72236f0a0b9fa712e026cc71d - 52e87048120243092d77f79aa33852b57f6aec875b44cc7f321abd50f88afe7d - a98a26d0a50af7fec7a95f138d724ef67f309303a0df8067c0755c139b254aee - 126894015f17d908062f1600c7d8fa4010b8ed70d206bffc4a408b43356ea3d4 - 22180f6601376b132f2f46eb08a4ecb3c484e76bb90587764addd5d0647029ea - cbb323b2b1133d8290b991b79008ee8471a916c5ba542ca8e359770c19d4759a - b3015a7dbf693ce9ef44c6f3b8f7c0f043bb13474e4144159cc26a65cbb23521 - 2ab851974bdbb5b914acd44edfd6ef2adf4d0d83b872a8adb8b2b64d620b27fc - 234ecc57e99be39aa51ab5e5e5b238a9c5857fd366f69cdf160efd25237d0496 - e627aa9ee85e449ced2e655f45ab60e1372a36f4b9ff8543839a864766718569 - de9b1a64b01fd15ff6b89d6ccc93451e1f11216b07dc5e6fa05268c3e926cae1 - 2ca889180191d8183e052c9df3c377600fcae622f7649ab488a3c1a5a1cd8d5b - 
3b95aace3579b535a55128b9959979a61dba22bcdb195e362ceeb7526407c77a - 49154388549a9c5db321ebb7d327708a7bf4b36de39cdf0d93d3b88cee077fb3 - 9f2b35bc942ca69cb498c2752a234f58a1a765ce30563589c720a755bf29e655 - e10a8e72f0b6aa267a4a3e51708c86c21d9b92ddfac28d1f2d68a8b21729ca8c - 09a1e51ba668959331ac46041a76cc15f345b8634a642918544c0158777dd989 - 7792bbcc9a1ddcfca8968cea5bcaae250095b5a46fde5d51a48d4426b4c6f5d2 - 638c665557ef6556a5bbb2c9f79fca780a081226dbd5a465e7615f055a117748 - 9c3b45900a28ab3efb5b3bb52daebacc1dd3a246936b745fff676e6c3fd251fd - 7bcbba3e353cc7a0344302db8a9888f606e28e8c2f11f4f88dde52fba69b43c4 - 62383ba98a16964ce6ef2bde878515505965cfded17b69e477d3671e1a942fef - dcc2212529bea4e8b21bedf3ceefeec1a798818005d526d6d18cabecdff230e1 - 5d0c64cec6aafcf478b57c26640e18cfae6928d5ae705b9a82337559389a57fe - dab730eab216c2cb0a1672a21e78fbf93c672498d5e36781b1c68050ae8431d9 - e9296fdadb84d3462f63a06e9e72e3d7c1e6df95870a6f303b498fb115e8ac6b - 519f1afe2b1a6747e26718f28b39d27f5d0188bc85ed906b54d18cbca6a67ec7 - e90fdd34d1d4612bd1a7a81ca3275be2dada26862944b4caeb356482596f591a - 83142afa82a9db327475afcc3318882fd8a26d81bc946749d57eb3923c5f489b - 027604328860d5bc938ac9564aae8957308da5b9ec05cd3984856da1c2af3ea0 - a7028398816ae2334bf94c66efe429d981a48b790286d27fdc25f3388720e62c - 9224a7e0f1ceecdf4c7f77fdeb510100419f9e805e0b204f0c59d36275f6d925 - 8af990a6fb805f552643b6ee38d178d2f75bd169802cec178175c8005bc77b37 - b629886a47facaf7fad105b7ee9dc98b470670b25b575e51710d876235209674 - 31fc7f882b79c4d91d270bcc8e5b2fa7066f21d6c8a5d4d491d7896e49202f06 - fd7e5dc510770cc601f9ed4e174b8585ef28316f660379235245f10cc0c6bc3e - e0ac6d1626c1dfa05bcf6298919aeee4fa6904d4a347608d729e38ead4baf120 - fe05e00413a2905f3f25d65d635655cf9e37d62fec5d0d235ea5cc7fa08ee1fd - 20ff2c930b732082a44020fdebc588ac069264ead48fa0f9ace5aea4210b046f - 4346e474009cbb630392ed0583c65448ee577a71403cb164e904a99bf6e819c2 - f48ac2e2725c83c82af0374b5aa7a1da270bf25ea696c533c981d34518746fbd - e520a79ebdec829889a2225079ea7081bfa153ccf71024a9617625b88923d011 - feb471682e8aa79cdc410c053bd2763878275f1abff17bcb1783798ee264b6be - f98a07ccb4806ddfef815712756f06b82543dd13483abe9865434d4c247fa2b0 - 17f6388807ec48a8419dcc67381f8d8a5aadd467ed7d6c386ff56173e645633c - f7aab2c51a78cac47270bd37c9b545fe07b08429299d8fdc27b087ccbb4108f1 - 0dd59e9a669894d9ae82c50f2ee04495922aa6f45780b51df089706c7ba9cbcc - 7579ce83f088693989326b7166f66a3808398be81b922361556c3efbc8a8c08a - f07e95eea11c8f6e516c0fdb33038c321ae8aa27ac81f5c6773cf3104e967a19 - 5e247f86c15a267ac017dfcf73395ed3104f407c96bb606e34070332e2b26050 - 8b61a367a186feca735487d9f62f0de7865d1a678e7e9f990ad31945bab10eb7 - 080f150e066508946482e78dcc714c9fb3b63a7975f66ceba018ffc5bc65a833 - e9f5b74a2856fc009c3139164872ccbe284618d7fc1fa59c16575c82cda8491e - 80c50b252b4bc0c00a336a9bdbfb05e7c510f4ad1ae6c64f92f6f6a037f9d762 - 85596a73a957ff80d403d691cbc96724bae7186e1358f85c3ef21c501b033ad1 - 81277a3e8b896e760ef75ad90c197818aa3395abf017b7560db5b189302d8e1d - 733de2df49762e7c6e16a02acbfe409869ec61f071364d0bbe8cbee43f350c07 - b8d634c6ce7b03bc78c02480d82a4effbec46119cd8a6c7a0fbfbdbd7f8bee93 - f68c8c8de23ec4eae407e44176df77b48146a2bd6de4edbcf6eca33f3cf93cb2 - da5b8d10bfae987051f978f5f5707c9532c124233929402249ac99fe027c4c56 - 5a00221c264e06b04476acf4098eb9a5813eaebd38d35527652ae757bd7ac819 - 849872f1edf674b5200c35b05375a23ae56437e8ec1f5cbbd8cd4470127896d1 - c37075191481a93fd82c3ec12cb5ed81682eff4722e1d519287535e959a46379 - 286fd2991570be7813a7372b0651ee84de3e94e12b41e906de380573c05b75a9 - 8f367feebd634b1cdd004c0c448fed8473f780511f525eba7a902753d873b63a - 
f58b3eb8dbf4b214cd6de44ffc23ddddd2a2e61ba4f2b21eb1ae134669e3e19a - c527ca276d7c2b676e89030a36b1b0d2eb30eb986849d1ecc3c7fc90ed1b5b22 - 893484fbfa297fc87733918620f94629f2baee5d747b8cbcef4ff5268d0b8d3e - 11a46dde2f4e32c7ba5498195973e26dd757e6eb6f81717259062a92c0bc6cc0 - 0bca99ff50cb7105bbaba19b345c933b9e4902a92e439bc1bddc8db210a0416e - 07536a87813be5de3f2d7b9828b9d846daa4656ed983ef8321496d63d74ed55f - bf57e98659c357fb4223d1320d445ad58cb75a5327eceecc9deef4f81b713388 - 795322220ec8916e15938637d3d0ee0fedc876e6020a5e8d26d8281829b7a609 - 0b4327c104f159202f4d0ec2c4af3f742551322ea7880194cb4d5eb2e72efb39 - 9cc3ac6c592f7cc08f5bd0c7285c5de6530a63397212878d69f436f07110049b - c63c170c66b39ad9de3f8f6adc5980c7994ea5ee873140654bdcf12c3bc315d1 - cf94ac6f8728367d485173afc6b17d1687ebb23cb7bc3cf36073ceb0786e6283 - abceb2f7f9ea142c9029344e502cfad821d32960462b9c1747c289db19a652bc - e0eb06bf6818df10345e08ad1431d300bbe26c52f1c1795e95fd157296d3794d" +checksum="a3fe325e110c28efeb513c823d9a456856f6dd2a6392c60f612a48735075b8c7 + a508fde605dd2104e729a05a3dcee41a025f50fba24285c41e7cc668a52fd46d + 4ea526f9ab87ccbb3caef3120d42a1a198c08726f48cb87da5b9f048fb824a39 + 86a29d4a760d33abfbb446650f5b2089834cde750a0ed68d979fc1463589de41 + d2bcf36b3fbd58d82875895f082f0305a6cac47eb9c1d13824dc6301a5aab126 + ca90695258f56e01ee53958497c93ab31c0a65093e23f9fa2b7dd6f2c5ce5ea1 + 048318839b7f92d7a6521bf7407d912a272b689b42d34cca0c7590c09a52769e + 976e653cc2ae3a0bb7ec03786462c14aa0daacc7dd33ddff237cbf99dbe5dba6 + a3ff1411df9768267d32d909e088136b58c655bf0a86a338616b6a7cc92f96a7 + b88fcfe3fcea3e16fba7fbda4593d488e431e259130b9b7e2b2c64e129be38dc + 0689ffd82532c5446f6722c05711262a6b2f3ae84af3df96bd1ffc687eaa0c37 + 5fefb708dab6049dda65de8671ce1c16e5274e5bac598e935dbc953e5ba479ee + 87d078fb011020ae0797deda9f0ddd99c920e21da423446ad0f32fad3b547c4b + d420ac250a0abb37c168a534f4431a6a84fba40b3f51fb7594b5cffe3fee2fad + 83e855b5938b1cb68d54ffa610e6e2a2a8651228b543dde0784e8c1c79a7b3b0 + addb8e941c2e1ca2b3e9cc0bd28a41c7f83166614571b178fd3bcdaff6f1e2fe + a7a249232fc31095b7102e1dd4e7c5d044b90c3b05f7fa24bdfffbcbf385a9be + 540900cf945e919602119d3093e7bf08c9d6c7fca32cc2435bae347195bbc79e + 71f234c40dbf8d1eda9b19d73bac6199bf67c919080fccb50ff3a1125c4dd8b4 + 5132c482307fe8e8780cd446a02a1023f4b5e1f9611f8909cb5897f97081c7d5 + 7fef48dd27ad01ae849c6e100919f3061605152110036fcffc7e8463cbc35c32 + 280d850c293c0f387e299555391678e0b8c9d0be6bb7fb282a0295129e128eb3 + d11795c537f3650df1051b21050c490d99e8b96e53b27cb1ece989b4a63b6939 + 71823c92c3f1dbd6779a32c878942c2340aa39331ee98c54fbcba921241da2c8 + 0e655bfc9885901b68e9c0da72b20b0d6a36e8229b1e4f65b2393de2eaf8e568 + f8997d075096acbb55957727ca5998e6da89a407937e63900ab8f4f9b5412942 + 1a58929e6938bb9bb655dadb4d873827a3075e7fcdea492190eb6f37e10fa445 + 4aaefcc3587315f7897aa2cedd79b46057928209e853b9050a77d2c5036e17d1 + 5389567cb43c62a55d1a893e0dfbd50f95a9c2c5b17b49d221e2d3a7f11d60b3 + 697acf1474963200f0f8332d797f97f360ba1a16577040a9ffc0406a0c694f62 + 34b12bd48175d01a194b5a56568089582db29c228b6df0209fb977f412a7cb41 + 9c4ed3dd4cba389368abed394fbc0a73b8f5a1881280eb2f312d93473c505066 + 1a331a032c4d1407da1be1fe2565625b0f6ef4150ce2e2fa959c4f06ef95680c + 90464fff829d18976b8ff488577cd41b635213dc1143562900b2f84cdb09fd75 + 6592c75b51da6b086c575c870e70c6a2a2d1ef6b7c87a5ab88033b667dc0ab08 + e9e63b64b67d76f3d9c3131bbec4c491d1ab660f814039e3582c169da509c8f0 + ee05e1eadd5e34d2595ec59089c1a45a2f2dcbdcc4fde456195e277c61c12659 + 94740399909f7e043cb2d55e2e35ded31007a9d800180d29218f5d79586025b9 + 
939dff37c5c80f8a69d366a0b1cce4acdb51e2c1fa1cac43d89ed3cc846aac33 + be968262c32957492549f7b134721ccec0cb05d0307646c6e9a95176b7bbc37f + bd3354f8bb41930ca4f27946362bce4fabdf41affb443aabb9d52e045d13af0a + 0e5f812b2b57cbf01a669daeae3cde5614242e2ad947039ba4dbc5186476fb91 + 06d584f5607fd9efd1391d56eadd4ff64daedbe93e2080ba02275cb47f9b259f + a9f028820090095c5df7025f47937efb4d9534c3ece6fa68c39211b80136fb10 + 9c637820c0a73b654bafe9ea9525a3d9f14d4fade1f2350786871aa339a4599d + ee8608549a357c947b9f1ced4836152190b09e916c9ea5ae7ca50b6046bb2e79 + 790df28b6f95f25c0dd9277aac66d522cf24e1a9948715f33ecd9a365e1c572e + 96c8aba4cbabaa8bad8c4e48f895de8b6cd75dd16ccef52ce659875aece873c6 + 6ea192aafbb61e524e56a04526898ac8fdbab2d412e8611b52ae5875fcac3fa0 + 6d26161f188e860c519fa628b66211c390a4dd1a69eadf61414738dcfb0f355d + a72fdfdcda52eb6ae65d6cc88264fb0ead2616f93ebf4cff90ddc71c86ce41fa + 2b36258af01488a4ede9db471a7015bc03ee9958be8ff1e350c0a0524ec4475a + 2958da4031bcd30b6b7b456e2b191e309316574d17853883dcf69dd3e817508d + dae23206f09d79073870ca4d4fba7d4053f0ac4b95105aae8ac6a40987588d9c + ced62234f587da491668632546489163eb015a659e33e4044ee64c9aaf94990a + 521bd5f19d5551c5e74fd971193086cd66091d11b250dbc7fd10c6092a07df72 + b8ebebeeabd3bb860575324e62d721022b65e2e226b269760a0c93ab81d619c5 + 48a5616436e05f8be9b793d60c04da196e84cc8898a19da6b91e930d496a518a + b58487ecc07eacea269f52d9a932dca8ff1a4f070042a91aee0a563e8f7aca59 + baa53232fe3374857f2df3d56609aafe63ff3b1a6cf8c29e04157a5ca54e2a70 + 5d17b5f3449312cffb768f7081415d209787e29cb514296d242bba1437fc57b0 + c11aebcaa937e18a3c65ccf7356663493bea13bb87995968cdab0ed21784b049 + 8db940971924b2e3a8dadcde33b51d89e61163c9aba093f228c3135285be903a + c92a686f4ecccf92f036aa923d5c5c63012f3ebc7d5d39e0d85c7012202657dc + b4a402cba82bb59d11e03eb537268a43c4b24dadfdb5ea76d077feca36de467c + 721d680c71ca5edbbc4ce8942dbd3a3ade896a3303a9587755026e78b6c6e5c9 + 31955b366db7f3058178fe3ed0d523e420a38cbd356358aaa7dab294e79e5250 + 0ca4b0e966e68f7935935cf8a26019a1bdc9c0d1e0477f72a8667b9ace01e769 + 5478faddf117f03c0635cc44ad726af2f4c3d54a219a2b8b402e41fc053220d5 + f1a4e31526d32924ccf9cd72c36fabba8b6c60a74256f786c9c7b82d5dabf39b + 6ee81beda19a14e485696cda5e009523cbeafd10c43e79fd5ba207f5b643e28b + ca0267ac05c2d5115e87fe31425022f80bc9c1a862becb3d83b8b92187fc3304 + b9e52fe2b6ba919106489e269e3063eafa0ebd25ee31ae9f6b885bcec811a22e + d5e8b7301e1148f675a0251a772c81363cdad4a4327c8be6514f27c80842f5ea + 77df736e14bc594d5679622bed8d58663fe8df441596a610b13ef997a8a09150 + c7f73504f9fcaf312d807aa078e429e369477c49380f01a82d6622e570481022 + 78ef98ec08fca3bc96f8c6c86bd3a4fda532a4a2e30618ff9156bce3f31e46f6 + 5369d90bfb0189bee5069e6674cd74c96d2ff84faca215abc2af67207e8dc0c8 + 85dd4da34b09b381ea4902da0391e8379bd2a58cc6eba74d664a2d911395a990 + 4bba3ac9db3225eccc5608681895eb438e23f08dd433d33f3c75b146b9b39e30 + 74c3e6ea5bc2992dbf2dfa2ee4a4942da714d6d424073542a0f6605ecec75a8e + 7862a93bf9d93c0a9b91297a2d86e67ce488b131cb49460c261d8e720fc5269b + ac104c7c0df33b65aa5083a2727643e14ef9995b4a26e5392bf5e77ebf084b15 + 4d1b6cc2b9725fe13da6c53b8868c92b05404c95c7e125c98afffcdaad3210aa + 047e655db499dcb09e591b03fa37ab5ab62356037ab413148c3218b90c83330c + d8063e2594fc895ad8fda1997b5a0f53dc1aad733cb200be46bb245043e69b2b + 823b5c076cebee06c4a55b55a88a62baefedb050a786631ed143cc142dfc63dc + f0b47e4a21eb381407576eb427de6043193fc6322c0ab7f11347fb7c105c89ad + 3dec9929a909823c73c515c908ab1b22869f56b633fbe88cc38b36dc17984975 + c7763afedfd08d895ac7de8837c828e07cac5984df561f2a34aaebcb9b763234 + 65fa0bb892070a244959f01cb3fd7d0fb4e35defeed341bf711efefb801e0928 + 
c718c742aa63f7560c89bd8fe96b1514cc21504d0061761bcb717582c3a91d6b + 687f12cefd8ce75371a2a865c1badbae81c131dd1365cda263b1140e2b01d3d1"