summaryrefslogtreecommitdiff
path: root/js/src
diff options
context:
space:
mode:
authorMoonchild <moonchild@palemoon.org>2023-09-18 16:57:09 +0200
committerMoonchild <moonchild@palemoon.org>2023-09-18 16:57:09 +0200
commit7003ea7dc4e6164fc2451e4255da0cc1a7b4d22f (patch)
tree3a098fa1b2914b1c7cd6eea39fc0fc53ce3b9fa1 /js/src
parent8635073418a294520172bf5b05d886b35e4bcdc3 (diff)
downloaduxp-7003ea7dc4e6164fc2451e4255da0cc1a7b4d22f.tar.gz
Issue #2307 - Part 3: Remove SIMD.js support
This gets rid of the SIMD-specific scalar types in JS. This considerably deviates from Mozilla's work because of our divergent path.
Diffstat (limited to 'js/src')
-rw-r--r--js/src/builtin/SIMD.cpp1551
-rw-r--r--js/src/builtin/SIMD.h1218
-rw-r--r--js/src/builtin/TestingFunctions.cpp2
-rw-r--r--js/src/builtin/TypedObject.cpp86
-rw-r--r--js/src/builtin/TypedObject.h52
-rw-r--r--js/src/builtin/TypedObject.js481
-rw-r--r--js/src/builtin/TypedObjectConstants.h23
-rw-r--r--js/src/devtools/automation/cgc-jittest-timeouts.txt1
-rw-r--r--js/src/doc/JITOptimizations/Outcomes.md14
-rw-r--r--js/src/jit-test/lib/simd.js109
-rw-r--r--js/src/jit-test/tests/SIMD/binary-arith.js30
-rw-r--r--js/src/jit-test/tests/SIMD/bool32x4-arith.js15
-rw-r--r--js/src/jit-test/tests/SIMD/bool32x4-const.js65
-rw-r--r--js/src/jit-test/tests/SIMD/bug1109911.js11
-rw-r--r--js/src/jit-test/tests/SIMD/bug1121299.js31
-rw-r--r--js/src/jit-test/tests/SIMD/bug1123631.js9
-rw-r--r--js/src/jit-test/tests/SIMD/bug1130845.js15
-rw-r--r--js/src/jit-test/tests/SIMD/bug1241872.js10
-rw-r--r--js/src/jit-test/tests/SIMD/bug1248503.js16
-rw-r--r--js/src/jit-test/tests/SIMD/bug1273483.js9
-rw-r--r--js/src/jit-test/tests/SIMD/bug1296640-gc-args.js9
-rw-r--r--js/src/jit-test/tests/SIMD/bug1303780-gc-args.js12
-rw-r--r--js/src/jit-test/tests/SIMD/bug953108.js10
-rw-r--r--js/src/jit-test/tests/SIMD/check.js25
-rw-r--r--js/src/jit-test/tests/SIMD/compare.js39
-rw-r--r--js/src/jit-test/tests/SIMD/complex-4.js70
-rw-r--r--js/src/jit-test/tests/SIMD/convert.js68
-rw-r--r--js/src/jit-test/tests/SIMD/float32x4-binary-arith.js33
-rw-r--r--js/src/jit-test/tests/SIMD/getters.js48
-rw-r--r--js/src/jit-test/tests/SIMD/inline-missing-arguments.js81
-rw-r--r--js/src/jit-test/tests/SIMD/load.js123
-rw-r--r--js/src/jit-test/tests/SIMD/nursery-overflow.js29
-rw-r--r--js/src/jit-test/tests/SIMD/recover.js70
-rw-r--r--js/src/jit-test/tests/SIMD/replacelane.js181
-rw-r--r--js/src/jit-test/tests/SIMD/saturate.js37
-rw-r--r--js/src/jit-test/tests/SIMD/select.js35
-rw-r--r--js/src/jit-test/tests/SIMD/shift.js75
-rw-r--r--js/src/jit-test/tests/SIMD/shuffle.js86
-rw-r--r--js/src/jit-test/tests/SIMD/splat.js15
-rw-r--r--js/src/jit-test/tests/SIMD/store.js143
-rw-r--r--js/src/jit-test/tests/SIMD/swizzle.js104
-rw-r--r--js/src/jit-test/tests/SIMD/uconvert.js86
-rw-r--r--js/src/jit-test/tests/SIMD/unary.js35
-rw-r--r--js/src/jit-test/tests/SIMD/unbox.js144
-rw-r--r--js/src/jit-test/tests/asm.js/bug1126251.js38
-rw-r--r--js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js28
-rw-r--r--js/src/jit-test/tests/asm.js/simd-fbirds.js197
-rw-r--r--js/src/jit-test/tests/asm.js/simd-mandelbrot.js1818
-rw-r--r--js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js10
-rw-r--r--js/src/jit-test/tests/asm.js/testBug1099216.js61
-rw-r--r--js/src/jit-test/tests/asm.js/testJumpRange.js20
-rw-r--r--js/src/jit-test/tests/asm.js/testProfiling.js13
-rw-r--r--js/src/jit-test/tests/asm.js/testSIMD-16x8.js510
-rw-r--r--js/src/jit-test/tests/asm.js/testSIMD-8x16.js524
-rw-r--r--js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js84
-rw-r--r--js/src/jit-test/tests/asm.js/testSIMD-load-store.js457
-rw-r--r--js/src/jit-test/tests/asm.js/testSIMD.js1575
-rw-r--r--js/src/jit-test/tests/asm.js/testZOOB.js112
-rw-r--r--js/src/jit/BaselineBailouts.cpp1
-rw-r--r--js/src/jit/BaselineIC.cpp72
-rw-r--r--js/src/jit/BaselineInspector.cpp19
-rw-r--r--js/src/jit/BaselineInspector.h1
-rw-r--r--js/src/jit/CodeGenerator.cpp146
-rw-r--r--js/src/jit/CodeGenerator.h18
-rw-r--r--js/src/jit/EagerSimdUnbox.cpp127
-rw-r--r--js/src/jit/EagerSimdUnbox.h24
-rw-r--r--js/src/jit/InlinableNatives.h11
-rw-r--r--js/src/jit/Ion.cpp17
-rw-r--r--js/src/jit/IonBuilder.cpp18
-rw-r--r--js/src/jit/IonBuilder.h46
-rw-r--r--js/src/jit/IonOptimizationLevels.cpp2
-rw-r--r--js/src/jit/IonOptimizationLevels.h7
-rw-r--r--js/src/jit/IonTypes.h79
-rw-r--r--js/src/jit/JitCompartment.h26
-rw-r--r--js/src/jit/JitFrameIterator.h2
-rw-r--r--js/src/jit/JitOptions.cpp3
-rw-r--r--js/src/jit/JitOptions.h1
-rw-r--r--js/src/jit/LIR.h9
-rw-r--r--js/src/jit/Lowering.cpp227
-rw-r--r--js/src/jit/Lowering.h11
-rw-r--r--js/src/jit/MCallOptimize.cpp787
-rw-r--r--js/src/jit/MIR.cpp524
-rw-r--r--js/src/jit/MIR.h1232
-rw-r--r--js/src/jit/MIRGenerator.h5
-rw-r--r--js/src/jit/MIRGraph.cpp33
-rw-r--r--js/src/jit/MOpcodes.h21
-rw-r--r--js/src/jit/MacroAssembler.cpp103
-rw-r--r--js/src/jit/MacroAssembler.h11
-rw-r--r--js/src/jit/RangeAnalysis.cpp4
-rw-r--r--js/src/jit/Recover.cpp74
-rw-r--r--js/src/jit/Recover.h12
-rw-r--r--js/src/jit/TypePolicy.cpp124
-rw-r--r--js/src/jit/TypePolicy.h55
-rw-r--r--js/src/jit/TypedObjectPrediction.cpp7
-rw-r--r--js/src/jit/TypedObjectPrediction.h3
-rw-r--r--js/src/jit/arm/Lowering-arm.h11
-rw-r--r--js/src/jit/arm/MacroAssembler-arm.h28
-rw-r--r--js/src/jit/arm64/Lowering-arm64.h12
-rw-r--r--js/src/jit/arm64/MacroAssembler-arm64.h33
-rw-r--r--js/src/jit/mips-shared/Lowering-mips-shared.h11
-rw-r--r--js/src/jit/mips32/MacroAssembler-mips32.h28
-rw-r--r--js/src/jit/mips64/MacroAssembler-mips64.h28
-rw-r--r--js/src/jit/none/Lowering-none.h8
-rw-r--r--js/src/jit/none/MacroAssembler-none.h17
-rw-r--r--js/src/jit/shared/Assembler-shared.h12
-rw-r--r--js/src/jit/shared/CodeGenerator-shared-inl.h4
-rw-r--r--js/src/jit/shared/CodeGenerator-shared.cpp15
-rw-r--r--js/src/jit/shared/LIR-shared.h697
-rw-r--r--js/src/jit/shared/LOpcodes-shared.h41
-rw-r--r--js/src/jit/shared/Lowering-shared-inl.h3
-rw-r--r--js/src/jit/shared/Lowering-shared.h16
-rw-r--r--js/src/jit/x64/Assembler-x64.h4
-rw-r--r--js/src/jit/x64/CodeGenerator-x64.cpp8
-rw-r--r--js/src/jit/x64/LOpcodes-x64.h2
-rw-r--r--js/src/jit/x64/Lowering-x64.cpp8
-rw-r--r--js/src/jit/x64/MacroAssembler-x64.cpp61
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp889
-rw-r--r--js/src/jit/x86-shared/CodeGenerator-x86-shared.h71
-rw-r--r--js/src/jit/x86-shared/LIR-x86-shared.h41
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.cpp375
-rw-r--r--js/src/jit/x86-shared/Lowering-x86-shared.h14
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h23
-rw-r--r--js/src/jit/x86-shared/MacroAssembler-x86-shared.h77
-rw-r--r--js/src/jit/x86/Assembler-x86.h4
-rw-r--r--js/src/jit/x86/CodeGenerator-x86.cpp2
-rw-r--r--js/src/jit/x86/LOpcodes-x86.h2
-rw-r--r--js/src/jit/x86/Lowering-x86.cpp8
-rw-r--r--js/src/jit/x86/MacroAssembler-x86.cpp62
-rw-r--r--js/src/js.msg5
-rw-r--r--js/src/jsapi.cpp3
-rw-r--r--js/src/jscntxt.cpp6
-rw-r--r--js/src/jsfriendapi.h72
-rw-r--r--js/src/jsprototypes.h7
-rw-r--r--js/src/moz.build2
-rw-r--r--js/src/vm/ArrayBufferObject.cpp4
-rw-r--r--js/src/vm/GlobalObject.h18
-rw-r--r--js/src/vm/SelfHosting.cpp15
-rw-r--r--js/src/vm/TypedArrayCommon.h13
-rw-r--r--js/src/vm/TypedArrayObject.cpp12
-rw-r--r--js/src/vm/TypedArrayObject.h5
-rw-r--r--js/src/wasm/AsmJS.cpp1828
-rw-r--r--js/src/wasm/AsmJS.h1
-rw-r--r--js/src/wasm/WasmBaselineCompile.cpp64
-rw-r--r--js/src/wasm/WasmBinaryConstants.h100
-rw-r--r--js/src/wasm/WasmBinaryFormat.cpp11
-rw-r--r--js/src/wasm/WasmBinaryFormat.h45
-rw-r--r--js/src/wasm/WasmBinaryIterator.cpp218
-rw-r--r--js/src/wasm/WasmBinaryIterator.h417
-rw-r--r--js/src/wasm/WasmGenerator.cpp10
-rw-r--r--js/src/wasm/WasmInstance.cpp119
-rw-r--r--js/src/wasm/WasmIonCompile.cpp862
-rw-r--r--js/src/wasm/WasmJS.cpp1
-rw-r--r--js/src/wasm/WasmSignalHandlers.cpp1
-rw-r--r--js/src/wasm/WasmStubs.cpp69
-rw-r--r--js/src/wasm/WasmTextUtils.cpp1
-rw-r--r--js/src/wasm/WasmTypes.cpp29
-rw-r--r--js/src/wasm/WasmTypes.h165
157 files changed, 147 insertions, 21186 deletions
diff --git a/js/src/builtin/SIMD.cpp b/js/src/builtin/SIMD.cpp
deleted file mode 100644
index 93476c5db3..0000000000
--- a/js/src/builtin/SIMD.cpp
+++ /dev/null
@@ -1,1551 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-/*
- * JS SIMD pseudo-module.
- * Specification matches polyfill:
- * https://github.com/johnmccutchan/ecmascript_simd/blob/master/src/ecmascript_simd.js
- * The objects float32x4 and int32x4 are installed on the SIMD pseudo-module.
- */
-
-#include "builtin/SIMD.h"
-
-#include "mozilla/FloatingPoint.h"
-#include "mozilla/IntegerTypeTraits.h"
-#include "mozilla/Sprintf.h"
-
-#include "jsapi.h"
-#include "jsfriendapi.h"
-#include "jsnum.h"
-#include "jsprf.h"
-
-#include "builtin/TypedObject.h"
-#include "jit/InlinableNatives.h"
-#include "js/GCAPI.h"
-#include "js/Value.h"
-
-#include "jsobjinlines.h"
-
-using namespace js;
-
-using mozilla::ArrayLength;
-using mozilla::IsFinite;
-using mozilla::IsNaN;
-using mozilla::FloorLog2;
-using mozilla::NumberIsInt32;
-
-///////////////////////////////////////////////////////////////////////////
-// SIMD
-
-static_assert(unsigned(SimdType::Count) == 12, "sync with TypedObjectConstants.h");
-
-static bool ArgumentToLaneIndex(JSContext* cx, JS::HandleValue v, unsigned limit, unsigned* lane);
-
-static bool
-CheckVectorObject(HandleValue v, SimdType expectedType)
-{
- if (!v.isObject())
- return false;
-
- JSObject& obj = v.toObject();
- if (!obj.is<TypedObject>())
- return false;
-
- TypeDescr& typeRepr = obj.as<TypedObject>().typeDescr();
- if (typeRepr.kind() != type::Simd)
- return false;
-
- return typeRepr.as<SimdTypeDescr>().type() == expectedType;
-}
-
-template<class V>
-bool
-js::IsVectorObject(HandleValue v)
-{
- return CheckVectorObject(v, V::type);
-}
-
-#define FOR_EACH_SIMD(macro) \
- macro(Int8x16) \
- macro(Int16x8) \
- macro(Int32x4) \
- macro(Uint8x16) \
- macro(Uint16x8) \
- macro(Uint32x4) \
- macro(Float32x4) \
- macro(Float64x2) \
- macro(Bool8x16) \
- macro(Bool16x8) \
- macro(Bool32x4) \
- macro(Bool64x2)
-
-#define InstantiateIsVectorObject_(T) \
- template bool js::IsVectorObject<T>(HandleValue v);
-FOR_EACH_SIMD(InstantiateIsVectorObject_)
-#undef InstantiateIsVectorObject_
-
-const char*
-js::SimdTypeToString(SimdType type)
-{
- switch (type) {
-#define RETSTR_(TYPE) case SimdType::TYPE: return #TYPE;
- FOR_EACH_SIMD(RETSTR_)
-#undef RETSTR_
- case SimdType::Count: break;
- }
- return "<bad SimdType>";
-}
-
-PropertyName*
-js::SimdTypeToName(const JSAtomState& atoms, SimdType type)
-{
- switch (type) {
-#define CASE_(TypeName) case SimdType::TypeName: return atoms.TypeName;
- FOR_EACH_SIMD(CASE_)
-#undef CASE_
- case SimdType::Count: break;
- }
- MOZ_CRASH("bad SIMD type");
-}
-
-bool
-js::IsSimdTypeName(const JSAtomState& atoms, const PropertyName* name, SimdType* type)
-{
-#define CHECK_(TypeName) if (name == atoms.TypeName) { \
- *type = SimdType::TypeName; \
- return true; \
- }
- FOR_EACH_SIMD(CHECK_)
-#undef CHECK_
- return false;
-}
-
-static inline bool
-ErrorBadArgs(JSContext* cx)
-{
- JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_TYPED_ARRAY_BAD_ARGS);
- return false;
-}
-
-static inline bool
-ErrorWrongTypeArg(JSContext* cx, unsigned argIndex, Handle<TypeDescr*> typeDescr)
-{
- MOZ_ASSERT(argIndex < 10);
- char charArgIndex[2];
- SprintfLiteral(charArgIndex, "%u", argIndex);
-
- HeapSlot& typeNameSlot = typeDescr->getReservedSlotRef(JS_DESCR_SLOT_STRING_REPR);
- char* typeNameStr = JS_EncodeString(cx, typeNameSlot.toString());
- if (!typeNameStr)
- return false;
-
- JS_ReportErrorNumberLatin1(cx, GetErrorMessage, nullptr, JSMSG_SIMD_NOT_A_VECTOR,
- typeNameStr, charArgIndex);
- JS_free(cx, typeNameStr);
- return false;
-}
-
-static inline bool
-ErrorBadIndex(JSContext* cx)
-{
- JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_BAD_INDEX);
- return false;
-}
-
-template<typename T>
-static SimdTypeDescr*
-GetTypeDescr(JSContext* cx)
-{
- RootedGlobalObject global(cx, cx->global());
- return GlobalObject::getOrCreateSimdTypeDescr(cx, global, T::type);
-}
-
-template<typename V>
-bool
-js::ToSimdConstant(JSContext* cx, HandleValue v, jit::SimdConstant* out)
-{
- typedef typename V::Elem Elem;
- Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
- if (!typeDescr)
- return false;
- if (!IsVectorObject<V>(v))
- return ErrorWrongTypeArg(cx, 1, typeDescr);
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* mem = reinterpret_cast<Elem*>(v.toObject().as<TypedObject>().typedMem(nogc));
- *out = jit::SimdConstant::CreateSimd128(mem);
- return true;
-}
-
-template bool js::ToSimdConstant<Int8x16>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Int16x8>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Int32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Float32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Bool8x16>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Bool16x8>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-template bool js::ToSimdConstant<Bool32x4>(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-
-template<typename Elem>
-static Elem
-TypedObjectMemory(HandleValue v, const JS::AutoRequireNoGC& nogc)
-{
- TypedObject& obj = v.toObject().as<TypedObject>();
- return reinterpret_cast<Elem>(obj.typedMem(nogc));
-}
-
-static const ClassOps SimdTypeDescrClassOps = {
- nullptr, /* addProperty */
- nullptr, /* delProperty */
- nullptr, /* getProperty */
- nullptr, /* setProperty */
- nullptr, /* enumerate */
- nullptr, /* resolve */
- nullptr, /* mayResolve */
- TypeDescr::finalize,
- SimdTypeDescr::call
-};
-
-const Class SimdTypeDescr::class_ = {
- "SIMD",
- JSCLASS_HAS_RESERVED_SLOTS(JS_DESCR_SLOTS) | JSCLASS_BACKGROUND_FINALIZE,
- &SimdTypeDescrClassOps
-};
-
-namespace {
-
-// Define classes (Int8x16Defn, Int16x8Defn, etc.) to group together various
-// properties and so on.
-#define DEFINE_DEFN_(TypeName) \
-class TypeName##Defn { \
- public: \
- static const JSFunctionSpec Methods[]; \
-};
-
-FOR_EACH_SIMD(DEFINE_DEFN_)
-#undef DEFINE_DEFN_
-
-} // namespace
-
-// Shared type descriptor methods for all SIMD types.
-static const JSFunctionSpec TypeDescriptorMethods[] = {
- JS_SELF_HOSTED_FN("toSource", "DescrToSource", 0, 0),
- JS_SELF_HOSTED_FN("array", "ArrayShorthand", 1, 0),
- JS_SELF_HOSTED_FN("equivalent", "TypeDescrEquivalent", 1, 0),
- JS_FS_END
-};
-
-// Shared TypedObject methods for all SIMD types.
-static const JSFunctionSpec SimdTypedObjectMethods[] = {
- JS_SELF_HOSTED_FN("toString", "SimdToString", 0, 0),
- JS_SELF_HOSTED_FN("valueOf", "SimdValueOf", 0, 0),
- JS_SELF_HOSTED_FN("toSource", "SimdToSource", 0, 0),
- JS_FS_END
-};
-
-// Provide JSJitInfo structs for those types that are supported by Ion.
-// The controlling SIMD type is encoded as the InlinableNative primary opcode.
-// The SimdOperation within the type is encoded in the .depth field.
-//
-// The JS_INLINABLE_FN macro refers to js::JitInfo_##native which we provide as
-// Simd##Type##_##Operation
-//
-// /!\ Don't forget to keep this list in sync with the SIMD instrinics used in
-// SelfHosting.cpp.
-
-namespace js {
-namespace jit {
-
-static_assert(uint64_t(SimdOperation::Last) <= UINT16_MAX, "SimdOperation must fit in uint16_t");
-
-// See also JitInfo_* in MCallOptimize.cpp. We provide a JSJitInfo for all the
-// named functions here. The default JitInfo_SimdInt32x4 etc structs represent the
-// SimdOperation::Constructor.
-#define DEFN(TYPE, OP) const JSJitInfo JitInfo_Simd##TYPE##_##OP = { \
- /* .getter, unused for inlinable natives. */ \
- { nullptr }, \
- /* .inlinableNative, but we have to init first union member: .protoID. */ \
- { uint16_t(InlinableNative::Simd##TYPE) }, \
- /* .nativeOp. Actually initializing first union member .depth. */ \
- { uint16_t(SimdOperation::Fn_##OP) }, \
- /* .type_ bitfield says this in an inlinable native function. */ \
- JSJitInfo::InlinableNative \
- /* Remaining fields are not used for inlinable natives. They are zero-initialized. */ \
-};
-
-// This list of inlinable types should match the one in jit/InlinableNatives.h.
-#define TDEFN(Name, Func, Operands) DEFN(Float32x4, Name)
-FLOAT32X4_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Int8x16, Name)
-INT8X16_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Uint8x16, Name)
-UINT8X16_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Int16x8, Name)
-INT16X8_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Uint16x8, Name)
-UINT16X8_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Int32x4, Name)
-INT32X4_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Uint32x4, Name)
-UINT32X4_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Bool8x16, Name)
-BOOL8X16_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Bool16x8, Name)
-BOOL16X8_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-#define TDEFN(Name, Func, Operands) DEFN(Bool32x4, Name)
-BOOL32X4_FUNCTION_LIST(TDEFN)
-#undef TDEFN
-
-} // namespace jit
-} // namespace js
-
-const JSFunctionSpec Float32x4Defn::Methods[] = {
-#define SIMD_FLOAT32X4_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_float32x4_##Name, Operands, 0, SimdFloat32x4_##Name),
- FLOAT32X4_FUNCTION_LIST(SIMD_FLOAT32X4_FUNCTION_ITEM)
-#undef SIMD_FLOAT32x4_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Float64x2Defn::Methods[] = {
-#define SIMD_FLOAT64X2_FUNCTION_ITEM(Name, Func, Operands) \
- JS_FN(#Name, js::simd_float64x2_##Name, Operands, 0),
- FLOAT64X2_FUNCTION_LIST(SIMD_FLOAT64X2_FUNCTION_ITEM)
-#undef SIMD_FLOAT64X2_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Int8x16Defn::Methods[] = {
-#define SIMD_INT8X16_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_int8x16_##Name, Operands, 0, SimdInt8x16_##Name),
- INT8X16_FUNCTION_LIST(SIMD_INT8X16_FUNCTION_ITEM)
-#undef SIMD_INT8X16_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Int16x8Defn::Methods[] = {
-#define SIMD_INT16X8_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_int16x8_##Name, Operands, 0, SimdInt16x8_##Name),
- INT16X8_FUNCTION_LIST(SIMD_INT16X8_FUNCTION_ITEM)
-#undef SIMD_INT16X8_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Int32x4Defn::Methods[] = {
-#define SIMD_INT32X4_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_int32x4_##Name, Operands, 0, SimdInt32x4_##Name),
- INT32X4_FUNCTION_LIST(SIMD_INT32X4_FUNCTION_ITEM)
-#undef SIMD_INT32X4_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Uint8x16Defn::Methods[] = {
-#define SIMD_UINT8X16_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_uint8x16_##Name, Operands, 0, SimdUint8x16_##Name),
- UINT8X16_FUNCTION_LIST(SIMD_UINT8X16_FUNCTION_ITEM)
-#undef SIMD_UINT8X16_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Uint16x8Defn::Methods[] = {
-#define SIMD_UINT16X8_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_uint16x8_##Name, Operands, 0, SimdUint16x8_##Name),
- UINT16X8_FUNCTION_LIST(SIMD_UINT16X8_FUNCTION_ITEM)
-#undef SIMD_UINT16X8_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Uint32x4Defn::Methods[] = {
-#define SIMD_UINT32X4_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_uint32x4_##Name, Operands, 0, SimdUint32x4_##Name),
- UINT32X4_FUNCTION_LIST(SIMD_UINT32X4_FUNCTION_ITEM)
-#undef SIMD_UINT32X4_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Bool8x16Defn::Methods[] = {
-#define SIMD_BOOL8X16_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_bool8x16_##Name, Operands, 0, SimdBool8x16_##Name),
- BOOL8X16_FUNCTION_LIST(SIMD_BOOL8X16_FUNCTION_ITEM)
-#undef SIMD_BOOL8X16_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Bool16x8Defn::Methods[] = {
-#define SIMD_BOOL16X8_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_bool16x8_##Name, Operands, 0, SimdBool16x8_##Name),
- BOOL16X8_FUNCTION_LIST(SIMD_BOOL16X8_FUNCTION_ITEM)
-#undef SIMD_BOOL16X8_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Bool32x4Defn::Methods[] = {
-#define SIMD_BOOL32X4_FUNCTION_ITEM(Name, Func, Operands) \
- JS_INLINABLE_FN(#Name, js::simd_bool32x4_##Name, Operands, 0, SimdBool32x4_##Name),
- BOOL32X4_FUNCTION_LIST(SIMD_BOOL32X4_FUNCTION_ITEM)
-#undef SIMD_BOOL32X4_FUNCTION_ITEM
- JS_FS_END
-};
-
-const JSFunctionSpec Bool64x2Defn::Methods[] = {
-#define SIMD_BOOL64X2_FUNCTION_ITEM(Name, Func, Operands) \
- JS_FN(#Name, js::simd_bool64x2_##Name, Operands, 0),
- BOOL64X2_FUNCTION_LIST(SIMD_BOOL64X2_FUNCTION_ITEM)
-#undef SIMD_BOOL64x2_FUNCTION_ITEM
- JS_FS_END
-};
-
-template <typename T>
-static bool
-FillLanes(JSContext* cx, Handle<TypedObject*> result, const CallArgs& args)
-{
- typedef typename T::Elem Elem;
- Elem tmp;
- for (unsigned i = 0; i < T::lanes; i++) {
- if (!T::Cast(cx, args.get(i), &tmp))
- return false;
- // Reassure typedMem() that we won't GC while holding onto the returned
- // pointer, even though we could GC on every iteration of this loop
- // (but it is safe because we re-fetch each time.)
- JS::AutoCheckCannotGC nogc(cx);
- reinterpret_cast<Elem*>(result->typedMem(nogc))[i] = tmp;
- }
- args.rval().setObject(*result);
- return true;
-}
-
-bool
-SimdTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
-{
- CallArgs args = CallArgsFromVp(argc, vp);
-
- Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
- Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, descr, 0));
- if (!result)
- return false;
-
-#define CASE_CALL_(Type) \
- case SimdType::Type: return FillLanes< ::Type>(cx, result, args);
-
- switch (descr->type()) {
- FOR_EACH_SIMD(CASE_CALL_)
- case SimdType::Count: break;
- }
-
-#undef CASE_CALL_
- MOZ_CRASH("unexpected SIMD descriptor");
- return false;
-}
-
-///////////////////////////////////////////////////////////////////////////
-// SIMD class
-
-static const ClassOps SimdObjectClassOps = {
- nullptr, /* addProperty */
- nullptr, /* delProperty */
- nullptr, /* getProperty */
- nullptr, /* setProperty */
- nullptr, /* enumerate */
- SimdObject::resolve
-};
-
-const Class SimdObject::class_ = {
- "SIMD",
- JSCLASS_HAS_RESERVED_SLOTS(uint32_t(SimdType::Count)),
- &SimdObjectClassOps
-};
-
-/* static */ bool
-GlobalObject::initSimdObject(JSContext* cx, Handle<GlobalObject*> global)
-{
- // SIMD relies on the TypedObject module being initialized.
- // In particular, the self-hosted code for array() wants
- // to be able to call GetTypedObjectModule(). It is NOT necessary
- // to install the TypedObjectModule global, but at the moment
- // those two things are not separable.
- if (!GlobalObject::getOrCreateTypedObjectModule(cx, global))
- return false;
-
- RootedObject globalSimdObject(cx);
- RootedObject objProto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
- if (!objProto)
- return false;
-
- globalSimdObject = NewObjectWithGivenProto(cx, &SimdObject::class_, objProto, SingletonObject);
- if (!globalSimdObject)
- return false;
-
- RootedValue globalSimdValue(cx, ObjectValue(*globalSimdObject));
- if (!DefineProperty(cx, global, cx->names().SIMD, globalSimdValue, nullptr, nullptr,
- JSPROP_RESOLVING))
- {
- return false;
- }
-
- global->setConstructor(JSProto_SIMD, globalSimdValue);
- return true;
-}
-
-static bool
-CreateSimdType(JSContext* cx, Handle<GlobalObject*> global, HandlePropertyName stringRepr,
- SimdType simdType, const JSFunctionSpec* methods)
-{
- RootedObject funcProto(cx, GlobalObject::getOrCreateFunctionPrototype(cx, global));
- if (!funcProto)
- return false;
-
- // Create type constructor itself and initialize its reserved slots.
- Rooted<SimdTypeDescr*> typeDescr(cx);
- typeDescr = NewObjectWithGivenProto<SimdTypeDescr>(cx, funcProto, SingletonObject);
- if (!typeDescr)
- return false;
-
- typeDescr->initReservedSlot(JS_DESCR_SLOT_KIND, Int32Value(type::Simd));
- typeDescr->initReservedSlot(JS_DESCR_SLOT_STRING_REPR, StringValue(stringRepr));
- typeDescr->initReservedSlot(JS_DESCR_SLOT_ALIGNMENT, Int32Value(SimdTypeDescr::alignment(simdType)));
- typeDescr->initReservedSlot(JS_DESCR_SLOT_SIZE, Int32Value(SimdTypeDescr::size(simdType)));
- typeDescr->initReservedSlot(JS_DESCR_SLOT_OPAQUE, BooleanValue(false));
- typeDescr->initReservedSlot(JS_DESCR_SLOT_TYPE, Int32Value(uint8_t(simdType)));
-
- if (!CreateUserSizeAndAlignmentProperties(cx, typeDescr))
- return false;
-
- // Create prototype property, which inherits from Object.prototype.
- RootedObject objProto(cx, GlobalObject::getOrCreateObjectPrototype(cx, global));
- if (!objProto)
- return false;
- Rooted<TypedProto*> proto(cx);
- proto = NewObjectWithGivenProto<TypedProto>(cx, objProto, SingletonObject);
- if (!proto)
- return false;
- typeDescr->initReservedSlot(JS_DESCR_SLOT_TYPROTO, ObjectValue(*proto));
-
- // Link constructor to prototype and install properties.
- if (!JS_DefineFunctions(cx, typeDescr, TypeDescriptorMethods))
- return false;
-
- if (!LinkConstructorAndPrototype(cx, typeDescr, proto) ||
- !JS_DefineFunctions(cx, proto, SimdTypedObjectMethods))
- {
- return false;
- }
-
- // Bind type descriptor to the global SIMD object
- RootedObject globalSimdObject(cx, GlobalObject::getOrCreateSimdGlobalObject(cx, global));
- MOZ_ASSERT(globalSimdObject);
-
- RootedValue typeValue(cx, ObjectValue(*typeDescr));
- if (!JS_DefineFunctions(cx, typeDescr, methods) ||
- !DefineProperty(cx, globalSimdObject, stringRepr, typeValue, nullptr, nullptr,
- JSPROP_READONLY | JSPROP_PERMANENT | JSPROP_RESOLVING))
- {
- return false;
- }
-
- uint32_t slot = uint32_t(typeDescr->type());
- MOZ_ASSERT(globalSimdObject->as<NativeObject>().getReservedSlot(slot).isUndefined());
- globalSimdObject->as<NativeObject>().setReservedSlot(slot, ObjectValue(*typeDescr));
- return !!typeDescr;
-}
-
-/* static */ bool
-GlobalObject::initSimdType(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType)
-{
-#define CREATE_(Type) \
- case SimdType::Type: \
- return CreateSimdType(cx, global, cx->names().Type, simdType, Type##Defn::Methods);
-
- switch (simdType) {
- FOR_EACH_SIMD(CREATE_)
- case SimdType::Count: break;
- }
- MOZ_CRASH("unexpected simd type");
-
-#undef CREATE_
-}
-
-/* static */ SimdTypeDescr*
-GlobalObject::getOrCreateSimdTypeDescr(JSContext* cx, Handle<GlobalObject*> global,
- SimdType simdType)
-{
- MOZ_ASSERT(unsigned(simdType) < unsigned(SimdType::Count), "Invalid SIMD type");
-
- RootedObject globalSimdObject(cx, GlobalObject::getOrCreateSimdGlobalObject(cx, global));
- if (!globalSimdObject)
- return nullptr;
-
- uint32_t typeSlotIndex = uint32_t(simdType);
- if (globalSimdObject->as<NativeObject>().getReservedSlot(typeSlotIndex).isUndefined() &&
- !GlobalObject::initSimdType(cx, global, simdType))
- {
- return nullptr;
- }
-
- const Value& slot = globalSimdObject->as<NativeObject>().getReservedSlot(typeSlotIndex);
- MOZ_ASSERT(slot.isObject());
- return &slot.toObject().as<SimdTypeDescr>();
-}
-
-bool
-SimdObject::resolve(JSContext* cx, JS::HandleObject obj, JS::HandleId id, bool* resolved)
-{
- *resolved = false;
- if (!JSID_IS_ATOM(id))
- return true;
- JSAtom* str = JSID_TO_ATOM(id);
- Rooted<GlobalObject*> global(cx, cx->global());
-#define TRY_RESOLVE_(Type) \
- if (str == cx->names().Type) { \
- *resolved = CreateSimdType(cx, global, cx->names().Type, \
- SimdType::Type, Type##Defn::Methods); \
- return *resolved; \
- }
- FOR_EACH_SIMD(TRY_RESOLVE_)
-#undef TRY_RESOLVE_
- return true;
-}
-
-JSObject*
-js::InitSimdClass(JSContext* cx, HandleObject obj)
-{
- Handle<GlobalObject*> global = obj.as<GlobalObject>();
- return GlobalObject::getOrCreateSimdGlobalObject(cx, global);
-}
-
-template<typename V>
-JSObject*
-js::CreateSimd(JSContext* cx, const typename V::Elem* data)
-{
- typedef typename V::Elem Elem;
- Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
- if (!typeDescr)
- return nullptr;
-
- Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, typeDescr, 0));
- if (!result)
- return nullptr;
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* resultMem = reinterpret_cast<Elem*>(result->typedMem(nogc));
- memcpy(resultMem, data, sizeof(Elem) * V::lanes);
- return result;
-}
-
-#define InstantiateCreateSimd_(Type) \
- template JSObject* js::CreateSimd<Type>(JSContext* cx, const Type::Elem* data);
-
-FOR_EACH_SIMD(InstantiateCreateSimd_)
-
-#undef InstantiateCreateSimd_
-
-#undef FOR_EACH_SIMD
-
-namespace js {
-// Unary SIMD operators
-template<typename T>
-struct Identity {
- static T apply(T x) { return x; }
-};
-template<typename T>
-struct Abs {
- static T apply(T x) { return mozilla::Abs(x); }
-};
-template<typename T>
-struct Neg {
- static T apply(T x) { return -1 * x; }
-};
-template<typename T>
-struct Not {
- static T apply(T x) { return ~x; }
-};
-template<typename T>
-struct LogicalNot {
- static T apply(T x) { return !x; }
-};
-template<typename T>
-struct RecApprox {
- static T apply(T x) { return 1 / x; }
-};
-template<typename T>
-struct RecSqrtApprox {
- static T apply(T x) { return 1 / sqrt(x); }
-};
-template<typename T>
-struct Sqrt {
- static T apply(T x) { return sqrt(x); }
-};
-
-// Binary SIMD operators
-template<typename T>
-struct Add {
- static T apply(T l, T r) { return l + r; }
-};
-template<typename T>
-struct Sub {
- static T apply(T l, T r) { return l - r; }
-};
-template<typename T>
-struct Div {
- static T apply(T l, T r) { return l / r; }
-};
-template<typename T>
-struct Mul {
- static T apply(T l, T r) { return l * r; }
-};
-template<typename T>
-struct Minimum {
- static T apply(T l, T r) { return math_min_impl(l, r); }
-};
-template<typename T>
-struct MinNum {
- static T apply(T l, T r) { return IsNaN(l) ? r : (IsNaN(r) ? l : math_min_impl(l, r)); }
-};
-template<typename T>
-struct Maximum {
- static T apply(T l, T r) { return math_max_impl(l, r); }
-};
-template<typename T>
-struct MaxNum {
- static T apply(T l, T r) { return IsNaN(l) ? r : (IsNaN(r) ? l : math_max_impl(l, r)); }
-};
-template<typename T>
-struct LessThan {
- static bool apply(T l, T r) { return l < r; }
-};
-template<typename T>
-struct LessThanOrEqual {
- static bool apply(T l, T r) { return l <= r; }
-};
-template<typename T>
-struct GreaterThan {
- static bool apply(T l, T r) { return l > r; }
-};
-template<typename T>
-struct GreaterThanOrEqual {
- static bool apply(T l, T r) { return l >= r; }
-};
-template<typename T>
-struct Equal {
- static bool apply(T l, T r) { return l == r; }
-};
-template<typename T>
-struct NotEqual {
- static bool apply(T l, T r) { return l != r; }
-};
-template<typename T>
-struct Xor {
- static T apply(T l, T r) { return l ^ r; }
-};
-template<typename T>
-struct And {
- static T apply(T l, T r) { return l & r; }
-};
-template<typename T>
-struct Or {
- static T apply(T l, T r) { return l | r; }
-};
-
-// For the following three operators, if the value v we're trying to shift is
-// such that v << bits can't fit in the int32 range, then we have undefined
-// behavior, according to C++11 [expr.shift]p2. However, left-shifting an
-// unsigned type is well-defined.
-//
-// In C++, shifting by an amount outside the range [0;N-1] is undefined
-// behavior. SIMD.js reduces the shift amount modulo the number of bits in a
-// lane and has defined behavior for all shift amounts.
-template<typename T>
-struct ShiftLeft {
- static T apply(T v, int32_t bits) {
- typedef typename mozilla::MakeUnsigned<T>::Type UnsignedT;
- uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
- return UnsignedT(v) << maskedBits;
- }
-};
-template<typename T>
-struct ShiftRightArithmetic {
- static T apply(T v, int32_t bits) {
- typedef typename mozilla::MakeSigned<T>::Type SignedT;
- uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
- return SignedT(v) >> maskedBits;
- }
-};
-template<typename T>
-struct ShiftRightLogical {
- static T apply(T v, int32_t bits) {
- typedef typename mozilla::MakeUnsigned<T>::Type UnsignedT;
- uint32_t maskedBits = uint32_t(bits) % (sizeof(T) * 8);
- return UnsignedT(v) >> maskedBits;
- }
-};
-
-// Saturating arithmetic is only defined on types smaller than int.
-// Clamp `x` into the range supported by the integral type T.
-template<typename T>
-static T
-Saturate(int x)
-{
- static_assert(mozilla::IsIntegral<T>::value, "Only integer saturation supported");
- static_assert(sizeof(T) < sizeof(int), "Saturating int-sized arithmetic is not safe");
- const T lower = mozilla::MinValue<T>::value;
- const T upper = mozilla::MaxValue<T>::value;
- if (x > int(upper))
- return upper;
- if (x < int(lower))
- return lower;
- return T(x);
-}
-
-// Since signed integer overflow is undefined behavior in C++, it would be
-// wildly irresponsible to attempt something as dangerous as adding two numbers
-// coming from user code. However, in this case we know that T is smaller than
-// int, so there is no way these operations can cause overflow. The
-// static_assert in Saturate() enforces this for us.
-template<typename T>
-struct AddSaturate {
- static T apply(T l, T r) { return Saturate<T>(l + r); }
-};
-template<typename T>
-struct SubSaturate {
- static T apply(T l, T r) { return Saturate<T>(l - r); }
-};
-
-} // namespace js
-
-template<typename Out>
-static bool
-StoreResult(JSContext* cx, CallArgs& args, typename Out::Elem* result)
-{
- RootedObject obj(cx, CreateSimd<Out>(cx, result));
- if (!obj)
- return false;
- args.rval().setObject(*obj);
- return true;
-}
-
-// StoreResult can GC, and it is commonly used after pulling something out of a
-// TypedObject:
-//
-// Elem result = op(TypedObjectMemory<Elem>(args[0]));
-// StoreResult<Out>(..., result);
-//
-// The pointer extracted from the typed object in args[0] in the above example
-// could be an interior pointer, and therefore be invalidated by GC.
-// TypedObjectMemory() requires an assertion token to be passed in to prove
-// that we won't GC, but the scope of eg an AutoCheckCannotGC RAII object
-// extends to the end of its containing scope -- which would include the call
-// to StoreResult, resulting in a rooting hazard.
-//
-// TypedObjectElemArray fixes this by wrapping the problematic pointer in a
-// type, and the analysis is able to see that it is dead before calling
-// StoreResult. (But if another GC called is made before the pointer is dead,
-// it will correctly report a hazard.)
-//
-template <typename Elem>
-class TypedObjectElemArray {
- Elem* elements;
- public:
- explicit TypedObjectElemArray(HandleValue objVal) {
- JS::AutoCheckCannotGC nogc;
- elements = TypedObjectMemory<Elem*>(objVal, nogc);
- }
- Elem& operator[](int i) { return elements[i]; }
-} JS_HAZ_GC_POINTER;
-
-// Coerces the inputs of type In to the type Coercion, apply the operator Op
-// and converts the result to the type Out.
-template<typename In, typename Coercion, template<typename C> class Op, typename Out>
-static bool
-CoercedUnaryFunc(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename Coercion::Elem CoercionElem;
- typedef typename Out::Elem RetElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 1 || !IsVectorObject<In>(args[0]))
- return ErrorBadArgs(cx);
-
- CoercionElem result[Coercion::lanes];
- TypedObjectElemArray<CoercionElem> val(args[0]);
- for (unsigned i = 0; i < Coercion::lanes; i++)
- result[i] = Op<CoercionElem>::apply(val[i]);
- return StoreResult<Out>(cx, args, (RetElem*) result);
-}
-
-// Coerces the inputs of type In to the type Coercion, apply the operator Op
-// and converts the result to the type Out.
-template<typename In, typename Coercion, template<typename C> class Op, typename Out>
-static bool
-CoercedBinaryFunc(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename Coercion::Elem CoercionElem;
- typedef typename Out::Elem RetElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 2 || !IsVectorObject<In>(args[0]) || !IsVectorObject<In>(args[1]))
- return ErrorBadArgs(cx);
-
- CoercionElem result[Coercion::lanes];
- TypedObjectElemArray<CoercionElem> left(args[0]);
- TypedObjectElemArray<CoercionElem> right(args[1]);
- for (unsigned i = 0; i < Coercion::lanes; i++)
- result[i] = Op<CoercionElem>::apply(left[i], right[i]);
- return StoreResult<Out>(cx, args, (RetElem*) result);
-}
-
-// Same as above, with no coercion, i.e. Coercion == In.
-template<typename In, template<typename C> class Op, typename Out>
-static bool
-UnaryFunc(JSContext* cx, unsigned argc, Value* vp)
-{
- return CoercedUnaryFunc<In, Out, Op, Out>(cx, argc, vp);
-}
-
-template<typename In, template<typename C> class Op, typename Out>
-static bool
-BinaryFunc(JSContext* cx, unsigned argc, Value* vp)
-{
- return CoercedBinaryFunc<In, Out, Op, Out>(cx, argc, vp);
-}
-
-template<typename V>
-static bool
-ExtractLane(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() < 2 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- unsigned lane;
- if (!ArgumentToLaneIndex(cx, args[1], V::lanes, &lane))
- return false;
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
- Elem val = vec[lane];
- args.rval().set(V::ToValue(val));
- return true;
-}
-
-template<typename V>
-static bool
-AllTrue(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() < 1 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
- bool allTrue = true;
- for (unsigned i = 0; allTrue && i < V::lanes; i++)
- allTrue = vec[i];
-
- args.rval().setBoolean(allTrue);
- return true;
-}
-
-template<typename V>
-static bool
-AnyTrue(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() < 1 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* vec = TypedObjectMemory<Elem*>(args[0], nogc);
- bool anyTrue = false;
- for (unsigned i = 0; !anyTrue && i < V::lanes; i++)
- anyTrue = vec[i];
-
- args.rval().setBoolean(anyTrue);
- return true;
-}
-
-template<typename V>
-static bool
-ReplaceLane(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- // Only the first and second arguments are mandatory
- if (args.length() < 2 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- unsigned lane;
- if (!ArgumentToLaneIndex(cx, args[1], V::lanes, &lane))
- return false;
-
- Elem value;
- if (!V::Cast(cx, args.get(2), &value))
- return false;
-
- TypedObjectElemArray<Elem> vec(args[0]);
- Elem result[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++)
- result[i] = i == lane ? value : vec[i];
-
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename V>
-static bool
-Swizzle(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != (V::lanes + 1) || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- unsigned lanes[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++) {
- if (!ArgumentToLaneIndex(cx, args[i + 1], V::lanes, &lanes[i]))
- return false;
- }
-
- TypedObjectElemArray<Elem> val(args[0]);
- Elem result[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++)
- result[i] = val[lanes[i]];
-
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename V>
-static bool
-Shuffle(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != (V::lanes + 2) || !IsVectorObject<V>(args[0]) || !IsVectorObject<V>(args[1]))
- return ErrorBadArgs(cx);
-
- unsigned lanes[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++) {
- if (!ArgumentToLaneIndex(cx, args[i + 2], 2 * V::lanes, &lanes[i]))
- return false;
- }
-
- Elem result[V::lanes];
- {
- JS::AutoCheckCannotGC nogc(cx);
- Elem* lhs = TypedObjectMemory<Elem*>(args[0], nogc);
- Elem* rhs = TypedObjectMemory<Elem*>(args[1], nogc);
-
- for (unsigned i = 0; i < V::lanes; i++) {
- Elem* selectedInput = lanes[i] < V::lanes ? lhs : rhs;
- result[i] = selectedInput[lanes[i] % V::lanes];
- }
- }
-
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename V, template<typename T> class Op>
-static bool
-BinaryScalar(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 2)
- return ErrorBadArgs(cx);
-
- if (!IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- int32_t bits;
- if (!ToInt32(cx, args[1], &bits))
- return false;
-
- TypedObjectElemArray<Elem> val(args[0]);
- Elem result[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++)
- result[i] = Op<Elem>::apply(val[i], bits);
-
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename In, template<typename C> class Op, typename Out>
-static bool
-CompareFunc(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename In::Elem InElem;
- typedef typename Out::Elem OutElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 2 || !IsVectorObject<In>(args[0]) || !IsVectorObject<In>(args[1]))
- return ErrorBadArgs(cx);
-
- OutElem result[Out::lanes];
- TypedObjectElemArray<InElem> left(args[0]);
- TypedObjectElemArray<InElem> right(args[1]);
- for (unsigned i = 0; i < Out::lanes; i++) {
- unsigned j = (i * In::lanes) / Out::lanes;
- result[i] = Op<InElem>::apply(left[j], right[j]) ? -1 : 0;
- }
-
- return StoreResult<Out>(cx, args, result);
-}
-
-// This struct defines whether we should throw during a conversion attempt,
-// when trying to convert a value of type from From to the type To. This
-// happens whenever a C++ conversion would have undefined behavior (and perhaps
-// be platform-dependent).
-template<typename From, typename To>
-struct ThrowOnConvert;
-
-struct NeverThrow
-{
- static bool value(int32_t v) {
- return false;
- }
-};
-
-// While int32 to float conversions can be lossy, these conversions have
-// defined behavior in C++, so we don't need to care about them here. In practice,
-// this means round to nearest, tie with even (zero bit in significand).
-template<>
-struct ThrowOnConvert<int32_t, float> : public NeverThrow {};
-
-template<>
-struct ThrowOnConvert<uint32_t, float> : public NeverThrow {};
-
-// All int32 can be safely converted to doubles.
-template<>
-struct ThrowOnConvert<int32_t, double> : public NeverThrow {};
-
-template<>
-struct ThrowOnConvert<uint32_t, double> : public NeverThrow {};
-
-// All floats can be safely converted to doubles.
-template<>
-struct ThrowOnConvert<float, double> : public NeverThrow {};
-
-// Double to float conversion for inputs which aren't in the float range are
-// undefined behavior in C++, but they're defined in IEEE754.
-template<>
-struct ThrowOnConvert<double, float> : public NeverThrow {};
-
-// Float to integer conversions have undefined behavior if the float value
-// is out of the representable integer range (on x86, will yield the undefined
-// value pattern, namely 0x80000000; on arm, will clamp the input value), so
-// check this here.
-template<typename From, typename IntegerType>
-struct ThrowIfNotInRange
-{
- static_assert(mozilla::IsIntegral<IntegerType>::value, "bad destination type");
-
- static bool value(From v) {
- // Truncate to integer value before the range check.
- double d = trunc(double(v));
- // Arrange relations so NaN returns true (i.e., it throws a RangeError).
- return !(d >= double(mozilla::MinValue<IntegerType>::value) &&
- d <= double(mozilla::MaxValue<IntegerType>::value));
- }
-};
-
-template<>
-struct ThrowOnConvert<double, int32_t> : public ThrowIfNotInRange<double, int32_t> {};
-
-template<>
-struct ThrowOnConvert<double, uint32_t> : public ThrowIfNotInRange<double, uint32_t> {};
-
-template<>
-struct ThrowOnConvert<float, int32_t> : public ThrowIfNotInRange<float, int32_t> {};
-
-template<>
-struct ThrowOnConvert<float, uint32_t> : public ThrowIfNotInRange<float, uint32_t> {};
-
-template<typename V, typename Vret>
-static bool
-FuncConvert(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
- typedef typename Vret::Elem RetElem;
-
- static_assert(!mozilla::IsSame<V,Vret>::value, "Can't convert SIMD type to itself");
- static_assert(V::lanes == Vret::lanes, "Can only convert from same number of lanes");
- static_assert(!mozilla::IsIntegral<Elem>::value || !mozilla::IsIntegral<RetElem>::value,
- "Cannot convert between integer SIMD types");
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 1 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- TypedObjectElemArray<Elem> val(args[0]);
- RetElem result[Vret::lanes];
- for (unsigned i = 0; i < V::lanes; i++) {
- if (ThrowOnConvert<Elem, RetElem>::value(val[i])) {
- JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_SIMD_FAILED_CONVERSION);
- return false;
- }
- result[i] = ConvertScalar<RetElem>(val[i]);
- }
-
- return StoreResult<Vret>(cx, args, result);
-}
-
-template<typename V, typename Vret>
-static bool
-FuncConvertBits(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
- typedef typename Vret::Elem RetElem;
-
- static_assert(!mozilla::IsSame<V, Vret>::value, "Can't convert SIMD type to itself");
- static_assert(V::lanes * sizeof(Elem) == Vret::lanes * sizeof(RetElem),
- "Can only bitcast from the same number of bits");
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 1 || !IsVectorObject<V>(args[0]))
- return ErrorBadArgs(cx);
-
- // While we could just pass the typedMem of args[0] as StoreResults' last
- // argument, a GC could move the pointer to its memory in the meanwhile.
- // For consistency with other SIMD functions, simply copy the input in a
- // temporary array.
- RetElem copy[Vret::lanes];
- {
- JS::AutoCheckCannotGC nogc(cx);
- memcpy(copy, TypedObjectMemory<RetElem*>(args[0], nogc), Vret::lanes * sizeof(RetElem));
- }
- return StoreResult<Vret>(cx, args, copy);
-}
-
-template<typename Vret>
-static bool
-FuncSplat(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename Vret::Elem RetElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- RetElem arg;
- if (!Vret::Cast(cx, args.get(0), &arg))
- return false;
-
- RetElem result[Vret::lanes];
- for (unsigned i = 0; i < Vret::lanes; i++)
- result[i] = arg;
- return StoreResult<Vret>(cx, args, result);
-}
-
-template<typename V>
-static bool
-Bool(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
-
- Elem result[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++)
- result[i] = ToBoolean(args.get(i)) ? -1 : 0;
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename V, typename MaskType>
-static bool
-SelectBits(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
- typedef typename MaskType::Elem MaskTypeElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 3 || !IsVectorObject<MaskType>(args[0]) ||
- !IsVectorObject<V>(args[1]) || !IsVectorObject<V>(args[2]))
- {
- return ErrorBadArgs(cx);
- }
-
- TypedObjectElemArray<MaskTypeElem> val(args[0]);
- TypedObjectElemArray<MaskTypeElem> tv(args[1]);
- TypedObjectElemArray<MaskTypeElem> fv(args[2]);
-
- MaskTypeElem tr[MaskType::lanes];
- for (unsigned i = 0; i < MaskType::lanes; i++)
- tr[i] = And<MaskTypeElem>::apply(val[i], tv[i]);
-
- MaskTypeElem fr[MaskType::lanes];
- for (unsigned i = 0; i < MaskType::lanes; i++)
- fr[i] = And<MaskTypeElem>::apply(Not<MaskTypeElem>::apply(val[i]), fv[i]);
-
- MaskTypeElem orInt[MaskType::lanes];
- for (unsigned i = 0; i < MaskType::lanes; i++)
- orInt[i] = Or<MaskTypeElem>::apply(tr[i], fr[i]);
-
- Elem* result = reinterpret_cast<Elem*>(orInt);
- return StoreResult<V>(cx, args, result);
-}
-
-template<typename V, typename MaskType>
-static bool
-Select(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
- typedef typename MaskType::Elem MaskTypeElem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 3 || !IsVectorObject<MaskType>(args[0]) ||
- !IsVectorObject<V>(args[1]) || !IsVectorObject<V>(args[2]))
- {
- return ErrorBadArgs(cx);
- }
-
- TypedObjectElemArray<MaskTypeElem> mask(args[0]);
- TypedObjectElemArray<Elem> tv(args[1]);
- TypedObjectElemArray<Elem> fv(args[2]);
-
- Elem result[V::lanes];
- for (unsigned i = 0; i < V::lanes; i++)
- result[i] = mask[i] ? tv[i] : fv[i];
-
- return StoreResult<V>(cx, args, result);
-}
-
-// Extract an integer lane index from a function argument.
-//
-// Register an exception and return false if the argument is not suitable.
-static bool
-ArgumentToLaneIndex(JSContext* cx, JS::HandleValue v, unsigned limit, unsigned* lane)
-{
- uint64_t arg;
- if (!ToIntegerIndex(cx, v, &arg))
- return false;
- if (arg >= limit)
- return ErrorBadIndex(cx);
-
- *lane = unsigned(arg);
- return true;
-}
-
-// Look for arguments (ta, idx) where ta is a TypedArray and idx is a
-// non-negative integer.
-// Check that accessBytes can be accessed starting from index idx in the array.
-// Return the array handle in typedArray and idx converted to a byte offset in byteStart.
-static bool
-TypedArrayFromArgs(JSContext* cx, const CallArgs& args, uint32_t accessBytes,
- MutableHandleObject typedArray, size_t* byteStart)
-{
- if (!args[0].isObject())
- return ErrorBadArgs(cx);
-
- JSObject& argobj = args[0].toObject();
- if (!argobj.is<TypedArrayObject>())
- return ErrorBadArgs(cx);
-
- typedArray.set(&argobj);
-
- uint64_t index;
- if (!ToIntegerIndex(cx, args[1], &index))
- return false;
-
- // Do the range check in 64 bits even when size_t is 32 bits.
- // This can't overflow because index <= 2^53.
- uint64_t bytes = index * typedArray->as<TypedArrayObject>().bytesPerElement();
- // Keep in sync with AsmJS OnOutOfBounds function.
- if ((bytes + accessBytes) > typedArray->as<TypedArrayObject>().byteLength())
- return ErrorBadIndex(cx);
-
- *byteStart = bytes;
-
- return true;
-}
-
-template<class V, unsigned NumElem>
-static bool
-Load(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 2)
- return ErrorBadArgs(cx);
-
- size_t byteStart;
- RootedObject typedArray(cx);
- if (!TypedArrayFromArgs(cx, args, sizeof(Elem) * NumElem, &typedArray, &byteStart))
- return false;
-
- Rooted<TypeDescr*> typeDescr(cx, GetTypeDescr<V>(cx));
- if (!typeDescr)
- return false;
-
- Rooted<TypedObject*> result(cx, TypedObject::createZeroed(cx, typeDescr, 0));
- if (!result)
- return false;
-
- JS::AutoCheckCannotGC nogc(cx);
- SharedMem<Elem*> src =
- typedArray->as<TypedArrayObject>().viewDataEither().addBytes(byteStart).cast<Elem*>();
- Elem* dst = reinterpret_cast<Elem*>(result->typedMem(nogc));
- jit::AtomicOperations::podCopySafeWhenRacy(SharedMem<Elem*>::unshared(dst), src, NumElem);
-
- args.rval().setObject(*result);
- return true;
-}
-
-template<class V, unsigned NumElem>
-static bool
-Store(JSContext* cx, unsigned argc, Value* vp)
-{
- typedef typename V::Elem Elem;
-
- CallArgs args = CallArgsFromVp(argc, vp);
- if (args.length() != 3)
- return ErrorBadArgs(cx);
-
- size_t byteStart;
- RootedObject typedArray(cx);
- if (!TypedArrayFromArgs(cx, args, sizeof(Elem) * NumElem, &typedArray, &byteStart))
- return false;
-
- if (!IsVectorObject<V>(args[2]))
- return ErrorBadArgs(cx);
-
- JS::AutoCheckCannotGC nogc(cx);
- Elem* src = TypedObjectMemory<Elem*>(args[2], nogc);
- SharedMem<Elem*> dst =
- typedArray->as<TypedArrayObject>().viewDataEither().addBytes(byteStart).cast<Elem*>();
- js::jit::AtomicOperations::podCopySafeWhenRacy(dst, SharedMem<Elem*>::unshared(src), NumElem);
-
- args.rval().setObject(args[2].toObject());
- return true;
-}
-
-#define DEFINE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-FLOAT32X4_FUNCTION_LIST(DEFINE_SIMD_FLOAT32X4_FUNCTION)
-#undef DEFINE_SIMD_FLOAT32X4_FUNCTION
-
-#define DEFINE_SIMD_FLOAT64X2_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_float64x2_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-FLOAT64X2_FUNCTION_LIST(DEFINE_SIMD_FLOAT64X2_FUNCTION)
-#undef DEFINE_SIMD_FLOAT64X2_FUNCTION
-
-#define DEFINE_SIMD_INT8X16_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_int8x16_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-INT8X16_FUNCTION_LIST(DEFINE_SIMD_INT8X16_FUNCTION)
-#undef DEFINE_SIMD_INT8X16_FUNCTION
-
-#define DEFINE_SIMD_INT16X8_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_int16x8_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-INT16X8_FUNCTION_LIST(DEFINE_SIMD_INT16X8_FUNCTION)
-#undef DEFINE_SIMD_INT16X8_FUNCTION
-
-#define DEFINE_SIMD_INT32X4_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_int32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-INT32X4_FUNCTION_LIST(DEFINE_SIMD_INT32X4_FUNCTION)
-#undef DEFINE_SIMD_INT32X4_FUNCTION
-
-#define DEFINE_SIMD_UINT8X16_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_uint8x16_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-UINT8X16_FUNCTION_LIST(DEFINE_SIMD_UINT8X16_FUNCTION)
-#undef DEFINE_SIMD_UINT8X16_FUNCTION
-
-#define DEFINE_SIMD_UINT16X8_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_uint16x8_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-UINT16X8_FUNCTION_LIST(DEFINE_SIMD_UINT16X8_FUNCTION)
-#undef DEFINE_SIMD_UINT16X8_FUNCTION
-
-#define DEFINE_SIMD_UINT32X4_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_uint32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-UINT32X4_FUNCTION_LIST(DEFINE_SIMD_UINT32X4_FUNCTION)
-#undef DEFINE_SIMD_UINT32X4_FUNCTION
-
-#define DEFINE_SIMD_BOOL8X16_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_bool8x16_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-
-BOOL8X16_FUNCTION_LIST(DEFINE_SIMD_BOOL8X16_FUNCTION)
-#undef DEFINE_SIMD_BOOL8X16_FUNCTION
-
-#define DEFINE_SIMD_BOOL16X8_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_bool16x8_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-BOOL16X8_FUNCTION_LIST(DEFINE_SIMD_BOOL16X8_FUNCTION)
-#undef DEFINE_SIMD_BOOL16X8_FUNCTION
-
-#define DEFINE_SIMD_BOOL32X4_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_bool32x4_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-BOOL32X4_FUNCTION_LIST(DEFINE_SIMD_BOOL32X4_FUNCTION)
-#undef DEFINE_SIMD_BOOL32X4_FUNCTION
-
-#define DEFINE_SIMD_BOOL64X2_FUNCTION(Name, Func, Operands) \
-bool \
-js::simd_bool64x2_##Name(JSContext* cx, unsigned argc, Value* vp) \
-{ \
- return Func(cx, argc, vp); \
-}
-BOOL64X2_FUNCTION_LIST(DEFINE_SIMD_BOOL64X2_FUNCTION)
-#undef DEFINE_SIMD_BOOL64X2_FUNCTION
diff --git a/js/src/builtin/SIMD.h b/js/src/builtin/SIMD.h
deleted file mode 100644
index 8d8b226416..0000000000
--- a/js/src/builtin/SIMD.h
+++ /dev/null
@@ -1,1218 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#ifndef builtin_SIMD_h
-#define builtin_SIMD_h
-
-#include "jsapi.h"
-#include "NamespaceImports.h"
-
-#include "builtin/TypedObjectConstants.h"
-#include "jit/IonTypes.h"
-#include "js/Conversions.h"
-
-/*
- * JS SIMD functions.
- * Spec matching polyfill:
- * https://github.com/tc39/ecmascript_simd/blob/master/src/ecmascript_simd.js
- */
-
-// Bool8x16.
-#define BOOL8X16_UNARY_FUNCTION_LIST(V) \
- V(not, (UnaryFunc<Bool8x16, LogicalNot, Bool8x16>), 1) \
- V(check, (UnaryFunc<Bool8x16, Identity, Bool8x16>), 1) \
- V(splat, (FuncSplat<Bool8x16>), 1) \
- V(allTrue, (AllTrue<Bool8x16>), 1) \
- V(anyTrue, (AnyTrue<Bool8x16>), 1)
-
-#define BOOL8X16_BINARY_FUNCTION_LIST(V) \
- V(extractLane, (ExtractLane<Bool8x16>), 2) \
- V(and, (BinaryFunc<Bool8x16, And, Bool8x16>), 2) \
- V(or, (BinaryFunc<Bool8x16, Or, Bool8x16>), 2) \
- V(xor, (BinaryFunc<Bool8x16, Xor, Bool8x16>), 2) \
-
-#define BOOL8X16_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Bool8x16>), 3)
-
-#define BOOL8X16_FUNCTION_LIST(V) \
- BOOL8X16_UNARY_FUNCTION_LIST(V) \
- BOOL8X16_BINARY_FUNCTION_LIST(V) \
- BOOL8X16_TERNARY_FUNCTION_LIST(V)
-
-// Bool 16x8.
-#define BOOL16X8_UNARY_FUNCTION_LIST(V) \
- V(not, (UnaryFunc<Bool16x8, LogicalNot, Bool16x8>), 1) \
- V(check, (UnaryFunc<Bool16x8, Identity, Bool16x8>), 1) \
- V(splat, (FuncSplat<Bool16x8>), 1) \
- V(allTrue, (AllTrue<Bool16x8>), 1) \
- V(anyTrue, (AnyTrue<Bool16x8>), 1)
-
-#define BOOL16X8_BINARY_FUNCTION_LIST(V) \
- V(extractLane, (ExtractLane<Bool16x8>), 2) \
- V(and, (BinaryFunc<Bool16x8, And, Bool16x8>), 2) \
- V(or, (BinaryFunc<Bool16x8, Or, Bool16x8>), 2) \
- V(xor, (BinaryFunc<Bool16x8, Xor, Bool16x8>), 2) \
-
-#define BOOL16X8_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Bool16x8>), 3)
-
-#define BOOL16X8_FUNCTION_LIST(V) \
- BOOL16X8_UNARY_FUNCTION_LIST(V) \
- BOOL16X8_BINARY_FUNCTION_LIST(V) \
- BOOL16X8_TERNARY_FUNCTION_LIST(V)
-
-// Bool32x4.
-#define BOOL32X4_UNARY_FUNCTION_LIST(V) \
- V(not, (UnaryFunc<Bool32x4, LogicalNot, Bool32x4>), 1) \
- V(check, (UnaryFunc<Bool32x4, Identity, Bool32x4>), 1) \
- V(splat, (FuncSplat<Bool32x4>), 1) \
- V(allTrue, (AllTrue<Bool32x4>), 1) \
- V(anyTrue, (AnyTrue<Bool32x4>), 1)
-
-#define BOOL32X4_BINARY_FUNCTION_LIST(V) \
- V(extractLane, (ExtractLane<Bool32x4>), 2) \
- V(and, (BinaryFunc<Bool32x4, And, Bool32x4>), 2) \
- V(or, (BinaryFunc<Bool32x4, Or, Bool32x4>), 2) \
- V(xor, (BinaryFunc<Bool32x4, Xor, Bool32x4>), 2) \
-
-#define BOOL32X4_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Bool32x4>), 3)
-
-#define BOOL32X4_FUNCTION_LIST(V) \
- BOOL32X4_UNARY_FUNCTION_LIST(V) \
- BOOL32X4_BINARY_FUNCTION_LIST(V) \
- BOOL32X4_TERNARY_FUNCTION_LIST(V)
-
-// Bool64x2.
-#define BOOL64X2_UNARY_FUNCTION_LIST(V) \
- V(not, (UnaryFunc<Bool64x2, LogicalNot, Bool64x2>), 1) \
- V(check, (UnaryFunc<Bool64x2, Identity, Bool64x2>), 1) \
- V(splat, (FuncSplat<Bool64x2>), 1) \
- V(allTrue, (AllTrue<Bool64x2>), 1) \
- V(anyTrue, (AnyTrue<Bool64x2>), 1)
-
-#define BOOL64X2_BINARY_FUNCTION_LIST(V) \
- V(extractLane, (ExtractLane<Bool64x2>), 2) \
- V(and, (BinaryFunc<Bool64x2, And, Bool64x2>), 2) \
- V(or, (BinaryFunc<Bool64x2, Or, Bool64x2>), 2) \
- V(xor, (BinaryFunc<Bool64x2, Xor, Bool64x2>), 2) \
-
-#define BOOL64X2_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Bool64x2>), 3)
-
-#define BOOL64X2_FUNCTION_LIST(V) \
- BOOL64X2_UNARY_FUNCTION_LIST(V) \
- BOOL64X2_BINARY_FUNCTION_LIST(V) \
- BOOL64X2_TERNARY_FUNCTION_LIST(V)
-
-// Float32x4.
-#define FLOAT32X4_UNARY_FUNCTION_LIST(V) \
- V(abs, (UnaryFunc<Float32x4, Abs, Float32x4>), 1) \
- V(check, (UnaryFunc<Float32x4, Identity, Float32x4>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Float32x4>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Float32x4>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Float32x4>), 1) \
- V(fromInt32x4, (FuncConvert<Int32x4, Float32x4>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Float32x4>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Float32x4>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Float32x4>), 1) \
- V(fromUint32x4, (FuncConvert<Uint32x4, Float32x4>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Float32x4>), 1) \
- V(neg, (UnaryFunc<Float32x4, Neg, Float32x4>), 1) \
- V(reciprocalApproximation, (UnaryFunc<Float32x4, RecApprox, Float32x4>), 1) \
- V(reciprocalSqrtApproximation, (UnaryFunc<Float32x4, RecSqrtApprox, Float32x4>), 1) \
- V(splat, (FuncSplat<Float32x4>), 1) \
- V(sqrt, (UnaryFunc<Float32x4, Sqrt, Float32x4>), 1)
-
-#define FLOAT32X4_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Float32x4, Add, Float32x4>), 2) \
- V(div, (BinaryFunc<Float32x4, Div, Float32x4>), 2) \
- V(equal, (CompareFunc<Float32x4, Equal, Bool32x4>), 2) \
- V(extractLane, (ExtractLane<Float32x4>), 2) \
- V(greaterThan, (CompareFunc<Float32x4, GreaterThan, Bool32x4>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Float32x4, GreaterThanOrEqual, Bool32x4>), 2) \
- V(lessThan, (CompareFunc<Float32x4, LessThan, Bool32x4>), 2) \
- V(lessThanOrEqual, (CompareFunc<Float32x4, LessThanOrEqual, Bool32x4>), 2) \
- V(load, (Load<Float32x4, 4>), 2) \
- V(load3, (Load<Float32x4, 3>), 2) \
- V(load2, (Load<Float32x4, 2>), 2) \
- V(load1, (Load<Float32x4, 1>), 2) \
- V(max, (BinaryFunc<Float32x4, Maximum, Float32x4>), 2) \
- V(maxNum, (BinaryFunc<Float32x4, MaxNum, Float32x4>), 2) \
- V(min, (BinaryFunc<Float32x4, Minimum, Float32x4>), 2) \
- V(minNum, (BinaryFunc<Float32x4, MinNum, Float32x4>), 2) \
- V(mul, (BinaryFunc<Float32x4, Mul, Float32x4>), 2) \
- V(notEqual, (CompareFunc<Float32x4, NotEqual, Bool32x4>), 2) \
- V(sub, (BinaryFunc<Float32x4, Sub, Float32x4>), 2)
-
-#define FLOAT32X4_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Float32x4>), 3) \
- V(select, (Select<Float32x4, Bool32x4>), 3) \
- V(store, (Store<Float32x4, 4>), 3) \
- V(store3, (Store<Float32x4, 3>), 3) \
- V(store2, (Store<Float32x4, 2>), 3) \
- V(store1, (Store<Float32x4, 1>), 3)
-
-#define FLOAT32X4_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Float32x4>, 5) \
- V(shuffle, Shuffle<Float32x4>, 6)
-
-#define FLOAT32X4_FUNCTION_LIST(V) \
- FLOAT32X4_UNARY_FUNCTION_LIST(V) \
- FLOAT32X4_BINARY_FUNCTION_LIST(V) \
- FLOAT32X4_TERNARY_FUNCTION_LIST(V) \
- FLOAT32X4_SHUFFLE_FUNCTION_LIST(V)
-
-// Float64x2.
-#define FLOAT64X2_UNARY_FUNCTION_LIST(V) \
- V(abs, (UnaryFunc<Float64x2, Abs, Float64x2>), 1) \
- V(check, (UnaryFunc<Float64x2, Identity, Float64x2>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Float64x2>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Float64x2>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Float64x2>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Float64x2>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Float64x2>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Float64x2>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Float64x2>), 1) \
- V(neg, (UnaryFunc<Float64x2, Neg, Float64x2>), 1) \
- V(reciprocalApproximation, (UnaryFunc<Float64x2, RecApprox, Float64x2>), 1) \
- V(reciprocalSqrtApproximation, (UnaryFunc<Float64x2, RecSqrtApprox, Float64x2>), 1) \
- V(splat, (FuncSplat<Float64x2>), 1) \
- V(sqrt, (UnaryFunc<Float64x2, Sqrt, Float64x2>), 1)
-
-#define FLOAT64X2_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Float64x2, Add, Float64x2>), 2) \
- V(div, (BinaryFunc<Float64x2, Div, Float64x2>), 2) \
- V(equal, (CompareFunc<Float64x2, Equal, Bool64x2>), 2) \
- V(extractLane, (ExtractLane<Float64x2>), 2) \
- V(greaterThan, (CompareFunc<Float64x2, GreaterThan, Bool64x2>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Float64x2, GreaterThanOrEqual, Bool64x2>), 2) \
- V(lessThan, (CompareFunc<Float64x2, LessThan, Bool64x2>), 2) \
- V(lessThanOrEqual, (CompareFunc<Float64x2, LessThanOrEqual, Bool64x2>), 2) \
- V(load, (Load<Float64x2, 2>), 2) \
- V(load1, (Load<Float64x2, 1>), 2) \
- V(max, (BinaryFunc<Float64x2, Maximum, Float64x2>), 2) \
- V(maxNum, (BinaryFunc<Float64x2, MaxNum, Float64x2>), 2) \
- V(min, (BinaryFunc<Float64x2, Minimum, Float64x2>), 2) \
- V(minNum, (BinaryFunc<Float64x2, MinNum, Float64x2>), 2) \
- V(mul, (BinaryFunc<Float64x2, Mul, Float64x2>), 2) \
- V(notEqual, (CompareFunc<Float64x2, NotEqual, Bool64x2>), 2) \
- V(sub, (BinaryFunc<Float64x2, Sub, Float64x2>), 2)
-
-#define FLOAT64X2_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Float64x2>), 3) \
- V(select, (Select<Float64x2, Bool64x2>), 3) \
- V(store, (Store<Float64x2, 2>), 3) \
- V(store1, (Store<Float64x2, 1>), 3)
-
-#define FLOAT64X2_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Float64x2>, 3) \
- V(shuffle, Shuffle<Float64x2>, 4)
-
-#define FLOAT64X2_FUNCTION_LIST(V) \
- FLOAT64X2_UNARY_FUNCTION_LIST(V) \
- FLOAT64X2_BINARY_FUNCTION_LIST(V) \
- FLOAT64X2_TERNARY_FUNCTION_LIST(V) \
- FLOAT64X2_SHUFFLE_FUNCTION_LIST(V)
-
-// Int8x16.
-#define INT8X16_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Int8x16, Identity, Int8x16>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int8x16>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int8x16>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Int8x16>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Int8x16>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int8x16>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int8x16>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int8x16>), 1) \
- V(neg, (UnaryFunc<Int8x16, Neg, Int8x16>), 1) \
- V(not, (UnaryFunc<Int8x16, Not, Int8x16>), 1) \
- V(splat, (FuncSplat<Int8x16>), 1)
-
-#define INT8X16_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Int8x16, Add, Int8x16>), 2) \
- V(addSaturate, (BinaryFunc<Int8x16, AddSaturate, Int8x16>), 2) \
- V(and, (BinaryFunc<Int8x16, And, Int8x16>), 2) \
- V(equal, (CompareFunc<Int8x16, Equal, Bool8x16>), 2) \
- V(extractLane, (ExtractLane<Int8x16>), 2) \
- V(greaterThan, (CompareFunc<Int8x16, GreaterThan, Bool8x16>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Int8x16, GreaterThanOrEqual, Bool8x16>), 2) \
- V(lessThan, (CompareFunc<Int8x16, LessThan, Bool8x16>), 2) \
- V(lessThanOrEqual, (CompareFunc<Int8x16, LessThanOrEqual, Bool8x16>), 2) \
- V(load, (Load<Int8x16, 16>), 2) \
- V(mul, (BinaryFunc<Int8x16, Mul, Int8x16>), 2) \
- V(notEqual, (CompareFunc<Int8x16, NotEqual, Bool8x16>), 2) \
- V(or, (BinaryFunc<Int8x16, Or, Int8x16>), 2) \
- V(sub, (BinaryFunc<Int8x16, Sub, Int8x16>), 2) \
- V(subSaturate, (BinaryFunc<Int8x16, SubSaturate, Int8x16>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Int8x16, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Int8x16, ShiftRightArithmetic>), 2) \
- V(xor, (BinaryFunc<Int8x16, Xor, Int8x16>), 2)
-
-#define INT8X16_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Int8x16>), 3) \
- V(select, (Select<Int8x16, Bool8x16>), 3) \
- V(store, (Store<Int8x16, 16>), 3)
-
-#define INT8X16_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Int8x16>, 17) \
- V(shuffle, Shuffle<Int8x16>, 18)
-
-#define INT8X16_FUNCTION_LIST(V) \
- INT8X16_UNARY_FUNCTION_LIST(V) \
- INT8X16_BINARY_FUNCTION_LIST(V) \
- INT8X16_TERNARY_FUNCTION_LIST(V) \
- INT8X16_SHUFFLE_FUNCTION_LIST(V)
-
-// Uint8x16.
-#define UINT8X16_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Uint8x16, Identity, Uint8x16>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint8x16>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint8x16>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint8x16>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint8x16>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint8x16>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Uint8x16>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Uint8x16>), 1) \
- V(neg, (UnaryFunc<Uint8x16, Neg, Uint8x16>), 1) \
- V(not, (UnaryFunc<Uint8x16, Not, Uint8x16>), 1) \
- V(splat, (FuncSplat<Uint8x16>), 1)
-
-#define UINT8X16_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Uint8x16, Add, Uint8x16>), 2) \
- V(addSaturate, (BinaryFunc<Uint8x16, AddSaturate, Uint8x16>), 2) \
- V(and, (BinaryFunc<Uint8x16, And, Uint8x16>), 2) \
- V(equal, (CompareFunc<Uint8x16, Equal, Bool8x16>), 2) \
- V(extractLane, (ExtractLane<Uint8x16>), 2) \
- V(greaterThan, (CompareFunc<Uint8x16, GreaterThan, Bool8x16>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Uint8x16, GreaterThanOrEqual, Bool8x16>), 2) \
- V(lessThan, (CompareFunc<Uint8x16, LessThan, Bool8x16>), 2) \
- V(lessThanOrEqual, (CompareFunc<Uint8x16, LessThanOrEqual, Bool8x16>), 2) \
- V(load, (Load<Uint8x16, 16>), 2) \
- V(mul, (BinaryFunc<Uint8x16, Mul, Uint8x16>), 2) \
- V(notEqual, (CompareFunc<Uint8x16, NotEqual, Bool8x16>), 2) \
- V(or, (BinaryFunc<Uint8x16, Or, Uint8x16>), 2) \
- V(sub, (BinaryFunc<Uint8x16, Sub, Uint8x16>), 2) \
- V(subSaturate, (BinaryFunc<Uint8x16, SubSaturate, Uint8x16>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Uint8x16, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Uint8x16, ShiftRightLogical>), 2) \
- V(xor, (BinaryFunc<Uint8x16, Xor, Uint8x16>), 2)
-
-#define UINT8X16_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Uint8x16>), 3) \
- V(select, (Select<Uint8x16, Bool8x16>), 3) \
- V(store, (Store<Uint8x16, 16>), 3)
-
-#define UINT8X16_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Uint8x16>, 17) \
- V(shuffle, Shuffle<Uint8x16>, 18)
-
-#define UINT8X16_FUNCTION_LIST(V) \
- UINT8X16_UNARY_FUNCTION_LIST(V) \
- UINT8X16_BINARY_FUNCTION_LIST(V) \
- UINT8X16_TERNARY_FUNCTION_LIST(V) \
- UINT8X16_SHUFFLE_FUNCTION_LIST(V)
-
-// Int16x8.
-#define INT16X8_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Int16x8, Identity, Int16x8>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int16x8>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int16x8>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Int16x8>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Int16x8>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int16x8>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int16x8>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int16x8>), 1) \
- V(neg, (UnaryFunc<Int16x8, Neg, Int16x8>), 1) \
- V(not, (UnaryFunc<Int16x8, Not, Int16x8>), 1) \
- V(splat, (FuncSplat<Int16x8>), 1)
-
-#define INT16X8_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Int16x8, Add, Int16x8>), 2) \
- V(addSaturate, (BinaryFunc<Int16x8, AddSaturate, Int16x8>), 2) \
- V(and, (BinaryFunc<Int16x8, And, Int16x8>), 2) \
- V(equal, (CompareFunc<Int16x8, Equal, Bool16x8>), 2) \
- V(extractLane, (ExtractLane<Int16x8>), 2) \
- V(greaterThan, (CompareFunc<Int16x8, GreaterThan, Bool16x8>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Int16x8, GreaterThanOrEqual, Bool16x8>), 2) \
- V(lessThan, (CompareFunc<Int16x8, LessThan, Bool16x8>), 2) \
- V(lessThanOrEqual, (CompareFunc<Int16x8, LessThanOrEqual, Bool16x8>), 2) \
- V(load, (Load<Int16x8, 8>), 2) \
- V(mul, (BinaryFunc<Int16x8, Mul, Int16x8>), 2) \
- V(notEqual, (CompareFunc<Int16x8, NotEqual, Bool16x8>), 2) \
- V(or, (BinaryFunc<Int16x8, Or, Int16x8>), 2) \
- V(sub, (BinaryFunc<Int16x8, Sub, Int16x8>), 2) \
- V(subSaturate, (BinaryFunc<Int16x8, SubSaturate, Int16x8>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Int16x8, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Int16x8, ShiftRightArithmetic>), 2) \
- V(xor, (BinaryFunc<Int16x8, Xor, Int16x8>), 2)
-
-#define INT16X8_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Int16x8>), 3) \
- V(select, (Select<Int16x8, Bool16x8>), 3) \
- V(store, (Store<Int16x8, 8>), 3)
-
-#define INT16X8_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Int16x8>, 9) \
- V(shuffle, Shuffle<Int16x8>, 10)
-
-#define INT16X8_FUNCTION_LIST(V) \
- INT16X8_UNARY_FUNCTION_LIST(V) \
- INT16X8_BINARY_FUNCTION_LIST(V) \
- INT16X8_TERNARY_FUNCTION_LIST(V) \
- INT16X8_SHUFFLE_FUNCTION_LIST(V)
-
-// Uint16x8.
-#define UINT16X8_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Uint16x8, Identity, Uint16x8>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint16x8>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint16x8>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint16x8>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint16x8>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint16x8>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Uint16x8>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Uint16x8>), 1) \
- V(neg, (UnaryFunc<Uint16x8, Neg, Uint16x8>), 1) \
- V(not, (UnaryFunc<Uint16x8, Not, Uint16x8>), 1) \
- V(splat, (FuncSplat<Uint16x8>), 1)
-
-#define UINT16X8_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Uint16x8, Add, Uint16x8>), 2) \
- V(addSaturate, (BinaryFunc<Uint16x8, AddSaturate, Uint16x8>), 2) \
- V(and, (BinaryFunc<Uint16x8, And, Uint16x8>), 2) \
- V(equal, (CompareFunc<Uint16x8, Equal, Bool16x8>), 2) \
- V(extractLane, (ExtractLane<Uint16x8>), 2) \
- V(greaterThan, (CompareFunc<Uint16x8, GreaterThan, Bool16x8>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Uint16x8, GreaterThanOrEqual, Bool16x8>), 2) \
- V(lessThan, (CompareFunc<Uint16x8, LessThan, Bool16x8>), 2) \
- V(lessThanOrEqual, (CompareFunc<Uint16x8, LessThanOrEqual, Bool16x8>), 2) \
- V(load, (Load<Uint16x8, 8>), 2) \
- V(mul, (BinaryFunc<Uint16x8, Mul, Uint16x8>), 2) \
- V(notEqual, (CompareFunc<Uint16x8, NotEqual, Bool16x8>), 2) \
- V(or, (BinaryFunc<Uint16x8, Or, Uint16x8>), 2) \
- V(sub, (BinaryFunc<Uint16x8, Sub, Uint16x8>), 2) \
- V(subSaturate, (BinaryFunc<Uint16x8, SubSaturate, Uint16x8>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Uint16x8, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Uint16x8, ShiftRightLogical>), 2) \
- V(xor, (BinaryFunc<Uint16x8, Xor, Uint16x8>), 2)
-
-#define UINT16X8_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Uint16x8>), 3) \
- V(select, (Select<Uint16x8, Bool16x8>), 3) \
- V(store, (Store<Uint16x8, 8>), 3)
-
-#define UINT16X8_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Uint16x8>, 9) \
- V(shuffle, Shuffle<Uint16x8>, 10)
-
-#define UINT16X8_FUNCTION_LIST(V) \
- UINT16X8_UNARY_FUNCTION_LIST(V) \
- UINT16X8_BINARY_FUNCTION_LIST(V) \
- UINT16X8_TERNARY_FUNCTION_LIST(V) \
- UINT16X8_SHUFFLE_FUNCTION_LIST(V)
-
-// Int32x4.
-#define INT32X4_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Int32x4, Identity, Int32x4>), 1) \
- V(fromFloat32x4, (FuncConvert<Float32x4, Int32x4>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Int32x4>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Int32x4>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Int32x4>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Int32x4>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Int32x4>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Int32x4>), 1) \
- V(fromUint32x4Bits, (FuncConvertBits<Uint32x4, Int32x4>), 1) \
- V(neg, (UnaryFunc<Int32x4, Neg, Int32x4>), 1) \
- V(not, (UnaryFunc<Int32x4, Not, Int32x4>), 1) \
- V(splat, (FuncSplat<Int32x4>), 0)
-
-#define INT32X4_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Int32x4, Add, Int32x4>), 2) \
- V(and, (BinaryFunc<Int32x4, And, Int32x4>), 2) \
- V(equal, (CompareFunc<Int32x4, Equal, Bool32x4>), 2) \
- V(extractLane, (ExtractLane<Int32x4>), 2) \
- V(greaterThan, (CompareFunc<Int32x4, GreaterThan, Bool32x4>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Int32x4, GreaterThanOrEqual, Bool32x4>), 2) \
- V(lessThan, (CompareFunc<Int32x4, LessThan, Bool32x4>), 2) \
- V(lessThanOrEqual, (CompareFunc<Int32x4, LessThanOrEqual, Bool32x4>), 2) \
- V(load, (Load<Int32x4, 4>), 2) \
- V(load3, (Load<Int32x4, 3>), 2) \
- V(load2, (Load<Int32x4, 2>), 2) \
- V(load1, (Load<Int32x4, 1>), 2) \
- V(mul, (BinaryFunc<Int32x4, Mul, Int32x4>), 2) \
- V(notEqual, (CompareFunc<Int32x4, NotEqual, Bool32x4>), 2) \
- V(or, (BinaryFunc<Int32x4, Or, Int32x4>), 2) \
- V(sub, (BinaryFunc<Int32x4, Sub, Int32x4>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Int32x4, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Int32x4, ShiftRightArithmetic>), 2) \
- V(xor, (BinaryFunc<Int32x4, Xor, Int32x4>), 2)
-
-#define INT32X4_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Int32x4>), 3) \
- V(select, (Select<Int32x4, Bool32x4>), 3) \
- V(store, (Store<Int32x4, 4>), 3) \
- V(store3, (Store<Int32x4, 3>), 3) \
- V(store2, (Store<Int32x4, 2>), 3) \
- V(store1, (Store<Int32x4, 1>), 3)
-
-#define INT32X4_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Int32x4>, 5) \
- V(shuffle, Shuffle<Int32x4>, 6)
-
-#define INT32X4_FUNCTION_LIST(V) \
- INT32X4_UNARY_FUNCTION_LIST(V) \
- INT32X4_BINARY_FUNCTION_LIST(V) \
- INT32X4_TERNARY_FUNCTION_LIST(V) \
- INT32X4_SHUFFLE_FUNCTION_LIST(V)
-
-// Uint32x4.
-#define UINT32X4_UNARY_FUNCTION_LIST(V) \
- V(check, (UnaryFunc<Uint32x4, Identity, Uint32x4>), 1) \
- V(fromFloat32x4, (FuncConvert<Float32x4, Uint32x4>), 1) \
- V(fromFloat32x4Bits, (FuncConvertBits<Float32x4, Uint32x4>), 1) \
- V(fromFloat64x2Bits, (FuncConvertBits<Float64x2, Uint32x4>), 1) \
- V(fromInt8x16Bits, (FuncConvertBits<Int8x16, Uint32x4>), 1) \
- V(fromInt16x8Bits, (FuncConvertBits<Int16x8, Uint32x4>), 1) \
- V(fromInt32x4Bits, (FuncConvertBits<Int32x4, Uint32x4>), 1) \
- V(fromUint8x16Bits, (FuncConvertBits<Uint8x16, Uint32x4>), 1) \
- V(fromUint16x8Bits, (FuncConvertBits<Uint16x8, Uint32x4>), 1) \
- V(neg, (UnaryFunc<Uint32x4, Neg, Uint32x4>), 1) \
- V(not, (UnaryFunc<Uint32x4, Not, Uint32x4>), 1) \
- V(splat, (FuncSplat<Uint32x4>), 0)
-
-#define UINT32X4_BINARY_FUNCTION_LIST(V) \
- V(add, (BinaryFunc<Uint32x4, Add, Uint32x4>), 2) \
- V(and, (BinaryFunc<Uint32x4, And, Uint32x4>), 2) \
- V(equal, (CompareFunc<Uint32x4, Equal, Bool32x4>), 2) \
- V(extractLane, (ExtractLane<Uint32x4>), 2) \
- V(greaterThan, (CompareFunc<Uint32x4, GreaterThan, Bool32x4>), 2) \
- V(greaterThanOrEqual, (CompareFunc<Uint32x4, GreaterThanOrEqual, Bool32x4>), 2) \
- V(lessThan, (CompareFunc<Uint32x4, LessThan, Bool32x4>), 2) \
- V(lessThanOrEqual, (CompareFunc<Uint32x4, LessThanOrEqual, Bool32x4>), 2) \
- V(load, (Load<Uint32x4, 4>), 2) \
- V(load3, (Load<Uint32x4, 3>), 2) \
- V(load2, (Load<Uint32x4, 2>), 2) \
- V(load1, (Load<Uint32x4, 1>), 2) \
- V(mul, (BinaryFunc<Uint32x4, Mul, Uint32x4>), 2) \
- V(notEqual, (CompareFunc<Uint32x4, NotEqual, Bool32x4>), 2) \
- V(or, (BinaryFunc<Uint32x4, Or, Uint32x4>), 2) \
- V(sub, (BinaryFunc<Uint32x4, Sub, Uint32x4>), 2) \
- V(shiftLeftByScalar, (BinaryScalar<Uint32x4, ShiftLeft>), 2) \
- V(shiftRightByScalar, (BinaryScalar<Uint32x4, ShiftRightLogical>), 2) \
- V(xor, (BinaryFunc<Uint32x4, Xor, Uint32x4>), 2)
-
-#define UINT32X4_TERNARY_FUNCTION_LIST(V) \
- V(replaceLane, (ReplaceLane<Uint32x4>), 3) \
- V(select, (Select<Uint32x4, Bool32x4>), 3) \
- V(store, (Store<Uint32x4, 4>), 3) \
- V(store3, (Store<Uint32x4, 3>), 3) \
- V(store2, (Store<Uint32x4, 2>), 3) \
- V(store1, (Store<Uint32x4, 1>), 3)
-
-#define UINT32X4_SHUFFLE_FUNCTION_LIST(V) \
- V(swizzle, Swizzle<Uint32x4>, 5) \
- V(shuffle, Shuffle<Uint32x4>, 6)
-
-#define UINT32X4_FUNCTION_LIST(V) \
- UINT32X4_UNARY_FUNCTION_LIST(V) \
- UINT32X4_BINARY_FUNCTION_LIST(V) \
- UINT32X4_TERNARY_FUNCTION_LIST(V) \
- UINT32X4_SHUFFLE_FUNCTION_LIST(V)
-
-/*
- * The FOREACH macros below partition all of the SIMD operations into disjoint
- * sets.
- */
-
-// Operations available on all SIMD types. Mixed arity.
-#define FOREACH_COMMON_SIMD_OP(_) \
- _(extractLane) \
- _(replaceLane) \
- _(check) \
- _(splat)
-
-// Lanewise operations available on numeric SIMD types.
-// Include lane-wise select here since it is not arithmetic and defined on
-// numeric types too.
-#define FOREACH_LANE_SIMD_OP(_) \
- _(select) \
- _(swizzle) \
- _(shuffle)
-
-// Memory operations available on numeric SIMD types.
-#define FOREACH_MEMORY_SIMD_OP(_) \
- _(load) \
- _(store)
-
-// Memory operations available on numeric X4 SIMD types.
-#define FOREACH_MEMORY_X4_SIMD_OP(_) \
- _(load1) \
- _(load2) \
- _(load3) \
- _(store1) \
- _(store2) \
- _(store3)
-
-// Unary operations on Bool vectors.
-#define FOREACH_BOOL_SIMD_UNOP(_) \
- _(allTrue) \
- _(anyTrue)
-
-// Unary bitwise SIMD operators defined on all integer and boolean SIMD types.
-#define FOREACH_BITWISE_SIMD_UNOP(_) \
- _(not)
-
-// Binary bitwise SIMD operators defined on all integer and boolean SIMD types.
-#define FOREACH_BITWISE_SIMD_BINOP(_) \
- _(and) \
- _(or) \
- _(xor)
-
-// Bitwise shifts defined on integer SIMD types.
-#define FOREACH_SHIFT_SIMD_OP(_) \
- _(shiftLeftByScalar) \
- _(shiftRightByScalar)
-
-// Unary arithmetic operators defined on numeric SIMD types.
-#define FOREACH_NUMERIC_SIMD_UNOP(_) \
- _(neg)
-
-// Binary arithmetic operators defined on numeric SIMD types.
-#define FOREACH_NUMERIC_SIMD_BINOP(_) \
- _(add) \
- _(sub) \
- _(mul)
-
-// Unary arithmetic operators defined on floating point SIMD types.
-#define FOREACH_FLOAT_SIMD_UNOP(_) \
- _(abs) \
- _(sqrt) \
- _(reciprocalApproximation) \
- _(reciprocalSqrtApproximation)
-
-// Binary arithmetic operators defined on floating point SIMD types.
-#define FOREACH_FLOAT_SIMD_BINOP(_) \
- _(div) \
- _(max) \
- _(min) \
- _(maxNum) \
- _(minNum)
-
-// Binary operations on small integer (< 32 bits) vectors.
-#define FOREACH_SMINT_SIMD_BINOP(_) \
- _(addSaturate) \
- _(subSaturate)
-
-// Comparison operators defined on numeric SIMD types.
-#define FOREACH_COMP_SIMD_OP(_) \
- _(lessThan) \
- _(lessThanOrEqual) \
- _(equal) \
- _(notEqual) \
- _(greaterThan) \
- _(greaterThanOrEqual)
-
-/*
- * All SIMD operations, excluding casts.
- */
-#define FORALL_SIMD_NONCAST_OP(_) \
- FOREACH_COMMON_SIMD_OP(_) \
- FOREACH_LANE_SIMD_OP(_) \
- FOREACH_MEMORY_SIMD_OP(_) \
- FOREACH_MEMORY_X4_SIMD_OP(_) \
- FOREACH_BOOL_SIMD_UNOP(_) \
- FOREACH_BITWISE_SIMD_UNOP(_) \
- FOREACH_BITWISE_SIMD_BINOP(_) \
- FOREACH_SHIFT_SIMD_OP(_) \
- FOREACH_NUMERIC_SIMD_UNOP(_) \
- FOREACH_NUMERIC_SIMD_BINOP(_) \
- FOREACH_FLOAT_SIMD_UNOP(_) \
- FOREACH_FLOAT_SIMD_BINOP(_) \
- FOREACH_SMINT_SIMD_BINOP(_) \
- FOREACH_COMP_SIMD_OP(_)
-
-/*
- * All operations on integer SIMD types, excluding casts and
- * FOREACH_MEMORY_X4_OP.
- */
-#define FORALL_INT_SIMD_OP(_) \
- FOREACH_COMMON_SIMD_OP(_) \
- FOREACH_LANE_SIMD_OP(_) \
- FOREACH_MEMORY_SIMD_OP(_) \
- FOREACH_BITWISE_SIMD_UNOP(_) \
- FOREACH_BITWISE_SIMD_BINOP(_) \
- FOREACH_SHIFT_SIMD_OP(_) \
- FOREACH_NUMERIC_SIMD_UNOP(_) \
- FOREACH_NUMERIC_SIMD_BINOP(_) \
- FOREACH_COMP_SIMD_OP(_)
-
-/*
- * All operations on floating point SIMD types, excluding casts and
- * FOREACH_MEMORY_X4_OP.
- */
-#define FORALL_FLOAT_SIMD_OP(_) \
- FOREACH_COMMON_SIMD_OP(_) \
- FOREACH_LANE_SIMD_OP(_) \
- FOREACH_MEMORY_SIMD_OP(_) \
- FOREACH_NUMERIC_SIMD_UNOP(_) \
- FOREACH_NUMERIC_SIMD_BINOP(_) \
- FOREACH_FLOAT_SIMD_UNOP(_) \
- FOREACH_FLOAT_SIMD_BINOP(_) \
- FOREACH_COMP_SIMD_OP(_)
-
-/*
- * All operations on Bool SIMD types.
- *
- * These types don't have casts, so no need to specialize.
- */
-#define FORALL_BOOL_SIMD_OP(_) \
- FOREACH_COMMON_SIMD_OP(_) \
- FOREACH_BOOL_SIMD_UNOP(_) \
- FOREACH_BITWISE_SIMD_UNOP(_) \
- FOREACH_BITWISE_SIMD_BINOP(_)
-
-/*
- * The sets of cast operations are listed per type below.
- *
- * These sets are not disjoint.
- */
-
-#define FOREACH_INT8X16_SIMD_CAST(_) \
- _(fromFloat32x4Bits) \
- _(fromFloat64x2Bits) \
- _(fromInt16x8Bits) \
- _(fromInt32x4Bits)
-
-#define FOREACH_INT16X8_SIMD_CAST(_) \
- _(fromFloat32x4Bits) \
- _(fromFloat64x2Bits) \
- _(fromInt8x16Bits) \
- _(fromInt32x4Bits)
-
-#define FOREACH_INT32X4_SIMD_CAST(_) \
- _(fromFloat32x4) \
- _(fromFloat32x4Bits) \
- _(fromFloat64x2Bits) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits)
-
-#define FOREACH_FLOAT32X4_SIMD_CAST(_)\
- _(fromFloat64x2Bits) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits) \
- _(fromInt32x4) \
- _(fromInt32x4Bits)
-
-#define FOREACH_FLOAT64X2_SIMD_CAST(_)\
- _(fromFloat32x4Bits) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits) \
- _(fromInt32x4Bits)
-
-// All operations on Int32x4.
-#define FORALL_INT32X4_SIMD_OP(_) \
- FORALL_INT_SIMD_OP(_) \
- FOREACH_MEMORY_X4_SIMD_OP(_) \
- FOREACH_INT32X4_SIMD_CAST(_)
-
-// All operations on Float32X4
-#define FORALL_FLOAT32X4_SIMD_OP(_) \
- FORALL_FLOAT_SIMD_OP(_) \
- FOREACH_MEMORY_X4_SIMD_OP(_) \
- FOREACH_FLOAT32X4_SIMD_CAST(_)
-
-/*
- * All SIMD operations assuming only 32x4 types exist.
- * This is used in the current asm.js impl.
- */
-#define FORALL_SIMD_ASMJS_OP(_) \
- FORALL_SIMD_NONCAST_OP(_) \
- _(fromFloat32x4) \
- _(fromFloat32x4Bits) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits) \
- _(fromInt32x4) \
- _(fromInt32x4Bits) \
- _(fromUint8x16Bits) \
- _(fromUint16x8Bits) \
- _(fromUint32x4) \
- _(fromUint32x4Bits)
-
-// All operations on Int8x16 or Uint8x16 in the asm.js world.
-// Note: this does not include conversions and casts to/from Uint8x16 because
-// this list is shared between Int8x16 and Uint8x16.
-#define FORALL_INT8X16_ASMJS_OP(_) \
- FORALL_INT_SIMD_OP(_) \
- FOREACH_SMINT_SIMD_BINOP(_) \
- _(fromInt16x8Bits) \
- _(fromInt32x4Bits) \
- _(fromFloat32x4Bits)
-
-// All operations on Int16x8 or Uint16x8 in the asm.js world.
-// Note: this does not include conversions and casts to/from Uint16x8 because
-// this list is shared between Int16x8 and Uint16x8.
-#define FORALL_INT16X8_ASMJS_OP(_) \
- FORALL_INT_SIMD_OP(_) \
- FOREACH_SMINT_SIMD_BINOP(_) \
- _(fromInt8x16Bits) \
- _(fromInt32x4Bits) \
- _(fromFloat32x4Bits)
-
-// All operations on Int32x4 or Uint32x4 in the asm.js world.
-// Note: this does not include conversions and casts to/from Uint32x4 because
-// this list is shared between Int32x4 and Uint32x4.
-#define FORALL_INT32X4_ASMJS_OP(_) \
- FORALL_INT_SIMD_OP(_) \
- FOREACH_MEMORY_X4_SIMD_OP(_) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits) \
- _(fromFloat32x4) \
- _(fromFloat32x4Bits)
-
-// All operations on Float32X4 in the asm.js world.
-#define FORALL_FLOAT32X4_ASMJS_OP(_) \
- FORALL_FLOAT_SIMD_OP(_) \
- FOREACH_MEMORY_X4_SIMD_OP(_) \
- _(fromInt8x16Bits) \
- _(fromInt16x8Bits) \
- _(fromInt32x4Bits) \
- _(fromInt32x4) \
- _(fromUint32x4)
-
-namespace js {
-
-// Complete set of SIMD types.
-// It must be kept in sync with the enumeration of values in
-// TypedObjectConstants.h; in particular we need to ensure that Count is
-// appropriately set with respect to the number of actual types.
-enum class SimdType {
- Int8x16 = JS_SIMDTYPEREPR_INT8X16,
- Int16x8 = JS_SIMDTYPEREPR_INT16X8,
- Int32x4 = JS_SIMDTYPEREPR_INT32X4,
- Uint8x16 = JS_SIMDTYPEREPR_UINT8X16,
- Uint16x8 = JS_SIMDTYPEREPR_UINT16X8,
- Uint32x4 = JS_SIMDTYPEREPR_UINT32X4,
- Float32x4 = JS_SIMDTYPEREPR_FLOAT32X4,
- Float64x2 = JS_SIMDTYPEREPR_FLOAT64X2,
- Bool8x16 = JS_SIMDTYPEREPR_BOOL8X16,
- Bool16x8 = JS_SIMDTYPEREPR_BOOL16X8,
- Bool32x4 = JS_SIMDTYPEREPR_BOOL32X4,
- Bool64x2 = JS_SIMDTYPEREPR_BOOL64X2,
- Count
-};
-
-// The integer SIMD types have a lot of operations that do the exact same thing
-// for signed and unsigned integer types. Sometimes it is simpler to treat
-// signed and unsigned integer SIMD types as the same type, using a SimdSign to
-// distinguish the few cases where there is a difference.
-enum class SimdSign {
- // Signedness is not applicable to this type. (i.e., Float or Bool).
- NotApplicable,
- // Treat as an unsigned integer with a range 0 .. 2^N-1.
- Unsigned,
- // Treat as a signed integer in two's complement encoding.
- Signed,
-};
-
-// Get the signedness of a SIMD type.
-inline SimdSign
-GetSimdSign(SimdType t)
-{
- switch(t) {
- case SimdType::Int8x16:
- case SimdType::Int16x8:
- case SimdType::Int32x4:
- return SimdSign::Signed;
-
- case SimdType::Uint8x16:
- case SimdType::Uint16x8:
- case SimdType::Uint32x4:
- return SimdSign::Unsigned;
-
- default:
- return SimdSign::NotApplicable;
- }
-}
-
-inline bool
-IsSignedIntSimdType(SimdType type)
-{
- return GetSimdSign(type) == SimdSign::Signed;
-}
-
-// Get the boolean SIMD type with the same shape as t.
-//
-// This is the result type of a comparison operation, and it can also be used to
-// identify the geometry of a SIMD type.
-inline SimdType
-GetBooleanSimdType(SimdType t)
-{
- switch(t) {
- case SimdType::Int8x16:
- case SimdType::Uint8x16:
- case SimdType::Bool8x16:
- return SimdType::Bool8x16;
-
- case SimdType::Int16x8:
- case SimdType::Uint16x8:
- case SimdType::Bool16x8:
- return SimdType::Bool16x8;
-
- case SimdType::Int32x4:
- case SimdType::Uint32x4:
- case SimdType::Float32x4:
- case SimdType::Bool32x4:
- return SimdType::Bool32x4;
-
- case SimdType::Float64x2:
- case SimdType::Bool64x2:
- return SimdType::Bool64x2;
-
- case SimdType::Count:
- break;
- }
- MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
-}
-
-// Get the number of lanes in a SIMD type.
-inline unsigned
-GetSimdLanes(SimdType t)
-{
- switch(t) {
- case SimdType::Int8x16:
- case SimdType::Uint8x16:
- case SimdType::Bool8x16:
- return 16;
-
- case SimdType::Int16x8:
- case SimdType::Uint16x8:
- case SimdType::Bool16x8:
- return 8;
-
- case SimdType::Int32x4:
- case SimdType::Uint32x4:
- case SimdType::Float32x4:
- case SimdType::Bool32x4:
- return 4;
-
- case SimdType::Float64x2:
- case SimdType::Bool64x2:
- return 2;
-
- case SimdType::Count:
- break;
- }
- MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad SIMD type");
-}
-
-// Complete set of SIMD operations.
-//
-// No SIMD types implement all of these operations.
-//
-// C++ defines keywords and/or/xor/not, so prepend Fn_ to all named functions to
-// avoid clashes.
-//
-// Note: because of a gcc < v4.8's compiler bug, uint8_t can't be used as the
-// storage class here. See bug 1243810. See also
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64037 .
-enum class SimdOperation {
- // The constructor call. No Fn_ prefix here.
- Constructor,
-
- // All the operations, except for casts.
-#define DEFOP(x) Fn_##x,
- FORALL_SIMD_NONCAST_OP(DEFOP)
-#undef DEFOP
-
- // Int <-> Float conversions.
- Fn_fromInt32x4,
- Fn_fromUint32x4,
- Fn_fromFloat32x4,
-
- // Bitcasts. One for each type with a memory representation.
- Fn_fromInt8x16Bits,
- Fn_fromInt16x8Bits,
- Fn_fromInt32x4Bits,
- Fn_fromUint8x16Bits,
- Fn_fromUint16x8Bits,
- Fn_fromUint32x4Bits,
- Fn_fromFloat32x4Bits,
- Fn_fromFloat64x2Bits,
-
- Last = Fn_fromFloat64x2Bits
-};
-
-// These classes implement the concept containing the following constraints:
-// - requires typename Elem: this is the scalar lane type, stored in each lane
-// of the SIMD vector.
-// - requires static const unsigned lanes: this is the number of lanes (length)
-// of the SIMD vector.
-// - requires static const SimdType type: this is the SimdType enum value
-// corresponding to the SIMD type.
-// - requires static bool Cast(JSContext*, JS::HandleValue, Elem*): casts a
-// given Value to the current scalar lane type and saves it in the Elem
-// out-param.
-// - requires static Value ToValue(Elem): returns a Value of the right type
-// containing the given value.
-//
-// This concept is used in the templates above to define the functions
-// associated to a given type and in their implementations, to avoid code
-// redundancy.
-
-struct Float32x4 {
- typedef float Elem;
- static const unsigned lanes = 4;
- static const SimdType type = SimdType::Float32x4;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- double d;
- if (!ToNumber(cx, v, &d))
- return false;
- *out = float(d);
- return true;
- }
- static Value ToValue(Elem value) {
- return DoubleValue(JS::CanonicalizeNaN(value));
- }
-};
-
-struct Float64x2 {
- typedef double Elem;
- static const unsigned lanes = 2;
- static const SimdType type = SimdType::Float64x2;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToNumber(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return DoubleValue(JS::CanonicalizeNaN(value));
- }
-};
-
-struct Int8x16 {
- typedef int8_t Elem;
- static const unsigned lanes = 16;
- static const SimdType type = SimdType::Int8x16;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToInt8(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Int16x8 {
- typedef int16_t Elem;
- static const unsigned lanes = 8;
- static const SimdType type = SimdType::Int16x8;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToInt16(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Int32x4 {
- typedef int32_t Elem;
- static const unsigned lanes = 4;
- static const SimdType type = SimdType::Int32x4;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToInt32(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Uint8x16 {
- typedef uint8_t Elem;
- static const unsigned lanes = 16;
- static const SimdType type = SimdType::Uint8x16;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToUint8(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Uint16x8 {
- typedef uint16_t Elem;
- static const unsigned lanes = 8;
- static const SimdType type = SimdType::Uint16x8;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToUint16(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Uint32x4 {
- typedef uint32_t Elem;
- static const unsigned lanes = 4;
- static const SimdType type = SimdType::Uint32x4;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- return ToUint32(cx, v, out);
- }
- static Value ToValue(Elem value) {
- return NumberValue(value);
- }
-};
-
-struct Bool8x16 {
- typedef int8_t Elem;
- static const unsigned lanes = 16;
- static const SimdType type = SimdType::Bool8x16;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- *out = ToBoolean(v) ? -1 : 0;
- return true;
- }
- static Value ToValue(Elem value) {
- return BooleanValue(value);
- }
-};
-
-struct Bool16x8 {
- typedef int16_t Elem;
- static const unsigned lanes = 8;
- static const SimdType type = SimdType::Bool16x8;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- *out = ToBoolean(v) ? -1 : 0;
- return true;
- }
- static Value ToValue(Elem value) {
- return BooleanValue(value);
- }
-};
-
-struct Bool32x4 {
- typedef int32_t Elem;
- static const unsigned lanes = 4;
- static const SimdType type = SimdType::Bool32x4;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- *out = ToBoolean(v) ? -1 : 0;
- return true;
- }
- static Value ToValue(Elem value) {
- return BooleanValue(value);
- }
-};
-
-struct Bool64x2 {
- typedef int64_t Elem;
- static const unsigned lanes = 2;
- static const SimdType type = SimdType::Bool64x2;
- static MOZ_MUST_USE bool Cast(JSContext* cx, JS::HandleValue v, Elem* out) {
- *out = ToBoolean(v) ? -1 : 0;
- return true;
- }
- static Value ToValue(Elem value) {
- return BooleanValue(value);
- }
-};
-
-// Get the well known name of the SIMD.* object corresponding to type.
-PropertyName* SimdTypeToName(const JSAtomState& atoms, SimdType type);
-
-// Check if name is the well known name of a SIMD type.
-// Returns true and sets *type iff name is known.
-bool IsSimdTypeName(const JSAtomState& atoms, const PropertyName* name, SimdType* type);
-
-const char* SimdTypeToString(SimdType type);
-
-template<typename V>
-JSObject* CreateSimd(JSContext* cx, const typename V::Elem* data);
-
-template<typename V>
-bool IsVectorObject(HandleValue v);
-
-template<typename V>
-MOZ_MUST_USE bool ToSimdConstant(JSContext* cx, HandleValue v, jit::SimdConstant* out);
-
-JSObject*
-InitSimdClass(JSContext* cx, HandleObject obj);
-
-namespace jit {
-
-extern const JSJitInfo JitInfo_SimdInt32x4_extractLane;
-extern const JSJitInfo JitInfo_SimdFloat32x4_extractLane;
-
-} // namespace jit
-
-#define DECLARE_SIMD_FLOAT32X4_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_float32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
-FLOAT32X4_FUNCTION_LIST(DECLARE_SIMD_FLOAT32X4_FUNCTION)
-#undef DECLARE_SIMD_FLOAT32X4_FUNCTION
-
-#define DECLARE_SIMD_FLOAT64X2_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_float64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
-FLOAT64X2_FUNCTION_LIST(DECLARE_SIMD_FLOAT64X2_FUNCTION)
-#undef DECLARE_SIMD_FLOAT64X2_FUNCTION
-
-#define DECLARE_SIMD_INT8X16_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_int8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
-INT8X16_FUNCTION_LIST(DECLARE_SIMD_INT8X16_FUNCTION)
-#undef DECLARE_SIMD_INT8X16_FUNCTION
-
-#define DECLARE_SIMD_INT16X8_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_int16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
-INT16X8_FUNCTION_LIST(DECLARE_SIMD_INT16X8_FUNCTION)
-#undef DECLARE_SIMD_INT16X8_FUNCTION
-
-#define DECLARE_SIMD_INT32X4_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_int32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
-INT32X4_FUNCTION_LIST(DECLARE_SIMD_INT32X4_FUNCTION)
-#undef DECLARE_SIMD_INT32X4_FUNCTION
-
-#define DECLARE_SIMD_UINT8X16_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_uint8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
-UINT8X16_FUNCTION_LIST(DECLARE_SIMD_UINT8X16_FUNCTION)
-#undef DECLARE_SIMD_UINT8X16_FUNCTION
-
-#define DECLARE_SIMD_UINT16X8_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_uint16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
-UINT16X8_FUNCTION_LIST(DECLARE_SIMD_UINT16X8_FUNCTION)
-#undef DECLARE_SIMD_UINT16X8_FUNCTION
-
-#define DECLARE_SIMD_UINT32X4_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_uint32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
-UINT32X4_FUNCTION_LIST(DECLARE_SIMD_UINT32X4_FUNCTION)
-#undef DECLARE_SIMD_UINT32X4_FUNCTION
-
-#define DECLARE_SIMD_BOOL8X16_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_bool8x16_##Name(JSContext* cx, unsigned argc, Value* vp);
-BOOL8X16_FUNCTION_LIST(DECLARE_SIMD_BOOL8X16_FUNCTION)
-#undef DECLARE_SIMD_BOOL8X16_FUNCTION
-
-#define DECLARE_SIMD_BOOL16X8_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_bool16x8_##Name(JSContext* cx, unsigned argc, Value* vp);
-BOOL16X8_FUNCTION_LIST(DECLARE_SIMD_BOOL16X8_FUNCTION)
-#undef DECLARE_SIMD_BOOL16X8_FUNCTION
-
-#define DECLARE_SIMD_BOOL32X4_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_bool32x4_##Name(JSContext* cx, unsigned argc, Value* vp);
-BOOL32X4_FUNCTION_LIST(DECLARE_SIMD_BOOL32X4_FUNCTION)
-#undef DECLARE_SIMD_BOOL32X4_FUNCTION
-
-#define DECLARE_SIMD_BOOL64X2_FUNCTION(Name, Func, Operands) \
-extern MOZ_MUST_USE bool \
-simd_bool64x2_##Name(JSContext* cx, unsigned argc, Value* vp);
-BOOL64X2_FUNCTION_LIST(DECLARE_SIMD_BOOL64X2_FUNCTION)
-#undef DECLARE_SIMD_BOOL64X2_FUNCTION
-
-} /* namespace js */
-
-#endif /* builtin_SIMD_h */
diff --git a/js/src/builtin/TestingFunctions.cpp b/js/src/builtin/TestingFunctions.cpp
index 997695aecb..2608733853 100644
--- a/js/src/builtin/TestingFunctions.cpp
+++ b/js/src/builtin/TestingFunctions.cpp
@@ -3114,7 +3114,7 @@ static bool
IsSimdAvailable(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
-#if defined(JS_CODEGEN_NONE) || !defined(ENABLE_SIMD)
+#if defined(JS_CODEGEN_NONE)
bool available = false;
#else
bool available = cx->jitSupportsSimd();
diff --git a/js/src/builtin/TypedObject.cpp b/js/src/builtin/TypedObject.cpp
index 2796848c02..10ae8902dd 100644
--- a/js/src/builtin/TypedObject.cpp
+++ b/js/src/builtin/TypedObject.cpp
@@ -12,7 +12,6 @@
#include "jsfun.h"
#include "jsutil.h"
-#include "builtin/SIMD.h"
#include "gc/Marking.h"
#include "js/Vector.h"
#include "vm/GlobalObject.h"
@@ -255,10 +254,6 @@ ScalarTypeDescr::typeName(Type type)
JS_FOR_EACH_SCALAR_TYPE_REPR(NUMERIC_TYPE_TO_STRING)
#undef NUMERIC_TYPE_TO_STRING
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@@ -296,10 +291,6 @@ ScalarTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
JS_FOR_EACH_SCALAR_TYPE_REPR(SCALARTYPE_CALL)
#undef SCALARTYPE_CALL
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
MOZ_CRASH();
}
@@ -414,50 +405,6 @@ js::ReferenceTypeDescr::call(JSContext* cx, unsigned argc, Value* vp)
}
/***************************************************************************
- * SIMD type objects
- *
- * Note: these are partially defined in SIMD.cpp
- */
-
-SimdType
-SimdTypeDescr::type() const {
- uint32_t t = uint32_t(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
- MOZ_ASSERT(t < uint32_t(SimdType::Count));
- return SimdType(t);
-}
-
-uint32_t
-SimdTypeDescr::size(SimdType t)
-{
- MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
- switch (t) {
- case SimdType::Int8x16:
- case SimdType::Int16x8:
- case SimdType::Int32x4:
- case SimdType::Uint8x16:
- case SimdType::Uint16x8:
- case SimdType::Uint32x4:
- case SimdType::Float32x4:
- case SimdType::Float64x2:
- case SimdType::Bool8x16:
- case SimdType::Bool16x8:
- case SimdType::Bool32x4:
- case SimdType::Bool64x2:
- return 16;
- case SimdType::Count:
- break;
- }
- MOZ_CRASH("unexpected SIMD type");
-}
-
-uint32_t
-SimdTypeDescr::alignment(SimdType t)
-{
- MOZ_ASSERT(unsigned(t) < unsigned(SimdType::Count));
- return size(t);
-}
-
-/***************************************************************************
* ArrayMetaTypeDescr class
*/
@@ -1525,7 +1472,6 @@ TypedObjLengthFromType(TypeDescr& descr)
case type::Scalar:
case type::Reference:
case type::Struct:
- case type::Simd:
return 0;
case type::Array:
@@ -1650,7 +1596,6 @@ TypeDescr::hasProperty(const JSAtomState& names, jsid id)
switch (kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
return false;
case type::Array:
@@ -1723,7 +1668,6 @@ TypedObject::obj_hasProperty(JSContext* cx, HandleObject obj, HandleId id, bool*
switch (typedObj->typeDescr().kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
break;
case type::Array: {
@@ -1775,9 +1719,6 @@ TypedObject::obj_getProperty(JSContext* cx, HandleObject obj, HandleValue receiv
case type::Reference:
break;
- case type::Simd:
- break;
-
case type::Array:
if (JSID_IS_ATOM(id, cx->names().length)) {
if (!typedObj->isAttached()) {
@@ -1824,7 +1765,6 @@ TypedObject::obj_getElement(JSContext* cx, HandleObject obj, HandleValue receive
switch (descr->kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
case type::Struct:
break;
@@ -1870,9 +1810,6 @@ TypedObject::obj_setProperty(JSContext* cx, HandleObject obj, HandleId id, Handl
case type::Reference:
break;
- case type::Simd:
- break;
-
case type::Array: {
if (JSID_IS_ATOM(id, cx->names().length)) {
if (receiver.isObject() && obj == &receiver.toObject()) {
@@ -1940,7 +1877,6 @@ TypedObject::obj_getOwnPropertyDescriptor(JSContext* cx, HandleObject obj, Handl
switch (descr->kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
break;
case type::Array:
@@ -1994,7 +1930,6 @@ IsOwnId(JSContext* cx, HandleObject obj, HandleId id)
switch (typedObj->typeDescr().kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
return false;
case type::Array:
@@ -2033,8 +1968,7 @@ TypedObject::obj_enumerate(JSContext* cx, HandleObject obj, AutoIdVector& proper
RootedId id(cx);
switch (descr->kind()) {
case type::Scalar:
- case type::Reference:
- case type::Simd: {
+ case type::Reference: {
// Nothing to enumerate.
break;
}
@@ -2257,7 +2191,6 @@ LengthForType(TypeDescr& descr)
case type::Scalar:
case type::Reference:
case type::Struct:
- case type::Simd:
return 0;
case type::Array:
@@ -2555,22 +2488,6 @@ js::GetTypedObjectModule(JSContext* cx, unsigned argc, Value* vp)
return true;
}
-bool
-js::GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp)
-{
- CallArgs args = CallArgsFromVp(argc, vp);
- MOZ_ASSERT(args.length() == 1);
- MOZ_ASSERT(args[0].isInt32());
- // One of the JS_SIMDTYPEREPR_* constants / a SimdType enum value.
- // getOrCreateSimdTypeDescr() will do the range check.
- int32_t simdTypeRepr = args[0].toInt32();
- Rooted<GlobalObject*> global(cx, cx->global());
- MOZ_ASSERT(global);
- auto* obj = GlobalObject::getOrCreateSimdTypeDescr(cx, global, SimdType(simdTypeRepr));
- args.rval().setObject(*obj);
- return true;
-}
-
#define JS_STORE_SCALAR_CLASS_IMPL(_constant, T, _name) \
bool \
js::StoreScalar##T::Func(JSContext* cx, unsigned argc, Value* vp) \
@@ -2761,7 +2678,6 @@ visitReferences(TypeDescr& descr,
switch (descr.kind()) {
case type::Scalar:
- case type::Simd:
return;
case type::Reference:
diff --git a/js/src/builtin/TypedObject.h b/js/src/builtin/TypedObject.h
index 9318a0f795..cceff0c638 100644
--- a/js/src/builtin/TypedObject.h
+++ b/js/src/builtin/TypedObject.h
@@ -117,7 +117,6 @@ namespace type {
enum Kind {
Scalar = JS_TYPEREPR_SCALAR_KIND,
Reference = JS_TYPEREPR_REFERENCE_KIND,
- Simd = JS_TYPEREPR_SIMD_KIND,
Struct = JS_TYPEREPR_STRUCT_KIND,
Array = JS_TYPEREPR_ARRAY_KIND
};
@@ -129,7 +128,6 @@ enum Kind {
class SimpleTypeDescr;
class ComplexTypeDescr;
-class SimdTypeDescr;
class StructTypeDescr;
class TypedProto;
@@ -255,14 +253,6 @@ class ScalarTypeDescr : public SimpleTypeDescr
"TypedObjectConstants.h must be consistent with Scalar::Type");
static_assert(Scalar::Uint8Clamped == JS_SCALARTYPEREPR_UINT8_CLAMPED,
"TypedObjectConstants.h must be consistent with Scalar::Type");
- static_assert(Scalar::Float32x4 == JS_SCALARTYPEREPR_FLOAT32X4,
- "TypedObjectConstants.h must be consistent with Scalar::Type");
- static_assert(Scalar::Int8x16 == JS_SCALARTYPEREPR_INT8X16,
- "TypedObjectConstants.h must be consistent with Scalar::Type");
- static_assert(Scalar::Int16x8 == JS_SCALARTYPEREPR_INT16X8,
- "TypedObjectConstants.h must be consistent with Scalar::Type");
- static_assert(Scalar::Int32x4 == JS_SCALARTYPEREPR_INT32X4,
- "TypedObjectConstants.h must be consistent with Scalar::Type");
return Type(getReservedSlot(JS_DESCR_SLOT_TYPE).toInt32());
}
@@ -340,25 +330,6 @@ class ComplexTypeDescr : public TypeDescr
}
};
-enum class SimdType;
-
-/*
- * SIMD Type descriptors.
- */
-class SimdTypeDescr : public ComplexTypeDescr
-{
- public:
- static const type::Kind Kind = type::Simd;
- static const bool Opaque = false;
- static const Class class_;
- static uint32_t size(SimdType t);
- static uint32_t alignment(SimdType t);
- static MOZ_MUST_USE bool call(JSContext* cx, unsigned argc, Value* vp);
- static bool is(const Value& v);
-
- SimdType type() const;
-};
-
bool IsTypedObjectClass(const Class* clasp); // Defined below
bool IsTypedObjectArray(JSObject& obj);
@@ -757,16 +728,6 @@ class InlineOpaqueTypedObject : public InlineTypedObject
static const Class class_;
};
-// Class for the global SIMD object.
-class SimdObject : public JSObject
-{
- public:
- static const Class class_;
- static MOZ_MUST_USE bool toString(JSContext* cx, unsigned int argc, Value* vp);
- static MOZ_MUST_USE bool resolve(JSContext* cx, JS::HandleObject obj, JS::HandleId,
- bool* resolved);
-};
-
/*
* Usage: NewOpaqueTypedObject(typeObj)
*
@@ -865,16 +826,6 @@ MOZ_MUST_USE bool ClampToUint8(JSContext* cx, unsigned argc, Value* vp);
MOZ_MUST_USE bool GetTypedObjectModule(JSContext* cx, unsigned argc, Value* vp);
/*
- * Usage: GetSimdTypeDescr(simdTypeRepr)
- *
- * Returns one of the SIMD type objects, identified by `simdTypeRepr` which must
- * be one of the JS_SIMDTYPEREPR_* constants.
- *
- * The SIMD pseudo-module must have been initialized for this to be safe.
- */
-MOZ_MUST_USE bool GetSimdTypeDescr(JSContext* cx, unsigned argc, Value* vp);
-
-/*
* Usage: Store_int8(targetDatum, targetOffset, value)
* ...
* Store_uint8(targetDatum, targetOffset, value)
@@ -1008,8 +959,7 @@ inline bool
IsComplexTypeDescrClass(const Class* clasp)
{
return clasp == &StructTypeDescr::class_ ||
- clasp == &ArrayTypeDescr::class_ ||
- clasp == &SimdTypeDescr::class_;
+ clasp == &ArrayTypeDescr::class_;
}
inline bool
diff --git a/js/src/builtin/TypedObject.js b/js/src/builtin/TypedObject.js
index c4ddee486b..31ece2712f 100644
--- a/js/src/builtin/TypedObject.js
+++ b/js/src/builtin/TypedObject.js
@@ -58,9 +58,6 @@ function TypedObjectGet(descr, typedObj, offset) {
case JS_TYPEREPR_REFERENCE_KIND:
return TypedObjectGetReference(descr, typedObj, offset);
- case JS_TYPEREPR_SIMD_KIND:
- return TypedObjectGetSimd(descr, typedObj, offset);
-
case JS_TYPEREPR_ARRAY_KIND:
case JS_TYPEREPR_STRUCT_KIND:
return TypedObjectGetDerived(descr, typedObj, offset);
@@ -142,144 +139,6 @@ function TypedObjectGetReference(descr, typedObj, offset) {
return undefined;
}
-function TypedObjectGetSimd(descr, typedObj, offset) {
- var type = DESCR_TYPE(descr);
- var simdTypeDescr = GetSimdTypeDescr(type);
- switch (type) {
- case JS_SIMDTYPEREPR_FLOAT32X4:
- var x = Load_float32(typedObj, offset + 0);
- var y = Load_float32(typedObj, offset + 4);
- var z = Load_float32(typedObj, offset + 8);
- var w = Load_float32(typedObj, offset + 12);
- return simdTypeDescr(x, y, z, w);
-
- case JS_SIMDTYPEREPR_FLOAT64X2:
- var x = Load_float64(typedObj, offset + 0);
- var y = Load_float64(typedObj, offset + 8);
- return simdTypeDescr(x, y);
-
- case JS_SIMDTYPEREPR_INT8X16:
- var s0 = Load_int8(typedObj, offset + 0);
- var s1 = Load_int8(typedObj, offset + 1);
- var s2 = Load_int8(typedObj, offset + 2);
- var s3 = Load_int8(typedObj, offset + 3);
- var s4 = Load_int8(typedObj, offset + 4);
- var s5 = Load_int8(typedObj, offset + 5);
- var s6 = Load_int8(typedObj, offset + 6);
- var s7 = Load_int8(typedObj, offset + 7);
- var s8 = Load_int8(typedObj, offset + 8);
- var s9 = Load_int8(typedObj, offset + 9);
- var s10 = Load_int8(typedObj, offset + 10);
- var s11 = Load_int8(typedObj, offset + 11);
- var s12 = Load_int8(typedObj, offset + 12);
- var s13 = Load_int8(typedObj, offset + 13);
- var s14 = Load_int8(typedObj, offset + 14);
- var s15 = Load_int8(typedObj, offset + 15);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
-
- case JS_SIMDTYPEREPR_INT16X8:
- var s0 = Load_int16(typedObj, offset + 0);
- var s1 = Load_int16(typedObj, offset + 2);
- var s2 = Load_int16(typedObj, offset + 4);
- var s3 = Load_int16(typedObj, offset + 6);
- var s4 = Load_int16(typedObj, offset + 8);
- var s5 = Load_int16(typedObj, offset + 10);
- var s6 = Load_int16(typedObj, offset + 12);
- var s7 = Load_int16(typedObj, offset + 14);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
-
- case JS_SIMDTYPEREPR_INT32X4:
- var x = Load_int32(typedObj, offset + 0);
- var y = Load_int32(typedObj, offset + 4);
- var z = Load_int32(typedObj, offset + 8);
- var w = Load_int32(typedObj, offset + 12);
- return simdTypeDescr(x, y, z, w);
-
- case JS_SIMDTYPEREPR_UINT8X16:
- var s0 = Load_uint8(typedObj, offset + 0);
- var s1 = Load_uint8(typedObj, offset + 1);
- var s2 = Load_uint8(typedObj, offset + 2);
- var s3 = Load_uint8(typedObj, offset + 3);
- var s4 = Load_uint8(typedObj, offset + 4);
- var s5 = Load_uint8(typedObj, offset + 5);
- var s6 = Load_uint8(typedObj, offset + 6);
- var s7 = Load_uint8(typedObj, offset + 7);
- var s8 = Load_uint8(typedObj, offset + 8);
- var s9 = Load_uint8(typedObj, offset + 9);
- var s10 = Load_uint8(typedObj, offset + 10);
- var s11 = Load_uint8(typedObj, offset + 11);
- var s12 = Load_uint8(typedObj, offset + 12);
- var s13 = Load_uint8(typedObj, offset + 13);
- var s14 = Load_uint8(typedObj, offset + 14);
- var s15 = Load_uint8(typedObj, offset + 15);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
-
- case JS_SIMDTYPEREPR_UINT16X8:
- var s0 = Load_uint16(typedObj, offset + 0);
- var s1 = Load_uint16(typedObj, offset + 2);
- var s2 = Load_uint16(typedObj, offset + 4);
- var s3 = Load_uint16(typedObj, offset + 6);
- var s4 = Load_uint16(typedObj, offset + 8);
- var s5 = Load_uint16(typedObj, offset + 10);
- var s6 = Load_uint16(typedObj, offset + 12);
- var s7 = Load_uint16(typedObj, offset + 14);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
-
- case JS_SIMDTYPEREPR_UINT32X4:
- var x = Load_uint32(typedObj, offset + 0);
- var y = Load_uint32(typedObj, offset + 4);
- var z = Load_uint32(typedObj, offset + 8);
- var w = Load_uint32(typedObj, offset + 12);
- return simdTypeDescr(x, y, z, w);
-
- case JS_SIMDTYPEREPR_BOOL8X16:
- var s0 = Load_int8(typedObj, offset + 0);
- var s1 = Load_int8(typedObj, offset + 1);
- var s2 = Load_int8(typedObj, offset + 2);
- var s3 = Load_int8(typedObj, offset + 3);
- var s4 = Load_int8(typedObj, offset + 4);
- var s5 = Load_int8(typedObj, offset + 5);
- var s6 = Load_int8(typedObj, offset + 6);
- var s7 = Load_int8(typedObj, offset + 7);
- var s8 = Load_int8(typedObj, offset + 8);
- var s9 = Load_int8(typedObj, offset + 9);
- var s10 = Load_int8(typedObj, offset + 10);
- var s11 = Load_int8(typedObj, offset + 11);
- var s12 = Load_int8(typedObj, offset + 12);
- var s13 = Load_int8(typedObj, offset + 13);
- var s14 = Load_int8(typedObj, offset + 14);
- var s15 = Load_int8(typedObj, offset + 15);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13, s14, s15);
-
- case JS_SIMDTYPEREPR_BOOL16X8:
- var s0 = Load_int16(typedObj, offset + 0);
- var s1 = Load_int16(typedObj, offset + 2);
- var s2 = Load_int16(typedObj, offset + 4);
- var s3 = Load_int16(typedObj, offset + 6);
- var s4 = Load_int16(typedObj, offset + 8);
- var s5 = Load_int16(typedObj, offset + 10);
- var s6 = Load_int16(typedObj, offset + 12);
- var s7 = Load_int16(typedObj, offset + 14);
- return simdTypeDescr(s0, s1, s2, s3, s4, s5, s6, s7);
-
- case JS_SIMDTYPEREPR_BOOL32X4:
- var x = Load_int32(typedObj, offset + 0);
- var y = Load_int32(typedObj, offset + 4);
- var z = Load_int32(typedObj, offset + 8);
- var w = Load_int32(typedObj, offset + 12);
- return simdTypeDescr(x, y, z, w);
-
- case JS_SIMDTYPEREPR_BOOL64X2:
- var x = Load_int32(typedObj, offset + 0);
- var y = Load_int32(typedObj, offset + 8);
- return simdTypeDescr(x, y);
-
- }
-
- assert(false, "Unhandled SIMD type: " + type);
- return undefined;
-}
-
///////////////////////////////////////////////////////////////////////////
// Setting values
//
@@ -301,10 +160,6 @@ function TypedObjectSet(descr, typedObj, offset, name, fromValue) {
TypedObjectSetReference(descr, typedObj, offset, name, fromValue);
return;
- case JS_TYPEREPR_SIMD_KIND:
- TypedObjectSetSimd(descr, typedObj, offset, fromValue);
- return;
-
case JS_TYPEREPR_ARRAY_KIND:
var length = DESCR_ARRAY_LENGTH(descr);
if (TypedObjectSetArray(descr, length, typedObj, offset, fromValue))
@@ -418,107 +273,6 @@ function TypedObjectSetReference(descr, typedObj, offset, name, fromValue) {
return undefined;
}
-// Sets `fromValue` to `this` assuming that `this` is a scalar type.
-function TypedObjectSetSimd(descr, typedObj, offset, fromValue) {
- if (!IsObject(fromValue) || !ObjectIsTypedObject(fromValue))
- ThrowTypeError(JSMSG_CANT_CONVERT_TO,
- typeof(fromValue),
- DESCR_STRING_REPR(descr));
-
- if (!DescrsEquiv(descr, TypedObjectTypeDescr(fromValue)))
- ThrowTypeError(JSMSG_CANT_CONVERT_TO,
- typeof(fromValue),
- DESCR_STRING_REPR(descr));
-
- var type = DESCR_TYPE(descr);
- switch (type) {
- case JS_SIMDTYPEREPR_FLOAT32X4:
- Store_float32(typedObj, offset + 0, Load_float32(fromValue, 0));
- Store_float32(typedObj, offset + 4, Load_float32(fromValue, 4));
- Store_float32(typedObj, offset + 8, Load_float32(fromValue, 8));
- Store_float32(typedObj, offset + 12, Load_float32(fromValue, 12));
- break;
- case JS_SIMDTYPEREPR_FLOAT64X2:
- Store_float64(typedObj, offset + 0, Load_float64(fromValue, 0));
- Store_float64(typedObj, offset + 8, Load_float64(fromValue, 8));
- break;
- case JS_SIMDTYPEREPR_INT8X16:
- case JS_SIMDTYPEREPR_BOOL8X16:
- Store_int8(typedObj, offset + 0, Load_int8(fromValue, 0));
- Store_int8(typedObj, offset + 1, Load_int8(fromValue, 1));
- Store_int8(typedObj, offset + 2, Load_int8(fromValue, 2));
- Store_int8(typedObj, offset + 3, Load_int8(fromValue, 3));
- Store_int8(typedObj, offset + 4, Load_int8(fromValue, 4));
- Store_int8(typedObj, offset + 5, Load_int8(fromValue, 5));
- Store_int8(typedObj, offset + 6, Load_int8(fromValue, 6));
- Store_int8(typedObj, offset + 7, Load_int8(fromValue, 7));
- Store_int8(typedObj, offset + 8, Load_int8(fromValue, 8));
- Store_int8(typedObj, offset + 9, Load_int8(fromValue, 9));
- Store_int8(typedObj, offset + 10, Load_int8(fromValue, 10));
- Store_int8(typedObj, offset + 11, Load_int8(fromValue, 11));
- Store_int8(typedObj, offset + 12, Load_int8(fromValue, 12));
- Store_int8(typedObj, offset + 13, Load_int8(fromValue, 13));
- Store_int8(typedObj, offset + 14, Load_int8(fromValue, 14));
- Store_int8(typedObj, offset + 15, Load_int8(fromValue, 15));
- break;
- case JS_SIMDTYPEREPR_INT16X8:
- case JS_SIMDTYPEREPR_BOOL16X8:
- Store_int16(typedObj, offset + 0, Load_int16(fromValue, 0));
- Store_int16(typedObj, offset + 2, Load_int16(fromValue, 2));
- Store_int16(typedObj, offset + 4, Load_int16(fromValue, 4));
- Store_int16(typedObj, offset + 6, Load_int16(fromValue, 6));
- Store_int16(typedObj, offset + 8, Load_int16(fromValue, 8));
- Store_int16(typedObj, offset + 10, Load_int16(fromValue, 10));
- Store_int16(typedObj, offset + 12, Load_int16(fromValue, 12));
- Store_int16(typedObj, offset + 14, Load_int16(fromValue, 14));
- break;
- case JS_SIMDTYPEREPR_INT32X4:
- case JS_SIMDTYPEREPR_BOOL32X4:
- case JS_SIMDTYPEREPR_BOOL64X2:
- Store_int32(typedObj, offset + 0, Load_int32(fromValue, 0));
- Store_int32(typedObj, offset + 4, Load_int32(fromValue, 4));
- Store_int32(typedObj, offset + 8, Load_int32(fromValue, 8));
- Store_int32(typedObj, offset + 12, Load_int32(fromValue, 12));
- break;
- case JS_SIMDTYPEREPR_UINT8X16:
- Store_uint8(typedObj, offset + 0, Load_uint8(fromValue, 0));
- Store_uint8(typedObj, offset + 1, Load_uint8(fromValue, 1));
- Store_uint8(typedObj, offset + 2, Load_uint8(fromValue, 2));
- Store_uint8(typedObj, offset + 3, Load_uint8(fromValue, 3));
- Store_uint8(typedObj, offset + 4, Load_uint8(fromValue, 4));
- Store_uint8(typedObj, offset + 5, Load_uint8(fromValue, 5));
- Store_uint8(typedObj, offset + 6, Load_uint8(fromValue, 6));
- Store_uint8(typedObj, offset + 7, Load_uint8(fromValue, 7));
- Store_uint8(typedObj, offset + 8, Load_uint8(fromValue, 8));
- Store_uint8(typedObj, offset + 9, Load_uint8(fromValue, 9));
- Store_uint8(typedObj, offset + 10, Load_uint8(fromValue, 10));
- Store_uint8(typedObj, offset + 11, Load_uint8(fromValue, 11));
- Store_uint8(typedObj, offset + 12, Load_uint8(fromValue, 12));
- Store_uint8(typedObj, offset + 13, Load_uint8(fromValue, 13));
- Store_uint8(typedObj, offset + 14, Load_uint8(fromValue, 14));
- Store_uint8(typedObj, offset + 15, Load_uint8(fromValue, 15));
- break;
- case JS_SIMDTYPEREPR_UINT16X8:
- Store_uint16(typedObj, offset + 0, Load_uint16(fromValue, 0));
- Store_uint16(typedObj, offset + 2, Load_uint16(fromValue, 2));
- Store_uint16(typedObj, offset + 4, Load_uint16(fromValue, 4));
- Store_uint16(typedObj, offset + 6, Load_uint16(fromValue, 6));
- Store_uint16(typedObj, offset + 8, Load_uint16(fromValue, 8));
- Store_uint16(typedObj, offset + 10, Load_uint16(fromValue, 10));
- Store_uint16(typedObj, offset + 12, Load_uint16(fromValue, 12));
- Store_uint16(typedObj, offset + 14, Load_uint16(fromValue, 14));
- break;
- case JS_SIMDTYPEREPR_UINT32X4:
- Store_uint32(typedObj, offset + 0, Load_uint32(fromValue, 0));
- Store_uint32(typedObj, offset + 4, Load_uint32(fromValue, 4));
- Store_uint32(typedObj, offset + 8, Load_uint32(fromValue, 8));
- Store_uint32(typedObj, offset + 12, Load_uint32(fromValue, 12));
- break;
- default:
- assert(false, "Unhandled Simd type: " + type);
- }
-}
-
///////////////////////////////////////////////////////////////////////////
// C++ Wrappers
//
@@ -635,241 +389,6 @@ function TypedObjectArrayRedimension(newArrayType) {
}
///////////////////////////////////////////////////////////////////////////
-// SIMD
-
-function SimdProtoString(type) {
- switch (type) {
- case JS_SIMDTYPEREPR_INT8X16:
- return "Int8x16";
- case JS_SIMDTYPEREPR_INT16X8:
- return "Int16x8";
- case JS_SIMDTYPEREPR_INT32X4:
- return "Int32x4";
- case JS_SIMDTYPEREPR_UINT8X16:
- return "Uint8x16";
- case JS_SIMDTYPEREPR_UINT16X8:
- return "Uint16x8";
- case JS_SIMDTYPEREPR_UINT32X4:
- return "Uint32x4";
- case JS_SIMDTYPEREPR_FLOAT32X4:
- return "Float32x4";
- case JS_SIMDTYPEREPR_FLOAT64X2:
- return "Float64x2";
- case JS_SIMDTYPEREPR_BOOL8X16:
- return "Bool8x16";
- case JS_SIMDTYPEREPR_BOOL16X8:
- return "Bool16x8";
- case JS_SIMDTYPEREPR_BOOL32X4:
- return "Bool32x4";
- case JS_SIMDTYPEREPR_BOOL64X2:
- return "Bool64x2";
- }
-
- assert(false, "Unhandled type constant");
- return undefined;
-}
-
-function SimdTypeToLength(type) {
- switch (type) {
- case JS_SIMDTYPEREPR_INT8X16:
- case JS_SIMDTYPEREPR_BOOL8X16:
- return 16;
- case JS_SIMDTYPEREPR_INT16X8:
- case JS_SIMDTYPEREPR_BOOL16X8:
- return 8;
- case JS_SIMDTYPEREPR_INT32X4:
- case JS_SIMDTYPEREPR_FLOAT32X4:
- case JS_SIMDTYPEREPR_BOOL32X4:
- return 4;
- case JS_SIMDTYPEREPR_FLOAT64X2:
- case JS_SIMDTYPEREPR_BOOL64X2:
- return 2;
- }
-
- assert(false, "Unhandled type constant");
- return undefined;
-}
-
-// This implements SIMD.*.prototype.valueOf().
-// Once we have proper value semantics for SIMD types, this function should just
-// perform a type check and return this.
-// For now, throw a TypeError unconditionally since valueOf() was probably
-// called from ToNumber() which is supposed to throw when attempting to convert
-// a SIMD value to a number.
-function SimdValueOf() {
- if (!IsObject(this) || !ObjectIsTypedObject(this))
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);
-
- var descr = TypedObjectTypeDescr(this);
-
- if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD", "valueOf", typeof this);
-
- ThrowTypeError(JSMSG_SIMD_TO_NUMBER);
-}
-
-function SimdToSource() {
- if (!IsObject(this) || !ObjectIsTypedObject(this))
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toSource", typeof this);
-
- var descr = TypedObjectTypeDescr(this);
-
- if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toSource", typeof this);
-
- return SimdFormatString(descr, this);
-}
-
-function SimdToString() {
- if (!IsObject(this) || !ObjectIsTypedObject(this))
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toString", typeof this);
-
- var descr = TypedObjectTypeDescr(this);
-
- if (DESCR_KIND(descr) != JS_TYPEREPR_SIMD_KIND)
- ThrowTypeError(JSMSG_INCOMPATIBLE_PROTO, "SIMD.*", "toString", typeof this);
-
- return SimdFormatString(descr, this);
-}
-
-function SimdFormatString(descr, typedObj) {
- var typerepr = DESCR_TYPE(descr);
- var protoString = SimdProtoString(typerepr);
- switch (typerepr) {
- case JS_SIMDTYPEREPR_INT8X16: {
- var s1 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 7);
- var s9 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 8);
- var s10 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 9);
- var s11 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 10);
- var s12 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 11);
- var s13 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 12);
- var s14 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 13);
- var s15 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 14);
- var s16 = callFunction(std_SIMD_Int8x16_extractLane, null, typedObj, 15);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
- }
- case JS_SIMDTYPEREPR_INT16X8: {
- var s1 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Int16x8_extractLane, null, typedObj, 7);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
- }
- case JS_SIMDTYPEREPR_INT32X4: {
- var x = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 1);
- var z = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 2);
- var w = callFunction(std_SIMD_Int32x4_extractLane, null, typedObj, 3);
- return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
- }
- case JS_SIMDTYPEREPR_UINT8X16: {
- var s1 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 7);
- var s9 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 8);
- var s10 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 9);
- var s11 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 10);
- var s12 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 11);
- var s13 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 12);
- var s14 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 13);
- var s15 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 14);
- var s16 = callFunction(std_SIMD_Uint8x16_extractLane, null, typedObj, 15);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
- }
- case JS_SIMDTYPEREPR_UINT16X8: {
- var s1 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Uint16x8_extractLane, null, typedObj, 7);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
- }
- case JS_SIMDTYPEREPR_UINT32X4: {
- var x = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 1);
- var z = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 2);
- var w = callFunction(std_SIMD_Uint32x4_extractLane, null, typedObj, 3);
- return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
- }
- case JS_SIMDTYPEREPR_FLOAT32X4: {
- var x = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 1);
- var z = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 2);
- var w = callFunction(std_SIMD_Float32x4_extractLane, null, typedObj, 3);
- return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
- }
- case JS_SIMDTYPEREPR_FLOAT64X2: {
- var x = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Float64x2_extractLane, null, typedObj, 1);
- return `SIMD.${protoString}(${x}, ${y})`;
- }
- case JS_SIMDTYPEREPR_BOOL8X16: {
- var s1 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 7);
- var s9 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 8);
- var s10 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 9);
- var s11 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 10);
- var s12 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 11);
- var s13 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 12);
- var s14 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 13);
- var s15 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 14);
- var s16 = callFunction(std_SIMD_Bool8x16_extractLane, null, typedObj, 15);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8}, ${s9}, ${s10}, ${s11}, ${s12}, ${s13}, ${s14}, ${s15}, ${s16})`;
- }
- case JS_SIMDTYPEREPR_BOOL16X8: {
- var s1 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 0);
- var s2 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 1);
- var s3 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 2);
- var s4 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 3);
- var s5 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 4);
- var s6 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 5);
- var s7 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 6);
- var s8 = callFunction(std_SIMD_Bool16x8_extractLane, null, typedObj, 7);
- return `SIMD.${protoString}(${s1}, ${s2}, ${s3}, ${s4}, ${s5}, ${s6}, ${s7}, ${s8})`;
- }
- case JS_SIMDTYPEREPR_BOOL32X4: {
- var x = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 1);
- var z = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 2);
- var w = callFunction(std_SIMD_Bool32x4_extractLane, null, typedObj, 3);
- return `SIMD.${protoString}(${x}, ${y}, ${z}, ${w})`;
- }
- case JS_SIMDTYPEREPR_BOOL64X2: {
- var x = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 0);
- var y = callFunction(std_SIMD_Bool64x2_extractLane, null, typedObj, 1);
- return `SIMD.${protoString}(${x}, ${y})`;
- }
- }
- assert(false, "unexpected SIMD kind");
- return '?';
-}
-
-///////////////////////////////////////////////////////////////////////////
// Miscellaneous
function DescrsEquiv(descr1, descr2) {
diff --git a/js/src/builtin/TypedObjectConstants.h b/js/src/builtin/TypedObjectConstants.h
index aa930d29bb..5900d131fe 100644
--- a/js/src/builtin/TypedObjectConstants.h
+++ b/js/src/builtin/TypedObjectConstants.h
@@ -52,7 +52,7 @@
#define JS_DESCR_SLOT_ARRAYPROTO 6 // Lazily created prototype for arrays
#define JS_DESCR_SLOT_TRACE_LIST 7 // List of references for use in tracing
-// Slots on scalars, references, and SIMD objects
+// Slots on scalars and references
#define JS_DESCR_SLOT_TYPE 8 // Type code
// Slots on array descriptors
@@ -74,7 +74,6 @@
#define JS_TYPEREPR_REFERENCE_KIND 2
#define JS_TYPEREPR_STRUCT_KIND 3
#define JS_TYPEREPR_ARRAY_KIND 4
-#define JS_TYPEREPR_SIMD_KIND 5
// These constants are for use exclusively in JS code. In C++ code,
// prefer Scalar::Int8 etc, which allows you to write a switch which will
@@ -90,10 +89,6 @@
#define JS_SCALARTYPEREPR_UINT8_CLAMPED 8
#define JS_SCALARTYPEREPR_BIGINT64 9
#define JS_SCALARTYPEREPR_BIGUINT64 10
-#define JS_SCALARTYPEREPR_FLOAT32X4 13
-#define JS_SCALARTYPEREPR_INT8X16 14
-#define JS_SCALARTYPEREPR_INT16X8 15
-#define JS_SCALARTYPEREPR_INT32X4 16
// These constants are for use exclusively in JS code. In C++ code,
// prefer ReferenceTypeRepresentation::TYPE_ANY etc, which allows
@@ -103,20 +98,4 @@
#define JS_REFERENCETYPEREPR_OBJECT 1
#define JS_REFERENCETYPEREPR_STRING 2
-// These constants are for use exclusively in JS code. In C++ code, prefer
-// SimdType::Int32x4 etc, since that allows you to write a switch which will
-// receive a warning if you omit a case.
-#define JS_SIMDTYPEREPR_INT8X16 0
-#define JS_SIMDTYPEREPR_INT16X8 1
-#define JS_SIMDTYPEREPR_INT32X4 2
-#define JS_SIMDTYPEREPR_UINT8X16 3
-#define JS_SIMDTYPEREPR_UINT16X8 4
-#define JS_SIMDTYPEREPR_UINT32X4 5
-#define JS_SIMDTYPEREPR_FLOAT32X4 6
-#define JS_SIMDTYPEREPR_FLOAT64X2 7
-#define JS_SIMDTYPEREPR_BOOL8X16 8
-#define JS_SIMDTYPEREPR_BOOL16X8 9
-#define JS_SIMDTYPEREPR_BOOL32X4 10
-#define JS_SIMDTYPEREPR_BOOL64X2 11
-
#endif
diff --git a/js/src/devtools/automation/cgc-jittest-timeouts.txt b/js/src/devtools/automation/cgc-jittest-timeouts.txt
index 84e29f8931..dcb37d660d 100644
--- a/js/src/devtools/automation/cgc-jittest-timeouts.txt
+++ b/js/src/devtools/automation/cgc-jittest-timeouts.txt
@@ -1,4 +1,3 @@
-SIMD/nursery-overflow.js
asm.js/testParallelCompile.js
auto-regress/bug653395.js
auto-regress/bug654392.js
diff --git a/js/src/doc/JITOptimizations/Outcomes.md b/js/src/doc/JITOptimizations/Outcomes.md
index b0eb9c43af..471c794174 100644
--- a/js/src/doc/JITOptimizations/Outcomes.md
+++ b/js/src/doc/JITOptimizations/Outcomes.md
@@ -152,11 +152,6 @@ Failed to do range check of element access on a typed object.
### AccessNotDense
-### AccessNotSimdObject
-
-The observed type of the target of the property access doesn't guarantee
-that it is a SIMD object.
-
### AccessNotTypedObject
The observed type of the target of the property access doesn't guarantee
@@ -218,15 +213,6 @@ the keys have never been observed to be a String, Symbol, or Int32.
IonMonkey only generates inline caches for element accesses which are
either on dense objects (e.g. dense Arrays), or Typed Arrays.
-### NoSimdJitSupport
-
-Optimization failed because SIMD JIT support was not enabled.
-
-### SimdTypeNotOptimized
-
-The type observed as being retrieved from this property access did not
-match an optimizable type.
-
### HasCommonInliningPath
Inlining was abandoned because the inlining call path was repeated. A
diff --git a/js/src/jit-test/lib/simd.js b/js/src/jit-test/lib/simd.js
deleted file mode 100644
index f275c6f081..0000000000
--- a/js/src/jit-test/lib/simd.js
+++ /dev/null
@@ -1,109 +0,0 @@
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-function booleanBinaryX4(op, v, w) {
- var arr = [];
- var [varr, warr] = [simdToArray(v), simdToArray(w)];
- for (var i = 0; i < 4; i++)
- arr[i] = op(varr[i], warr[i]);
- return arr;
-}
-
-function binaryX(op, v, w) {
- var arr = [];
- var [varr, warr] = [simdToArray(v), simdToArray(w)];
- [varr, warr] = [varr.map(Math.fround), warr.map(Math.fround)];
- for (var i = 0; i < varr.length; i++)
- arr[i] = op(varr[i], warr[i]);
- return arr.map(Math.fround);
-}
-
-function unaryX4(op, v, coerceFunc) {
- var arr = [];
- var varr = simdToArray(v).map(coerceFunc);
- for (var i = 0; i < 4; i++)
- arr[i] = op(varr[i]);
- return arr.map(coerceFunc);
-}
-
-function assertNear(a, b) {
- assertEq((a != a && b != b) || Math.abs(a - b) < 0.001, true);
-}
-
-function GetType(v) {
- var pt = Object.getPrototypeOf(v);
- switch (pt) {
- case SIMD.Int8x16.prototype: return SIMD.Int8x16;
- case SIMD.Int16x8.prototype: return SIMD.Int16x8;
- case SIMD.Int32x4.prototype: return SIMD.Int32x4;
- case SIMD.Uint8x16.prototype: return SIMD.Uint8x16;
- case SIMD.Uint16x8.prototype: return SIMD.Uint16x8;
- case SIMD.Uint32x4.prototype: return SIMD.Uint32x4;
- case SIMD.Float32x4.prototype: return SIMD.Float32x4;
- case SIMD.Bool8x16.prototype: return SIMD.Bool8x16;
- case SIMD.Bool16x8.prototype: return SIMD.Bool16x8;
- case SIMD.Bool32x4.prototype: return SIMD.Bool32x4;
- }
- throw "unexpected SIMD type";
-}
-
-function GetLength(t) {
- switch (t) {
- case SIMD.Int8x16: return 16;
- case SIMD.Int16x8: return 8;
- case SIMD.Int32x4: return 4;
- case SIMD.Uint8x16: return 16;
- case SIMD.Uint16x8: return 8;
- case SIMD.Uint32x4: return 4;
- case SIMD.Float32x4: return 4;
- case SIMD.Bool8x16: return 16;
- case SIMD.Bool16x8: return 8;
- case SIMD.Bool32x4: return 4;
- }
- throw "unexpected SIMD type";
-}
-
-function assertEqVec(v, w) {
- var typeV = GetType(v);
- var lengthV = GetLength(typeV);
- var ext = typeV.extractLane;
- assertEq(GetType(w), typeV);
- for (var i = 0; i < lengthV; i++)
- assertEq(ext(v, i), ext(w, i));
-}
-
-function assertEqVecArr(v, w) {
- var typeV = GetType(v);
- var lengthV = GetLength(typeV);
- var ext = typeV.extractLane;
- assertEq(w.length, lengthV);
-
- for (var i = 0; i < lengthV; i++)
- assertEq(ext(v, i), w[i]);
-}
-
-function assertEqX4(vec, arr, ...opts) {
-
- var assertFunc;
- if (opts.length == 1 && typeof opts[0] !== 'undefined') {
- assertFunc = opts[0];
- } else {
- assertFunc = assertEq;
- }
-
- var Type = GetType(vec);
-
- assertFunc(Type.extractLane(vec, 0), arr[0]);
- assertFunc(Type.extractLane(vec, 1), arr[1]);
- assertFunc(Type.extractLane(vec, 2), arr[2]);
- assertFunc(Type.extractLane(vec, 3), arr[3]);
-}
-
-function simdToArray(vec) {
- var Type = GetType(vec);
- var Length = GetLength(Type);
- var a = [];
- for (var i = 0; i < Length; i++)
- a.push(Type.extractLane(vec, i));
- return a;
-}
diff --git a/js/src/jit-test/tests/SIMD/binary-arith.js b/js/src/jit-test/tests/SIMD/binary-arith.js
deleted file mode 100644
index 74211d46db..0000000000
--- a/js/src/jit-test/tests/SIMD/binary-arith.js
+++ /dev/null
@@ -1,30 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var i1 = SIMD.Int32x4(1, 2, 3, 4);
- var i2 = SIMD.Int32x4(4, 3, 2, 1);
-
- var f1 = SIMD.Float32x4(1, 2, 3, 4);
- var f2 = SIMD.Float32x4(4, 3, 2, 1);
-
- var i8_1 = SIMD.Int8x16(1, 2, 3, 4, 20, 30, 40, 50, 100, 115, 120, 125);
- var i8_2 = SIMD.Int8x16(4, 3, 2, 1, 8, 7, 6, 5, 12, 11, 10, 9);
-
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Float32x4.add(f1, f2), binaryX((x, y) => x + y, f1, f2));
- assertEqX4(SIMD.Float32x4.sub(f1, f2), binaryX((x, y) => x - y, f1, f2));
- assertEqX4(SIMD.Float32x4.mul(f1, f2), binaryX((x, y) => x * y, f1, f2));
-
- assertEqX4(SIMD.Int32x4.add(i1, i2), binaryX((x, y) => x + y, i1, i2));
- assertEqX4(SIMD.Int32x4.sub(i1, i2), binaryX((x, y) => x - y, i1, i2));
- assertEqX4(SIMD.Int32x4.mul(i1, i2), binaryX((x, y) => x * y, i1, i2));
-
- assertEqX4(SIMD.Int8x16.add(i8_1, i8_2), binaryX((x, y) => (x + y) << 24 >> 24, i8_1, i8_2));
- assertEqX4(SIMD.Int8x16.sub(i8_1, i8_2), binaryX((x, y) => (x - y) << 24 >> 24, i8_1, i8_2));
- assertEqX4(SIMD.Int8x16.mul(i8_1, i8_2), binaryX((x, y) => (x * y) << 24 >> 24, i8_1, i8_2));
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/bool32x4-arith.js b/js/src/jit-test/tests/SIMD/bool32x4-arith.js
deleted file mode 100644
index bafceefa9e..0000000000
--- a/js/src/jit-test/tests/SIMD/bool32x4-arith.js
+++ /dev/null
@@ -1,15 +0,0 @@
-load(libdir + "simd.js");
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var b1 = SIMD.Bool32x4(true, false, true, false);
- var b2 = SIMD.Bool32x4(true, true, true, true);
- do {
- assertEqX4(SIMD.Bool32x4.and(b1, b2), booleanBinaryX4((x, y) => x && y, b1, b2));
- assertEqX4(SIMD.Bool32x4.or(b1, b2), booleanBinaryX4((x, y) => x || y, b1, b2));
- assertEqX4(SIMD.Bool32x4.xor(b1, b2), booleanBinaryX4((x, y) => x != y, b1, b2));
- } while (!inIon());
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/bool32x4-const.js b/js/src/jit-test/tests/SIMD/bool32x4-const.js
deleted file mode 100644
index 54bada215b..0000000000
--- a/js/src/jit-test/tests/SIMD/bool32x4-const.js
+++ /dev/null
@@ -1,65 +0,0 @@
-load(libdir + "simd.js");
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-// Test constant folding into the Bool32x4 constructor.
-// Verify that we get the truthiness right, c.f. the ECMA ToBoolean() function.
-function f1() {
- var B = SIMD.Bool32x4;
- var S = SIMD.Bool32x4.splat;
- return [
- B(false, false, false, true),
- B(true),
- B(undefined, null, "", "x"),
- B({}, 0, 1, -0.0),
- B(NaN, -NaN, Symbol(), objectEmulatingUndefined()),
-
- S(false),
- S(true),
- S(undefined),
- S(null),
-
- S(""),
- S("x"),
- S(0),
- S(1),
-
- S({}),
- S(-0.0),
- S(NaN),
- S(Symbol()),
-
- S(objectEmulatingUndefined())
- ];
-}
-
-function f() {
- for (var i = 0; i < 100; i++) {
- var a = f1()
- assertEqX4(a[0], [false, false, false, true]);
- assertEqX4(a[1], [true, false, false, false]);
- assertEqX4(a[2], [false, false, false, true]);
- assertEqX4(a[3], [true, false, true, false]);
- assertEqX4(a[4], [false, false, true, false]);
-
- // Splats.
- assertEqX4(a[5], [false, false, false, false]);
- assertEqX4(a[6], [true, true, true, true]);
- assertEqX4(a[7], [false, false, false, false]);
- assertEqX4(a[8], [false, false, false, false]);
-
- assertEqX4(a[9], [false, false, false, false]);
- assertEqX4(a[10], [true, true, true, true]);
- assertEqX4(a[11], [false, false, false, false]);
- assertEqX4(a[12], [true, true, true, true]);
-
- assertEqX4(a[13], [true, true, true, true]);
- assertEqX4(a[14], [false, false, false, false]);
- assertEqX4(a[15], [false, false, false, false]);
- assertEqX4(a[16], [true, true, true, true]);
-
- assertEqX4(a[17], [false, false, false, false]);
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/bug1109911.js b/js/src/jit-test/tests/SIMD/bug1109911.js
deleted file mode 100644
index 23a5c5721d..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1109911.js
+++ /dev/null
@@ -1,11 +0,0 @@
-if (typeof TypedObject === "undefined" || typeof SIMD === 'undefined')
- quit();
-
-var Int32x4 = SIMD.Int32x4;
-var a = Int32x4((4294967295), 200, 300, 400);
-addCase( new Array(Math.pow(2,12)) );
-for ( var arg = "", i = 0; i < Math.pow(2,12); i++ ) {}
-addCase( a );
-function addCase(object) {
- object.length
-}
diff --git a/js/src/jit-test/tests/SIMD/bug1121299.js b/js/src/jit-test/tests/SIMD/bug1121299.js
deleted file mode 100644
index 17ca46e2ec..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1121299.js
+++ /dev/null
@@ -1,31 +0,0 @@
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-setJitCompilerOption("baseline.warmup.trigger", 10);
-setJitCompilerOption("ion.warmup.trigger", 30);
-
-function test_1(i) {
- if (i >= 40)
- return;
- var a = SIMD.Float32x4(1.1, 2.2, 3.3, 4.6);
- SIMD.Int32x4.fromFloat32x4(a);
- test_1(i + 1);
-}
-test_1(0);
-
-
-var Float32x4 = SIMD.Float32x4;
-function test_2() {
- var Array = Float32x4.array(3);
- var array = new Array([
- Float32x4(1, 2, 3, 4),
- Float32x4(5, 6, 7, 8),
- Float32x4(9, 10, 11, 12)
- ]);
- if (typeof reportCompare === "function")
- reportCompare(true, true);
-}
-test_2();
-evaluate("test_2(); test_2();", {
- isRunOnce: true,
-});
diff --git a/js/src/jit-test/tests/SIMD/bug1123631.js b/js/src/jit-test/tests/SIMD/bug1123631.js
deleted file mode 100644
index 28c0e0aa15..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1123631.js
+++ /dev/null
@@ -1,9 +0,0 @@
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-var Float64x2 = SIMD.Float64x2;
-function test() {
- var a = Float64x2(1, 2);
-}
-test();
-test();
diff --git a/js/src/jit-test/tests/SIMD/bug1130845.js b/js/src/jit-test/tests/SIMD/bug1130845.js
deleted file mode 100644
index 2baf3865d2..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1130845.js
+++ /dev/null
@@ -1,15 +0,0 @@
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-var Int32x4 = SIMD.Int32x4;
-function test() {
- var a = Int32x4();
- var b = Int32x4(10, 20, 30, 40);
- var c = SIMD.Int32x4.and(a, b);
- assertEq(Int32x4.extractLane(c, 0), 0);
- return 0;
-}
-test();
-var u = [], v = [];
-for (var j=0; j<u.length; ++j)
- v[test()] = t;
diff --git a/js/src/jit-test/tests/SIMD/bug1241872.js b/js/src/jit-test/tests/SIMD/bug1241872.js
deleted file mode 100644
index c36ebcc6b8..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1241872.js
+++ /dev/null
@@ -1,10 +0,0 @@
-if (typeof SIMD !== 'object')
- quit(0);
-
-function test() {
- return SIMD.Float32x4().toSource();
-}
-
-var r = '';
-for (var i = 0; i < 10000; i++)
- r = test();
diff --git a/js/src/jit-test/tests/SIMD/bug1248503.js b/js/src/jit-test/tests/SIMD/bug1248503.js
deleted file mode 100644
index e121cea1d1..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1248503.js
+++ /dev/null
@@ -1,16 +0,0 @@
-if (typeof SIMD !== 'object')
- quit(0);
-
-function assertEqVec(v, w) {
- [0].forEach(i => v, w);
- function assertEqX4(...opts) {}
-}
-gczeal(1);
-function f() {
- SIMD.Float32x4();
- var i1 = SIMD.Int32x4();
- for (j = 0; j < 100000; ++j, eval.eval)
- assertEqVec(SIMD.Int32x4.check(i1), i1);
-}
-f();
-
diff --git a/js/src/jit-test/tests/SIMD/bug1273483.js b/js/src/jit-test/tests/SIMD/bug1273483.js
deleted file mode 100644
index 3c9386b4f4..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1273483.js
+++ /dev/null
@@ -1,9 +0,0 @@
-if (typeof SIMD === 'undefined')
- quit();
-
-Int8x16 = SIMD.Int8x16;
-var Int32x4 = SIMD.Int32x4;
-function testSwizzleForType(type) type();
-testSwizzleForType(Int8x16);
-function testSwizzleInt32x4() testSwizzleForType(Int32x4);
-testSwizzleInt32x4();
diff --git a/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js b/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js
deleted file mode 100644
index 4dbe954106..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1296640-gc-args.js
+++ /dev/null
@@ -1,9 +0,0 @@
-if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
- quit();
-}
-
-gczeal(9, 2);
-var Int8x16 = SIMD.Int8x16;
-var v = Int8x16();
-var good = { valueOf: () => 21 };
-Int8x16.shiftLeftByScalar(v, good);
diff --git a/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js b/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js
deleted file mode 100644
index a894d532ef..0000000000
--- a/js/src/jit-test/tests/SIMD/bug1303780-gc-args.js
+++ /dev/null
@@ -1,12 +0,0 @@
-if (typeof gczeal === 'undefined' || typeof SIMD === 'undefined') {
- quit();
-}
-
-gczeal(14,2);
-var Float32x4 = SIMD.Float32x4;
-function test() {
- var v = Float32x4(1,2,3,4);
- var good = {valueOf: () => 42};
- Float32x4.replaceLane(v, 0, good);
-}
-test();
diff --git a/js/src/jit-test/tests/SIMD/bug953108.js b/js/src/jit-test/tests/SIMD/bug953108.js
deleted file mode 100644
index a8ae80e170..0000000000
--- a/js/src/jit-test/tests/SIMD/bug953108.js
+++ /dev/null
@@ -1,10 +0,0 @@
-/*
- * Any copyright is dedicated to the Public Domain.
- * http://creativecommons.org/licenses/publicdomain/
- */
-
-if (!this.hasOwnProperty("TypedObject") || !this.hasOwnProperty("SIMD"))
- quit();
-
-var Float32x4 = SIMD.Float32x4;
-Float32x4.array(1);
diff --git a/js/src/jit-test/tests/SIMD/check.js b/js/src/jit-test/tests/SIMD/check.js
deleted file mode 100644
index bef0b6c688..0000000000
--- a/js/src/jit-test/tests/SIMD/check.js
+++ /dev/null
@@ -1,25 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var f1 = SIMD.Float32x4(1, 2, 3, 4);
- var i1 = SIMD.Int32x4(1, 2, -3, 4);
- var b1 = SIMD.Bool32x4(true, true, false, true);
- var i = 0;
- try {
- for (; i < 150; i++) {
- if (i > 148)
- i1 = f1;
- assertEqVec(SIMD.Int32x4.check(i1), i1);
- assertEqVec(SIMD.Float32x4.check(f1), f1);
- assertEqVec(SIMD.Bool32x4.check(b1), b1);
- }
- } catch (ex) {
- assertEq(i, 149);
- assertEq(ex instanceof TypeError, true);
- }
-}
-
-f();
-
diff --git a/js/src/jit-test/tests/SIMD/compare.js b/js/src/jit-test/tests/SIMD/compare.js
deleted file mode 100644
index 21dca20cd8..0000000000
--- a/js/src/jit-test/tests/SIMD/compare.js
+++ /dev/null
@@ -1,39 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var f1 = SIMD.Float32x4(1, 2, 3, 4);
- var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);
-
- var i1 = SIMD.Int32x4(1, 2, -3, 4);
- var i2 = SIMD.Int32x4(1, -2, 3, 0);
-
- var u1 = SIMD.Uint32x4(1, 2, -3, 4);
- var u2 = SIMD.Uint32x4(1, -2, 3, 0x80000000);
-
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Int32x4.lessThan(i1, i2), [false, false, true, false]);
- assertEqX4(SIMD.Int32x4.lessThanOrEqual(i1, i2), [true, false, true, false]);
- assertEqX4(SIMD.Int32x4.equal(i1, i2), [true, false, false, false]);
- assertEqX4(SIMD.Int32x4.notEqual(i1, i2), [false, true, true, true]);
- assertEqX4(SIMD.Int32x4.greaterThan(i1, i2), [false, true, false, true]);
- assertEqX4(SIMD.Int32x4.greaterThanOrEqual(i1, i2), [true, true, false, true]);
-
- assertEqX4(SIMD.Uint32x4.lessThan(u1, u2), [false, true, false, true]);
- assertEqX4(SIMD.Uint32x4.lessThanOrEqual(u1, u2), [true, true, false, true]);
- assertEqX4(SIMD.Uint32x4.equal(u1, u2), [true, false, false, false]);
- assertEqX4(SIMD.Uint32x4.notEqual(u1, u2), [false, true, true, true]);
- assertEqX4(SIMD.Uint32x4.greaterThan(u1, u2), [false, false, true, false]);
- assertEqX4(SIMD.Uint32x4.greaterThanOrEqual(u1, u2), [true, false, true, false]);
-
- assertEqX4(SIMD.Float32x4.lessThan(f1, f2), [false, true, true, false]);
- assertEqX4(SIMD.Float32x4.lessThanOrEqual(f1, f2), [false, true, true, false]);
- assertEqX4(SIMD.Float32x4.equal(f1, f2), [false, false, false, false]);
- assertEqX4(SIMD.Float32x4.notEqual(f1, f2), [true, true, true, true]);
- assertEqX4(SIMD.Float32x4.greaterThan(f1, f2), [false, false, false, true]);
- assertEqX4(SIMD.Float32x4.greaterThanOrEqual(f1, f2), [false, false, false, true]);
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/complex-4.js b/js/src/jit-test/tests/SIMD/complex-4.js
deleted file mode 100644
index ca5e8b0f67..0000000000
--- a/js/src/jit-test/tests/SIMD/complex-4.js
+++ /dev/null
@@ -1,70 +0,0 @@
-load(libdir + 'simd.js');
-
-if (typeof SIMD === "undefined")
- quit();
-
-setJitCompilerOption("baseline.warmup.trigger", 10);
-setJitCompilerOption("ion.warmup.trigger", 90);
-var max = 100; // Make have the warm-up counter high enough to
- // consider inlining functions.
-
-var f4 = SIMD.Int32x4; // :TODO: Support Float32x4 arith.
-var f4add = f4.add;
-var f4sub = f4.sub;
-var f4mul = f4.mul;
-
-function c4mul(z1, z2) {
- var { re: re1, im: im1 } = z1;
- var { re: re2, im: im2 } = z2;
- var rere = f4mul(re1, re2);
- var reim = f4mul(re1, im2);
- var imre = f4mul(im1, re2);
- var imim = f4mul(im1, im2);
- return { re: f4sub(rere, imim), im: f4add(reim, imre) };
-}
-
-function c4inv(z) {
- var { re: re, im: im } = z;
- var minus = f4(-1, -1, -1, -1);
- return { re: re, im: f4mul(im, minus) };
-}
-
-function c4inv_inplace(z) {
- var res = c4inv(z);
- z.re = res.re;
- z.im = res.im;
-}
-
-function c4norm(z) {
- var { re: re, im: im } = c4mul(z, c4inv(z));
- return re;
-}
-
-function c4scale(z, s) {
- var { re: re, im: im } = z;
- var f4s = f4(s, s, s, s);
- return { re: f4mul(re, f4s), im: f4mul(im, f4s) };
-}
-
-var rotate90 = { re: f4(0, 0, 0, 0), im: f4(1, 1, 1, 1) };
-var cardinals = { re: f4(1, 0, -1, 0), im: f4(0, 1, 0, -1) };
-
-function test(dots) {
- for (var j = 0; j < 4; j++) {
- dots = c4mul(rotate90, dots);
- if (j % 2 == 0) // Magic !
- c4inv_inplace(dots);
- dots = c4scale(dots, 2);
- }
- return dots;
-}
-
-assertEqX4(c4norm(cardinals), simdToArray(f4.splat(1)));
-var cardinals16 = c4scale(cardinals, 16);
-
-for (var i = 0; i < max; i++) {
- var res = test(cardinals);
- assertEqX4(c4norm(res), simdToArray(f4.splat(16 * 16)));
- assertEqX4(res.re, simdToArray(cardinals16.re));
- assertEqX4(res.im, simdToArray(cardinals16.im));
-}
diff --git a/js/src/jit-test/tests/SIMD/convert.js b/js/src/jit-test/tests/SIMD/convert.js
deleted file mode 100644
index b27a041e7f..0000000000
--- a/js/src/jit-test/tests/SIMD/convert.js
+++ /dev/null
@@ -1,68 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 30);
-
-var cast = (function() {
- var i32 = new Int32Array(1);
- var f32 = new Float32Array(i32.buffer);
- return {
- fromInt32Bits(x) {
- i32[0] = x;
- return f32[0];
- },
-
- fromFloat32Bits(x) {
- f32[0] = x;
- return i32[0];
- }
- }
-})();
-
-function f() {
- // No bailout here.
- var f4 = SIMD.Float32x4(1, 2, 3, 4);
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- var BitOrZero = (x) => x | 0;
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Float32x4.fromInt32x4(i4), unaryX4(BitOrZero, f4, Math.fround));
- assertEqX4(SIMD.Float32x4.fromInt32x4Bits(i4), unaryX4(cast.fromInt32Bits, f4, Math.fround));
- assertEqX4(SIMD.Int32x4.fromFloat32x4(f4), unaryX4(Math.fround, i4, BitOrZero));
- assertEqX4(SIMD.Int32x4.fromFloat32x4Bits(f4), unaryX4(cast.fromFloat32Bits, i4, BitOrZero));
- }
-}
-
-function uglyDuckling(val) {
- // We bail out when i == 149 because the conversion will return
- // 0x80000000 and the input actually wasn't in bounds.
- val = Math.fround(val);
- for (var i = 0; i < 150; i++) {
- var caught = false;
- try {
- var v = SIMD.Float32x4(i < 149 ? 0 : val, 0, 0, 0)
- SIMD.Int32x4.fromFloat32x4(v);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-}
-
-function dontBail() {
- // On x86, the conversion will return 0x80000000, which will imply that we
- // check the input values. However, we shouldn't bail out in this case.
- for (var i = 0; i < 150; i++) {
- var v = SIMD.Float32x4(i < 149 ? 0 : -Math.pow(2, 31), 0, 0, 0)
- SIMD.Int32x4.fromFloat32x4(v);
- }
-}
-
-f();
-
-dontBail();
-dontBail();
-
-uglyDuckling(Math.pow(2, 31));
-uglyDuckling(NaN);
-uglyDuckling(-Math.pow(2, 32));
diff --git a/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js b/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js
deleted file mode 100644
index 63e9215d9f..0000000000
--- a/js/src/jit-test/tests/SIMD/float32x4-binary-arith.js
+++ /dev/null
@@ -1,33 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function maxNum(x, y) {
- if (x != x)
- return y;
- if (y != y)
- return x;
- return Math.max(x, y);
-}
-
-function minNum(x, y) {
- if (x != x)
- return y;
- if (y != y)
- return x;
- return Math.min(x, y);
-}
-
-function f() {
- var f1 = SIMD.Float32x4(1, 2, 3, 4);
- var f2 = SIMD.Float32x4(4, 3, 2, 1);
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Float32x4.div(f1, f2), binaryX((x, y) => x / y, f1, f2));
- assertEqX4(SIMD.Float32x4.min(f1, f2), binaryX(Math.min, f1, f2));
- assertEqX4(SIMD.Float32x4.max(f1, f2), binaryX(Math.max, f1, f2));
- assertEqX4(SIMD.Float32x4.minNum(f1, f2), binaryX(minNum, f1, f2));
- assertEqX4(SIMD.Float32x4.maxNum(f1, f2), binaryX(maxNum, f1, f2));
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/getters.js b/js/src/jit-test/tests/SIMD/getters.js
deleted file mode 100644
index 5a895bbe2f..0000000000
--- a/js/src/jit-test/tests/SIMD/getters.js
+++ /dev/null
@@ -1,48 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var i4 = SIMD.Int32x4(1, -2, 3, -4);
- var u4 = SIMD.Uint32x4(1, -2, 3, 0x88000000);
- var b4 = SIMD.Bool32x4(true, true, false, true);
-
-
- var bt4 = SIMD.Bool32x4(true, true, true, true);
- var bf4 = SIMD.Bool32x4(false, false, false, false);
-
- var v = Math.fround(13.37);
- var f4 = SIMD.Float32x4(13.37, NaN, Infinity, -0);
-
- for (var i = 0; i < 150; i++) {
- assertEq(SIMD.Int32x4.extractLane(i4, 0), 1);
- assertEq(SIMD.Int32x4.extractLane(i4, 1), -2);
- assertEq(SIMD.Int32x4.extractLane(i4, 2), 3);
- assertEq(SIMD.Int32x4.extractLane(i4, 3), -4);
-
- assertEq(SIMD.Uint32x4.extractLane(u4, 0), 1);
- assertEq(SIMD.Uint32x4.extractLane(u4, 1), -2 >>> 0);
- assertEq(SIMD.Uint32x4.extractLane(u4, 2), 3);
- assertEq(SIMD.Uint32x4.extractLane(u4, 3), 0x88000000);
-
- assertEq(SIMD.Float32x4.extractLane(f4, 0), v);
- assertEq(SIMD.Float32x4.extractLane(f4, 1), NaN);
- assertEq(SIMD.Float32x4.extractLane(f4, 2), Infinity);
- assertEq(SIMD.Float32x4.extractLane(f4, 3), -0);
-
- assertEq(SIMD.Bool32x4.extractLane(b4, 0), true);
- assertEq(SIMD.Bool32x4.extractLane(b4, 1), true);
- assertEq(SIMD.Bool32x4.extractLane(b4, 2), false);
- assertEq(SIMD.Bool32x4.extractLane(b4, 3), true);
-
- assertEq(SIMD.Bool32x4.anyTrue(b4), true);
- assertEq(SIMD.Bool32x4.allTrue(b4), false);
-
- assertEq(SIMD.Bool32x4.anyTrue(bt4), true);
- assertEq(SIMD.Bool32x4.allTrue(bt4), true);
- assertEq(SIMD.Bool32x4.anyTrue(bf4), false);
- assertEq(SIMD.Bool32x4.allTrue(bf4), false);
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/inline-missing-arguments.js b/js/src/jit-test/tests/SIMD/inline-missing-arguments.js
deleted file mode 100644
index 5ef91d072a..0000000000
--- a/js/src/jit-test/tests/SIMD/inline-missing-arguments.js
+++ /dev/null
@@ -1,81 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function test(i) {
- assertEqX4(SIMD.Int32x4(), [0, 0, 0, 0]);
- assertEqX4(SIMD.Int32x4(i), [i, 0, 0, 0]);
- assertEqX4(SIMD.Int32x4(i, 1), [i, 1, 0, 0]);
- assertEqX4(SIMD.Int32x4(i, 1, 2), [i, 1, 2, 0]);
- assertEqX4(SIMD.Int32x4(i, 1, 2, 3), [i, 1, 2, 3]);
- assertEqX4(SIMD.Int32x4(i, 1, 2, 3, 4), [i, 1, 2, 3]);
-
- assertEqVecArr(SIMD.Int16x8(), [0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i), [i, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i, 1), [i, 1, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i, 1, 2), [i, 1, 2, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3), [i, 1, 2, 3, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4), [i, 1, 2, 3, 4, 0, 0, 0]);
- assertEqVecArr(SIMD.Int16x8(i, 1, 2, 3, 4, 5, 6),
- [i, 1, 2, 3, 4, 5, 6, 0]);
- j = i & 32
- assertEqVecArr(SIMD.Int8x16(), [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j), [j, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1), [j, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1, 2), [j, 1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3), [j, 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4), [j, 1, 2, 3, 4, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6),
- [j, 1, 2, 3, 4, 5, 6, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
- assertEqVecArr(SIMD.Int8x16(j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12),
- [j, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 0, 0, 0]);
-
- assertEqX4(SIMD.Float32x4(), [NaN, NaN, NaN, NaN]);
- assertEqX4(SIMD.Float32x4(i), [i, NaN, NaN, NaN]);
- assertEqX4(SIMD.Float32x4(i, 1), [i, 1, NaN, NaN]);
- assertEqX4(SIMD.Float32x4(i, 1, 2), [i, 1, 2, NaN]);
- assertEqX4(SIMD.Float32x4(i, 1, 2, 3), [i, 1, 2, 3 ]);
- assertEqX4(SIMD.Float32x4(i, 1, 2, 3, 4), [i, 1, 2, 3 ]);
-
- var b = i % 2 > 0 ;
- assertEqX4(SIMD.Bool32x4(), [false, false, false, false]);
- assertEqX4(SIMD.Bool32x4(b), [b, false, false, false]);
- assertEqX4(SIMD.Bool32x4(b, true), [b, true, false, false]);
- assertEqX4(SIMD.Bool32x4(b, false, true), [b, false, true, false]);
- assertEqX4(SIMD.Bool32x4(b, false, true, true), [b, false, true, true ]);
- assertEqX4(SIMD.Bool32x4(b, false, true, true, true), [b, false, true, true ]);
-
- assertEqVecArr(SIMD.Bool16x8(),
- [false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b),
- [b, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b, true),
- [b, true, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b, false, true),
- [b, false, true, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b, false, true, true),
- [b, false, true, true, false, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b, false, true, true, true),
- [b, false, true, true, true, false, false, false]);
- assertEqVecArr(SIMD.Bool16x8(b, false, true, true, true, true),
- [b, false, true, true, true, true, false, false]);
-
- assertEqVecArr(SIMD.Bool8x16(),
- [false, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b),
- [b, false, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b, true),
- [b, true, false, false, false, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b, false, true),
- [b, false, true, false, false, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b, false, true, true),
- [b, false, true, true, false, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b, false, true, true, true),
- [b, false, true, true, true, false, false, false, false, false, false, false, false, false, false, false]);
- assertEqVecArr(SIMD.Bool8x16(b, false, true, true, true, true, false, true, true, true),
- [b, false, true, true, true, true, false, true, true, true, false, false, false, false, false, false]);
-}
-
-for(var i=0; i<300; i++) {
- test(i);
-}
diff --git a/js/src/jit-test/tests/SIMD/load.js b/js/src/jit-test/tests/SIMD/load.js
deleted file mode 100644
index 5cdb8cce63..0000000000
--- a/js/src/jit-test/tests/SIMD/load.js
+++ /dev/null
@@ -1,123 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 40);
-
-function f() {
- var f32 = new Float32Array(16);
- for (var i = 0; i < 16; i++)
- f32[i] = i + 1;
-
- var f64 = new Float64Array(f32.buffer);
- var i32 = new Int32Array(f32.buffer);
- var u32 = new Uint32Array(f32.buffer);
- var i16 = new Int16Array(f32.buffer);
- var u16 = new Uint16Array(f32.buffer);
- var i8 = new Int8Array(f32.buffer);
- var u8 = new Uint8Array(f32.buffer);
-
- function testLoad() {
- assertEqX4(SIMD.Float32x4.load(f64, 0), [1,2,3,4]);
- assertEqX4(SIMD.Float32x4.load(f32, 1), [2,3,4,5]);
- assertEqX4(SIMD.Float32x4.load(i32, 2), [3,4,5,6]);
- assertEqX4(SIMD.Float32x4.load(i16, 3 << 1), [4,5,6,7]);
- assertEqX4(SIMD.Float32x4.load(u16, 4 << 1), [5,6,7,8]);
- assertEqX4(SIMD.Float32x4.load(i8 , 5 << 2), [6,7,8,9]);
- assertEqX4(SIMD.Float32x4.load(u8 , 6 << 2), [7,8,9,10]);
-
- assertEqX4(SIMD.Float32x4.load(f64, (16 >> 1) - (4 >> 1)), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(f32, 16 - 4), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(i32, 16 - 4), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(i16, (16 << 1) - (4 << 1)), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(u16, (16 << 1) - (4 << 1)), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(i8, (16 << 2) - (4 << 2)), [13,14,15,16]);
- assertEqX4(SIMD.Float32x4.load(u8, (16 << 2) - (4 << 2)), [13,14,15,16]);
- }
-
- function testLoad1() {
- assertEqX4(SIMD.Float32x4.load1(f64, 0), [1,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(f32, 1), [2,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i32, 2), [3,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i16, 3 << 1), [4,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(u16, 4 << 1), [5,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i8 , 5 << 2), [6,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(u8 , 6 << 2), [7,0,0,0]);
-
- assertEqX4(SIMD.Float32x4.load1(f64, (16 >> 1) - (4 >> 1)), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(f32, 16 - 4), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i32, 16 - 4), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i16, (16 << 1) - (4 << 1)), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(u16, (16 << 1) - (4 << 1)), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(i8, (16 << 2) - (4 << 2)), [13,0,0,0]);
- assertEqX4(SIMD.Float32x4.load1(u8, (16 << 2) - (4 << 2)), [13,0,0,0]);
- }
-
- function testLoad2() {
- assertEqX4(SIMD.Float32x4.load2(f64, 0), [1,2,0,0]);
- assertEqX4(SIMD.Float32x4.load2(f32, 1), [2,3,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i32, 2), [3,4,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i16, 3 << 1), [4,5,0,0]);
- assertEqX4(SIMD.Float32x4.load2(u16, 4 << 1), [5,6,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i8 , 5 << 2), [6,7,0,0]);
- assertEqX4(SIMD.Float32x4.load2(u8 , 6 << 2), [7,8,0,0]);
-
- assertEqX4(SIMD.Float32x4.load2(f64, (16 >> 1) - (4 >> 1)), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(f32, 16 - 4), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i32, 16 - 4), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i16, (16 << 1) - (4 << 1)), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(u16, (16 << 1) - (4 << 1)), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(i8, (16 << 2) - (4 << 2)), [13,14,0,0]);
- assertEqX4(SIMD.Float32x4.load2(u8, (16 << 2) - (4 << 2)), [13,14,0,0]);
- }
-
- function testLoad3() {
- assertEqX4(SIMD.Float32x4.load3(f64, 0), [1,2,3,0]);
- assertEqX4(SIMD.Float32x4.load3(f32, 1), [2,3,4,0]);
- assertEqX4(SIMD.Float32x4.load3(i32, 2), [3,4,5,0]);
- assertEqX4(SIMD.Float32x4.load3(i16, 3 << 1), [4,5,6,0]);
- assertEqX4(SIMD.Float32x4.load3(u16, 4 << 1), [5,6,7,0]);
- assertEqX4(SIMD.Float32x4.load3(i8 , 5 << 2), [6,7,8,0]);
- assertEqX4(SIMD.Float32x4.load3(u8 , 6 << 2), [7,8,9,0]);
-
- assertEqX4(SIMD.Float32x4.load3(f64, (16 >> 1) - (4 >> 1)), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(f32, 16 - 4), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(i32, 16 - 4), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(i16, (16 << 1) - (4 << 1)), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(u16, (16 << 1) - (4 << 1)), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(i8, (16 << 2) - (4 << 2)), [13,14,15,0]);
- assertEqX4(SIMD.Float32x4.load3(u8, (16 << 2) - (4 << 2)), [13,14,15,0]);
- }
-
- for (var i = 0; i < 150; i++) {
- testLoad();
- testLoad1();
- testLoad2();
- testLoad3();
- }
-}
-
-f();
-
-function testBailout(uglyDuckling) {
- var f32 = new Float32Array(16);
- for (var i = 0; i < 16; i++)
- f32[i] = i + 1;
-
- var i8 = new Int8Array(f32.buffer);
-
- for (var i = 0; i < 150; i++) {
- var caught = false;
- try {
- SIMD.Float32x4.load(i8, (i < 149) ? 0 : uglyDuckling);
- } catch (e) {
- print(e);
- assertEq(e instanceof RangeError, true);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-}
-
-print('Testing range checks...');
-testBailout(-1);
-testBailout(-15);
-testBailout(12 * 4 + 1);
diff --git a/js/src/jit-test/tests/SIMD/nursery-overflow.js b/js/src/jit-test/tests/SIMD/nursery-overflow.js
deleted file mode 100644
index 5aecff908b..0000000000
--- a/js/src/jit-test/tests/SIMD/nursery-overflow.js
+++ /dev/null
@@ -1,29 +0,0 @@
-load(libdir + 'simd.js');
-
-if (typeof SIMD === "undefined")
- quit();
-
-setJitCompilerOption("baseline.warmup.trigger", 10);
-setJitCompilerOption("ion.warmup.trigger", 30);
-
-var i4 = SIMD.Int32x4;
-var i4sub = SIMD.Int32x4.sub;
-
-function simdbox(i) {
- return i4(i, i, i, i);
-}
-
-function test() {
- var arr = [];
-
- // overflow the nursery with live SIMD objects.
- for (var i = 0; i < 100000; i++) {
- arr.push(simdbox(i));
- }
-
- return arr;
-}
-
-var arr = test();
-for (var i = 0; i < arr.length; i++)
- assertEqX4(arr[i], [i, i, i, i]);
diff --git a/js/src/jit-test/tests/SIMD/recover.js b/js/src/jit-test/tests/SIMD/recover.js
deleted file mode 100644
index a8fb0002ed..0000000000
--- a/js/src/jit-test/tests/SIMD/recover.js
+++ /dev/null
@@ -1,70 +0,0 @@
-load(libdir + 'simd.js');
-
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-// This test case ensure that if we are able to optimize SIMD, then we can use
-// recover instructions to get rid of the allocations. So, there is no value
-// (and the test case would fail) if we are not able to inline SIMD
-// constructors.
-if (!isSimdAvailable())
- quit();
-
-setJitCompilerOption("baseline.warmup.trigger", 10);
-setJitCompilerOption("ion.warmup.trigger", 20);
-
-// This function is used to cause an invalidation after having removed a branch
-// after DCE. This is made to check if we correctly recover an array
-// allocation.
-var uceFault = function (i) {
- if (i > 98)
- uceFault = function (i) { return true; };
- return false;
-};
-
-// Check that we can correctly recover a boxed value.
-var uceFault_simdBox_i4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_i4'));
-function simdBox_i4(i) {
- var a = SIMD.Int32x4(i, i, i, i);
- if (uceFault_simdBox_i4(i) || uceFault_simdBox_i4(i))
- assertEqX4(a, [i, i, i, i]);
- assertRecoveredOnBailout(a, true);
- return 0;
-}
-
-var uceFault_simdBox_u4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_u4'));
-function simdBox_u4(i) {
- var a = SIMD.Uint32x4(i, 98 - i, i + 0x7ffffff0, i + 0xffffff00);
- if (uceFault_simdBox_u4(i) || uceFault_simdBox_u4(i))
- assertEqX4(a, [i, 98 - i, i + 0x7ffffff0, i + 0xffffff00].map(x => x >>> 0));
- assertRecoveredOnBailout(a, true);
- return 0;
-}
-
-var uceFault_simdBox_f4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_f4'));
-function simdBox_f4(i) {
- var a = SIMD.Float32x4(i, i + 0.1, i + 0.2, i + 0.3);
- if (uceFault_simdBox_f4(i) || uceFault_simdBox_f4(i))
- assertEqX4(a, [i, i + 0.1, i + 0.2, i + 0.3].map(Math.fround));
- assertRecoveredOnBailout(a, true);
- return 0;
-}
-
-var uceFault_simdBox_b4 = eval(uneval(uceFault).replace('uceFault', 'uceFault_simdBox_b4'));
-function simdBox_b4(i) {
- var val1 = i%2 === 0,
- val2 = !val1;
-
- var a = SIMD.Bool32x4(val1, val2, val1, val2);
- if (uceFault_simdBox_b4(i) || uceFault_simdBox_b4(i))
- assertEqX4(a, [val1, val2, val1, val2]);
- assertRecoveredOnBailout(a, true);
- return 0;
-}
-
-for (var i = 0; i < 100; i++) {
- simdBox_i4(i);
- simdBox_u4(i);
- simdBox_f4(i);
- simdBox_b4(i);
-}
diff --git a/js/src/jit-test/tests/SIMD/replacelane.js b/js/src/jit-test/tests/SIMD/replacelane.js
deleted file mode 100644
index c6b37ad5ab..0000000000
--- a/js/src/jit-test/tests/SIMD/replacelane.js
+++ /dev/null
@@ -1,181 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var f4 = SIMD.Float32x4(1, 2, 3, 4);
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- var b4 = SIMD.Bool32x4(true, false, true, false);
-
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Int32x4.replaceLane(i4, 0, 42), [42, 2, 3, 4]);
- assertEqX4(SIMD.Int32x4.replaceLane(i4, 1, 42), [1, 42, 3, 4]);
- assertEqX4(SIMD.Int32x4.replaceLane(i4, 2, 42), [1, 2, 42, 4]);
- assertEqX4(SIMD.Int32x4.replaceLane(i4, 3, 42), [1, 2, 3, 42]);
-
- assertEqX4(SIMD.Float32x4.replaceLane(f4, 0, 42), [42, 2, 3, 4]);
- assertEqX4(SIMD.Float32x4.replaceLane(f4, 1, 42), [1, 42, 3, 4]);
- assertEqX4(SIMD.Float32x4.replaceLane(f4, 2, 42), [1, 2, 42, 4]);
- assertEqX4(SIMD.Float32x4.replaceLane(f4, 3, 42), [1, 2, 3, 42]);
-
- assertEqX4(SIMD.Bool32x4.replaceLane(b4, 0, false), [false, false, true, false]);
- assertEqX4(SIMD.Bool32x4.replaceLane(b4, 1, true), [true, true, true, false]);
- assertEqX4(SIMD.Bool32x4.replaceLane(b4, 2, false), [true, false, false, false]);
- assertEqX4(SIMD.Bool32x4.replaceLane(b4, 3, true), [true, false, true, true]);
- }
-}
-
-f();
-
-function e() {
- var f4 = SIMD.Float32x4(1, 2, 3, 4);
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- var b4 = SIMD.Bool32x4(true, false, true, false);
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : f4, 0, 42);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Int32x4.replaceLane(i < 149 ? i4 : b4, 0, 42);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 4, 42);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Int32x4.replaceLane(i4, i < 149 ? 0 : 1.1, 42);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : i4, 0, 42);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Float32x4.replaceLane(i < 149 ? f4 : b4, 0, 42);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 4, 42);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Float32x4.replaceLane(f4, i < 149 ? 0 : 1.1, 42);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : i4, 0, true);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Bool32x4.replaceLane(i < 149 ? b4 : f4, 0, true);
- } catch(e) {
- assertEq(e instanceof TypeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 4, true);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
- for (let i = 0; i < 150; i++) {
- let caught = false;
- try {
- let x = SIMD.Bool32x4.replaceLane(b4, i < 149 ? 0 : 1.1, true);
- } catch(e) {
- assertEq(e instanceof RangeError, true);
- assertEq(i, 149);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-
-}
-
-e();
diff --git a/js/src/jit-test/tests/SIMD/saturate.js b/js/src/jit-test/tests/SIMD/saturate.js
deleted file mode 100644
index a98cf7ad79..0000000000
--- a/js/src/jit-test/tests/SIMD/saturate.js
+++ /dev/null
@@ -1,37 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-const INT8_MIN = -128;
-const INT8_MAX = 127;
-const UINT8_MAX = 255;
-
-function sat8(x) {
- if (x < INT8_MIN) return INT8_MIN;
- if (x > INT8_MAX) return INT8_MAX;
- return x;
-}
-
-function usat8(x) {
- if (x < 0) return 0;
- if (x > UINT8_MAX) return UINT8_MAX;
- return x;
-}
-
-function f() {
- var i1 = SIMD.Int8x16(1, 100, 3, 4);
- var i2 = SIMD.Int8x16(4, 30, 2, 1);
-
- var u1 = SIMD.Uint8x16(1, 2, 3, 4);
- var u2 = SIMD.Uint8x16(4, 3, 2, 1);
-
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Int8x16.addSaturate(i1, i2), binaryX((x, y) => sat8(x + y), i1, i2));
- assertEqX4(SIMD.Int8x16.subSaturate(i1, i2), binaryX((x, y) => sat8(x - y), i1, i2));
-
- assertEqX4(SIMD.Uint8x16.addSaturate(u1, u2), binaryX((x, y) => usat8(x + y), u1, u2));
- assertEqX4(SIMD.Uint8x16.subSaturate(u1, u2), binaryX((x, y) => usat8(x - y), u1, u2));
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/select.js b/js/src/jit-test/tests/SIMD/select.js
deleted file mode 100644
index 3f0d783dac..0000000000
--- a/js/src/jit-test/tests/SIMD/select.js
+++ /dev/null
@@ -1,35 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function select(type, mask, ifTrue, ifFalse) {
- var arr = [];
- for (var i = 0; i < 4; i++) {
- var selector = SIMD.Bool32x4.extractLane(mask, i);
- arr.push(type.extractLane(selector ? ifTrue : ifFalse, i));
- }
- return arr;
-}
-
-function f() {
- var f1 = SIMD.Float32x4(1, 2, 3, 4);
- var f2 = SIMD.Float32x4(NaN, Infinity, 3.14, -0);
-
- var i1 = SIMD.Int32x4(2, 3, 5, 8);
- var i2 = SIMD.Int32x4(13, 37, 24, 42);
-
- var TTFT = SIMD.Bool32x4(true, true, false, true);
- var TFTF = SIMD.Bool32x4(true, false, true, false);
-
- var mask = SIMD.Int32x4(0xdeadbeef, 0xbaadf00d, 0x00ff1ce, 0xdeadc0de);
-
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Float32x4.select(TTFT, f1, f2), select(SIMD.Float32x4, TTFT, f1, f2));
- assertEqX4(SIMD.Float32x4.select(TFTF, f1, f2), select(SIMD.Float32x4, TFTF, f1, f2));
-
- assertEqX4(SIMD.Int32x4.select(TFTF, i1, i2), select(SIMD.Int32x4, TFTF, i1, i2));
- assertEqX4(SIMD.Int32x4.select(TTFT, i1, i2), select(SIMD.Int32x4, TTFT, i1, i2));
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/shift.js b/js/src/jit-test/tests/SIMD/shift.js
deleted file mode 100644
index 8448e52ecb..0000000000
--- a/js/src/jit-test/tests/SIMD/shift.js
+++ /dev/null
@@ -1,75 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function curry(f, arg) { return f.bind(null, arg); }
-
-function binaryLsh(count, v) { count &= 31; return (v << count) | 0; }
-function lsh(count) { return curry(binaryLsh, count); }
-
-function binaryRsh(count, v) { count &= 31; return (v >> count) | 0; }
-function rsh(count) { return curry(binaryRsh, count); }
-
-function binaryUlsh(count, v) { count &= 31; return (v << count) >>> 0; }
-function ulsh(count) { return curry(binaryUlsh, count); }
-
-function binaryUrsh(count, v) { count &= 31; return v >>> count; }
-function ursh(count) { return curry(binaryUrsh, count); }
-
-function f() {
- var v = SIMD.Int32x4(1, 2, -3, 4);
- var u = SIMD.Uint32x4(1, 0x55005500, -3, 0xaa00aa00);
- var a = [1, 2, -3, 4];
- var b = [1, 0x55005500, -3, 0xaa00aa00];
-
- var shifts = [-2, -1, 0, 1, 31, 32, 33];
-
- var r;
- for (var i = 0; i < 150; i++) {
- // Constant shift counts
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, -1), a.map(lsh(-1)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 0), a.map(lsh(0)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 1), a.map(lsh(1)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 2), a.map(lsh(2)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 31), a.map(lsh(31)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 32), a.map(lsh(32)));
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, 33), a.map(lsh(33)));
-
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, -1), a.map(rsh(31)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 0), a.map(rsh(0)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 1), a.map(rsh(1)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 2), a.map(rsh(2)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 31), a.map(rsh(31)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 32), a.map(rsh(32)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, 33), a.map(rsh(33)));
-
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, -1), b.map(ulsh(-1)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 0), b.map(ulsh(0)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 1), b.map(ulsh(1)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 2), b.map(ulsh(2)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 31), b.map(ulsh(31)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 32), b.map(ulsh(32)));
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, 33), b.map(ulsh(33)));
-
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, -1), b.map(ursh(-1)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 0), b.map(ursh(0)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 1), b.map(ursh(1)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 2), b.map(ursh(2)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 31), b.map(ursh(31)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 32), b.map(ursh(32)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, 33), b.map(ursh(33)));
-
- // Non constant shift counts
- var c = shifts[i % shifts.length];
-
- assertEqX4(SIMD.Int32x4.shiftLeftByScalar(v, c), a.map(lsh(c)));
- assertEqX4(SIMD.Int32x4.shiftRightByScalar(v, c), a.map(rsh(c)));
-
- assertEqX4(SIMD.Uint32x4.shiftLeftByScalar(u, c), b.map(ulsh(c)));
- assertEqX4(SIMD.Uint32x4.shiftRightByScalar(u, c), b.map(ursh(c)));
- }
- return r;
-}
-
-f();
-
diff --git a/js/src/jit-test/tests/SIMD/shuffle.js b/js/src/jit-test/tests/SIMD/shuffle.js
deleted file mode 100644
index 505f01131f..0000000000
--- a/js/src/jit-test/tests/SIMD/shuffle.js
+++ /dev/null
@@ -1,86 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var i1 = SIMD.Int32x4(1, 2, 3, 4);
- var i2 = SIMD.Int32x4(5, 6, 7, 8);
-
- var leet = Math.fround(13.37);
- var f1 = SIMD.Float32x4(-.5, -0, Infinity, leet);
- var f2 = SIMD.Float32x4(42, .5, 23, -10);
-
- // computes all rotations of a given array
- function *gen(arr) {
- var previous = arr.slice().splice(0, 4);
- var i = 4;
- for (var j = 0; j < 8; j++) {
- yield previous.slice();
- previous = previous.splice(1, previous.length - 1);
- previous.push(arr[i]);
- i = (i + 1) % arr.length;
- }
- }
-
- var compI = [];
- var baseI = [];
- for (var i = 0; i < 8; i++)
- baseI.push(SIMD.Int32x4.extractLane(i < 4 ? i1 : i2, i % 4));
- for (var k of gen(baseI))
- compI.push(k);
-
- var compF = [];
- var baseF = [];
- for (var i = 0; i < 8; i++)
- baseF.push(SIMD.Float32x4.extractLane(i < 4 ? f1 : f2, i % 4));
- for (var k of gen(baseF))
- compF.push(k);
-
- for (var i = 0; i < 150; i++) {
- // Variable lanes
- var r = SIMD.Float32x4.shuffle(f1, f2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
- assertEqX4(r, compF[i % 8]);
-
- // Constant lanes
- assertEqX4(SIMD.Float32x4.shuffle(f1, f2, 3, 2, 4, 5), [leet, Infinity, 42, .5]);
-
- // Variable lanes
- var r = SIMD.Int32x4.shuffle(i1, i2, i % 8, (i + 1) % 8, (i + 2) % 8, (i + 3) % 8);
- assertEqX4(r, compI[i % 8]);
-
- // Constant lanes
- assertEqX4(SIMD.Int32x4.shuffle(i1, i2, 3, 2, 4, 5), [4, 3, 5, 6]);
- }
-}
-
-function testBailouts(expectException, uglyDuckling) {
- var i1 = SIMD.Int32x4(1, 2, 3, 4);
- var i2 = SIMD.Int32x4(5, 6, 7, 8);
-
- for (var i = 0; i < 150; i++) {
- // Test bailouts
- var value = i == 149 ? uglyDuckling : 0;
- var caught = false;
- try {
- assertEqX4(SIMD.Int32x4.shuffle(i1, i2, value, 2, 4, 5), [1, 3, 5, 6]);
- } catch(e) {
- print(e);
- caught = true;
- assertEq(i, 149);
- assertEq(e instanceof TypeError || e instanceof RangeError, true);
- }
- if (i == 149)
- assertEq(caught, expectException);
- }
-}
-
-f();
-testBailouts(true, -1);
-testBailouts(true, 8);
-testBailouts(true, 2.5);
-testBailouts(true, undefined);
-testBailouts(true, {});
-testBailouts(true, 'one');
-testBailouts(false, false);
-testBailouts(false, null);
-testBailouts(false, " 0.0 ");
diff --git a/js/src/jit-test/tests/SIMD/splat.js b/js/src/jit-test/tests/SIMD/splat.js
deleted file mode 100644
index 38eda3085e..0000000000
--- a/js/src/jit-test/tests/SIMD/splat.js
+++ /dev/null
@@ -1,15 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Int32x4.splat(42), [42, 42, 42, 42]);
- assertEqX4(SIMD.Float32x4.splat(42), [42, 42, 42, 42]);
- assertEqX4(SIMD.Bool32x4.splat(true), [true, true, true, true]);
- assertEqX4(SIMD.Bool32x4.splat(false), [false, false, false, false]);
- }
-}
-
-f();
-
diff --git a/js/src/jit-test/tests/SIMD/store.js b/js/src/jit-test/tests/SIMD/store.js
deleted file mode 100644
index 8cfa354277..0000000000
--- a/js/src/jit-test/tests/SIMD/store.js
+++ /dev/null
@@ -1,143 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 40);
-
-function f() {
- var f32 = new Float32Array(16);
- for (var i = 0; i < 16; i++)
- f32[i] = i + 1;
-
- var f64 = new Float64Array(f32.buffer);
- var i32 = new Int32Array(f32.buffer);
- var u32 = new Uint32Array(f32.buffer);
- var i16 = new Int16Array(f32.buffer);
- var u16 = new Uint16Array(f32.buffer);
- var i8 = new Int8Array(f32.buffer);
- var u8 = new Uint8Array(f32.buffer);
-
- var f4 = SIMD.Float32x4(42, 43, 44, 45);
-
- function check(n) {
- assertEq(f32[0], 42);
- assertEq(f32[1], n > 1 ? 43 : 2);
- assertEq(f32[2], n > 2 ? 44 : 3);
- assertEq(f32[3], n > 3 ? 45 : 4);
-
- f32[0] = 1;
- f32[1] = 2;
- f32[2] = 3;
- f32[3] = 4;
- }
-
- function testStore() {
- SIMD.Float32x4.store(f64, 0, f4);
- check(4);
- SIMD.Float32x4.store(f32, 0, f4);
- check(4);
- SIMD.Float32x4.store(i32, 0, f4);
- check(4);
- SIMD.Float32x4.store(u32, 0, f4);
- check(4);
- SIMD.Float32x4.store(i16, 0, f4);
- check(4);
- SIMD.Float32x4.store(u16, 0, f4);
- check(4);
- SIMD.Float32x4.store(i8, 0, f4);
- check(4);
- SIMD.Float32x4.store(u8, 0, f4);
- check(4);
- }
-
- function testStore1() {
- SIMD.Float32x4.store1(f64, 0, f4);
- check(1);
- SIMD.Float32x4.store1(f32, 0, f4);
- check(1);
- SIMD.Float32x4.store1(i32, 0, f4);
- check(1);
- SIMD.Float32x4.store1(u32, 0, f4);
- check(1);
- SIMD.Float32x4.store1(i16, 0, f4);
- check(1);
- SIMD.Float32x4.store1(u16, 0, f4);
- check(1);
- SIMD.Float32x4.store1(i8, 0, f4);
- check(1);
- SIMD.Float32x4.store1(u8, 0, f4);
- check(1);
- }
-
- function testStore2() {
- SIMD.Float32x4.store2(f64, 0, f4);
- check(2);
- SIMD.Float32x4.store2(f32, 0, f4);
- check(2);
- SIMD.Float32x4.store2(i32, 0, f4);
- check(2);
- SIMD.Float32x4.store2(u32, 0, f4);
- check(2);
- SIMD.Float32x4.store2(i16, 0, f4);
- check(2);
- SIMD.Float32x4.store2(u16, 0, f4);
- check(2);
- SIMD.Float32x4.store2(i8, 0, f4);
- check(2);
- SIMD.Float32x4.store2(u8, 0, f4);
- check(2);
- }
-
- function testStore3() {
- SIMD.Float32x4.store3(f64, 0, f4);
- check(3);
- SIMD.Float32x4.store3(f32, 0, f4);
- check(3);
- SIMD.Float32x4.store3(i32, 0, f4);
- check(3);
- SIMD.Float32x4.store3(u32, 0, f4);
- check(3);
- SIMD.Float32x4.store3(i16, 0, f4);
- check(3);
- SIMD.Float32x4.store3(u16, 0, f4);
- check(3);
- SIMD.Float32x4.store3(i8, 0, f4);
- check(3);
- SIMD.Float32x4.store3(u8, 0, f4);
- check(3);
- }
-
- for (var i = 0; i < 150; i++) {
- testStore();
- testStore1();
- testStore2();
- testStore3();
- }
-}
-
-f();
-
-function testBailout(uglyDuckling) {
- var f32 = new Float32Array(16);
- for (var i = 0; i < 16; i++)
- f32[i] = i + 1;
-
- var i8 = new Int8Array(f32.buffer);
-
- var f4 = SIMD.Float32x4(42, 43, 44, 45);
-
- for (var i = 0; i < 150; i++) {
- var caught = false;
- try {
- SIMD.Float32x4.store(i8, (i < 149) ? 0 : (16 << 2) - (4 << 2) + 1, f4);
- } catch (e) {
- print(e);
- assertEq(e instanceof RangeError, true);
- caught = true;
- }
- assertEq(i < 149 || caught, true);
- }
-}
-
-print('Testing range checks...');
-testBailout(-1);
-testBailout(-15);
-testBailout(12 * 4 + 1);
diff --git a/js/src/jit-test/tests/SIMD/swizzle.js b/js/src/jit-test/tests/SIMD/swizzle.js
deleted file mode 100644
index 2fd56620be..0000000000
--- a/js/src/jit-test/tests/SIMD/swizzle.js
+++ /dev/null
@@ -1,104 +0,0 @@
-if (!this.hasOwnProperty("SIMD"))
- quit();
-
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-function f() {
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
-
- var leet = Math.fround(13.37);
- var f4 = SIMD.Float32x4(-.5, -0, Infinity, leet);
-
- var compI = [
- [1,2,3,4],
- [2,3,4,1],
- [3,4,1,2],
- [4,1,2,3]
- ];
-
- var compF = [
- [-.5, -0, Infinity, leet],
- [-0, Infinity, leet, -.5],
- [Infinity, leet, -.5, -0],
- [leet, -.5, -0, Infinity]
- ];
-
- for (var i = 0; i < 150; i++) {
- // Variable lanes
- var r = SIMD.Float32x4.swizzle(f4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
- assertEqX4(r, compF[i % 4]);
-
- // Constant lanes
- assertEqX4(SIMD.Float32x4.swizzle(f4, 3, 2, 1, 0), [leet, Infinity, -0, -.5]);
-
- // Variable lanes
- var r = SIMD.Int32x4.swizzle(i4, i % 4, (i + 1) % 4, (i + 2) % 4, (i + 3) % 4);
- assertEqX4(r, compI[i % 4]);
-
- // Constant lanes
- assertEqX4(SIMD.Int32x4.swizzle(i4, 3, 2, 1, 0), [4, 3, 2, 1]);
- }
-}
-
-function testBailouts(expectException, uglyDuckling) {
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- for (var i = 0; i < 150; i++) {
- // Test bailouts
- var value = i == 149 ? uglyDuckling : 0;
- var caught = false;
- try {
- assertEqX4(SIMD.Int32x4.swizzle(i4, value, 3, 2, 0), [1, 4, 3, 1]);
- } catch(e) {
- print(e);
- caught = true;
- assertEq(i, 149);
- assertEq(e instanceof TypeError || e instanceof RangeError, true);
- }
- if (i == 149)
- assertEq(caught, expectException);
- }
-}
-
-function testInt32x4SwizzleBailout() {
- // Test out-of-bounds non-constant indices. This is expected to throw.
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Int32x4.swizzle(i4, i, 3, 2, 0), [i + 1, 4, 3, 1]);
- }
-}
-
-f();
-testBailouts(true, -1);
-testBailouts(true, 4);
-testBailouts(true, 2.5);
-testBailouts(true, undefined);
-testBailouts(true, {});
-testBailouts(true, 'one');
-testBailouts(false, false);
-testBailouts(false, null);
-testBailouts(false, " 0.0 ");
-
-try {
- testInt32x4SwizzleBailout();
- throw 'not caught';
-} catch(e) {
- assertEq(e instanceof RangeError, true);
-}
-
-(function() {
- var zappa = 0;
-
- function testBailouts() {
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- for (var i = 0; i < 300; i++) {
- var value = i == 299 ? 2.5 : 1;
- SIMD.Int32x4.swizzle(i4, value, 3, 2, 0);
- zappa = i;
- }
- }
-
- try { testBailouts(); } catch (e) {}
- assertEq(zappa, 298);
-})();
diff --git a/js/src/jit-test/tests/SIMD/uconvert.js b/js/src/jit-test/tests/SIMD/uconvert.js
deleted file mode 100644
index a45fd7af9c..0000000000
--- a/js/src/jit-test/tests/SIMD/uconvert.js
+++ /dev/null
@@ -1,86 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 30);
-
-// Testing Uint32 <-> Float32 conversions.
-// These conversions deserve special attention because SSE doesn't provide
-// simple conversion instructions.
-
-// Convert an Uint32Array to a Float32Array using scalar conversions.
-function cvt_utof_scalar(u32s, f32s) {
- assertEq(u32s.length, f32s.length);
- for (var i = 0; i < u32s.length; i++) {
- f32s[i] = u32s[i];
- }
-}
-
-// Convert an Uint32Array to a Float32Array using simd conversions.
-function cvt_utof_simd(u32s, f32s) {
- assertEq(u32s.length, f32s.length);
- for (var i = 0; i < u32s.length; i += 4) {
- SIMD.Float32x4.store(f32s, i, SIMD.Float32x4.fromUint32x4(SIMD.Uint32x4.load(u32s, i)));
- }
-}
-
-// Convert a Float32Array to an Uint32Array using scalar conversions.
-function cvt_ftou_scalar(f32s, u32s) {
- assertEq(f32s.length, u32s.length);
- for (var i = 0; i < f32s.length; i++) {
- u32s[i] = f32s[i];
- }
-}
-
-// Convert a Float32Array to an Uint32Array using simd conversions.
-function cvt_ftou_simd(f32s, u32s) {
- assertEq(f32s.length, u32s.length);
- for (var i = 0; i < f32s.length; i += 4) {
- SIMD.Uint32x4.store(u32s, i, SIMD.Uint32x4.fromFloat32x4(SIMD.Float32x4.load(f32s, i)));
- }
-}
-
-function check(a, b) {
- assertEq(a.length, b.length);
- for (var i = 0; i < a.length; i++) {
- assertEq(a[i], b[i]);
- }
-}
-
-// Uint32x4 --> Float32x4 tests.
-var src = new Uint32Array(8000);
-var dst1 = new Float32Array(8000);
-var dst2 = new Float32Array(8000);
-
-for (var i = 0; i < 2000; i++) {
- src[i] = i;
- src[i + 2000] = 0x7fffffff - i;
- src[i + 4000] = 0x80000000 + i;
- src[i + 6000] = 0xffffffff - i;
-}
-
-for (var n = 0; n < 10; n++) {
- cvt_utof_scalar(src, dst1);
- cvt_utof_simd(src, dst2);
- check(dst1, dst2);
-}
-
-// Float32x4 --> Uint32x4 tests.
-var fsrc = dst1;
-var fdst1 = new Uint32Array(8000);
-var fdst2 = new Uint32Array(8000);
-
-// The 0xffffffff entries in fsrc round to 0x1.0p32f which throws.
-// Go as high as 0x0.ffffffp32f.
-for (var i = 0; i < 2000; i++) {
- fsrc[i + 6000] = 0xffffff7f - i;
-}
-
-// Truncation towards 0.
-fsrc[1990] = -0.9
-fsrc[1991] = 0.9
-fsrc[1992] = 1.9
-
-for (var n = 0; n < 10; n++) {
- cvt_ftou_scalar(fsrc, fdst1);
- cvt_ftou_simd(fsrc, fdst2);
- check(fdst1, fdst2);
-}
diff --git a/js/src/jit-test/tests/SIMD/unary.js b/js/src/jit-test/tests/SIMD/unary.js
deleted file mode 100644
index 34ec3fb100..0000000000
--- a/js/src/jit-test/tests/SIMD/unary.js
+++ /dev/null
@@ -1,35 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("ion.warmup.trigger", 50);
-
-var notf = (function() {
- var i32 = new Int32Array(1);
- var f32 = new Float32Array(i32.buffer);
- return function(x) {
- f32[0] = x;
- i32[0] = ~i32[0];
- return f32[0];
- }
-})();
-
-function f() {
- var f4 = SIMD.Float32x4(1, 2, 3, 4);
- var i4 = SIMD.Int32x4(1, 2, 3, 4);
- var b4 = SIMD.Bool32x4(true, false, true, false);
- var BitOrZero = (x) => x | 0;
- for (var i = 0; i < 150; i++) {
- assertEqX4(SIMD.Float32x4.neg(f4), unaryX4((x) => -x, f4, Math.fround));
- assertEqX4(SIMD.Float32x4.abs(f4), unaryX4(Math.abs, f4, Math.fround));
- assertEqX4(SIMD.Float32x4.sqrt(f4), unaryX4(Math.sqrt, f4, Math.fround));
-
- assertEqX4(SIMD.Float32x4.reciprocalApproximation(f4), unaryX4((x) => 1 / x, f4, Math.fround), assertNear);
- assertEqX4(SIMD.Float32x4.reciprocalSqrtApproximation(f4), unaryX4((x) => 1 / Math.sqrt(x), f4, Math.fround), assertNear);
-
- assertEqX4(SIMD.Int32x4.not(i4), unaryX4((x) => ~x, i4, BitOrZero));
- assertEqX4(SIMD.Int32x4.neg(i4), unaryX4((x) => -x, i4, BitOrZero));
-
- assertEqX4(SIMD.Bool32x4.not(b4), unaryX4((x) => !x, b4, (x) => x ));
- }
-}
-
-f();
diff --git a/js/src/jit-test/tests/SIMD/unbox.js b/js/src/jit-test/tests/SIMD/unbox.js
deleted file mode 100644
index 2fffddd066..0000000000
--- a/js/src/jit-test/tests/SIMD/unbox.js
+++ /dev/null
@@ -1,144 +0,0 @@
-load(libdir + 'simd.js');
-
-setJitCompilerOption("baseline.warmup.trigger", 10);
-setJitCompilerOption("ion.warmup.trigger", 30);
-
-var max = 40, pivot = 35;
-
-var i32x4 = SIMD.Int32x4;
-var f32x4 = SIMD.Float32x4;
-var i32x4Add = SIMD.Int32x4.add;
-
-var FakeSIMDType = function (o) { this.x = o.x; this.y = o.y; this.z = o.z; this.w = o.w; };
-if (this.hasOwnProperty("TypedObject")) {
- var TO = TypedObject;
- FakeSIMDType = new TO.StructType({ x: TO.int32, y: TO.int32, z: TO.int32, w: TO.int32 });
-}
-
-function simdunbox_bail_undef(i, lhs, rhs) {
- return i32x4Add(lhs, rhs);
-}
-
-function simdunbox_bail_object(i, lhs, rhs) {
- return i32x4Add(lhs, rhs);
-}
-
-function simdunbox_bail_typeobj(i, lhs, rhs) {
- return i32x4Add(lhs, rhs);
-}
-
-function simdunbox_bail_badsimd(i, lhs, rhs) {
- return i32x4Add(lhs, rhs);
-}
-
-var arr_undef = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
-var fail_undef = 0;
-var arr_object = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
-var fail_object = 0;
-var arr_typeobj = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
-var fail_typeobj = 0;
-var arr_badsimd = [ i32x4(0, 1, 1, 2), i32x4(1, 1, 2, 3) ];
-var fail_badsimd = 0;
-for (var i = 0; i < max; i++) {
- try {
- arr_undef[i + 2] = simdunbox_bail_undef(i, arr_undef[i], arr_undef[i + 1]);
- } catch (x) {
- arr_undef[i + 2] = arr_undef[i - 1];
- fail_undef++;
- }
-
- try {
- arr_object[i + 2] = simdunbox_bail_object(i, arr_object[i], arr_object[i + 1]);
- } catch (x) {
- arr_object[i + 2] = arr_object[i - 1];
- fail_object++;
- }
-
- try {
- arr_typeobj[i + 2] = simdunbox_bail_typeobj(i, arr_typeobj[i], arr_typeobj[i + 1]);
- } catch (x) {
- arr_typeobj[i + 2] = arr_typeobj[i - 1];
- fail_typeobj++;
- }
-
- try {
- arr_badsimd[i + 2] = simdunbox_bail_badsimd(i, arr_badsimd[i], arr_badsimd[i + 1]);
- } catch (x) {
- arr_badsimd[i + 2] = arr_badsimd[i - 1];
- fail_badsimd++;
- }
-
- if (i + 2 == pivot) {
- arr_undef[pivot] = undefined;
- arr_object[pivot] = { x: 0, y: 1, z: 2, w: 3 };
- arr_typeobj[pivot] = new FakeSIMDType({ x: 0, y: 1, z: 2, w: 3 });
- arr_badsimd[pivot] = f32x4(0, 1, 2, 3);
- }
-}
-
-assertEq(fail_undef, 2);
-assertEq(fail_object, 2);
-assertEq(fail_typeobj, 2);
-assertEq(fail_badsimd, 2);
-
-// Assert that all SIMD values are correct.
-function assertEqX4(real, expected, assertFunc) {
- if (typeof assertFunc === 'undefined')
- assertFunc = assertEq;
-
- assertFunc(real.x, expected[0]);
- assertFunc(real.y, expected[1]);
- assertFunc(real.z, expected[2]);
- assertFunc(real.w, expected[3]);
-}
-
-var fib = [0, 1];
-for (i = 0; i < max + 5; i++)
- fib[i+2] = (fib[i] + fib[i+1]) | 0;
-
-for (i = 0; i < max; i++) {
- if (i == pivot)
- continue;
- var ref = fib.slice(i < pivot ? i : i - 3);
- assertEqX4(arr_undef[i], ref);
- assertEqX4(arr_object[i], ref);
- assertEqX4(arr_typeobj[i], ref);
- assertEqX4(arr_badsimd[i], ref);
-}
-
-// Check that unbox operations aren't removed
-(function() {
-
- function add(i, v, w) {
- if (i % 2 == 0) {
- SIMD.Int32x4.add(v, w);
- } else {
- SIMD.Float32x4.add(v, w);
- }
- }
-
- var i = 0;
- var caught = false;
- var f4 = SIMD.Float32x4(1,2,3,4);
- var i4 = SIMD.Int32x4(1,2,3,4);
- try {
- for (; i < 200; i++) {
- if (i % 2 == 0) {
- add(i, i4, i4);
- } else if (i == 199) {
- add(i, i4, f4);
- } else {
- add(i, f4, f4);
- }
- }
- } catch(e) {
- print(e);
- assertEq(e instanceof TypeError, true);
- assertEq(i, 199);
- caught = true;
- }
-
- assertEq(i < 199 || caught, true);
-
-})();
-
diff --git a/js/src/jit-test/tests/asm.js/bug1126251.js b/js/src/jit-test/tests/asm.js/bug1126251.js
index 8470a97727..77aa56dbeb 100644
--- a/js/src/jit-test/tests/asm.js/bug1126251.js
+++ b/js/src/jit-test/tests/asm.js/bug1126251.js
@@ -13,41 +13,3 @@ var v = asmLink(asmCompile('global', `
`), this)();
assertEq(v, NaN);
-
-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
- quit(0);
-}
-
-var v = asmLink(asmCompile('global', `
- "use asm";
- var frd = global.Math.fround;
- var Float32x4 = global.SIMD.Float32x4;
- var splat = Float32x4.splat;
- var ext = Float32x4.extractLane;
- function e() {
- var v = Float32x4(0,0,0,0);
- var x = frd(0.);
- v = splat(.1e+71);
- x = ext(v,0);
- x = frd(x / x);
- return +x;
- }
- return e;
-`), this)();
-
-assertEq(v, NaN);
-
-// Bug 1130618: without GVN
-setJitCompilerOption("ion.gvn.enable", 0);
-var v = asmLink(asmCompile('global', `
- "use asm";
- var Float32x4 = global.SIMD.Float32x4;
- var splat = Float32x4.splat;
- var ext = Float32x4.extractLane;
- function e() {
- return +ext(splat(.1e+71),0);
- }
- return e;
-`), this)();
-
-assertEq(v, Infinity);
diff --git a/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js b/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js
deleted file mode 100644
index edcc069ffb..0000000000
--- a/js/src/jit-test/tests/asm.js/bug1201124-simd-proxy.js
+++ /dev/null
@@ -1,28 +0,0 @@
-load(libdir + "asm.js");
-load(libdir + "asserts.js");
-
-if (typeof newGlobal !== 'function' ||
- !isSimdAvailable() ||
- typeof SIMD === 'undefined')
-{
- quit();
-}
-
-var stdlib = new (newGlobal().Proxy)(this, new Proxy({
- simdGet: 0,
- getOwnPropertyDescriptor(t, pk) {
- if (pk === "SIMD" && this.simdGet++ === 1) {
- return {};
- }
- return Reflect.getOwnPropertyDescriptor(t, pk);
- }
-}, {
- get(t, pk, r) {
- print("trap", pk);
- return Reflect.get(t, pk, r);
- }
-}));
-
-var m = asmCompile('stdlib', '"use asm"; var i4=stdlib.SIMD.Int32x4; var i4add=i4.add; return {}');
-
-assertAsmLinkFail(m, stdlib);
diff --git a/js/src/jit-test/tests/asm.js/simd-fbirds.js b/js/src/jit-test/tests/asm.js/simd-fbirds.js
deleted file mode 100644
index f94d409359..0000000000
--- a/js/src/jit-test/tests/asm.js/simd-fbirds.js
+++ /dev/null
@@ -1,197 +0,0 @@
-/* -*- Mode: javascript; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 ; js-indent-level : 2 ; js-curly-indent-offset: 0 -*- */
-
-// Author: Peter Jensen
-
-load(libdir + "asm.js");
-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
- print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-const NUM_BIRDS = 30;
-const NUM_UPDATES = 20;
-const ACCEL_DATA_STEPS = 30;
-
-var buffer = new ArrayBuffer(0x200000);
-var bufferF32 = new Float32Array(buffer);
-
-var actualBirds = 0;
-
-function init() {
- actualBirds = 0;
- // Make it a power of two, for quick modulo wrapping.
- var accelDataValues = [10.0, 9.5, 9.0, 8.0, 7.0, 6.0, 5.5, 5.0, 5.0, 5.0, 5.5, 6.0, 7.0, 8.0, 9.0, 10.0];
- accelDataValues = accelDataValues.map(function(v) { return 50*v; });
- var accelDataValuesLength = accelDataValues.length;
- assertEq(accelDataValuesLength, 16); // Hard coded in the asm.js module
- for (i = 0; i < accelDataValuesLength; i++)
- bufferF32[i + NUM_BIRDS * 2] = accelDataValues[i];
-}
-
-function addBird(pos, vel) {
- bufferF32[actualBirds] = pos;
- bufferF32[actualBirds + NUM_BIRDS] = vel;
- actualBirds++;
- return actualBirds - 1;
-}
-
-function getActualBirds() {
- return actualBirds;
-}
-
-var code = `
- "use asm";
- var toF = global.Math.fround;
- var u8 = new global.Uint8Array(buffer);
- var f32 = new global.Float32Array(buffer);
- const maxBirds = 100000;
- const maxBirdsx4 = 400000;
- const maxBirdsx8 = 800000;
- const accelMask = 0x3c;
- const mk4 = 0x000ffff0;
-
- const getMaxPos = 1000.0;
- const getAccelDataSteps = imp.accelDataSteps | 0;
- var getActualBirds = imp.getActualBirds;
-
- var i4 = global.SIMD.Int32x4;
- var f4 = global.SIMD.Float32x4;
- var b4 = global.SIMD.Bool32x4;
- var i4add = i4.add;
- var i4and = i4.and;
- var f4select = f4.select;
- var f4add = f4.add;
- var f4sub = f4.sub;
- var f4mul = f4.mul;
- var f4greaterThan = f4.greaterThan;
- var f4splat = f4.splat;
- var f4load = f4.load;
- var f4store = f4.store;
- var b4any = b4.anyTrue;
-
- const zerox4 = f4(0.0,0.0,0.0,0.0);
-
- function declareHeapSize() {
- f32[0x0007ffff] = toF(0.0);
- }
-
- function update(timeDelta) {
- timeDelta = toF(timeDelta);
- // var steps = Math.ceil(timeDelta/accelData.interval);
- var steps = 0;
- var subTimeDelta = toF(0.0);
- var actualBirds = 0;
- var maxPos = toF(0.0);
- var maxPosx4 = f4(0.0,0.0,0.0,0.0);
- var subTimeDeltax4 = f4(0.0,0.0,0.0,0.0);
- var subTimeDeltaSquaredx4 = f4(0.0,0.0,0.0,0.0);
- var point5x4 = f4(0.5, 0.5, 0.5, 0.5);
- var i = 0;
- var len = 0;
- var accelIndex = 0;
- var newPosx4 = f4(0.0,0.0,0.0,0.0);
- var newVelx4 = f4(0.0,0.0,0.0,0.0);
- var accel = toF(0.0);
- var accelx4 = f4(0.0,0.0,0.0,0.0);
- var a = 0;
- var posDeltax4 = f4(0.0,0.0,0.0,0.0);
- var cmpx4 = b4(0,0,0,0);
- var newVelTruex4 = f4(0.0,0.0,0.0,0.0);
-
- steps = getAccelDataSteps | 0;
- subTimeDelta = toF(toF(timeDelta / toF(steps | 0)) / toF(1000.0));
- actualBirds = getActualBirds() | 0;
- maxPos = toF(+getMaxPos);
- maxPosx4 = f4splat(maxPos);
- subTimeDeltax4 = f4splat(subTimeDelta);
- subTimeDeltaSquaredx4 = f4mul(subTimeDeltax4, subTimeDeltax4);
-
- len = ((actualBirds + 3) >> 2) << 4;
-
- for (i = 0; (i | 0) < (len | 0); i = (i + 16) | 0) {
- accelIndex = 0;
- newPosx4 = f4load(u8, i & mk4);
- newVelx4 = f4load(u8, (i & mk4) + maxBirdsx4);
- for (a = 0; (a | 0) < (steps | 0); a = (a + 1) | 0) {
- accel = toF(f32[(accelIndex & accelMask) + maxBirdsx8 >> 2]);
- accelx4 = f4splat(accel);
- accelIndex = (accelIndex + 4) | 0;
- posDeltax4 = f4mul(point5x4, f4mul(accelx4, subTimeDeltaSquaredx4));
- posDeltax4 = f4add(posDeltax4, f4mul(newVelx4, subTimeDeltax4));
- newPosx4 = f4add(newPosx4, posDeltax4);
- newVelx4 = f4add(newVelx4, f4mul(accelx4, subTimeDeltax4));
- cmpx4 = f4greaterThan(newPosx4, maxPosx4);
-
- if (b4any(cmpx4)) {
- // Work around unimplemented 'neg' operation, using 0 - x.
- newVelTruex4 = f4sub(zerox4, newVelx4);
- newVelx4 = f4select(cmpx4, newVelTruex4, newVelx4);
- }
- }
- f4store(u8, i & mk4, newPosx4);
- f4store(u8, (i & mk4) + maxBirdsx4, newVelx4);
- }
- }
-
- return update;
-`
-
-var ffi = {
- getActualBirds,
- accelDataSteps: ACCEL_DATA_STEPS
-};
-
-var fbirds = asmLink(asmCompile('global', 'imp', 'buffer', code), this, ffi, buffer);
-
-init();
-for (var i = 0; i < NUM_BIRDS; i++) {
- addBird(i / 10, Math.exp(2, NUM_BIRDS - i));
-}
-
-var b = dateNow();
-for (var j = 0; j < NUM_UPDATES; j++) {
- fbirds(16);
-}
-print(dateNow() - b);
-
-assertEq(bufferF32[0], 0);
-assertEq(bufferF32[1], 0.10000000149011612);
-assertEq(bufferF32[2], 0.20000000298023224);
-assertEq(bufferF32[3], 0.30000001192092896);
-assertEq(bufferF32[4], 0.4000000059604645);
-assertEq(bufferF32[5], 0.5);
-assertEq(bufferF32[6], 0.6000000238418579);
-assertEq(bufferF32[7], 0.699999988079071);
-assertEq(bufferF32[8], 0.800000011920929);
-assertEq(bufferF32[9], 0.8999999761581421);
-assertEq(bufferF32[10], 1);
-assertEq(bufferF32[11], 1.100000023841858);
-assertEq(bufferF32[12], 1.2000000476837158);
-assertEq(bufferF32[13], 1.2999999523162842);
-assertEq(bufferF32[14], 1.399999976158142);
-assertEq(bufferF32[15], 1.5);
-assertEq(bufferF32[16], 1.600000023841858);
-assertEq(bufferF32[17], 1.7000000476837158);
-assertEq(bufferF32[18], 1.7999999523162842);
-assertEq(bufferF32[19], 1.899999976158142);
-assertEq(bufferF32[20], 2);
-assertEq(bufferF32[21], 2.0999999046325684);
-assertEq(bufferF32[22], 2.200000047683716);
-assertEq(bufferF32[23], 2.299999952316284);
-assertEq(bufferF32[24], 2.4000000953674316);
-assertEq(bufferF32[25], 2.5);
-assertEq(bufferF32[26], 2.5999999046325684);
-assertEq(bufferF32[27], 2.700000047683716);
-assertEq(bufferF32[28], 2.799999952316284);
-assertEq(bufferF32[29], 2.9000000953674316);
-
-
-// Code used to generate the assertEq list above.
-function generateAssertList() {
- var buf = '';
- for (var k = 0; k < NUM_BIRDS; k++) {
- buf += 'assertEq(bufferF32['+ k + '], ' + bufferF32[k] + ');\n';
- }
- print(buf);
-}
-//generateAssertList();
diff --git a/js/src/jit-test/tests/asm.js/simd-mandelbrot.js b/js/src/jit-test/tests/asm.js/simd-mandelbrot.js
deleted file mode 100644
index 690548eb83..0000000000
--- a/js/src/jit-test/tests/asm.js/simd-mandelbrot.js
+++ /dev/null
@@ -1,1818 +0,0 @@
-/* -*- Mode: javascript; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 ; js-indent-level : 2 ; js-curly-indent-offset: 0 -*- */
-
-// Mandelbrot using SIMD
-// Author: Peter Jensen, Intel Corporation
-
-load(libdir + "asm.js");
-if (!isSimdAvailable() || typeof SIMD === 'undefined') {
- print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-// global variables
-const MAX_ITERATIONS = 10;
-const DRAW_ITERATIONS = 10;
-
-const CANVAS_WIDTH = 20;
-const CANVAS_HEIGHT = 20;
-
-const LIMIT_SHOW = 20 * 20 * 4;
-
-// Asm.js module buffer.
-var buffer = new ArrayBuffer(16 * 1024 * 1024);
-var view = new Uint8Array(buffer);
-
-var moduleCode = `
- "use asm"
- var b8 = new global.Uint8Array(buffer);
- var toF = global.Math.fround;
- var i4 = global.SIMD.Int32x4;
- var ci4 = i4.check;
- var f4 = global.SIMD.Float32x4;
- var i4add = i4.add;
- var i4and = i4.and;
- var i4ext = i4.extractLane;
- var i4sel = i4.select;
- var f4add = f4.add;
- var f4sub = f4.sub;
- var f4mul = f4.mul;
- var f4lessThanOrEqual = f4.lessThanOrEqual;
- var f4splat = f4.splat;
- var imul = global.Math.imul;
- var b4 = global.SIMD.Bool32x4;
- var b4any = b4.anyTrue;
- const zero4 = i4(0,0,0,0), one4 = i4(1,1,1,1), two4 = f4(2,2,2,2), four4 = f4(4,4,4,4);
-
- const mk0 = 0x007fffff;
-
- function declareHeapLength() {
- b8[0x00ffffff] = 0;
- }
-
- function mapColorAndSetPixel (x, y, width, value, max_iterations) {
- x = x | 0;
- y = y | 0;
- width = width | 0;
- value = value | 0;
- max_iterations = max_iterations | 0;
-
- var rgb = 0, r = 0, g = 0, b = 0, index = 0;
-
- index = (((imul((width >>> 0), (y >>> 0)) + x) | 0) * 4) | 0;
- if ((value | 0) == (max_iterations | 0)) {
- r = 0;
- g = 0;
- b = 0;
- } else {
- rgb = ~~toF(toF(toF(toF(value >>> 0) * toF(0xffff)) / toF(max_iterations >>> 0)) * toF(0xff));
- r = rgb & 0xff;
- g = (rgb >>> 8) & 0xff;
- b = (rgb >>> 16) & 0xff;
- }
- b8[(index & mk0) >> 0] = r;
- b8[(index & mk0) + 1 >> 0] = g;
- b8[(index & mk0) + 2 >> 0] = b;
- b8[(index & mk0) + 3 >> 0] = 255;
- }
-
- function mandelPixelX4 (xf, yf, yd, max_iterations) {
- xf = toF(xf);
- yf = toF(yf);
- yd = toF(yd);
- max_iterations = max_iterations | 0;
- var c_re4 = f4(0,0,0,0), c_im4 = f4(0,0,0,0);
- var z_re4 = f4(0,0,0,0), z_im4 = f4(0,0,0,0);
- var count4 = i4(0,0,0,0);
- var z_re24 = f4(0,0,0,0), z_im24 = f4(0,0,0,0);
- var new_re4 = f4(0,0,0,0), new_im4 = f4(0,0,0,0);
- var i = 0;
- var mb4 = b4(0,0,0,0);
-
- c_re4 = f4splat(xf);
- c_im4 = f4(yf, toF(yd + yf), toF(yd + toF(yd + yf)), toF(yd + toF(yd + toF(yd + yf))));
-
- z_re4 = c_re4;
- z_im4 = c_im4;
-
- for (i = 0; (i | 0) < (max_iterations | 0); i = (i + 1) | 0) {
- z_re24 = f4mul(z_re4, z_re4);
- z_im24 = f4mul(z_im4, z_im4);
- mb4 = f4lessThanOrEqual(f4add(z_re24, z_im24), four4);
- // If all 4 values are greater than 4.0, there's no reason to continue.
- if (!b4any(mb4))
- break;
-
- new_re4 = f4sub(z_re24, z_im24);
- new_im4 = f4mul(f4mul(two4, z_re4), z_im4);
- z_re4 = f4add(c_re4, new_re4);
- z_im4 = f4add(c_im4, new_im4);
- count4 = i4add(count4, i4sel(mb4, one4, zero4));
- }
- return ci4(count4);
- }
-
- function mandelColumnX4 (x, width, height, xf, yf, yd, max_iterations) {
- x = x | 0;
- width = width | 0;
- height = height | 0;
- xf = toF(xf);
- yf = toF(yf);
- yd = toF(yd);
- max_iterations = max_iterations | 0;
-
- var y = 0;
- var ydx4 = toF(0);
- var m4 = i4(0,0,0,0);
-
- ydx4 = toF(yd * toF(4));
- for (y = 0; (y | 0) < (height | 0); y = (y + 4) | 0) {
- m4 = ci4(mandelPixelX4(toF(xf), toF(yf), toF(yd), max_iterations));
- mapColorAndSetPixel(x | 0, y | 0, width, i4ext(m4,0), max_iterations);
- mapColorAndSetPixel(x | 0, (y + 1) | 0, width, i4ext(m4,1), max_iterations);
- mapColorAndSetPixel(x | 0, (y + 2) | 0, width, i4ext(m4,2), max_iterations);
- mapColorAndSetPixel(x | 0, (y + 3) | 0, width, i4ext(m4,3), max_iterations);
- yf = toF(yf + ydx4);
- }
- }
-
- function mandel (width, height, xc, yc, scale, max_iterations) {
- width = width | 0;
- height = height | 0;
- xc = toF(xc);
- yc = toF(yc);
- scale = toF(scale);
- max_iterations = max_iterations | 0;
-
- var x0 = toF(0), y0 = toF(0);
- var xd = toF(0), yd = toF(0);
- var xf = toF(0);
- var x = 0;
-
- x0 = toF(xc - toF(scale * toF(1.5)));
- y0 = toF(yc - scale);
- xd = toF(toF(scale * toF(3)) / toF(width >>> 0));
- yd = toF(toF(scale * toF(2)) / toF(height >>> 0));
- xf = x0;
-
- for (x = 0; (x | 0) < (width | 0); x = (x + 1) | 0) {
- mandelColumnX4(x, width, height, xf, y0, yd, max_iterations);
- xf = toF(xf + xd);
- }
- }
-
- return mandel;
-`;
-
-var FFI = {};
-var mandelbro = asmLink(asmCompile('global', 'ffi', 'buffer', moduleCode), this, FFI, buffer);
-
-function animateMandelbrot () {
- var scale_start = 1.0;
- var scale_end = 0.0005;
- var xc_start = -0.5;
- var yc_start = 0.0;
- var xc_end = 0.0;
- var yc_end = 0.75;
- var steps = 200.0;
- var scale_step = (scale_end - scale_start)/steps;
- var xc_step = (xc_end - xc_start)/steps;
- var yc_step = (yc_end - yc_start)/steps;
- var scale = scale_start;
- var xc = xc_start;
- var yc = yc_start;
- var i = 0;
- var now = dateNow();
-
- function draw1 () {
- mandelbro(CANVAS_WIDTH, CANVAS_HEIGHT, xc, yc, scale, MAX_ITERATIONS);
- if (scale < scale_end || scale > scale_start) {
- scale_step = -scale_step;
- xc_step = -xc_step;
- yc_step = -yc_step;
- }
- scale += scale_step;
- xc += xc_step;
- yc += yc_step;
- i++;
- }
-
- var b = dateNow();
- for (var j = DRAW_ITERATIONS; j --> 0;)
- draw1();
- print(dateNow() - b);
-}
-
-animateMandelbrot();
-
-assertEq(view[0], 0, "0th value should be 0");
-assertEq(view[1], 0, "1th value should be 0");
-assertEq(view[2], 0, "2th value should be 0");
-assertEq(view[3], 255, "3th value should be 255");
-assertEq(view[4], 230, "4th value should be 230");
-assertEq(view[5], 127, "5th value should be 127");
-assertEq(view[6], 25, "6th value should be 25");
-assertEq(view[7], 255, "7th value should be 255");
-assertEq(view[8], 230, "8th value should be 230");
-assertEq(view[9], 127, "9th value should be 127");
-assertEq(view[10], 25, "10th value should be 25");
-assertEq(view[11], 255, "11th value should be 255");
-assertEq(view[12], 205, "12th value should be 205");
-assertEq(view[13], 255, "13th value should be 255");
-assertEq(view[14], 50, "14th value should be 50");
-assertEq(view[15], 255, "15th value should be 255");
-assertEq(view[16], 205, "16th value should be 205");
-assertEq(view[17], 255, "17th value should be 255");
-assertEq(view[18], 50, "18th value should be 50");
-assertEq(view[19], 255, "19th value should be 255");
-assertEq(view[20], 205, "20th value should be 205");
-assertEq(view[21], 255, "21th value should be 255");
-assertEq(view[22], 50, "22th value should be 50");
-assertEq(view[23], 255, "23th value should be 255");
-assertEq(view[24], 205, "24th value should be 205");
-assertEq(view[25], 255, "25th value should be 255");
-assertEq(view[26], 50, "26th value should be 50");
-assertEq(view[27], 255, "27th value should be 255");
-assertEq(view[28], 205, "28th value should be 205");
-assertEq(view[29], 255, "29th value should be 255");
-assertEq(view[30], 50, "30th value should be 50");
-assertEq(view[31], 255, "31th value should be 255");
-assertEq(view[32], 179, "32th value should be 179");
-assertEq(view[33], 127, "33th value should be 127");
-assertEq(view[34], 76, "34th value should be 76");
-assertEq(view[35], 255, "35th value should be 255");
-assertEq(view[36], 179, "36th value should be 179");
-assertEq(view[37], 127, "37th value should be 127");
-assertEq(view[38], 76, "38th value should be 76");
-assertEq(view[39], 255, "39th value should be 255");
-assertEq(view[40], 179, "40th value should be 179");
-assertEq(view[41], 127, "41th value should be 127");
-assertEq(view[42], 76, "42th value should be 76");
-assertEq(view[43], 255, "43th value should be 255");
-assertEq(view[44], 154, "44th value should be 154");
-assertEq(view[45], 255, "45th value should be 255");
-assertEq(view[46], 101, "46th value should be 101");
-assertEq(view[47], 255, "47th value should be 255");
-assertEq(view[48], 78, "48th value should be 78");
-assertEq(view[49], 127, "49th value should be 127");
-assertEq(view[50], 178, "50th value should be 178");
-assertEq(view[51], 255, "51th value should be 255");
-assertEq(view[52], 52, "52th value should be 52");
-assertEq(view[53], 255, "53th value should be 255");
-assertEq(view[54], 203, "54th value should be 203");
-assertEq(view[55], 255, "55th value should be 255");
-assertEq(view[56], 154, "56th value should be 154");
-assertEq(view[57], 255, "57th value should be 255");
-assertEq(view[58], 101, "58th value should be 101");
-assertEq(view[59], 255, "59th value should be 255");
-assertEq(view[60], 179, "60th value should be 179");
-assertEq(view[61], 127, "61th value should be 127");
-assertEq(view[62], 76, "62th value should be 76");
-assertEq(view[63], 255, "63th value should be 255");
-assertEq(view[64], 205, "64th value should be 205");
-assertEq(view[65], 255, "65th value should be 255");
-assertEq(view[66], 50, "66th value should be 50");
-assertEq(view[67], 255, "67th value should be 255");
-assertEq(view[68], 205, "68th value should be 205");
-assertEq(view[69], 255, "69th value should be 255");
-assertEq(view[70], 50, "70th value should be 50");
-assertEq(view[71], 255, "71th value should be 255");
-assertEq(view[72], 230, "72th value should be 230");
-assertEq(view[73], 127, "73th value should be 127");
-assertEq(view[74], 25, "74th value should be 25");
-assertEq(view[75], 255, "75th value should be 255");
-assertEq(view[76], 230, "76th value should be 230");
-assertEq(view[77], 127, "77th value should be 127");
-assertEq(view[78], 25, "78th value should be 25");
-assertEq(view[79], 255, "79th value should be 255");
-assertEq(view[80], 0, "80th value should be 0");
-assertEq(view[81], 0, "81th value should be 0");
-assertEq(view[82], 0, "82th value should be 0");
-assertEq(view[83], 255, "83th value should be 255");
-assertEq(view[84], 230, "84th value should be 230");
-assertEq(view[85], 127, "85th value should be 127");
-assertEq(view[86], 25, "86th value should be 25");
-assertEq(view[87], 255, "87th value should be 255");
-assertEq(view[88], 205, "88th value should be 205");
-assertEq(view[89], 255, "89th value should be 255");
-assertEq(view[90], 50, "90th value should be 50");
-assertEq(view[91], 255, "91th value should be 255");
-assertEq(view[92], 205, "92th value should be 205");
-assertEq(view[93], 255, "93th value should be 255");
-assertEq(view[94], 50, "94th value should be 50");
-assertEq(view[95], 255, "95th value should be 255");
-assertEq(view[96], 205, "96th value should be 205");
-assertEq(view[97], 255, "97th value should be 255");
-assertEq(view[98], 50, "98th value should be 50");
-assertEq(view[99], 255, "99th value should be 255");
-assertEq(view[100], 205, "100th value should be 205");
-assertEq(view[101], 255, "101th value should be 255");
-assertEq(view[102], 50, "102th value should be 50");
-assertEq(view[103], 255, "103th value should be 255");
-assertEq(view[104], 205, "104th value should be 205");
-assertEq(view[105], 255, "105th value should be 255");
-assertEq(view[106], 50, "106th value should be 50");
-assertEq(view[107], 255, "107th value should be 255");
-assertEq(view[108], 205, "108th value should be 205");
-assertEq(view[109], 255, "109th value should be 255");
-assertEq(view[110], 50, "110th value should be 50");
-assertEq(view[111], 255, "111th value should be 255");
-assertEq(view[112], 179, "112th value should be 179");
-assertEq(view[113], 127, "113th value should be 127");
-assertEq(view[114], 76, "114th value should be 76");
-assertEq(view[115], 255, "115th value should be 255");
-assertEq(view[116], 179, "116th value should be 179");
-assertEq(view[117], 127, "117th value should be 127");
-assertEq(view[118], 76, "118th value should be 76");
-assertEq(view[119], 255, "119th value should be 255");
-assertEq(view[120], 154, "120th value should be 154");
-assertEq(view[121], 255, "121th value should be 255");
-assertEq(view[122], 101, "122th value should be 101");
-assertEq(view[123], 255, "123th value should be 255");
-assertEq(view[124], 103, "124th value should be 103");
-assertEq(view[125], 255, "125th value should be 255");
-assertEq(view[126], 152, "126th value should be 152");
-assertEq(view[127], 255, "127th value should be 255");
-assertEq(view[128], 0, "128th value should be 0");
-assertEq(view[129], 0, "129th value should be 0");
-assertEq(view[130], 0, "130th value should be 0");
-assertEq(view[131], 255, "131th value should be 255");
-assertEq(view[132], 0, "132th value should be 0");
-assertEq(view[133], 0, "133th value should be 0");
-assertEq(view[134], 0, "134th value should be 0");
-assertEq(view[135], 255, "135th value should be 255");
-assertEq(view[136], 128, "136th value should be 128");
-assertEq(view[137], 127, "137th value should be 127");
-assertEq(view[138], 127, "138th value should be 127");
-assertEq(view[139], 255, "139th value should be 255");
-assertEq(view[140], 154, "140th value should be 154");
-assertEq(view[141], 255, "141th value should be 255");
-assertEq(view[142], 101, "142th value should be 101");
-assertEq(view[143], 255, "143th value should be 255");
-assertEq(view[144], 179, "144th value should be 179");
-assertEq(view[145], 127, "145th value should be 127");
-assertEq(view[146], 76, "146th value should be 76");
-assertEq(view[147], 255, "147th value should be 255");
-assertEq(view[148], 205, "148th value should be 205");
-assertEq(view[149], 255, "149th value should be 255");
-assertEq(view[150], 50, "150th value should be 50");
-assertEq(view[151], 255, "151th value should be 255");
-assertEq(view[152], 205, "152th value should be 205");
-assertEq(view[153], 255, "153th value should be 255");
-assertEq(view[154], 50, "154th value should be 50");
-assertEq(view[155], 255, "155th value should be 255");
-assertEq(view[156], 230, "156th value should be 230");
-assertEq(view[157], 127, "157th value should be 127");
-assertEq(view[158], 25, "158th value should be 25");
-assertEq(view[159], 255, "159th value should be 255");
-assertEq(view[160], 0, "160th value should be 0");
-assertEq(view[161], 0, "161th value should be 0");
-assertEq(view[162], 0, "162th value should be 0");
-assertEq(view[163], 255, "163th value should be 255");
-assertEq(view[164], 230, "164th value should be 230");
-assertEq(view[165], 127, "165th value should be 127");
-assertEq(view[166], 25, "166th value should be 25");
-assertEq(view[167], 255, "167th value should be 255");
-assertEq(view[168], 205, "168th value should be 205");
-assertEq(view[169], 255, "169th value should be 255");
-assertEq(view[170], 50, "170th value should be 50");
-assertEq(view[171], 255, "171th value should be 255");
-assertEq(view[172], 205, "172th value should be 205");
-assertEq(view[173], 255, "173th value should be 255");
-assertEq(view[174], 50, "174th value should be 50");
-assertEq(view[175], 255, "175th value should be 255");
-assertEq(view[176], 205, "176th value should be 205");
-assertEq(view[177], 255, "177th value should be 255");
-assertEq(view[178], 50, "178th value should be 50");
-assertEq(view[179], 255, "179th value should be 255");
-assertEq(view[180], 205, "180th value should be 205");
-assertEq(view[181], 255, "181th value should be 255");
-assertEq(view[182], 50, "182th value should be 50");
-assertEq(view[183], 255, "183th value should be 255");
-assertEq(view[184], 205, "184th value should be 205");
-assertEq(view[185], 255, "185th value should be 255");
-assertEq(view[186], 50, "186th value should be 50");
-assertEq(view[187], 255, "187th value should be 255");
-assertEq(view[188], 179, "188th value should be 179");
-assertEq(view[189], 127, "189th value should be 127");
-assertEq(view[190], 76, "190th value should be 76");
-assertEq(view[191], 255, "191th value should be 255");
-assertEq(view[192], 179, "192th value should be 179");
-assertEq(view[193], 127, "193th value should be 127");
-assertEq(view[194], 76, "194th value should be 76");
-assertEq(view[195], 255, "195th value should be 255");
-assertEq(view[196], 154, "196th value should be 154");
-assertEq(view[197], 255, "197th value should be 255");
-assertEq(view[198], 101, "198th value should be 101");
-assertEq(view[199], 255, "199th value should be 255");
-assertEq(view[200], 103, "200th value should be 103");
-assertEq(view[201], 255, "201th value should be 255");
-assertEq(view[202], 152, "202th value should be 152");
-assertEq(view[203], 255, "203th value should be 255");
-assertEq(view[204], 78, "204th value should be 78");
-assertEq(view[205], 127, "205th value should be 127");
-assertEq(view[206], 178, "206th value should be 178");
-assertEq(view[207], 255, "207th value should be 255");
-assertEq(view[208], 0, "208th value should be 0");
-assertEq(view[209], 0, "209th value should be 0");
-assertEq(view[210], 0, "210th value should be 0");
-assertEq(view[211], 255, "211th value should be 255");
-assertEq(view[212], 0, "212th value should be 0");
-assertEq(view[213], 0, "213th value should be 0");
-assertEq(view[214], 0, "214th value should be 0");
-assertEq(view[215], 255, "215th value should be 255");
-assertEq(view[216], 78, "216th value should be 78");
-assertEq(view[217], 127, "217th value should be 127");
-assertEq(view[218], 178, "218th value should be 178");
-assertEq(view[219], 255, "219th value should be 255");
-assertEq(view[220], 128, "220th value should be 128");
-assertEq(view[221], 127, "221th value should be 127");
-assertEq(view[222], 127, "222th value should be 127");
-assertEq(view[223], 255, "223th value should be 255");
-assertEq(view[224], 154, "224th value should be 154");
-assertEq(view[225], 255, "225th value should be 255");
-assertEq(view[226], 101, "226th value should be 101");
-assertEq(view[227], 255, "227th value should be 255");
-assertEq(view[228], 205, "228th value should be 205");
-assertEq(view[229], 255, "229th value should be 255");
-assertEq(view[230], 50, "230th value should be 50");
-assertEq(view[231], 255, "231th value should be 255");
-assertEq(view[232], 205, "232th value should be 205");
-assertEq(view[233], 255, "233th value should be 255");
-assertEq(view[234], 50, "234th value should be 50");
-assertEq(view[235], 255, "235th value should be 255");
-assertEq(view[236], 230, "236th value should be 230");
-assertEq(view[237], 127, "237th value should be 127");
-assertEq(view[238], 25, "238th value should be 25");
-assertEq(view[239], 255, "239th value should be 255");
-assertEq(view[240], 0, "240th value should be 0");
-assertEq(view[241], 0, "241th value should be 0");
-assertEq(view[242], 0, "242th value should be 0");
-assertEq(view[243], 255, "243th value should be 255");
-assertEq(view[244], 205, "244th value should be 205");
-assertEq(view[245], 255, "245th value should be 255");
-assertEq(view[246], 50, "246th value should be 50");
-assertEq(view[247], 255, "247th value should be 255");
-assertEq(view[248], 205, "248th value should be 205");
-assertEq(view[249], 255, "249th value should be 255");
-assertEq(view[250], 50, "250th value should be 50");
-assertEq(view[251], 255, "251th value should be 255");
-assertEq(view[252], 205, "252th value should be 205");
-assertEq(view[253], 255, "253th value should be 255");
-assertEq(view[254], 50, "254th value should be 50");
-assertEq(view[255], 255, "255th value should be 255");
-assertEq(view[256], 205, "256th value should be 205");
-assertEq(view[257], 255, "257th value should be 255");
-assertEq(view[258], 50, "258th value should be 50");
-assertEq(view[259], 255, "259th value should be 255");
-assertEq(view[260], 205, "260th value should be 205");
-assertEq(view[261], 255, "261th value should be 255");
-assertEq(view[262], 50, "262th value should be 50");
-assertEq(view[263], 255, "263th value should be 255");
-assertEq(view[264], 179, "264th value should be 179");
-assertEq(view[265], 127, "265th value should be 127");
-assertEq(view[266], 76, "266th value should be 76");
-assertEq(view[267], 255, "267th value should be 255");
-assertEq(view[268], 179, "268th value should be 179");
-assertEq(view[269], 127, "269th value should be 127");
-assertEq(view[270], 76, "270th value should be 76");
-assertEq(view[271], 255, "271th value should be 255");
-assertEq(view[272], 154, "272th value should be 154");
-assertEq(view[273], 255, "273th value should be 255");
-assertEq(view[274], 101, "274th value should be 101");
-assertEq(view[275], 255, "275th value should be 255");
-assertEq(view[276], 52, "276th value should be 52");
-assertEq(view[277], 255, "277th value should be 255");
-assertEq(view[278], 203, "278th value should be 203");
-assertEq(view[279], 255, "279th value should be 255");
-assertEq(view[280], 0, "280th value should be 0");
-assertEq(view[281], 0, "281th value should be 0");
-assertEq(view[282], 0, "282th value should be 0");
-assertEq(view[283], 255, "283th value should be 255");
-assertEq(view[284], 0, "284th value should be 0");
-assertEq(view[285], 0, "285th value should be 0");
-assertEq(view[286], 0, "286th value should be 0");
-assertEq(view[287], 255, "287th value should be 255");
-assertEq(view[288], 0, "288th value should be 0");
-assertEq(view[289], 0, "289th value should be 0");
-assertEq(view[290], 0, "290th value should be 0");
-assertEq(view[291], 255, "291th value should be 255");
-assertEq(view[292], 0, "292th value should be 0");
-assertEq(view[293], 0, "293th value should be 0");
-assertEq(view[294], 0, "294th value should be 0");
-assertEq(view[295], 255, "295th value should be 255");
-assertEq(view[296], 0, "296th value should be 0");
-assertEq(view[297], 0, "297th value should be 0");
-assertEq(view[298], 0, "298th value should be 0");
-assertEq(view[299], 255, "299th value should be 255");
-assertEq(view[300], 52, "300th value should be 52");
-assertEq(view[301], 255, "301th value should be 255");
-assertEq(view[302], 203, "302th value should be 203");
-assertEq(view[303], 255, "303th value should be 255");
-assertEq(view[304], 52, "304th value should be 52");
-assertEq(view[305], 255, "305th value should be 255");
-assertEq(view[306], 203, "306th value should be 203");
-assertEq(view[307], 255, "307th value should be 255");
-assertEq(view[308], 179, "308th value should be 179");
-assertEq(view[309], 127, "309th value should be 127");
-assertEq(view[310], 76, "310th value should be 76");
-assertEq(view[311], 255, "311th value should be 255");
-assertEq(view[312], 205, "312th value should be 205");
-assertEq(view[313], 255, "313th value should be 255");
-assertEq(view[314], 50, "314th value should be 50");
-assertEq(view[315], 255, "315th value should be 255");
-assertEq(view[316], 205, "316th value should be 205");
-assertEq(view[317], 255, "317th value should be 255");
-assertEq(view[318], 50, "318th value should be 50");
-assertEq(view[319], 255, "319th value should be 255");
-assertEq(view[320], 230, "320th value should be 230");
-assertEq(view[321], 127, "321th value should be 127");
-assertEq(view[322], 25, "322th value should be 25");
-assertEq(view[323], 255, "323th value should be 255");
-assertEq(view[324], 205, "324th value should be 205");
-assertEq(view[325], 255, "325th value should be 255");
-assertEq(view[326], 50, "326th value should be 50");
-assertEq(view[327], 255, "327th value should be 255");
-assertEq(view[328], 205, "328th value should be 205");
-assertEq(view[329], 255, "329th value should be 255");
-assertEq(view[330], 50, "330th value should be 50");
-assertEq(view[331], 255, "331th value should be 255");
-assertEq(view[332], 205, "332th value should be 205");
-assertEq(view[333], 255, "333th value should be 255");
-assertEq(view[334], 50, "334th value should be 50");
-assertEq(view[335], 255, "335th value should be 255");
-assertEq(view[336], 205, "336th value should be 205");
-assertEq(view[337], 255, "337th value should be 255");
-assertEq(view[338], 50, "338th value should be 50");
-assertEq(view[339], 255, "339th value should be 255");
-assertEq(view[340], 179, "340th value should be 179");
-assertEq(view[341], 127, "341th value should be 127");
-assertEq(view[342], 76, "342th value should be 76");
-assertEq(view[343], 255, "343th value should be 255");
-assertEq(view[344], 154, "344th value should be 154");
-assertEq(view[345], 255, "345th value should be 255");
-assertEq(view[346], 101, "346th value should be 101");
-assertEq(view[347], 255, "347th value should be 255");
-assertEq(view[348], 154, "348th value should be 154");
-assertEq(view[349], 255, "349th value should be 255");
-assertEq(view[350], 101, "350th value should be 101");
-assertEq(view[351], 255, "351th value should be 255");
-assertEq(view[352], 128, "352th value should be 128");
-assertEq(view[353], 127, "353th value should be 127");
-assertEq(view[354], 127, "354th value should be 127");
-assertEq(view[355], 255, "355th value should be 255");
-assertEq(view[356], 52, "356th value should be 52");
-assertEq(view[357], 255, "357th value should be 255");
-assertEq(view[358], 203, "358th value should be 203");
-assertEq(view[359], 255, "359th value should be 255");
-assertEq(view[360], 0, "360th value should be 0");
-assertEq(view[361], 0, "361th value should be 0");
-assertEq(view[362], 0, "362th value should be 0");
-assertEq(view[363], 255, "363th value should be 255");
-assertEq(view[364], 0, "364th value should be 0");
-assertEq(view[365], 0, "365th value should be 0");
-assertEq(view[366], 0, "366th value should be 0");
-assertEq(view[367], 255, "367th value should be 255");
-assertEq(view[368], 0, "368th value should be 0");
-assertEq(view[369], 0, "369th value should be 0");
-assertEq(view[370], 0, "370th value should be 0");
-assertEq(view[371], 255, "371th value should be 255");
-assertEq(view[372], 0, "372th value should be 0");
-assertEq(view[373], 0, "373th value should be 0");
-assertEq(view[374], 0, "374th value should be 0");
-assertEq(view[375], 255, "375th value should be 255");
-assertEq(view[376], 0, "376th value should be 0");
-assertEq(view[377], 0, "377th value should be 0");
-assertEq(view[378], 0, "378th value should be 0");
-assertEq(view[379], 255, "379th value should be 255");
-assertEq(view[380], 0, "380th value should be 0");
-assertEq(view[381], 0, "381th value should be 0");
-assertEq(view[382], 0, "382th value should be 0");
-assertEq(view[383], 255, "383th value should be 255");
-assertEq(view[384], 52, "384th value should be 52");
-assertEq(view[385], 255, "385th value should be 255");
-assertEq(view[386], 203, "386th value should be 203");
-assertEq(view[387], 255, "387th value should be 255");
-assertEq(view[388], 179, "388th value should be 179");
-assertEq(view[389], 127, "389th value should be 127");
-assertEq(view[390], 76, "390th value should be 76");
-assertEq(view[391], 255, "391th value should be 255");
-assertEq(view[392], 205, "392th value should be 205");
-assertEq(view[393], 255, "393th value should be 255");
-assertEq(view[394], 50, "394th value should be 50");
-assertEq(view[395], 255, "395th value should be 255");
-assertEq(view[396], 205, "396th value should be 205");
-assertEq(view[397], 255, "397th value should be 255");
-assertEq(view[398], 50, "398th value should be 50");
-assertEq(view[399], 255, "399th value should be 255");
-assertEq(view[400], 205, "400th value should be 205");
-assertEq(view[401], 255, "401th value should be 255");
-assertEq(view[402], 50, "402th value should be 50");
-assertEq(view[403], 255, "403th value should be 255");
-assertEq(view[404], 205, "404th value should be 205");
-assertEq(view[405], 255, "405th value should be 255");
-assertEq(view[406], 50, "406th value should be 50");
-assertEq(view[407], 255, "407th value should be 255");
-assertEq(view[408], 205, "408th value should be 205");
-assertEq(view[409], 255, "409th value should be 255");
-assertEq(view[410], 50, "410th value should be 50");
-assertEq(view[411], 255, "411th value should be 255");
-assertEq(view[412], 205, "412th value should be 205");
-assertEq(view[413], 255, "413th value should be 255");
-assertEq(view[414], 50, "414th value should be 50");
-assertEq(view[415], 255, "415th value should be 255");
-assertEq(view[416], 154, "416th value should be 154");
-assertEq(view[417], 255, "417th value should be 255");
-assertEq(view[418], 101, "418th value should be 101");
-assertEq(view[419], 255, "419th value should be 255");
-assertEq(view[420], 128, "420th value should be 128");
-assertEq(view[421], 127, "421th value should be 127");
-assertEq(view[422], 127, "422th value should be 127");
-assertEq(view[423], 255, "423th value should be 255");
-assertEq(view[424], 154, "424th value should be 154");
-assertEq(view[425], 255, "425th value should be 255");
-assertEq(view[426], 101, "426th value should be 101");
-assertEq(view[427], 255, "427th value should be 255");
-assertEq(view[428], 128, "428th value should be 128");
-assertEq(view[429], 127, "429th value should be 127");
-assertEq(view[430], 127, "430th value should be 127");
-assertEq(view[431], 255, "431th value should be 255");
-assertEq(view[432], 103, "432th value should be 103");
-assertEq(view[433], 255, "433th value should be 255");
-assertEq(view[434], 152, "434th value should be 152");
-assertEq(view[435], 255, "435th value should be 255");
-assertEq(view[436], 0, "436th value should be 0");
-assertEq(view[437], 0, "437th value should be 0");
-assertEq(view[438], 0, "438th value should be 0");
-assertEq(view[439], 255, "439th value should be 255");
-assertEq(view[440], 0, "440th value should be 0");
-assertEq(view[441], 0, "441th value should be 0");
-assertEq(view[442], 0, "442th value should be 0");
-assertEq(view[443], 255, "443th value should be 255");
-assertEq(view[444], 0, "444th value should be 0");
-assertEq(view[445], 0, "445th value should be 0");
-assertEq(view[446], 0, "446th value should be 0");
-assertEq(view[447], 255, "447th value should be 255");
-assertEq(view[448], 0, "448th value should be 0");
-assertEq(view[449], 0, "449th value should be 0");
-assertEq(view[450], 0, "450th value should be 0");
-assertEq(view[451], 255, "451th value should be 255");
-assertEq(view[452], 0, "452th value should be 0");
-assertEq(view[453], 0, "453th value should be 0");
-assertEq(view[454], 0, "454th value should be 0");
-assertEq(view[455], 255, "455th value should be 255");
-assertEq(view[456], 0, "456th value should be 0");
-assertEq(view[457], 0, "457th value should be 0");
-assertEq(view[458], 0, "458th value should be 0");
-assertEq(view[459], 255, "459th value should be 255");
-assertEq(view[460], 0, "460th value should be 0");
-assertEq(view[461], 0, "461th value should be 0");
-assertEq(view[462], 0, "462th value should be 0");
-assertEq(view[463], 255, "463th value should be 255");
-assertEq(view[464], 78, "464th value should be 78");
-assertEq(view[465], 127, "465th value should be 127");
-assertEq(view[466], 178, "466th value should be 178");
-assertEq(view[467], 255, "467th value should be 255");
-assertEq(view[468], 154, "468th value should be 154");
-assertEq(view[469], 255, "469th value should be 255");
-assertEq(view[470], 101, "470th value should be 101");
-assertEq(view[471], 255, "471th value should be 255");
-assertEq(view[472], 205, "472th value should be 205");
-assertEq(view[473], 255, "473th value should be 255");
-assertEq(view[474], 50, "474th value should be 50");
-assertEq(view[475], 255, "475th value should be 255");
-assertEq(view[476], 205, "476th value should be 205");
-assertEq(view[477], 255, "477th value should be 255");
-assertEq(view[478], 50, "478th value should be 50");
-assertEq(view[479], 255, "479th value should be 255");
-assertEq(view[480], 205, "480th value should be 205");
-assertEq(view[481], 255, "481th value should be 255");
-assertEq(view[482], 50, "482th value should be 50");
-assertEq(view[483], 255, "483th value should be 255");
-assertEq(view[484], 205, "484th value should be 205");
-assertEq(view[485], 255, "485th value should be 255");
-assertEq(view[486], 50, "486th value should be 50");
-assertEq(view[487], 255, "487th value should be 255");
-assertEq(view[488], 179, "488th value should be 179");
-assertEq(view[489], 127, "489th value should be 127");
-assertEq(view[490], 76, "490th value should be 76");
-assertEq(view[491], 255, "491th value should be 255");
-assertEq(view[492], 179, "492th value should be 179");
-assertEq(view[493], 127, "493th value should be 127");
-assertEq(view[494], 76, "494th value should be 76");
-assertEq(view[495], 255, "495th value should be 255");
-assertEq(view[496], 128, "496th value should be 128");
-assertEq(view[497], 127, "497th value should be 127");
-assertEq(view[498], 127, "498th value should be 127");
-assertEq(view[499], 255, "499th value should be 255");
-assertEq(view[500], 52, "500th value should be 52");
-assertEq(view[501], 255, "501th value should be 255");
-assertEq(view[502], 203, "502th value should be 203");
-assertEq(view[503], 255, "503th value should be 255");
-assertEq(view[504], 0, "504th value should be 0");
-assertEq(view[505], 0, "505th value should be 0");
-assertEq(view[506], 0, "506th value should be 0");
-assertEq(view[507], 255, "507th value should be 255");
-assertEq(view[508], 78, "508th value should be 78");
-assertEq(view[509], 127, "509th value should be 127");
-assertEq(view[510], 178, "510th value should be 178");
-assertEq(view[511], 255, "511th value should be 255");
-assertEq(view[512], 52, "512th value should be 52");
-assertEq(view[513], 255, "513th value should be 255");
-assertEq(view[514], 203, "514th value should be 203");
-assertEq(view[515], 255, "515th value should be 255");
-assertEq(view[516], 0, "516th value should be 0");
-assertEq(view[517], 0, "517th value should be 0");
-assertEq(view[518], 0, "518th value should be 0");
-assertEq(view[519], 255, "519th value should be 255");
-assertEq(view[520], 0, "520th value should be 0");
-assertEq(view[521], 0, "521th value should be 0");
-assertEq(view[522], 0, "522th value should be 0");
-assertEq(view[523], 255, "523th value should be 255");
-assertEq(view[524], 0, "524th value should be 0");
-assertEq(view[525], 0, "525th value should be 0");
-assertEq(view[526], 0, "526th value should be 0");
-assertEq(view[527], 255, "527th value should be 255");
-assertEq(view[528], 0, "528th value should be 0");
-assertEq(view[529], 0, "529th value should be 0");
-assertEq(view[530], 0, "530th value should be 0");
-assertEq(view[531], 255, "531th value should be 255");
-assertEq(view[532], 0, "532th value should be 0");
-assertEq(view[533], 0, "533th value should be 0");
-assertEq(view[534], 0, "534th value should be 0");
-assertEq(view[535], 255, "535th value should be 255");
-assertEq(view[536], 0, "536th value should be 0");
-assertEq(view[537], 0, "537th value should be 0");
-assertEq(view[538], 0, "538th value should be 0");
-assertEq(view[539], 255, "539th value should be 255");
-assertEq(view[540], 0, "540th value should be 0");
-assertEq(view[541], 0, "541th value should be 0");
-assertEq(view[542], 0, "542th value should be 0");
-assertEq(view[543], 255, "543th value should be 255");
-assertEq(view[544], 0, "544th value should be 0");
-assertEq(view[545], 0, "545th value should be 0");
-assertEq(view[546], 0, "546th value should be 0");
-assertEq(view[547], 255, "547th value should be 255");
-assertEq(view[548], 154, "548th value should be 154");
-assertEq(view[549], 255, "549th value should be 255");
-assertEq(view[550], 101, "550th value should be 101");
-assertEq(view[551], 255, "551th value should be 255");
-assertEq(view[552], 205, "552th value should be 205");
-assertEq(view[553], 255, "553th value should be 255");
-assertEq(view[554], 50, "554th value should be 50");
-assertEq(view[555], 255, "555th value should be 255");
-assertEq(view[556], 205, "556th value should be 205");
-assertEq(view[557], 255, "557th value should be 255");
-assertEq(view[558], 50, "558th value should be 50");
-assertEq(view[559], 255, "559th value should be 255");
-assertEq(view[560], 205, "560th value should be 205");
-assertEq(view[561], 255, "561th value should be 255");
-assertEq(view[562], 50, "562th value should be 50");
-assertEq(view[563], 255, "563th value should be 255");
-assertEq(view[564], 179, "564th value should be 179");
-assertEq(view[565], 127, "565th value should be 127");
-assertEq(view[566], 76, "566th value should be 76");
-assertEq(view[567], 255, "567th value should be 255");
-assertEq(view[568], 179, "568th value should be 179");
-assertEq(view[569], 127, "569th value should be 127");
-assertEq(view[570], 76, "570th value should be 76");
-assertEq(view[571], 255, "571th value should be 255");
-assertEq(view[572], 154, "572th value should be 154");
-assertEq(view[573], 255, "573th value should be 255");
-assertEq(view[574], 101, "574th value should be 101");
-assertEq(view[575], 255, "575th value should be 255");
-assertEq(view[576], 103, "576th value should be 103");
-assertEq(view[577], 255, "577th value should be 255");
-assertEq(view[578], 152, "578th value should be 152");
-assertEq(view[579], 255, "579th value should be 255");
-assertEq(view[580], 0, "580th value should be 0");
-assertEq(view[581], 0, "581th value should be 0");
-assertEq(view[582], 0, "582th value should be 0");
-assertEq(view[583], 255, "583th value should be 255");
-assertEq(view[584], 0, "584th value should be 0");
-assertEq(view[585], 0, "585th value should be 0");
-assertEq(view[586], 0, "586th value should be 0");
-assertEq(view[587], 255, "587th value should be 255");
-assertEq(view[588], 0, "588th value should be 0");
-assertEq(view[589], 0, "589th value should be 0");
-assertEq(view[590], 0, "590th value should be 0");
-assertEq(view[591], 255, "591th value should be 255");
-assertEq(view[592], 0, "592th value should be 0");
-assertEq(view[593], 0, "593th value should be 0");
-assertEq(view[594], 0, "594th value should be 0");
-assertEq(view[595], 255, "595th value should be 255");
-assertEq(view[596], 0, "596th value should be 0");
-assertEq(view[597], 0, "597th value should be 0");
-assertEq(view[598], 0, "598th value should be 0");
-assertEq(view[599], 255, "599th value should be 255");
-assertEq(view[600], 0, "600th value should be 0");
-assertEq(view[601], 0, "601th value should be 0");
-assertEq(view[602], 0, "602th value should be 0");
-assertEq(view[603], 255, "603th value should be 255");
-assertEq(view[604], 0, "604th value should be 0");
-assertEq(view[605], 0, "605th value should be 0");
-assertEq(view[606], 0, "606th value should be 0");
-assertEq(view[607], 255, "607th value should be 255");
-assertEq(view[608], 0, "608th value should be 0");
-assertEq(view[609], 0, "609th value should be 0");
-assertEq(view[610], 0, "610th value should be 0");
-assertEq(view[611], 255, "611th value should be 255");
-assertEq(view[612], 0, "612th value should be 0");
-assertEq(view[613], 0, "613th value should be 0");
-assertEq(view[614], 0, "614th value should be 0");
-assertEq(view[615], 255, "615th value should be 255");
-assertEq(view[616], 0, "616th value should be 0");
-assertEq(view[617], 0, "617th value should be 0");
-assertEq(view[618], 0, "618th value should be 0");
-assertEq(view[619], 255, "619th value should be 255");
-assertEq(view[620], 0, "620th value should be 0");
-assertEq(view[621], 0, "621th value should be 0");
-assertEq(view[622], 0, "622th value should be 0");
-assertEq(view[623], 255, "623th value should be 255");
-assertEq(view[624], 0, "624th value should be 0");
-assertEq(view[625], 0, "625th value should be 0");
-assertEq(view[626], 0, "626th value should be 0");
-assertEq(view[627], 255, "627th value should be 255");
-assertEq(view[628], 154, "628th value should be 154");
-assertEq(view[629], 255, "629th value should be 255");
-assertEq(view[630], 101, "630th value should be 101");
-assertEq(view[631], 255, "631th value should be 255");
-assertEq(view[632], 205, "632th value should be 205");
-assertEq(view[633], 255, "633th value should be 255");
-assertEq(view[634], 50, "634th value should be 50");
-assertEq(view[635], 255, "635th value should be 255");
-assertEq(view[636], 205, "636th value should be 205");
-assertEq(view[637], 255, "637th value should be 255");
-assertEq(view[638], 50, "638th value should be 50");
-assertEq(view[639], 255, "639th value should be 255");
-assertEq(view[640], 179, "640th value should be 179");
-assertEq(view[641], 127, "641th value should be 127");
-assertEq(view[642], 76, "642th value should be 76");
-assertEq(view[643], 255, "643th value should be 255");
-assertEq(view[644], 179, "644th value should be 179");
-assertEq(view[645], 127, "645th value should be 127");
-assertEq(view[646], 76, "646th value should be 76");
-assertEq(view[647], 255, "647th value should be 255");
-assertEq(view[648], 154, "648th value should be 154");
-assertEq(view[649], 255, "649th value should be 255");
-assertEq(view[650], 101, "650th value should be 101");
-assertEq(view[651], 255, "651th value should be 255");
-assertEq(view[652], 128, "652th value should be 128");
-assertEq(view[653], 127, "653th value should be 127");
-assertEq(view[654], 127, "654th value should be 127");
-assertEq(view[655], 255, "655th value should be 255");
-assertEq(view[656], 52, "656th value should be 52");
-assertEq(view[657], 255, "657th value should be 255");
-assertEq(view[658], 203, "658th value should be 203");
-assertEq(view[659], 255, "659th value should be 255");
-assertEq(view[660], 0, "660th value should be 0");
-assertEq(view[661], 0, "661th value should be 0");
-assertEq(view[662], 0, "662th value should be 0");
-assertEq(view[663], 255, "663th value should be 255");
-assertEq(view[664], 0, "664th value should be 0");
-assertEq(view[665], 0, "665th value should be 0");
-assertEq(view[666], 0, "666th value should be 0");
-assertEq(view[667], 255, "667th value should be 255");
-assertEq(view[668], 0, "668th value should be 0");
-assertEq(view[669], 0, "669th value should be 0");
-assertEq(view[670], 0, "670th value should be 0");
-assertEq(view[671], 255, "671th value should be 255");
-assertEq(view[672], 0, "672th value should be 0");
-assertEq(view[673], 0, "673th value should be 0");
-assertEq(view[674], 0, "674th value should be 0");
-assertEq(view[675], 255, "675th value should be 255");
-assertEq(view[676], 0, "676th value should be 0");
-assertEq(view[677], 0, "677th value should be 0");
-assertEq(view[678], 0, "678th value should be 0");
-assertEq(view[679], 255, "679th value should be 255");
-assertEq(view[680], 0, "680th value should be 0");
-assertEq(view[681], 0, "681th value should be 0");
-assertEq(view[682], 0, "682th value should be 0");
-assertEq(view[683], 255, "683th value should be 255");
-assertEq(view[684], 0, "684th value should be 0");
-assertEq(view[685], 0, "685th value should be 0");
-assertEq(view[686], 0, "686th value should be 0");
-assertEq(view[687], 255, "687th value should be 255");
-assertEq(view[688], 0, "688th value should be 0");
-assertEq(view[689], 0, "689th value should be 0");
-assertEq(view[690], 0, "690th value should be 0");
-assertEq(view[691], 255, "691th value should be 255");
-assertEq(view[692], 0, "692th value should be 0");
-assertEq(view[693], 0, "693th value should be 0");
-assertEq(view[694], 0, "694th value should be 0");
-assertEq(view[695], 255, "695th value should be 255");
-assertEq(view[696], 0, "696th value should be 0");
-assertEq(view[697], 0, "697th value should be 0");
-assertEq(view[698], 0, "698th value should be 0");
-assertEq(view[699], 255, "699th value should be 255");
-assertEq(view[700], 0, "700th value should be 0");
-assertEq(view[701], 0, "701th value should be 0");
-assertEq(view[702], 0, "702th value should be 0");
-assertEq(view[703], 255, "703th value should be 255");
-assertEq(view[704], 0, "704th value should be 0");
-assertEq(view[705], 0, "705th value should be 0");
-assertEq(view[706], 0, "706th value should be 0");
-assertEq(view[707], 255, "707th value should be 255");
-assertEq(view[708], 154, "708th value should be 154");
-assertEq(view[709], 255, "709th value should be 255");
-assertEq(view[710], 101, "710th value should be 101");
-assertEq(view[711], 255, "711th value should be 255");
-assertEq(view[712], 179, "712th value should be 179");
-assertEq(view[713], 127, "713th value should be 127");
-assertEq(view[714], 76, "714th value should be 76");
-assertEq(view[715], 255, "715th value should be 255");
-assertEq(view[716], 205, "716th value should be 205");
-assertEq(view[717], 255, "717th value should be 255");
-assertEq(view[718], 50, "718th value should be 50");
-assertEq(view[719], 255, "719th value should be 255");
-assertEq(view[720], 154, "720th value should be 154");
-assertEq(view[721], 255, "721th value should be 255");
-assertEq(view[722], 101, "722th value should be 101");
-assertEq(view[723], 255, "723th value should be 255");
-assertEq(view[724], 52, "724th value should be 52");
-assertEq(view[725], 255, "725th value should be 255");
-assertEq(view[726], 203, "726th value should be 203");
-assertEq(view[727], 255, "727th value should be 255");
-assertEq(view[728], 128, "728th value should be 128");
-assertEq(view[729], 127, "729th value should be 127");
-assertEq(view[730], 127, "730th value should be 127");
-assertEq(view[731], 255, "731th value should be 255");
-assertEq(view[732], 78, "732th value should be 78");
-assertEq(view[733], 127, "733th value should be 127");
-assertEq(view[734], 178, "734th value should be 178");
-assertEq(view[735], 255, "735th value should be 255");
-assertEq(view[736], 0, "736th value should be 0");
-assertEq(view[737], 0, "737th value should be 0");
-assertEq(view[738], 0, "738th value should be 0");
-assertEq(view[739], 255, "739th value should be 255");
-assertEq(view[740], 0, "740th value should be 0");
-assertEq(view[741], 0, "741th value should be 0");
-assertEq(view[742], 0, "742th value should be 0");
-assertEq(view[743], 255, "743th value should be 255");
-assertEq(view[744], 0, "744th value should be 0");
-assertEq(view[745], 0, "745th value should be 0");
-assertEq(view[746], 0, "746th value should be 0");
-assertEq(view[747], 255, "747th value should be 255");
-assertEq(view[748], 0, "748th value should be 0");
-assertEq(view[749], 0, "749th value should be 0");
-assertEq(view[750], 0, "750th value should be 0");
-assertEq(view[751], 255, "751th value should be 255");
-assertEq(view[752], 0, "752th value should be 0");
-assertEq(view[753], 0, "753th value should be 0");
-assertEq(view[754], 0, "754th value should be 0");
-assertEq(view[755], 255, "755th value should be 255");
-assertEq(view[756], 0, "756th value should be 0");
-assertEq(view[757], 0, "757th value should be 0");
-assertEq(view[758], 0, "758th value should be 0");
-assertEq(view[759], 255, "759th value should be 255");
-assertEq(view[760], 0, "760th value should be 0");
-assertEq(view[761], 0, "761th value should be 0");
-assertEq(view[762], 0, "762th value should be 0");
-assertEq(view[763], 255, "763th value should be 255");
-assertEq(view[764], 0, "764th value should be 0");
-assertEq(view[765], 0, "765th value should be 0");
-assertEq(view[766], 0, "766th value should be 0");
-assertEq(view[767], 255, "767th value should be 255");
-assertEq(view[768], 0, "768th value should be 0");
-assertEq(view[769], 0, "769th value should be 0");
-assertEq(view[770], 0, "770th value should be 0");
-assertEq(view[771], 255, "771th value should be 255");
-assertEq(view[772], 0, "772th value should be 0");
-assertEq(view[773], 0, "773th value should be 0");
-assertEq(view[774], 0, "774th value should be 0");
-assertEq(view[775], 255, "775th value should be 255");
-assertEq(view[776], 0, "776th value should be 0");
-assertEq(view[777], 0, "777th value should be 0");
-assertEq(view[778], 0, "778th value should be 0");
-assertEq(view[779], 255, "779th value should be 255");
-assertEq(view[780], 0, "780th value should be 0");
-assertEq(view[781], 0, "781th value should be 0");
-assertEq(view[782], 0, "782th value should be 0");
-assertEq(view[783], 255, "783th value should be 255");
-assertEq(view[784], 78, "784th value should be 78");
-assertEq(view[785], 127, "785th value should be 127");
-assertEq(view[786], 178, "786th value should be 178");
-assertEq(view[787], 255, "787th value should be 255");
-assertEq(view[788], 154, "788th value should be 154");
-assertEq(view[789], 255, "789th value should be 255");
-assertEq(view[790], 101, "790th value should be 101");
-assertEq(view[791], 255, "791th value should be 255");
-assertEq(view[792], 179, "792th value should be 179");
-assertEq(view[793], 127, "793th value should be 127");
-assertEq(view[794], 76, "794th value should be 76");
-assertEq(view[795], 255, "795th value should be 255");
-assertEq(view[796], 205, "796th value should be 205");
-assertEq(view[797], 255, "797th value should be 255");
-assertEq(view[798], 50, "798th value should be 50");
-assertEq(view[799], 255, "799th value should be 255");
-assertEq(view[800], 128, "800th value should be 128");
-assertEq(view[801], 127, "801th value should be 127");
-assertEq(view[802], 127, "802th value should be 127");
-assertEq(view[803], 255, "803th value should be 255");
-assertEq(view[804], 0, "804th value should be 0");
-assertEq(view[805], 0, "805th value should be 0");
-assertEq(view[806], 0, "806th value should be 0");
-assertEq(view[807], 255, "807th value should be 255");
-assertEq(view[808], 26, "808th value should be 26");
-assertEq(view[809], 127, "809th value should be 127");
-assertEq(view[810], 229, "810th value should be 229");
-assertEq(view[811], 255, "811th value should be 255");
-assertEq(view[812], 0, "812th value should be 0");
-assertEq(view[813], 0, "813th value should be 0");
-assertEq(view[814], 0, "814th value should be 0");
-assertEq(view[815], 255, "815th value should be 255");
-assertEq(view[816], 0, "816th value should be 0");
-assertEq(view[817], 0, "817th value should be 0");
-assertEq(view[818], 0, "818th value should be 0");
-assertEq(view[819], 255, "819th value should be 255");
-assertEq(view[820], 0, "820th value should be 0");
-assertEq(view[821], 0, "821th value should be 0");
-assertEq(view[822], 0, "822th value should be 0");
-assertEq(view[823], 255, "823th value should be 255");
-assertEq(view[824], 0, "824th value should be 0");
-assertEq(view[825], 0, "825th value should be 0");
-assertEq(view[826], 0, "826th value should be 0");
-assertEq(view[827], 255, "827th value should be 255");
-assertEq(view[828], 0, "828th value should be 0");
-assertEq(view[829], 0, "829th value should be 0");
-assertEq(view[830], 0, "830th value should be 0");
-assertEq(view[831], 255, "831th value should be 255");
-assertEq(view[832], 0, "832th value should be 0");
-assertEq(view[833], 0, "833th value should be 0");
-assertEq(view[834], 0, "834th value should be 0");
-assertEq(view[835], 255, "835th value should be 255");
-assertEq(view[836], 0, "836th value should be 0");
-assertEq(view[837], 0, "837th value should be 0");
-assertEq(view[838], 0, "838th value should be 0");
-assertEq(view[839], 255, "839th value should be 255");
-assertEq(view[840], 0, "840th value should be 0");
-assertEq(view[841], 0, "841th value should be 0");
-assertEq(view[842], 0, "842th value should be 0");
-assertEq(view[843], 255, "843th value should be 255");
-assertEq(view[844], 0, "844th value should be 0");
-assertEq(view[845], 0, "845th value should be 0");
-assertEq(view[846], 0, "846th value should be 0");
-assertEq(view[847], 255, "847th value should be 255");
-assertEq(view[848], 0, "848th value should be 0");
-assertEq(view[849], 0, "849th value should be 0");
-assertEq(view[850], 0, "850th value should be 0");
-assertEq(view[851], 255, "851th value should be 255");
-assertEq(view[852], 0, "852th value should be 0");
-assertEq(view[853], 0, "853th value should be 0");
-assertEq(view[854], 0, "854th value should be 0");
-assertEq(view[855], 255, "855th value should be 255");
-assertEq(view[856], 0, "856th value should be 0");
-assertEq(view[857], 0, "857th value should be 0");
-assertEq(view[858], 0, "858th value should be 0");
-assertEq(view[859], 255, "859th value should be 255");
-assertEq(view[860], 0, "860th value should be 0");
-assertEq(view[861], 0, "861th value should be 0");
-assertEq(view[862], 0, "862th value should be 0");
-assertEq(view[863], 255, "863th value should be 255");
-assertEq(view[864], 103, "864th value should be 103");
-assertEq(view[865], 255, "865th value should be 255");
-assertEq(view[866], 152, "866th value should be 152");
-assertEq(view[867], 255, "867th value should be 255");
-assertEq(view[868], 154, "868th value should be 154");
-assertEq(view[869], 255, "869th value should be 255");
-assertEq(view[870], 101, "870th value should be 101");
-assertEq(view[871], 255, "871th value should be 255");
-assertEq(view[872], 179, "872th value should be 179");
-assertEq(view[873], 127, "873th value should be 127");
-assertEq(view[874], 76, "874th value should be 76");
-assertEq(view[875], 255, "875th value should be 255");
-assertEq(view[876], 205, "876th value should be 205");
-assertEq(view[877], 255, "877th value should be 255");
-assertEq(view[878], 50, "878th value should be 50");
-assertEq(view[879], 255, "879th value should be 255");
-assertEq(view[880], 179, "880th value should be 179");
-assertEq(view[881], 127, "881th value should be 127");
-assertEq(view[882], 76, "882th value should be 76");
-assertEq(view[883], 255, "883th value should be 255");
-assertEq(view[884], 179, "884th value should be 179");
-assertEq(view[885], 127, "885th value should be 127");
-assertEq(view[886], 76, "886th value should be 76");
-assertEq(view[887], 255, "887th value should be 255");
-assertEq(view[888], 128, "888th value should be 128");
-assertEq(view[889], 127, "889th value should be 127");
-assertEq(view[890], 127, "890th value should be 127");
-assertEq(view[891], 255, "891th value should be 255");
-assertEq(view[892], 103, "892th value should be 103");
-assertEq(view[893], 255, "893th value should be 255");
-assertEq(view[894], 152, "894th value should be 152");
-assertEq(view[895], 255, "895th value should be 255");
-assertEq(view[896], 26, "896th value should be 26");
-assertEq(view[897], 127, "897th value should be 127");
-assertEq(view[898], 229, "898th value should be 229");
-assertEq(view[899], 255, "899th value should be 255");
-assertEq(view[900], 0, "900th value should be 0");
-assertEq(view[901], 0, "901th value should be 0");
-assertEq(view[902], 0, "902th value should be 0");
-assertEq(view[903], 255, "903th value should be 255");
-assertEq(view[904], 0, "904th value should be 0");
-assertEq(view[905], 0, "905th value should be 0");
-assertEq(view[906], 0, "906th value should be 0");
-assertEq(view[907], 255, "907th value should be 255");
-assertEq(view[908], 0, "908th value should be 0");
-assertEq(view[909], 0, "909th value should be 0");
-assertEq(view[910], 0, "910th value should be 0");
-assertEq(view[911], 255, "911th value should be 255");
-assertEq(view[912], 0, "912th value should be 0");
-assertEq(view[913], 0, "913th value should be 0");
-assertEq(view[914], 0, "914th value should be 0");
-assertEq(view[915], 255, "915th value should be 255");
-assertEq(view[916], 0, "916th value should be 0");
-assertEq(view[917], 0, "917th value should be 0");
-assertEq(view[918], 0, "918th value should be 0");
-assertEq(view[919], 255, "919th value should be 255");
-assertEq(view[920], 0, "920th value should be 0");
-assertEq(view[921], 0, "921th value should be 0");
-assertEq(view[922], 0, "922th value should be 0");
-assertEq(view[923], 255, "923th value should be 255");
-assertEq(view[924], 0, "924th value should be 0");
-assertEq(view[925], 0, "925th value should be 0");
-assertEq(view[926], 0, "926th value should be 0");
-assertEq(view[927], 255, "927th value should be 255");
-assertEq(view[928], 0, "928th value should be 0");
-assertEq(view[929], 0, "929th value should be 0");
-assertEq(view[930], 0, "930th value should be 0");
-assertEq(view[931], 255, "931th value should be 255");
-assertEq(view[932], 0, "932th value should be 0");
-assertEq(view[933], 0, "933th value should be 0");
-assertEq(view[934], 0, "934th value should be 0");
-assertEq(view[935], 255, "935th value should be 255");
-assertEq(view[936], 0, "936th value should be 0");
-assertEq(view[937], 0, "937th value should be 0");
-assertEq(view[938], 0, "938th value should be 0");
-assertEq(view[939], 255, "939th value should be 255");
-assertEq(view[940], 0, "940th value should be 0");
-assertEq(view[941], 0, "941th value should be 0");
-assertEq(view[942], 0, "942th value should be 0");
-assertEq(view[943], 255, "943th value should be 255");
-assertEq(view[944], 0, "944th value should be 0");
-assertEq(view[945], 0, "945th value should be 0");
-assertEq(view[946], 0, "946th value should be 0");
-assertEq(view[947], 255, "947th value should be 255");
-assertEq(view[948], 154, "948th value should be 154");
-assertEq(view[949], 255, "949th value should be 255");
-assertEq(view[950], 101, "950th value should be 101");
-assertEq(view[951], 255, "951th value should be 255");
-assertEq(view[952], 179, "952th value should be 179");
-assertEq(view[953], 127, "953th value should be 127");
-assertEq(view[954], 76, "954th value should be 76");
-assertEq(view[955], 255, "955th value should be 255");
-assertEq(view[956], 205, "956th value should be 205");
-assertEq(view[957], 255, "957th value should be 255");
-assertEq(view[958], 50, "958th value should be 50");
-assertEq(view[959], 255, "959th value should be 255");
-assertEq(view[960], 179, "960th value should be 179");
-assertEq(view[961], 127, "961th value should be 127");
-assertEq(view[962], 76, "962th value should be 76");
-assertEq(view[963], 255, "963th value should be 255");
-assertEq(view[964], 179, "964th value should be 179");
-assertEq(view[965], 127, "965th value should be 127");
-assertEq(view[966], 76, "966th value should be 76");
-assertEq(view[967], 255, "967th value should be 255");
-assertEq(view[968], 179, "968th value should be 179");
-assertEq(view[969], 127, "969th value should be 127");
-assertEq(view[970], 76, "970th value should be 76");
-assertEq(view[971], 255, "971th value should be 255");
-assertEq(view[972], 154, "972th value should be 154");
-assertEq(view[973], 255, "973th value should be 255");
-assertEq(view[974], 101, "974th value should be 101");
-assertEq(view[975], 255, "975th value should be 255");
-assertEq(view[976], 103, "976th value should be 103");
-assertEq(view[977], 255, "977th value should be 255");
-assertEq(view[978], 152, "978th value should be 152");
-assertEq(view[979], 255, "979th value should be 255");
-assertEq(view[980], 0, "980th value should be 0");
-assertEq(view[981], 0, "981th value should be 0");
-assertEq(view[982], 0, "982th value should be 0");
-assertEq(view[983], 255, "983th value should be 255");
-assertEq(view[984], 0, "984th value should be 0");
-assertEq(view[985], 0, "985th value should be 0");
-assertEq(view[986], 0, "986th value should be 0");
-assertEq(view[987], 255, "987th value should be 255");
-assertEq(view[988], 0, "988th value should be 0");
-assertEq(view[989], 0, "989th value should be 0");
-assertEq(view[990], 0, "990th value should be 0");
-assertEq(view[991], 255, "991th value should be 255");
-assertEq(view[992], 0, "992th value should be 0");
-assertEq(view[993], 0, "993th value should be 0");
-assertEq(view[994], 0, "994th value should be 0");
-assertEq(view[995], 255, "995th value should be 255");
-assertEq(view[996], 0, "996th value should be 0");
-assertEq(view[997], 0, "997th value should be 0");
-assertEq(view[998], 0, "998th value should be 0");
-assertEq(view[999], 255, "999th value should be 255");
-assertEq(view[1000], 0, "1000th value should be 0");
-assertEq(view[1001], 0, "1001th value should be 0");
-assertEq(view[1002], 0, "1002th value should be 0");
-assertEq(view[1003], 255, "1003th value should be 255");
-assertEq(view[1004], 0, "1004th value should be 0");
-assertEq(view[1005], 0, "1005th value should be 0");
-assertEq(view[1006], 0, "1006th value should be 0");
-assertEq(view[1007], 255, "1007th value should be 255");
-assertEq(view[1008], 0, "1008th value should be 0");
-assertEq(view[1009], 0, "1009th value should be 0");
-assertEq(view[1010], 0, "1010th value should be 0");
-assertEq(view[1011], 255, "1011th value should be 255");
-assertEq(view[1012], 0, "1012th value should be 0");
-assertEq(view[1013], 0, "1013th value should be 0");
-assertEq(view[1014], 0, "1014th value should be 0");
-assertEq(view[1015], 255, "1015th value should be 255");
-assertEq(view[1016], 0, "1016th value should be 0");
-assertEq(view[1017], 0, "1017th value should be 0");
-assertEq(view[1018], 0, "1018th value should be 0");
-assertEq(view[1019], 255, "1019th value should be 255");
-assertEq(view[1020], 0, "1020th value should be 0");
-assertEq(view[1021], 0, "1021th value should be 0");
-assertEq(view[1022], 0, "1022th value should be 0");
-assertEq(view[1023], 255, "1023th value should be 255");
-assertEq(view[1024], 0, "1024th value should be 0");
-assertEq(view[1025], 0, "1025th value should be 0");
-assertEq(view[1026], 0, "1026th value should be 0");
-assertEq(view[1027], 255, "1027th value should be 255");
-assertEq(view[1028], 154, "1028th value should be 154");
-assertEq(view[1029], 255, "1029th value should be 255");
-assertEq(view[1030], 101, "1030th value should be 101");
-assertEq(view[1031], 255, "1031th value should be 255");
-assertEq(view[1032], 205, "1032th value should be 205");
-assertEq(view[1033], 255, "1033th value should be 255");
-assertEq(view[1034], 50, "1034th value should be 50");
-assertEq(view[1035], 255, "1035th value should be 255");
-assertEq(view[1036], 205, "1036th value should be 205");
-assertEq(view[1037], 255, "1037th value should be 255");
-assertEq(view[1038], 50, "1038th value should be 50");
-assertEq(view[1039], 255, "1039th value should be 255");
-assertEq(view[1040], 205, "1040th value should be 205");
-assertEq(view[1041], 255, "1041th value should be 255");
-assertEq(view[1042], 50, "1042th value should be 50");
-assertEq(view[1043], 255, "1043th value should be 255");
-assertEq(view[1044], 179, "1044th value should be 179");
-assertEq(view[1045], 127, "1045th value should be 127");
-assertEq(view[1046], 76, "1046th value should be 76");
-assertEq(view[1047], 255, "1047th value should be 255");
-assertEq(view[1048], 179, "1048th value should be 179");
-assertEq(view[1049], 127, "1049th value should be 127");
-assertEq(view[1050], 76, "1050th value should be 76");
-assertEq(view[1051], 255, "1051th value should be 255");
-assertEq(view[1052], 154, "1052th value should be 154");
-assertEq(view[1053], 255, "1053th value should be 255");
-assertEq(view[1054], 101, "1054th value should be 101");
-assertEq(view[1055], 255, "1055th value should be 255");
-assertEq(view[1056], 128, "1056th value should be 128");
-assertEq(view[1057], 127, "1057th value should be 127");
-assertEq(view[1058], 127, "1058th value should be 127");
-assertEq(view[1059], 255, "1059th value should be 255");
-assertEq(view[1060], 0, "1060th value should be 0");
-assertEq(view[1061], 0, "1061th value should be 0");
-assertEq(view[1062], 0, "1062th value should be 0");
-assertEq(view[1063], 255, "1063th value should be 255");
-assertEq(view[1064], 0, "1064th value should be 0");
-assertEq(view[1065], 0, "1065th value should be 0");
-assertEq(view[1066], 0, "1066th value should be 0");
-assertEq(view[1067], 255, "1067th value should be 255");
-assertEq(view[1068], 26, "1068th value should be 26");
-assertEq(view[1069], 127, "1069th value should be 127");
-assertEq(view[1070], 229, "1070th value should be 229");
-assertEq(view[1071], 255, "1071th value should be 255");
-assertEq(view[1072], 26, "1072th value should be 26");
-assertEq(view[1073], 127, "1073th value should be 127");
-assertEq(view[1074], 229, "1074th value should be 229");
-assertEq(view[1075], 255, "1075th value should be 255");
-assertEq(view[1076], 0, "1076th value should be 0");
-assertEq(view[1077], 0, "1077th value should be 0");
-assertEq(view[1078], 0, "1078th value should be 0");
-assertEq(view[1079], 255, "1079th value should be 255");
-assertEq(view[1080], 0, "1080th value should be 0");
-assertEq(view[1081], 0, "1081th value should be 0");
-assertEq(view[1082], 0, "1082th value should be 0");
-assertEq(view[1083], 255, "1083th value should be 255");
-assertEq(view[1084], 0, "1084th value should be 0");
-assertEq(view[1085], 0, "1085th value should be 0");
-assertEq(view[1086], 0, "1086th value should be 0");
-assertEq(view[1087], 255, "1087th value should be 255");
-assertEq(view[1088], 0, "1088th value should be 0");
-assertEq(view[1089], 0, "1089th value should be 0");
-assertEq(view[1090], 0, "1090th value should be 0");
-assertEq(view[1091], 255, "1091th value should be 255");
-assertEq(view[1092], 0, "1092th value should be 0");
-assertEq(view[1093], 0, "1093th value should be 0");
-assertEq(view[1094], 0, "1094th value should be 0");
-assertEq(view[1095], 255, "1095th value should be 255");
-assertEq(view[1096], 0, "1096th value should be 0");
-assertEq(view[1097], 0, "1097th value should be 0");
-assertEq(view[1098], 0, "1098th value should be 0");
-assertEq(view[1099], 255, "1099th value should be 255");
-assertEq(view[1100], 0, "1100th value should be 0");
-assertEq(view[1101], 0, "1101th value should be 0");
-assertEq(view[1102], 0, "1102th value should be 0");
-assertEq(view[1103], 255, "1103th value should be 255");
-assertEq(view[1104], 0, "1104th value should be 0");
-assertEq(view[1105], 0, "1105th value should be 0");
-assertEq(view[1106], 0, "1106th value should be 0");
-assertEq(view[1107], 255, "1107th value should be 255");
-assertEq(view[1108], 154, "1108th value should be 154");
-assertEq(view[1109], 255, "1109th value should be 255");
-assertEq(view[1110], 101, "1110th value should be 101");
-assertEq(view[1111], 255, "1111th value should be 255");
-assertEq(view[1112], 205, "1112th value should be 205");
-assertEq(view[1113], 255, "1113th value should be 255");
-assertEq(view[1114], 50, "1114th value should be 50");
-assertEq(view[1115], 255, "1115th value should be 255");
-assertEq(view[1116], 205, "1116th value should be 205");
-assertEq(view[1117], 255, "1117th value should be 255");
-assertEq(view[1118], 50, "1118th value should be 50");
-assertEq(view[1119], 255, "1119th value should be 255");
-assertEq(view[1120], 205, "1120th value should be 205");
-assertEq(view[1121], 255, "1121th value should be 255");
-assertEq(view[1122], 50, "1122th value should be 50");
-assertEq(view[1123], 255, "1123th value should be 255");
-assertEq(view[1124], 205, "1124th value should be 205");
-assertEq(view[1125], 255, "1125th value should be 255");
-assertEq(view[1126], 50, "1126th value should be 50");
-assertEq(view[1127], 255, "1127th value should be 255");
-assertEq(view[1128], 205, "1128th value should be 205");
-assertEq(view[1129], 255, "1129th value should be 255");
-assertEq(view[1130], 50, "1130th value should be 50");
-assertEq(view[1131], 255, "1131th value should be 255");
-assertEq(view[1132], 179, "1132th value should be 179");
-assertEq(view[1133], 127, "1133th value should be 127");
-assertEq(view[1134], 76, "1134th value should be 76");
-assertEq(view[1135], 255, "1135th value should be 255");
-assertEq(view[1136], 154, "1136th value should be 154");
-assertEq(view[1137], 255, "1137th value should be 255");
-assertEq(view[1138], 101, "1138th value should be 101");
-assertEq(view[1139], 255, "1139th value should be 255");
-assertEq(view[1140], 128, "1140th value should be 128");
-assertEq(view[1141], 127, "1141th value should be 127");
-assertEq(view[1142], 127, "1142th value should be 127");
-assertEq(view[1143], 255, "1143th value should be 255");
-assertEq(view[1144], 128, "1144th value should be 128");
-assertEq(view[1145], 127, "1145th value should be 127");
-assertEq(view[1146], 127, "1146th value should be 127");
-assertEq(view[1147], 255, "1147th value should be 255");
-assertEq(view[1148], 103, "1148th value should be 103");
-assertEq(view[1149], 255, "1149th value should be 255");
-assertEq(view[1150], 152, "1150th value should be 152");
-assertEq(view[1151], 255, "1151th value should be 255");
-assertEq(view[1152], 78, "1152th value should be 78");
-assertEq(view[1153], 127, "1153th value should be 127");
-assertEq(view[1154], 178, "1154th value should be 178");
-assertEq(view[1155], 255, "1155th value should be 255");
-assertEq(view[1156], 0, "1156th value should be 0");
-assertEq(view[1157], 0, "1157th value should be 0");
-assertEq(view[1158], 0, "1158th value should be 0");
-assertEq(view[1159], 255, "1159th value should be 255");
-assertEq(view[1160], 0, "1160th value should be 0");
-assertEq(view[1161], 0, "1161th value should be 0");
-assertEq(view[1162], 0, "1162th value should be 0");
-assertEq(view[1163], 255, "1163th value should be 255");
-assertEq(view[1164], 0, "1164th value should be 0");
-assertEq(view[1165], 0, "1165th value should be 0");
-assertEq(view[1166], 0, "1166th value should be 0");
-assertEq(view[1167], 255, "1167th value should be 255");
-assertEq(view[1168], 0, "1168th value should be 0");
-assertEq(view[1169], 0, "1169th value should be 0");
-assertEq(view[1170], 0, "1170th value should be 0");
-assertEq(view[1171], 255, "1171th value should be 255");
-assertEq(view[1172], 0, "1172th value should be 0");
-assertEq(view[1173], 0, "1173th value should be 0");
-assertEq(view[1174], 0, "1174th value should be 0");
-assertEq(view[1175], 255, "1175th value should be 255");
-assertEq(view[1176], 0, "1176th value should be 0");
-assertEq(view[1177], 0, "1177th value should be 0");
-assertEq(view[1178], 0, "1178th value should be 0");
-assertEq(view[1179], 255, "1179th value should be 255");
-assertEq(view[1180], 0, "1180th value should be 0");
-assertEq(view[1181], 0, "1181th value should be 0");
-assertEq(view[1182], 0, "1182th value should be 0");
-assertEq(view[1183], 255, "1183th value should be 255");
-assertEq(view[1184], 26, "1184th value should be 26");
-assertEq(view[1185], 127, "1185th value should be 127");
-assertEq(view[1186], 229, "1186th value should be 229");
-assertEq(view[1187], 255, "1187th value should be 255");
-assertEq(view[1188], 154, "1188th value should be 154");
-assertEq(view[1189], 255, "1189th value should be 255");
-assertEq(view[1190], 101, "1190th value should be 101");
-assertEq(view[1191], 255, "1191th value should be 255");
-assertEq(view[1192], 205, "1192th value should be 205");
-assertEq(view[1193], 255, "1193th value should be 255");
-assertEq(view[1194], 50, "1194th value should be 50");
-assertEq(view[1195], 255, "1195th value should be 255");
-assertEq(view[1196], 205, "1196th value should be 205");
-assertEq(view[1197], 255, "1197th value should be 255");
-assertEq(view[1198], 50, "1198th value should be 50");
-assertEq(view[1199], 255, "1199th value should be 255");
-assertEq(view[1200], 230, "1200th value should be 230");
-assertEq(view[1201], 127, "1201th value should be 127");
-assertEq(view[1202], 25, "1202th value should be 25");
-assertEq(view[1203], 255, "1203th value should be 255");
-assertEq(view[1204], 205, "1204th value should be 205");
-assertEq(view[1205], 255, "1205th value should be 255");
-assertEq(view[1206], 50, "1206th value should be 50");
-assertEq(view[1207], 255, "1207th value should be 255");
-assertEq(view[1208], 205, "1208th value should be 205");
-assertEq(view[1209], 255, "1209th value should be 255");
-assertEq(view[1210], 50, "1210th value should be 50");
-assertEq(view[1211], 255, "1211th value should be 255");
-assertEq(view[1212], 205, "1212th value should be 205");
-assertEq(view[1213], 255, "1213th value should be 255");
-assertEq(view[1214], 50, "1214th value should be 50");
-assertEq(view[1215], 255, "1215th value should be 255");
-assertEq(view[1216], 205, "1216th value should be 205");
-assertEq(view[1217], 255, "1217th value should be 255");
-assertEq(view[1218], 50, "1218th value should be 50");
-assertEq(view[1219], 255, "1219th value should be 255");
-assertEq(view[1220], 154, "1220th value should be 154");
-assertEq(view[1221], 255, "1221th value should be 255");
-assertEq(view[1222], 101, "1222th value should be 101");
-assertEq(view[1223], 255, "1223th value should be 255");
-assertEq(view[1224], 154, "1224th value should be 154");
-assertEq(view[1225], 255, "1225th value should be 255");
-assertEq(view[1226], 101, "1226th value should be 101");
-assertEq(view[1227], 255, "1227th value should be 255");
-assertEq(view[1228], 154, "1228th value should be 154");
-assertEq(view[1229], 255, "1229th value should be 255");
-assertEq(view[1230], 101, "1230th value should be 101");
-assertEq(view[1231], 255, "1231th value should be 255");
-assertEq(view[1232], 128, "1232th value should be 128");
-assertEq(view[1233], 127, "1233th value should be 127");
-assertEq(view[1234], 127, "1234th value should be 127");
-assertEq(view[1235], 255, "1235th value should be 255");
-assertEq(view[1236], 26, "1236th value should be 26");
-assertEq(view[1237], 127, "1237th value should be 127");
-assertEq(view[1238], 229, "1238th value should be 229");
-assertEq(view[1239], 255, "1239th value should be 255");
-assertEq(view[1240], 0, "1240th value should be 0");
-assertEq(view[1241], 0, "1241th value should be 0");
-assertEq(view[1242], 0, "1242th value should be 0");
-assertEq(view[1243], 255, "1243th value should be 255");
-assertEq(view[1244], 0, "1244th value should be 0");
-assertEq(view[1245], 0, "1245th value should be 0");
-assertEq(view[1246], 0, "1246th value should be 0");
-assertEq(view[1247], 255, "1247th value should be 255");
-assertEq(view[1248], 0, "1248th value should be 0");
-assertEq(view[1249], 0, "1249th value should be 0");
-assertEq(view[1250], 0, "1250th value should be 0");
-assertEq(view[1251], 255, "1251th value should be 255");
-assertEq(view[1252], 0, "1252th value should be 0");
-assertEq(view[1253], 0, "1253th value should be 0");
-assertEq(view[1254], 0, "1254th value should be 0");
-assertEq(view[1255], 255, "1255th value should be 255");
-assertEq(view[1256], 0, "1256th value should be 0");
-assertEq(view[1257], 0, "1257th value should be 0");
-assertEq(view[1258], 0, "1258th value should be 0");
-assertEq(view[1259], 255, "1259th value should be 255");
-assertEq(view[1260], 0, "1260th value should be 0");
-assertEq(view[1261], 0, "1261th value should be 0");
-assertEq(view[1262], 0, "1262th value should be 0");
-assertEq(view[1263], 255, "1263th value should be 255");
-assertEq(view[1264], 78, "1264th value should be 78");
-assertEq(view[1265], 127, "1265th value should be 127");
-assertEq(view[1266], 178, "1266th value should be 178");
-assertEq(view[1267], 255, "1267th value should be 255");
-assertEq(view[1268], 179, "1268th value should be 179");
-assertEq(view[1269], 127, "1269th value should be 127");
-assertEq(view[1270], 76, "1270th value should be 76");
-assertEq(view[1271], 255, "1271th value should be 255");
-assertEq(view[1272], 205, "1272th value should be 205");
-assertEq(view[1273], 255, "1273th value should be 255");
-assertEq(view[1274], 50, "1274th value should be 50");
-assertEq(view[1275], 255, "1275th value should be 255");
-assertEq(view[1276], 205, "1276th value should be 205");
-assertEq(view[1277], 255, "1277th value should be 255");
-assertEq(view[1278], 50, "1278th value should be 50");
-assertEq(view[1279], 255, "1279th value should be 255");
-assertEq(view[1280], 0, "1280th value should be 0");
-assertEq(view[1281], 0, "1281th value should be 0");
-assertEq(view[1282], 0, "1282th value should be 0");
-assertEq(view[1283], 255, "1283th value should be 255");
-assertEq(view[1284], 205, "1284th value should be 205");
-assertEq(view[1285], 255, "1285th value should be 255");
-assertEq(view[1286], 50, "1286th value should be 50");
-assertEq(view[1287], 255, "1287th value should be 255");
-assertEq(view[1288], 205, "1288th value should be 205");
-assertEq(view[1289], 255, "1289th value should be 255");
-assertEq(view[1290], 50, "1290th value should be 50");
-assertEq(view[1291], 255, "1291th value should be 255");
-assertEq(view[1292], 205, "1292th value should be 205");
-assertEq(view[1293], 255, "1293th value should be 255");
-assertEq(view[1294], 50, "1294th value should be 50");
-assertEq(view[1295], 255, "1295th value should be 255");
-assertEq(view[1296], 205, "1296th value should be 205");
-assertEq(view[1297], 255, "1297th value should be 255");
-assertEq(view[1298], 50, "1298th value should be 50");
-assertEq(view[1299], 255, "1299th value should be 255");
-assertEq(view[1300], 205, "1300th value should be 205");
-assertEq(view[1301], 255, "1301th value should be 255");
-assertEq(view[1302], 50, "1302th value should be 50");
-assertEq(view[1303], 255, "1303th value should be 255");
-assertEq(view[1304], 179, "1304th value should be 179");
-assertEq(view[1305], 127, "1305th value should be 127");
-assertEq(view[1306], 76, "1306th value should be 76");
-assertEq(view[1307], 255, "1307th value should be 255");
-assertEq(view[1308], 154, "1308th value should be 154");
-assertEq(view[1309], 255, "1309th value should be 255");
-assertEq(view[1310], 101, "1310th value should be 101");
-assertEq(view[1311], 255, "1311th value should be 255");
-assertEq(view[1312], 154, "1312th value should be 154");
-assertEq(view[1313], 255, "1313th value should be 255");
-assertEq(view[1314], 101, "1314th value should be 101");
-assertEq(view[1315], 255, "1315th value should be 255");
-assertEq(view[1316], 0, "1316th value should be 0");
-assertEq(view[1317], 0, "1317th value should be 0");
-assertEq(view[1318], 0, "1318th value should be 0");
-assertEq(view[1319], 255, "1319th value should be 255");
-assertEq(view[1320], 0, "1320th value should be 0");
-assertEq(view[1321], 0, "1321th value should be 0");
-assertEq(view[1322], 0, "1322th value should be 0");
-assertEq(view[1323], 255, "1323th value should be 255");
-assertEq(view[1324], 0, "1324th value should be 0");
-assertEq(view[1325], 0, "1325th value should be 0");
-assertEq(view[1326], 0, "1326th value should be 0");
-assertEq(view[1327], 255, "1327th value should be 255");
-assertEq(view[1328], 0, "1328th value should be 0");
-assertEq(view[1329], 0, "1329th value should be 0");
-assertEq(view[1330], 0, "1330th value should be 0");
-assertEq(view[1331], 255, "1331th value should be 255");
-assertEq(view[1332], 0, "1332th value should be 0");
-assertEq(view[1333], 0, "1333th value should be 0");
-assertEq(view[1334], 0, "1334th value should be 0");
-assertEq(view[1335], 255, "1335th value should be 255");
-assertEq(view[1336], 0, "1336th value should be 0");
-assertEq(view[1337], 0, "1337th value should be 0");
-assertEq(view[1338], 0, "1338th value should be 0");
-assertEq(view[1339], 255, "1339th value should be 255");
-assertEq(view[1340], 0, "1340th value should be 0");
-assertEq(view[1341], 0, "1341th value should be 0");
-assertEq(view[1342], 0, "1342th value should be 0");
-assertEq(view[1343], 255, "1343th value should be 255");
-assertEq(view[1344], 0, "1344th value should be 0");
-assertEq(view[1345], 0, "1345th value should be 0");
-assertEq(view[1346], 0, "1346th value should be 0");
-assertEq(view[1347], 255, "1347th value should be 255");
-assertEq(view[1348], 179, "1348th value should be 179");
-assertEq(view[1349], 127, "1349th value should be 127");
-assertEq(view[1350], 76, "1350th value should be 76");
-assertEq(view[1351], 255, "1351th value should be 255");
-assertEq(view[1352], 205, "1352th value should be 205");
-assertEq(view[1353], 255, "1353th value should be 255");
-assertEq(view[1354], 50, "1354th value should be 50");
-assertEq(view[1355], 255, "1355th value should be 255");
-assertEq(view[1356], 205, "1356th value should be 205");
-assertEq(view[1357], 255, "1357th value should be 255");
-assertEq(view[1358], 50, "1358th value should be 50");
-assertEq(view[1359], 255, "1359th value should be 255");
-assertEq(view[1360], 0, "1360th value should be 0");
-assertEq(view[1361], 0, "1361th value should be 0");
-assertEq(view[1362], 0, "1362th value should be 0");
-assertEq(view[1363], 255, "1363th value should be 255");
-assertEq(view[1364], 205, "1364th value should be 205");
-assertEq(view[1365], 255, "1365th value should be 255");
-assertEq(view[1366], 50, "1366th value should be 50");
-assertEq(view[1367], 255, "1367th value should be 255");
-assertEq(view[1368], 205, "1368th value should be 205");
-assertEq(view[1369], 255, "1369th value should be 255");
-assertEq(view[1370], 50, "1370th value should be 50");
-assertEq(view[1371], 255, "1371th value should be 255");
-assertEq(view[1372], 205, "1372th value should be 205");
-assertEq(view[1373], 255, "1373th value should be 255");
-assertEq(view[1374], 50, "1374th value should be 50");
-assertEq(view[1375], 255, "1375th value should be 255");
-assertEq(view[1376], 205, "1376th value should be 205");
-assertEq(view[1377], 255, "1377th value should be 255");
-assertEq(view[1378], 50, "1378th value should be 50");
-assertEq(view[1379], 255, "1379th value should be 255");
-assertEq(view[1380], 205, "1380th value should be 205");
-assertEq(view[1381], 255, "1381th value should be 255");
-assertEq(view[1382], 50, "1382th value should be 50");
-assertEq(view[1383], 255, "1383th value should be 255");
-assertEq(view[1384], 205, "1384th value should be 205");
-assertEq(view[1385], 255, "1385th value should be 255");
-assertEq(view[1386], 50, "1386th value should be 50");
-assertEq(view[1387], 255, "1387th value should be 255");
-assertEq(view[1388], 179, "1388th value should be 179");
-assertEq(view[1389], 127, "1389th value should be 127");
-assertEq(view[1390], 76, "1390th value should be 76");
-assertEq(view[1391], 255, "1391th value should be 255");
-assertEq(view[1392], 179, "1392th value should be 179");
-assertEq(view[1393], 127, "1393th value should be 127");
-assertEq(view[1394], 76, "1394th value should be 76");
-assertEq(view[1395], 255, "1395th value should be 255");
-assertEq(view[1396], 103, "1396th value should be 103");
-assertEq(view[1397], 255, "1397th value should be 255");
-assertEq(view[1398], 152, "1398th value should be 152");
-assertEq(view[1399], 255, "1399th value should be 255");
-assertEq(view[1400], 78, "1400th value should be 78");
-assertEq(view[1401], 127, "1401th value should be 127");
-assertEq(view[1402], 178, "1402th value should be 178");
-assertEq(view[1403], 255, "1403th value should be 255");
-assertEq(view[1404], 52, "1404th value should be 52");
-assertEq(view[1405], 255, "1405th value should be 255");
-assertEq(view[1406], 203, "1406th value should be 203");
-assertEq(view[1407], 255, "1407th value should be 255");
-assertEq(view[1408], 0, "1408th value should be 0");
-assertEq(view[1409], 0, "1409th value should be 0");
-assertEq(view[1410], 0, "1410th value should be 0");
-assertEq(view[1411], 255, "1411th value should be 255");
-assertEq(view[1412], 0, "1412th value should be 0");
-assertEq(view[1413], 0, "1413th value should be 0");
-assertEq(view[1414], 0, "1414th value should be 0");
-assertEq(view[1415], 255, "1415th value should be 255");
-assertEq(view[1416], 52, "1416th value should be 52");
-assertEq(view[1417], 255, "1417th value should be 255");
-assertEq(view[1418], 203, "1418th value should be 203");
-assertEq(view[1419], 255, "1419th value should be 255");
-assertEq(view[1420], 128, "1420th value should be 128");
-assertEq(view[1421], 127, "1421th value should be 127");
-assertEq(view[1422], 127, "1422th value should be 127");
-assertEq(view[1423], 255, "1423th value should be 255");
-assertEq(view[1424], 128, "1424th value should be 128");
-assertEq(view[1425], 127, "1425th value should be 127");
-assertEq(view[1426], 127, "1426th value should be 127");
-assertEq(view[1427], 255, "1427th value should be 255");
-assertEq(view[1428], 205, "1428th value should be 205");
-assertEq(view[1429], 255, "1429th value should be 255");
-assertEq(view[1430], 50, "1430th value should be 50");
-assertEq(view[1431], 255, "1431th value should be 255");
-assertEq(view[1432], 205, "1432th value should be 205");
-assertEq(view[1433], 255, "1433th value should be 255");
-assertEq(view[1434], 50, "1434th value should be 50");
-assertEq(view[1435], 255, "1435th value should be 255");
-assertEq(view[1436], 230, "1436th value should be 230");
-assertEq(view[1437], 127, "1437th value should be 127");
-assertEq(view[1438], 25, "1438th value should be 25");
-assertEq(view[1439], 255, "1439th value should be 255");
-assertEq(view[1440], 0, "1440th value should be 0");
-assertEq(view[1441], 0, "1441th value should be 0");
-assertEq(view[1442], 0, "1442th value should be 0");
-assertEq(view[1443], 255, "1443th value should be 255");
-assertEq(view[1444], 230, "1444th value should be 230");
-assertEq(view[1445], 127, "1445th value should be 127");
-assertEq(view[1446], 25, "1446th value should be 25");
-assertEq(view[1447], 255, "1447th value should be 255");
-assertEq(view[1448], 205, "1448th value should be 205");
-assertEq(view[1449], 255, "1449th value should be 255");
-assertEq(view[1450], 50, "1450th value should be 50");
-assertEq(view[1451], 255, "1451th value should be 255");
-assertEq(view[1452], 205, "1452th value should be 205");
-assertEq(view[1453], 255, "1453th value should be 255");
-assertEq(view[1454], 50, "1454th value should be 50");
-assertEq(view[1455], 255, "1455th value should be 255");
-assertEq(view[1456], 205, "1456th value should be 205");
-assertEq(view[1457], 255, "1457th value should be 255");
-assertEq(view[1458], 50, "1458th value should be 50");
-assertEq(view[1459], 255, "1459th value should be 255");
-assertEq(view[1460], 205, "1460th value should be 205");
-assertEq(view[1461], 255, "1461th value should be 255");
-assertEq(view[1462], 50, "1462th value should be 50");
-assertEq(view[1463], 255, "1463th value should be 255");
-assertEq(view[1464], 205, "1464th value should be 205");
-assertEq(view[1465], 255, "1465th value should be 255");
-assertEq(view[1466], 50, "1466th value should be 50");
-assertEq(view[1467], 255, "1467th value should be 255");
-assertEq(view[1468], 179, "1468th value should be 179");
-assertEq(view[1469], 127, "1469th value should be 127");
-assertEq(view[1470], 76, "1470th value should be 76");
-assertEq(view[1471], 255, "1471th value should be 255");
-assertEq(view[1472], 179, "1472th value should be 179");
-assertEq(view[1473], 127, "1473th value should be 127");
-assertEq(view[1474], 76, "1474th value should be 76");
-assertEq(view[1475], 255, "1475th value should be 255");
-assertEq(view[1476], 179, "1476th value should be 179");
-assertEq(view[1477], 127, "1477th value should be 127");
-assertEq(view[1478], 76, "1478th value should be 76");
-assertEq(view[1479], 255, "1479th value should be 255");
-assertEq(view[1480], 128, "1480th value should be 128");
-assertEq(view[1481], 127, "1481th value should be 127");
-assertEq(view[1482], 127, "1482th value should be 127");
-assertEq(view[1483], 255, "1483th value should be 255");
-assertEq(view[1484], 103, "1484th value should be 103");
-assertEq(view[1485], 255, "1485th value should be 255");
-assertEq(view[1486], 152, "1486th value should be 152");
-assertEq(view[1487], 255, "1487th value should be 255");
-assertEq(view[1488], 0, "1488th value should be 0");
-assertEq(view[1489], 0, "1489th value should be 0");
-assertEq(view[1490], 0, "1490th value should be 0");
-assertEq(view[1491], 255, "1491th value should be 255");
-assertEq(view[1492], 0, "1492th value should be 0");
-assertEq(view[1493], 0, "1493th value should be 0");
-assertEq(view[1494], 0, "1494th value should be 0");
-assertEq(view[1495], 255, "1495th value should be 255");
-assertEq(view[1496], 128, "1496th value should be 128");
-assertEq(view[1497], 127, "1497th value should be 127");
-assertEq(view[1498], 127, "1498th value should be 127");
-assertEq(view[1499], 255, "1499th value should be 255");
-assertEq(view[1500], 154, "1500th value should be 154");
-assertEq(view[1501], 255, "1501th value should be 255");
-assertEq(view[1502], 101, "1502th value should be 101");
-assertEq(view[1503], 255, "1503th value should be 255");
-assertEq(view[1504], 179, "1504th value should be 179");
-assertEq(view[1505], 127, "1505th value should be 127");
-assertEq(view[1506], 76, "1506th value should be 76");
-assertEq(view[1507], 255, "1507th value should be 255");
-assertEq(view[1508], 205, "1508th value should be 205");
-assertEq(view[1509], 255, "1509th value should be 255");
-assertEq(view[1510], 50, "1510th value should be 50");
-assertEq(view[1511], 255, "1511th value should be 255");
-assertEq(view[1512], 205, "1512th value should be 205");
-assertEq(view[1513], 255, "1513th value should be 255");
-assertEq(view[1514], 50, "1514th value should be 50");
-assertEq(view[1515], 255, "1515th value should be 255");
-assertEq(view[1516], 230, "1516th value should be 230");
-assertEq(view[1517], 127, "1517th value should be 127");
-assertEq(view[1518], 25, "1518th value should be 25");
-assertEq(view[1519], 255, "1519th value should be 255");
-assertEq(view[1520], 0, "1520th value should be 0");
-assertEq(view[1521], 0, "1521th value should be 0");
-assertEq(view[1522], 0, "1522th value should be 0");
-assertEq(view[1523], 255, "1523th value should be 255");
-assertEq(view[1524], 230, "1524th value should be 230");
-assertEq(view[1525], 127, "1525th value should be 127");
-assertEq(view[1526], 25, "1526th value should be 25");
-assertEq(view[1527], 255, "1527th value should be 255");
-assertEq(view[1528], 230, "1528th value should be 230");
-assertEq(view[1529], 127, "1529th value should be 127");
-assertEq(view[1530], 25, "1530th value should be 25");
-assertEq(view[1531], 255, "1531th value should be 255");
-assertEq(view[1532], 205, "1532th value should be 205");
-assertEq(view[1533], 255, "1533th value should be 255");
-assertEq(view[1534], 50, "1534th value should be 50");
-assertEq(view[1535], 255, "1535th value should be 255");
-assertEq(view[1536], 205, "1536th value should be 205");
-assertEq(view[1537], 255, "1537th value should be 255");
-assertEq(view[1538], 50, "1538th value should be 50");
-assertEq(view[1539], 255, "1539th value should be 255");
-assertEq(view[1540], 205, "1540th value should be 205");
-assertEq(view[1541], 255, "1541th value should be 255");
-assertEq(view[1542], 50, "1542th value should be 50");
-assertEq(view[1543], 255, "1543th value should be 255");
-assertEq(view[1544], 205, "1544th value should be 205");
-assertEq(view[1545], 255, "1545th value should be 255");
-assertEq(view[1546], 50, "1546th value should be 50");
-assertEq(view[1547], 255, "1547th value should be 255");
-assertEq(view[1548], 205, "1548th value should be 205");
-assertEq(view[1549], 255, "1549th value should be 255");
-assertEq(view[1550], 50, "1550th value should be 50");
-assertEq(view[1551], 255, "1551th value should be 255");
-assertEq(view[1552], 179, "1552th value should be 179");
-assertEq(view[1553], 127, "1553th value should be 127");
-assertEq(view[1554], 76, "1554th value should be 76");
-assertEq(view[1555], 255, "1555th value should be 255");
-assertEq(view[1556], 179, "1556th value should be 179");
-assertEq(view[1557], 127, "1557th value should be 127");
-assertEq(view[1558], 76, "1558th value should be 76");
-assertEq(view[1559], 255, "1559th value should be 255");
-assertEq(view[1560], 179, "1560th value should be 179");
-assertEq(view[1561], 127, "1561th value should be 127");
-assertEq(view[1562], 76, "1562th value should be 76");
-assertEq(view[1563], 255, "1563th value should be 255");
-assertEq(view[1564], 154, "1564th value should be 154");
-assertEq(view[1565], 255, "1565th value should be 255");
-assertEq(view[1566], 101, "1566th value should be 101");
-assertEq(view[1567], 255, "1567th value should be 255");
-assertEq(view[1568], 26, "1568th value should be 26");
-assertEq(view[1569], 127, "1569th value should be 127");
-assertEq(view[1570], 229, "1570th value should be 229");
-assertEq(view[1571], 255, "1571th value should be 255");
-assertEq(view[1572], 0, "1572th value should be 0");
-assertEq(view[1573], 0, "1573th value should be 0");
-assertEq(view[1574], 0, "1574th value should be 0");
-assertEq(view[1575], 255, "1575th value should be 255");
-assertEq(view[1576], 154, "1576th value should be 154");
-assertEq(view[1577], 255, "1577th value should be 255");
-assertEq(view[1578], 101, "1578th value should be 101");
-assertEq(view[1579], 255, "1579th value should be 255");
-assertEq(view[1580], 179, "1580th value should be 179");
-assertEq(view[1581], 127, "1581th value should be 127");
-assertEq(view[1582], 76, "1582th value should be 76");
-assertEq(view[1583], 255, "1583th value should be 255");
-assertEq(view[1584], 205, "1584th value should be 205");
-assertEq(view[1585], 255, "1585th value should be 255");
-assertEq(view[1586], 50, "1586th value should be 50");
-assertEq(view[1587], 255, "1587th value should be 255");
-assertEq(view[1588], 205, "1588th value should be 205");
-assertEq(view[1589], 255, "1589th value should be 255");
-assertEq(view[1590], 50, "1590th value should be 50");
-assertEq(view[1591], 255, "1591th value should be 255");
-assertEq(view[1592], 230, "1592th value should be 230");
-assertEq(view[1593], 127, "1593th value should be 127");
-assertEq(view[1594], 25, "1594th value should be 25");
-assertEq(view[1595], 255, "1595th value should be 255");
-assertEq(view[1596], 230, "1596th value should be 230");
-assertEq(view[1597], 127, "1597th value should be 127");
-assertEq(view[1598], 25, "1598th value should be 25");
-assertEq(view[1599], 255, "1599th value should be 255");
-
-// Code used to generate the assertEq list above.
-function generateAssertList() {
- function template(i, x) {
- return 'assertEq(view[' + i + '], ' + x + ', "' + i + 'th value should be ' + x + '");\n';
- }
- var buf = ''
- for (var i = 0; i < LIMIT_SHOW; i++)
- buf += template(i, view[i]);
- print(buf);
-}
-//generateAssertList();
diff --git a/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js b/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
index e82b3e4173..523c5ae4ab 100644
--- a/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
+++ b/js/src/jit-test/tests/asm.js/testAsmJSWasmMixing.js
@@ -13,13 +13,3 @@ asmLink(asmJS, this, null, asmJSBuf);
var wasmMem = wasmEvalText('(module (memory 1 1) (export "mem" memory))').exports.mem;
assertAsmLinkFail(asmJS, this, null, wasmMem.buffer);
-if (!getBuildConfiguration().x64 && isSimdAvailable() && this["SIMD"]) {
- var simdJS = asmCompile('stdlib', 'ffis', 'buf', USE_ASM + 'var i32 = new stdlib.Int32Array(buf); var i32x4 = stdlib.SIMD.Int32x4; return {}');
- assertAsmLinkFail(simdJS, this, null, asmJSBuf);
- assertAsmLinkFail(simdJS, this, null, wasmMem.buffer);
-
- var simdJSBuf = new ArrayBuffer(BUF_MIN);
- asmLink(simdJS, this, null, simdJSBuf);
- asmLink(simdJS, this, null, simdJSBuf); // multiple SIMD.js instantiations succeed
- assertAsmLinkFail(asmJS, this, null, simdJSBuf); // but not asm.js
-}
diff --git a/js/src/jit-test/tests/asm.js/testBug1099216.js b/js/src/jit-test/tests/asm.js/testBug1099216.js
deleted file mode 100644
index 3514f307d5..0000000000
--- a/js/src/jit-test/tests/asm.js/testBug1099216.js
+++ /dev/null
@@ -1,61 +0,0 @@
-if (typeof SIMD === 'undefined' || !isSimdAvailable()) {
- print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-(function(global) {
- "use asm";
- var frd = global.Math.fround;
- var fx4 = global.SIMD.Float32x4;
- var fc4 = fx4.check;
- var fsp = fx4.splat;
- function s(){}
- function d(x){x=fc4(x);}
- function e() {
- var x = frd(0);
- x = frd(x / x);
- s();
- d(fsp(x));
- }
- return e;
-})(this)();
-
-(function(m) {
- "use asm"
- var k = m.SIMD.Bool32x4
- var g = m.SIMD.Int32x4
- var gc = g.check;
- var h = g.select
- function f() {
- var x = k(0, 0, 0, 0)
- var y = g(1, 2, 3, 4)
- return gc(h(x, y, y))
- }
- return f;
-})(this)();
-
-t = (function(global) {
- "use asm"
- var toF = global.Math.fround
- var f4 = global.SIMD.Float32x4
- var f4c = f4.check
- function p(x, y, width, value, max_iterations) {
- x = x | 0
- y = y | 0
- width = width | 0
- value = value | 0
- max_iterations = max_iterations | 0
- }
- function m(xf, yf, yd, max_iterations) {
- xf = toF(xf)
- yf = toF(yf)
- yd = toF(yd)
- max_iterations = max_iterations | 0
- var _ = f4(0, 0, 0, 0), c_im4 = f4(0, 0, 0, 0)
- c_im4 = f4(yf, yd, yd, yf)
- return f4c(c_im4);
- }
- return {p:p,m:m};
-})(this)
-t.p();
-t.m();
diff --git a/js/src/jit-test/tests/asm.js/testJumpRange.js b/js/src/jit-test/tests/asm.js/testJumpRange.js
index bd983544bc..6e5f6200f5 100644
--- a/js/src/jit-test/tests/asm.js/testJumpRange.js
+++ b/js/src/jit-test/tests/asm.js/testJumpRange.js
@@ -18,26 +18,6 @@ for (let threshold of [0, 50, 100, 5000, -1]) {
return h
`)()(), 45);
- if (isSimdAvailable() && this.SIMD) {
- var buf = new ArrayBuffer(BUF_MIN);
- new Int32Array(buf)[0] = 10;
- new Float32Array(buf)[1] = 42;
- assertEq(asmCompile('stdlib', 'ffis', 'buf',
- USE_ASM + `
- var H = new stdlib.Uint8Array(buf);
- var i4 = stdlib.SIMD.Int32x4;
- var f4 = stdlib.SIMD.Float32x4;
- var i4load = i4.load;
- var f4load = f4.load;
- var toi4 = i4.fromFloat32x4;
- var i4ext = i4.extractLane;
- function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
- function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
- function h(i) { i=i|0; return g(i)|0 }
- return h
- `)(this, null, buf)(0), 52);
- }
-
enableSPSProfiling();
asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
disableSPSProfiling();
diff --git a/js/src/jit-test/tests/asm.js/testProfiling.js b/js/src/jit-test/tests/asm.js/testProfiling.js
index 564f6f359b..97bca989a1 100644
--- a/js/src/jit-test/tests/asm.js/testProfiling.js
+++ b/js/src/jit-test/tests/asm.js/testProfiling.js
@@ -206,19 +206,6 @@ var stacks = disableSingleStepProfiling();
assertStackContainsSeq(stacks, ">,f1,>,<,f1,>,>,<,f1,>,f2,>,<,f1,>,<,f2,>,<,f1,>,f2,>,<,f1,>,>,<,f1,>,<,f1,>,f1,>,>");
-if (isSimdAvailable() && typeof SIMD !== 'undefined') {
- // SIMD out-of-bounds exit
- var buf = new ArrayBuffer(0x10000);
- var f = asmLink(asmCompile('g','ffi','buf', USE_ASM + 'var f4=g.SIMD.float32x4; var f4l=f4.load; var u8=new g.Uint8Array(buf); function f(i) { i=i|0; return f4l(u8, 0xFFFF + i | 0); } return f'), this, {}, buf);
- enableSingleStepProfiling();
- assertThrowsInstanceOf(() => f(4), RangeError);
- var stacks = disableSingleStepProfiling();
- // TODO check that expected is actually the correctly expected string, when
- // SIMD is implemented on ARM.
- assertStackContainsSeq(stacks, ">,f,>,inline stub,f,>");
-}
-
-
// Thunks
setJitCompilerOption("jump-threshold", 0);
var h = asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'));
diff --git a/js/src/jit-test/tests/asm.js/testSIMD-16x8.js b/js/src/jit-test/tests/asm.js/testSIMD-16x8.js
deleted file mode 100644
index 6f017892ce..0000000000
--- a/js/src/jit-test/tests/asm.js/testSIMD-16x8.js
+++ /dev/null
@@ -1,510 +0,0 @@
-load(libdir + "asm.js");
-load(libdir + "simd.js");
-load(libdir + "asserts.js");
-
-// Set to true to see more JS debugging spew.
-const DEBUG = false;
-
-if (!isSimdAvailable()) {
- DEBUG && print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-// Tests for 16x8 SIMD types: Int16x8, Uint16x8, Bool16x8.
-
-const I16x8 = 'var i16x8 = glob.SIMD.Int16x8;'
-const I16x8CHK = 'var i16x8chk = i16x8.check;'
-const I16x8EXT = 'var i16x8ext = i16x8.extractLane;'
-const I16x8REP = 'var i16x8rep = i16x8.replaceLane;'
-const I16x8U16x8 = 'var i16x8u16x8 = i16x8.fromUint16x8Bits;'
-
-const U16x8 = 'var u16x8 = glob.SIMD.Uint16x8;'
-const U16x8CHK = 'var u16x8chk = u16x8.check;'
-const U16x8EXT = 'var u16x8ext = u16x8.extractLane;'
-const U16x8REP = 'var u16x8rep = u16x8.replaceLane;'
-const U16x8I16x8 = 'var u16x8i16x8 = u16x8.fromInt16x8Bits;'
-
-const B16x8 = 'var b16x8 = glob.SIMD.Bool16x8;'
-const B16x8CHK = 'var b16x8chk = b16x8.check;'
-const B16x8EXT = 'var b16x8ext = b16x8.extractLane;'
-const B16x8REP = 'var b16x8rep = b16x8.replaceLane;'
-
-const INT16_MAX = 0x7fff
-const INT16_MIN = -0x10000
-const UINT16_MAX = 0xffff
-
-// Linking
-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {} return f"), {SIMD:{Int16x8: SIMD.Int16x8}})(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {} return f"), {SIMD:{Uint16x8: SIMD.Uint16x8}})(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {} return f"), {SIMD:{Bool16x8: SIMD.Bool16x8}})(), undefined);
-
-// Local variable of Int16x8 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int16x8(1,2,3,4,5,6,7,8);} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8;} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8();} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8,9);} return f");
-assertAsmTypeFail('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8|0);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {var x=i16x8(1,2,3,4,5,6,7," + (INT16_MAX + 1) + ");} return f"), this)(), undefined);
-
-// Local variable of Uint16x8 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint16x8(1,2,3,4,5,6,7,8);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8;} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8();} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8,9);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8|0);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7,8);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + U16x8 + "function f() {var x=u16x8(1,2,3,4,5,6,7," + (UINT16_MAX + 1) + ");} return f"), this)(), undefined);
-
-// Local variable of Bool16x8 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool16x8(1,0,0,0, 0,0,0,0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8;} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8();} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,1.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0|0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,0,0,0, 1);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + B16x8 + "function f() {var x=b16x8(1,0,0,0, 0,-1,-2,0);} return f"), this)(), undefined);
-
-// Only signed Int16x8 allowed as return value.
-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + "function f() {return i16x8(1,2,3,4,5,6,7,8);} return f"), this)(),
- [1, 2, 3, 4, 5, 6, 7, 8]);
-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + "function f() {return i16x8chk(i16x8(1,2,3,32771,5,6,7,8));} return f"), this)(),
- [1, 2, 3, -32765, 5, 6, 7, 8]);
-assertAsmTypeFail('glob', USE_ASM + U16x8 + "function f() {return u16x8(1,2,3,4,5,6,7,8);} return f");
-assertAsmTypeFail('glob', USE_ASM + U16x8 + U16x8CHK + "function f() {return u16x8chk(u16x8(1,2,3,4,5,6,7,8));} return f");
-
-// Test splat.
-function splat(x) {
- let r = []
- for (let i = 0; i < 8; i++)
- r.push(x);
- return r
-}
-
-splatB = asmLink(asmCompile('glob', USE_ASM + B16x8 +
- 'var splat = b16x8.splat;' +
- 'function f(x) { x = x|0; return splat(x); } return f'), this);
-assertEqVecArr(splatB(true), splat(true));
-assertEqVecArr(splatB(false), splat(false));
-
-
-splatB0 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
- 'var splat = b16x8.splat;' +
- 'function f() { var x = 0; return splat(x); } return f'), this);
-assertEqVecArr(splatB0(), splat(false));
-splatB1 = asmLink(asmCompile('glob', USE_ASM + B16x8 +
- 'var splat = b16x8.splat;' +
- 'function f() { var x = 1; return splat(x); } return f'), this);
-assertEqVecArr(splatB1(), splat(true));
-
-splatI = asmLink(asmCompile('glob', USE_ASM + I16x8 +
- 'var splat = i16x8.splat;' +
- 'function f(x) { x = x|0; return splat(x); } return f'), this);
-for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
- assertEqVecArr(splatI(x), splat(x << 16 >> 16));
-}
-
-splatIc = asmLink(asmCompile('glob', USE_ASM + I16x8 +
- 'var splat = i16x8.splat;' +
- 'function f() { var x = 100; return splat(x); } return f'), this);
-assertEqVecArr(splatIc(), splat(100))
-
-splatU = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
- 'var splat = u16x8.splat;' +
- 'function f(x) { x = x|0; return i16x8u16x8(splat(x)); } return f'), this);
-for (let x of [0, 1, -1, 0x12345, 0x1234, -1000, -1000000]) {
- assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatI(x)), splat(x << 16 >>> 16));
-}
-
-splatUc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8U16x8 +
- 'var splat = u16x8.splat;' +
- 'function f() { var x = 200; return i16x8u16x8(splat(x)); } return f'), this);
-assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(splatUc()), splat(200))
-
-
-// Test extractLane.
-//
-// The lane index must be a literal int, and we generate different code for
-// different lanes.
-function extractI(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8EXT +
- `function f() {var x=i16x8(${a.join(',')}); return i16x8ext(x, ${i})|0; } return f`), this)();
-}
-a = [-1,2,-3,4,-5,6,-7,-8];
-for (var i = 0; i < 8; i++)
- assertEq(extractI(a, i), a[i]);
-a = a.map(x => -x);
-for (var i = 0; i < 8; i++)
- assertEq(extractI(a, i), a[i]);
-
-function extractU(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8EXT +
- `function f() {var x=u16x8(${a.join(',')}); return u16x8ext(x, ${i})|0; } return f`), this)();
-}
-a = [1,255,12,13,14,150,200,3];
-for (var i = 0; i < 8; i++)
- assertEq(extractU(a, i), a[i]);
-a = a.map(x => UINT16_MAX-x);
-for (var i = 0; i < 8; i++)
- assertEq(extractU(a, i), a[i]);
-
-function extractB(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8EXT +
- `function f() {var x=b16x8(${a.join(',')}); return b16x8ext(x, ${i})|0; } return f`), this)();
-}
-a = [1,1,0,1, 1,0,0,0];
-for (var i = 0; i < 8; i++)
- assertEq(extractB(a, i), a[i]);
-a = a.map(x => 1-x);
-for (var i = 0; i < 8; i++)
- assertEq(extractB(a, i), a[i]);
-
-// Test replaceLane.
-function replaceI(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8REP +
- `function f(v) {v=v|0; var x=i16x8(${a.join(',')}); return i16x8rep(x,${i},v); } return f`), this);
-}
-a = [-1,2,-3,4,-5,6,-7,-9];
-for (var i = 0; i < 8; i++) {
- var f = replaceI(a, i);
- var b = a.slice(0);
- b[i] = -20;
- assertEqVecArr(f(-20), b);
-}
-
-function replaceU(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + U16x8 + U16x8REP + I16x8 + I16x8U16x8 +
- `function f(v) {v=v|0; var x=u16x8(${a.join(',')}); return i16x8u16x8(u16x8rep(x,${i},v)); } return f`), this);
-}
-a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
-for (var i = 0; i < 8; i++) {
- var rawf = replaceU(a, i);
- var f = x => SIMD.Uint16x8.fromInt16x8Bits(rawf(x))
- var b = a.slice(0);
- b[i] = 1000;
- assertEqVecArr(f(1000), b);
-}
-
-function replaceB(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8REP +
- `function f(v) {v=v|0; var x=b16x8(${a.join(',')}); return b16x8rep(x,${i},v); } return f`), this);
-}
-a = [1,1,0,1,1,0,0,0];
-for (var i = 0; i < 8; i++) {
- var f = replaceB(a, i);
- var b = a.slice(0);
- let v = 1 - a[i];
- b[i] = v;
- assertEqVecArr(f(v), b.map(x => !!x));
-}
-
-
-// Test select.
-selectI = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK +
- 'var select = i16x8.select;' +
- 'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
- 'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
- 'function f(x) { x = b16x8chk(x); return select(x, a, b); } return f'), this);
-assertEqVecArr(selectI(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
- [ 5,6,-3,8,-5,6,-7,12]);
-
-selectU = asmLink(asmCompile('glob', USE_ASM + I16x8 + B16x8 + B16x8CHK + U16x8 + I16x8U16x8 + U16x8I16x8 +
- 'var select = u16x8.select;' +
- 'var a = i16x8(-1,2,-3,4,-5, 6,-7, 8);' +
- 'var b = i16x8( 5,6, 7,8, 9,10,11,12);' +
- 'function f(x) { x = b16x8chk(x); return i16x8u16x8(select(x, u16x8i16x8(a), u16x8i16x8(b))); } return f'), this);
-assertEqVecArr(selectU(SIMD.Bool16x8( 0,0, 1,0, 1,1, 1, 0)),
- [ 5,6,-3,8,-5,6,-7,12]);
-
-// Test swizzle.
-function swizzle(vec, lanes) {
- let r = [];
- for (let i = 0; i < 8; i++)
- r.push(vec[lanes[i]]);
- return r;
-}
-
-function swizzleI(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- 'var swz = i16x8.swizzle;' +
- `function f(a) { a = i16x8chk(a); return swz(a, ${lanes.join()}); } return f`), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(asm(v1), swizzle(a1, lanes));
- assertEqVecArr(asm(v2), swizzle(a2, lanes));
-}
-
-swizzleI([3, 4, 7, 1, 4, 3, 1, 2]);
-swizzleI([0, 0, 0, 0, 0, 0, 0, 0]);
-swizzleI([7, 7, 7, 7, 7, 7, 7, 7]);
-
-function swizzleU(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
- 'var swz = u16x8.swizzle;' +
- `function f(a) { a = i16x8chk(a); return i16x8u16x8(swz(u16x8i16x8(a), ${lanes.join()})); } return f`), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(asm(v1), swizzle(a1, lanes));
- assertEqVecArr(asm(v2), swizzle(a2, lanes));
-}
-
-swizzleU([3, 4, 7, 1, 4, 3, 1, 2]);
-swizzleU([0, 0, 0, 0, 0, 0, 0, 0]);
-swizzleU([7, 7, 7, 7, 7, 7, 7, 7]);
-
-// Out-of-range lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
- 'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
-assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
- 'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8); } return f');
-// Missing lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I16x8 + 'var swz = i16x8.swizzle; ' +
- 'function f() { var x=i16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
-assertAsmTypeFail('glob', USE_ASM + U16x8 + 'var swz = u16x8.swizzle; ' +
- 'function f() { var x=u16x8(0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7); } return f');
-
-
-// Test shuffle.
-function shuffle(vec1, vec2, lanes) {
- let r = [];
- let vec = vec1.concat(vec2)
- for (let i = 0; i < 8; i++)
- r.push(vec[lanes[i]]);
- return r;
-}
-
-function shuffleI(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- 'var shuf = i16x8.shuffle;' +
- `function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
-}
-
-function shuffleU(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 + I16x8U16x8 +
- 'var shuf = u16x8.shuffle;' +
- 'function f(a1, a2) { a1 = i16x8chk(a1); a2 = i16x8chk(a2); ' +
- `return i16x8u16x8(shuf(u16x8i16x8(a1), u16x8i16x8(a2), ${lanes.join()})); } return f`), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
-}
-
-shuffleI([0, 0, 0, 0, 0, 0, 0, 0])
-shuffleI([15, 15, 15, 15, 15, 15, 15, 15])
-shuffleI([6, 2, 0, 14, 6, 10, 11, 1])
-
-shuffleU([7, 7, 7, 7, 7, 7, 7, 7])
-shuffleU([8, 15, 15, 15, 15, 15, 15, 15])
-shuffleU([6, 2, 0, 14, 6, 10, 11, 1])
-
-// Test unary operators.
-function unaryI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- `var fut = i16x8.${opname};` +
- 'function f(v) { v = i16x8chk(v); return fut(v); } return f'), this);
- let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
- let v = SIMD.Int16x8(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-function unaryU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
- `var fut = u16x8.${opname};` +
- 'function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v))); } return f'), this);
- let a = [65000-1,2,65000-3,4,65000-5,6,65000-7,65000-9];
- let v = SIMD.Int16x8(...a);
- assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v)), a.map(lanefunc));
-}
-
-function unaryB(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
- `var fut = b16x8.${opname};` +
- 'function f(v) { v = b16x8chk(v); return fut(v); } return f'), this);
- let a = [1,1,0,1,1,0,0,0];
- let v = SIMD.Bool16x8(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-unaryI('not', x => ~x << 16 >> 16);
-unaryU('not', x => ~x << 16 >>> 16);
-unaryB('not', x => !x);
-unaryI('neg', x => -x << 16 >> 16);
-unaryU('neg', x => -x << 16 >>> 16);
-
-
-// Test binary operators.
-function zipmap(a1, a2, f) {
- assertEq(a1.length, a2.length);
- let r = [];
- for (var i = 0; i < a1.length; i++)
- r.push(f(a1[i], a2[i]));
- return r
-}
-
-function binaryI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- `var fut = i16x8.${opname};` +
- 'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-function binaryU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
- `var fut = u16x8.${opname};` +
- 'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return i16x8u16x8(fut(u16x8i16x8(v1), u16x8i16x8(v2))); } return f'), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >>> 16);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- let res = SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v1, v2));
- assertEqVecArr(res, ref);
-}
-
-function binaryB(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + B16x8 + B16x8CHK +
- `var fut = b16x8.${opname};` +
- 'function f(v1, v2) { v1 = b16x8chk(v1); v2 = b16x8chk(v2); return fut(v1, v2); } return f'), this);
- let a = [1,1,0,1,1,0,0,0];
- let v = SIMD.Bool16x8(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-binaryI('add', (x, y) => (x + y) << 16 >> 16);
-binaryI('sub', (x, y) => (x - y) << 16 >> 16);
-binaryI('mul', (x, y) => (x * y) << 16 >> 16);
-binaryU('add', (x, y) => (x + y) << 16 >>> 16);
-binaryU('sub', (x, y) => (x - y) << 16 >>> 16);
-binaryU('mul', (x, y) => (x * y) << 16 >>> 16);
-
-binaryI('and', (x, y) => (x & y) << 16 >> 16);
-binaryI('or', (x, y) => (x | y) << 16 >> 16);
-binaryI('xor', (x, y) => (x ^ y) << 16 >> 16);
-binaryU('and', (x, y) => (x & y) << 16 >>> 16);
-binaryU('or', (x, y) => (x | y) << 16 >>> 16);
-binaryU('xor', (x, y) => (x ^ y) << 16 >>> 16);
-
-function sat(x, lo, hi) {
- if (x < lo) return lo;
- if (x > hi) return hi;
- return x
-}
-function isat(x) { return sat(x, -32768, 32767); }
-function usat(x) { return sat(x, 0, 0xffff); }
-
-binaryI('addSaturate', (x, y) => isat(x + y))
-binaryI('subSaturate', (x, y) => isat(x - y))
-binaryU('addSaturate', (x, y) => usat(x + y))
-binaryU('subSaturate', (x, y) => usat(x - y))
-
-
-// Test shift operators.
-function zip1map(a, s, f) {
- return a.map(x => f(x, s));
-}
-
-function shiftI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- `var fut = i16x8.${opname};` +
- 'function f(v, s) { v = i16x8chk(v); s = s|0; return fut(v, s); } return f'), this);
- let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
- let v = SIMD.Int16x8(...a);
- for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
- let ref = zip1map(a, s, lanefunc);
- // 1. Test dynamic shift amount.
- assertEqVecArr(simdfunc(v, s), ref);
-
- // 2. Test constant shift amount.
- let cstf = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- `var fut = i16x8.${opname};` +
- `function f(v) { v = i16x8chk(v); return fut(v, ${s}); } return f`), this);
- assertEqVecArr(cstf(v, s), ref);
- }
-}
-
-function shiftU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
- `var fut = u16x8.${opname};` +
- 'function f(v, s) { v = i16x8chk(v); s = s|0; return i16x8u16x8(fut(u16x8i16x8(v), s)); } return f'), this);
- let a = [-1,2,-3,0x80,0x7f,6,0x8000,0x7fff];
- let v = SIMD.Int16x8(...a);
- for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
- let ref = zip1map(a, s, lanefunc);
- // 1. Test dynamic shift amount.
- assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(simdfunc(v, s)), ref);
-
- // 2. Test constant shift amount.
- let cstf = asmLink(asmCompile('glob', USE_ASM + U16x8 + I16x8 + I16x8CHK + U16x8I16x8 + I16x8U16x8 +
- `var fut = u16x8.${opname};` +
- `function f(v) { v = i16x8chk(v); return i16x8u16x8(fut(u16x8i16x8(v), ${s})); } return f`), this);
- assertEqVecArr(SIMD.Uint16x8.fromInt16x8Bits(cstf(v, s)), ref);
- }
-}
-
-shiftI('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >> 16);
-shiftU('shiftLeftByScalar', (x,s) => (x << (s & 15)) << 16 >>> 16);
-shiftI('shiftRightByScalar', (x,s) => ((x << 16 >> 16) >> (s & 15)) << 16 >> 16);
-shiftU('shiftRightByScalar', (x,s) => ((x << 16 >>> 16) >>> (s & 15)) << 16 >>> 16);
-
-
-// Comparisons.
-function compareI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK +
- `var fut = i16x8.${opname};` +
- 'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(v1, v2); } return f'), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >> 16);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-function compareU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I16x8 + I16x8CHK + U16x8 + U16x8I16x8 +
- `var fut = u16x8.${opname};` +
- 'function f(v1, v2) { v1 = i16x8chk(v1); v2 = i16x8chk(v2); return fut(u16x8i16x8(v1), u16x8i16x8(v2)); } return f'), this);
- let a1 = [ -1,2, -3,0x8000,0x7f,6,-7, 8].map(x => x << 16 >>> 16);
- let a2 = [0x8000,2,0x8000,0x7fff, 0,0, 8,-9].map(x => x << 16 >>> 16);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int16x8(...a1);
- let v2 = SIMD.Int16x8(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-compareI("equal", (x,y) => x == y);
-compareU("equal", (x,y) => x == y);
-compareI("notEqual", (x,y) => x != y);
-compareU("notEqual", (x,y) => x != y);
-compareI("lessThan", (x,y) => x < y);
-compareU("lessThan", (x,y) => x < y);
-compareI("lessThanOrEqual", (x,y) => x <= y);
-compareU("lessThanOrEqual", (x,y) => x <= y);
-compareI("greaterThan", (x,y) => x > y);
-compareU("greaterThan", (x,y) => x > y);
-compareI("greaterThanOrEqual", (x,y) => x >= y);
-compareU("greaterThanOrEqual", (x,y) => x >= y);
diff --git a/js/src/jit-test/tests/asm.js/testSIMD-8x16.js b/js/src/jit-test/tests/asm.js/testSIMD-8x16.js
deleted file mode 100644
index 160da82a8e..0000000000
--- a/js/src/jit-test/tests/asm.js/testSIMD-8x16.js
+++ /dev/null
@@ -1,524 +0,0 @@
-load(libdir + "asm.js");
-load(libdir + "simd.js");
-load(libdir + "asserts.js");
-
-// Set to true to see more JS debugging spew.
-const DEBUG = false;
-
-if (!isSimdAvailable()) {
- DEBUG && print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-// Tests for 8x16 SIMD types: Int8x16, Uint8x16, Bool8x16.
-
-const I8x16 = 'var i8x16 = glob.SIMD.Int8x16;'
-const I8x16CHK = 'var i8x16chk = i8x16.check;'
-const I8x16EXT = 'var i8x16ext = i8x16.extractLane;'
-const I8x16REP = 'var i8x16rep = i8x16.replaceLane;'
-const I8x16U8x16 = 'var i8x16u8x16 = i8x16.fromUint8x16Bits;'
-
-const U8x16 = 'var u8x16 = glob.SIMD.Uint8x16;'
-const U8x16CHK = 'var u8x16chk = u8x16.check;'
-const U8x16EXT = 'var u8x16ext = u8x16.extractLane;'
-const U8x16REP = 'var u8x16rep = u8x16.replaceLane;'
-const U8x16I8x16 = 'var u8x16i8x16 = u8x16.fromInt8x16Bits;'
-
-const B8x16 = 'var b8x16 = glob.SIMD.Bool8x16;'
-const B8x16CHK = 'var b8x16chk = b8x16.check;'
-const B8x16EXT = 'var b8x16ext = b8x16.extractLane;'
-const B8x16REP = 'var b8x16rep = b8x16.replaceLane;'
-
-const INT8_MAX = 127
-const INT8_MIN = -128
-const UINT8_MAX = 255
-
-// Linking
-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {} return f"), {SIMD:{Int8x16: SIMD.Int8x16}})(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {} return f"), {SIMD:{Uint8x16: SIMD.Uint8x16}})(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {} return f"), {SIMD:{Bool8x16: SIMD.Bool8x16}})(), undefined);
-
-// Local variable of Int8x16 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16;} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16();} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
-assertAsmTypeFail('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {var x=i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (INT8_MAX + 1) + ");} return f"), this)(), undefined);
-
-// Local variable of Uint8x16 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Uint8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16;} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16();} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16|0);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + U8x16 + "function f() {var x=u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15," + (UINT8_MAX + 1) + ");} return f"), this)(), undefined);
-
-// Local variable of Bool8x16 type.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Bool8x16(1,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1);} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16;} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16();} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1|0);} return f");
-assertAsmTypeFail('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + B8x16 + "function f() {var x=b8x16(1,0,0,0,0,0,0,0,0,1,-1,2,-2,1,1,1);} return f"), this)(), undefined);
-
-// Only signed Int8x16 allowed as return value.
-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + "function f() {return i8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f"), this)(),
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-assertEqVecArr(asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + "function f() {return i8x16chk(i8x16(1,2,3,132,5,6,7,8,9,10,11,12,13,14,15,16));} return f"), this)(),
- [1, 2, 3, -124, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]);
-assertAsmTypeFail('glob', USE_ASM + U8x16 + "function f() {return u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16);} return f");
-assertAsmTypeFail('glob', USE_ASM + U8x16 + U8x16CHK + "function f() {return u8x16chk(u8x16(1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16));} return f");
-
-// Test splat.
-function splat(x) {
- let r = []
- for (let i = 0; i < 16; i++)
- r.push(x);
- return r
-}
-
-splatB = asmLink(asmCompile('glob', USE_ASM + B8x16 +
- 'var splat = b8x16.splat;' +
- 'function f(x) { x = x|0; return splat(x); } return f'), this);
-assertEqVecArr(splatB(true), splat(true));
-assertEqVecArr(splatB(false), splat(false));
-
-
-splatB0 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
- 'var splat = b8x16.splat;' +
- 'function f() { var x = 0; return splat(x); } return f'), this);
-assertEqVecArr(splatB0(), splat(false));
-splatB1 = asmLink(asmCompile('glob', USE_ASM + B8x16 +
- 'var splat = b8x16.splat;' +
- 'function f() { var x = 1; return splat(x); } return f'), this);
-assertEqVecArr(splatB1(), splat(true));
-
-splatI = asmLink(asmCompile('glob', USE_ASM + I8x16 +
- 'var splat = i8x16.splat;' +
- 'function f(x) { x = x|0; return splat(x); } return f'), this);
-for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
- assertEqVecArr(splatI(x), splat(x << 24 >> 24));
-}
-
-splatIc = asmLink(asmCompile('glob', USE_ASM + I8x16 +
- 'var splat = i8x16.splat;' +
- 'function f() { var x = 100; return splat(x); } return f'), this);
-assertEqVecArr(splatIc(), splat(100))
-
-splatU = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
- 'var splat = u8x16.splat;' +
- 'function f(x) { x = x|0; return i8x16u8x16(splat(x)); } return f'), this);
-for (let x of [0, 1, -1, 0x1234, 0x12, 1000, -1000000]) {
- assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatI(x)), splat(x << 24 >>> 24));
-}
-
-splatUc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16U8x16 +
- 'var splat = u8x16.splat;' +
- 'function f() { var x = 200; return i8x16u8x16(splat(x)); } return f'), this);
-assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(splatUc()), splat(200))
-
-
-// Test extractLane.
-//
-// The lane index must be a literal int, and we generate different code for
-// different lanes.
-function extractI(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16EXT +
- `function f() {var x=i8x16(${a.join(',')}); return i8x16ext(x, ${i})|0; } return f`), this)();
-}
-a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
-for (var i = 0; i < 16; i++)
- assertEq(extractI(a, i), a[i]);
-a = a.map(x => -x);
-for (var i = 0; i < 16; i++)
- assertEq(extractI(a, i), a[i]);
-
-function extractU(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16EXT +
- `function f() {var x=u8x16(${a.join(',')}); return u8x16ext(x, ${i})|0; } return f`), this)();
-}
-a = [1,255,12,13,14,150,200,3,4,5,6,7,8,9,10,16];
-for (var i = 0; i < 16; i++)
- assertEq(extractU(a, i), a[i]);
-a = a.map(x => 255-x);
-for (var i = 0; i < 16; i++)
- assertEq(extractU(a, i), a[i]);
-
-function extractB(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16EXT +
- `function f() {var x=b8x16(${a.join(',')}); return b8x16ext(x, ${i})|0; } return f`), this)();
-}
-a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
-for (var i = 0; i < 16; i++)
- assertEq(extractB(a, i), a[i]);
-a = a.map(x => 1-x);
-for (var i = 0; i < 16; i++)
- assertEq(extractB(a, i), a[i]);
-
-// Test replaceLane.
-function replaceI(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16REP +
- `function f(v) {v=v|0; var x=i8x16(${a.join(',')}); return i8x16rep(x,${i},v); } return f`), this);
-}
-a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
-for (var i = 0; i < 16; i++) {
- var f = replaceI(a, i);
- var b = a.slice(0);
- b[i] = -20;
- assertEqVecArr(f(-20), b);
-}
-
-function replaceU(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + U8x16 + U8x16REP + I8x16 + I8x16U8x16 +
- `function f(v) {v=v|0; var x=u8x16(${a.join(',')}); x=u8x16rep(x,${i},v); return i8x16u8x16(x); } return f`), this);
-}
-a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
-for (var i = 0; i < 16; i++) {
- // Result returned as Int8x16, convert back.
- var rawf = replaceU(a, i);
- var f = x => SIMD.Uint8x16.fromInt8x16Bits(rawf(x));
- var b = a.slice(0);
- b[i] = 100;
- assertEqVecArr(f(100), b);
-}
-
-function replaceB(a, i) {
- return asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16REP +
- `function f(v) {v=v|0; var x=b8x16(${a.join(',')}); return b8x16rep(x,${i},v); } return f`), this);
-}
-a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
-for (var i = 0; i < 16; i++) {
- var f = replaceB(a, i);
- var b = a.slice(0);
- v = 1 - a[i];
- b[i] = v;
- assertEqVecArr(f(v), b.map(x => !!x));
-}
-
-
-// Test select.
-selectI = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK +
- 'var select = i8x16.select;' +
- 'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
- 'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32, 0);' +
- 'function f(x) { x = b8x16chk(x); return select(x, a, b); } return f'), this);
-assertEqVecArr(selectI(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1)),
- [ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
-
-selectU = asmLink(asmCompile('glob', USE_ASM + I8x16 + B8x16 + B8x16CHK + U8x16 + I8x16U8x16 + U8x16I8x16 +
- 'var select = u8x16.select;' +
- 'var a = i8x16(-1,2,-3,4,-5, 6,-7, 8,-9,10,-11,12,-13,-14,-15,-16);' +
- 'var b = i8x16( 5,6, 7,8, 9,10,11,12,13,14, 15,16,-77, 45, 32, 0);' +
- 'function f(x) { x = b8x16chk(x); return i8x16u8x16(select(x, u8x16i8x16(a), u8x16i8x16(b))); } return f'), this);
-assertEqVecArr(selectU(SIMD.Bool8x16( 0,0, 1,0, 1,1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1)),
- [ 5,6,-3,8,-5,6,-7,12,-9,10,15,16,-13,-14,32,-16]);
-
-
-// Test swizzle.
-function swizzle(vec, lanes) {
- let r = [];
- for (let i = 0; i < 16; i++)
- r.push(vec[lanes[i]]);
- return r;
-}
-
-function swizzleI(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- 'var swz = i8x16.swizzle;' +
- `function f(a) { a = i8x16chk(a); return swz(a, ${lanes.join()}); } return f`), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(asm(v1), swizzle(a1, lanes));
- assertEqVecArr(asm(v2), swizzle(a2, lanes));
-}
-
-swizzleI([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
-swizzleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-swizzleI([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
-
-function swizzleU(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
- 'var swz = u8x16.swizzle;' +
- `function f(a) { a = i8x16chk(a); return i8x16u8x16(swz(u8x16i8x16(a), ${lanes.join()})); } return f`), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(asm(v1), swizzle(a1, lanes));
- assertEqVecArr(asm(v2), swizzle(a2, lanes));
-}
-
-swizzleU([10, 1, 7, 5, 1, 2, 6, 8, 5, 13, 0, 6, 2, 8, 0, 9]);
-swizzleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-swizzleU([15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15, 15]);
-
-// Out-of-range lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
- 'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
- 'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16); } return f');
-// Missing lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var swz = i8x16.swizzle; ' +
- 'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var swz = u8x16.swizzle; ' +
- 'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); swz(x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
-
-
-// Test shuffle.
-function shuffle(vec1, vec2, lanes) {
- let r = [];
- let vec = vec1.concat(vec2);
- for (let i = 0; i < 16; i++)
- r.push(vec[lanes[i]]);
- return r;
-}
-
-function shuffleI(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- 'var shuf = i8x16.shuffle;' +
- `function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); return shuf(a1, a2, ${lanes.join()}); } return f`), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
-}
-
-shuffleI([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
-shuffleI([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-shuffleI([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
-
-function shuffleU(lanes) {
- let asm = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 + I8x16U8x16 +
- 'var shuf = u8x16.shuffle;' +
- 'function f(a1, a2) { a1 = i8x16chk(a1); a2 = i8x16chk(a2); ' +
- `return i8x16u8x16(shuf(u8x16i8x16(a1), u8x16i8x16(a2), ${lanes.join()})); } return f`), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(asm(v1, v2), shuffle(a1, a2, lanes));
-}
-
-shuffleU([31, 9, 5, 4, 29, 12, 19, 10, 16, 22, 10, 9, 6, 18, 9, 8]);
-shuffleU([ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]);
-shuffleU([31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31, 31]);
-
-
-// Out-of-range lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
- 'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
- 'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,32); } return f');
-// Missing lane indexes.
-assertAsmTypeFail('glob', USE_ASM + I8x16 + 'var shuf = i8x16.shuffle; ' +
- 'function f() { var x=i8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
-assertAsmTypeFail('glob', USE_ASM + U8x16 + 'var shuf = u8x16.shuffle; ' +
- 'function f() { var x=u8x16(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0); shuf(x,x,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15); } return f');
-
-
-// Test unary operators.
-function unaryI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- `var fut = i8x16.${opname};` +
- 'function f(v) { v = i8x16chk(v); return fut(v); } return f'), this);
- let a = [-1,2,-3,4,-5,6,-7,8,-9,10,-11,12,-13,-14,-15,-16];
- let v = SIMD.Int8x16(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-function unaryU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
- `var fut = u8x16.${opname};` +
- 'function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v))); } return f'), this);
- let a = [256-1,2,256-3,4,256-5,6,256-7,8,256-9,10,256-11,12,256-13,256-14,256-15,256-16];
- let v = SIMD.Int8x16(...a);
- assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v)), a.map(lanefunc));
-}
-
-function unaryB(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
- `var fut = b8x16.${opname};` +
- 'function f(v) { v = b8x16chk(v); return fut(v); } return f'), this);
- let a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
- let v = SIMD.Bool8x16(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-unaryI('not', x => ~x << 24 >> 24);
-unaryU('not', x => ~x << 24 >>> 24);
-unaryB('not', x => !x);
-unaryI('neg', x => -x << 24 >> 24);
-unaryU('neg', x => -x << 24 >>> 24);
-
-
-// Test binary operators.
-function zipmap(a1, a2, f) {
- assertEq(a1.length, a2.length);
- let r = [];
- for (var i = 0; i < a1.length; i++)
- r.push(f(a1[i], a2[i]));
- return r
-}
-
-function binaryI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- `var fut = i8x16.${opname};` +
- 'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-function binaryU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
- `var fut = u8x16.${opname};` +
- 'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return i8x16u8x16(fut(u8x16i8x16(v1), u8x16i8x16(v2))); } return f'), this);
- let a1 = [ -1,2, -3,0x80,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x & 0xff);
- let a2 = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff].map(x => x & 0xff);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- let res = SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v1, v2));
- assertEqVecArr(res, ref);
-}
-
-function binaryB(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + B8x16 + B8x16CHK +
- `var fut = b8x16.${opname};` +
- 'function f(v1, v2) { v1 = b8x16chk(v1); v2 = b8x16chk(v2); return fut(v1, v2); } return f'), this);
- let a = [1,1,0,1,1,0,0,0,1,1,1,1,0,0,0,1];
- let v = SIMD.Bool8x16(...a);
- assertEqVecArr(simdfunc(v), a.map(lanefunc));
-}
-
-binaryI('add', (x, y) => (x + y) << 24 >> 24);
-binaryI('sub', (x, y) => (x - y) << 24 >> 24);
-binaryI('mul', (x, y) => (x * y) << 24 >> 24);
-binaryU('add', (x, y) => (x + y) << 24 >>> 24);
-binaryU('sub', (x, y) => (x - y) << 24 >>> 24);
-binaryU('mul', (x, y) => (x * y) << 24 >>> 24);
-
-binaryI('and', (x, y) => (x & y) << 24 >> 24);
-binaryI('or', (x, y) => (x | y) << 24 >> 24);
-binaryI('xor', (x, y) => (x ^ y) << 24 >> 24);
-binaryU('and', (x, y) => (x & y) << 24 >>> 24);
-binaryU('or', (x, y) => (x | y) << 24 >>> 24);
-binaryU('xor', (x, y) => (x ^ y) << 24 >>> 24);
-
-function sat(x, lo, hi) {
- if (x < lo) return lo;
- if (x > hi) return hi;
- return x
-}
-function isat(x) { return sat(x, -128, 127); }
-function usat(x) { return sat(x, 0, 255); }
-
-binaryI('addSaturate', (x, y) => isat(x + y))
-binaryI('subSaturate', (x, y) => isat(x - y))
-binaryU('addSaturate', (x, y) => usat(x + y))
-binaryU('subSaturate', (x, y) => usat(x - y))
-
-// Test shift operators.
-function zip1map(a, s, f) {
- return a.map(x => f(x, s));
-}
-
-function shiftI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- `var fut = i8x16.${opname};` +
- 'function f(v, s) { v = i8x16chk(v); s = s|0; return fut(v, s); } return f'), this);
- let a = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
- let v = SIMD.Int8x16(...a);
- for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
- let ref = zip1map(a, s, lanefunc);
- // 1. Test dynamic shift amount.
- assertEqVecArr(simdfunc(v, s), ref);
-
- // 2. Test constant shift amount.
- let cstf = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- `var fut = i8x16.${opname};` +
- `function f(v) { v = i8x16chk(v); return fut(v, ${s}); } return f`), this);
- assertEqVecArr(cstf(v, s), ref);
- }
-}
-
-function shiftU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
- `var fut = u8x16.${opname};` +
- 'function f(v, s) { v = i8x16chk(v); s = s|0; return i8x16u8x16(fut(u8x16i8x16(v), s)); } return f'), this);
- let a = [0x80,2,0x80,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16,0xff];
- let v = SIMD.Int8x16(...a);
- for (let s of [0, 1, 2, 6, 7, 8, 9, 10, 16, 255, -1, -8, -7, -1000]) {
- let ref = zip1map(a, s, lanefunc);
- // 1. Test dynamic shift amount.
- assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(simdfunc(v, s)), ref);
-
- // 2. Test constant shift amount.
- let cstf = asmLink(asmCompile('glob', USE_ASM + U8x16 + I8x16 + I8x16CHK + U8x16I8x16 + I8x16U8x16 +
- `var fut = u8x16.${opname};` +
- `function f(v) { v = i8x16chk(v); return i8x16u8x16(fut(u8x16i8x16(v), ${s})); } return f`), this);
- assertEqVecArr(SIMD.Uint8x16.fromInt8x16Bits(cstf(v, s)), ref);
- }
-}
-
-shiftI('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >> 24);
-shiftU('shiftLeftByScalar', (x,s) => (x << (s & 7)) << 24 >>> 24);
-shiftI('shiftRightByScalar', (x,s) => ((x << 24 >> 24) >> (s & 7)) << 24 >> 24);
-shiftU('shiftRightByScalar', (x,s) => ((x << 24 >>> 24) >>> (s & 7)) << 24 >>> 24);
-
-
-// Comparisons.
-function compareI(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK +
- `var fut = i8x16.${opname};` +
- 'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(v1, v2); } return f'), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16];
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1];
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-function compareU(opname, lanefunc) {
- let simdfunc = asmLink(asmCompile('glob', USE_ASM + I8x16 + I8x16CHK + U8x16 + U8x16I8x16 +
- `var fut = u8x16.${opname};` +
- 'function f(v1, v2) { v1 = i8x16chk(v1); v2 = i8x16chk(v2); return fut(u8x16i8x16(v1), u8x16i8x16(v2)); } return f'), this);
- let a1 = [ -1,2, -3,-128,0x7f,6,-7, 8,-9, 10,-11, 12,-13,-14,-15, -16].map(x => x << 24 >>> 24);
- let a2 = [-128,2,-128,0x7f, 0,0, 8,-9,10,-11, 12,-13,-14,-15,-16, -1].map(x => x << 24 >>> 24);
- let ref = zipmap(a1, a2, lanefunc);
- let v1 = SIMD.Int8x16(...a1);
- let v2 = SIMD.Int8x16(...a2);
- assertEqVecArr(simdfunc(v1, v2), ref);
-}
-
-compareI("equal", (x,y) => x == y);
-compareU("equal", (x,y) => x == y);
-compareI("notEqual", (x,y) => x != y);
-compareU("notEqual", (x,y) => x != y);
-compareI("lessThan", (x,y) => x < y);
-compareU("lessThan", (x,y) => x < y);
-compareI("lessThanOrEqual", (x,y) => x <= y);
-compareU("lessThanOrEqual", (x,y) => x <= y);
-compareI("greaterThan", (x,y) => x > y);
-compareU("greaterThan", (x,y) => x > y);
-compareI("greaterThanOrEqual", (x,y) => x >= y);
-compareU("greaterThanOrEqual", (x,y) => x >= y);
diff --git a/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js b/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js
deleted file mode 100644
index c5a5fb2bca..0000000000
--- a/js/src/jit-test/tests/asm.js/testSIMD-bitcasts.js
+++ /dev/null
@@ -1,84 +0,0 @@
-load(libdir + "asm.js");
-load(libdir + "simd.js");
-load(libdir + "asserts.js");
-
-// Set to true to see more JS debugging spew.
-const DEBUG = false;
-
-if (!isSimdAvailable()) {
- DEBUG && print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-// Test all bit-casts and normal loads and stores.
-var heap = new ArrayBuffer(BUF_MIN);
-var asU8 = new Uint8Array(heap);
-var allTypes = [
- "Int8x16",
- "Int16x8",
- "Int32x4",
- "Uint8x16",
- "Uint16x8",
- "Uint32x4",
- "Float32x4"
-];
-
-// Generate a load bit-cast store test function that performs:
-//
-// function f(a, b) {
-// vec = src.load(H, a);
-// cast = dst.from«src»Bits(vec);
-// store(H, b, cast);
-// }
-//
-// Here, `H` is the heap provided by `heap`.
-function test_func(src, dst) {
- text = `
- "use asm";
- var src = glob.SIMD.${src};
- var dst = glob.SIMD.${dst};
- var ld = src.load;
- var st = dst.store;
- var bc = dst.from${src}Bits;
-
- var H = new glob.Uint8Array(heap);
-
- function f(a, b) {
- a = a|0;
- b = b|0;
-
- st(H, b, bc(ld(H, a)));
- }
-
- return f;
- `;
- return asmLink(asmCompile('glob', 'ffi', 'heap', text), this, null, heap);
-}
-
-function assertBuf16(a, b) {
- for (let i=0; i < 16; i++) {
- assertEq(asU8[a+i], asU8[b+i]);
- }
-}
-
-for (let src of allTypes) {
- for (let dst of allTypes) {
- // Skip identity conversions.
- if (src == dst) continue;
-
- print(src, dst);
- let f = test_func(src, dst);
- // Initialize with pseudo-random data.
- for (let i = 0; i < 64; i++) {
- asU8[i] = (i + 17) * 97;
- }
-
- // Aligned load/store.
- f(0, 16);
- assertBuf16(0, 16);
-
- // Unaligned access.
- f(1, 27);
- assertBuf16(1, 27);
- }
-}
diff --git a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js b/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
deleted file mode 100644
index d826c106ba..0000000000
--- a/js/src/jit-test/tests/asm.js/testSIMD-load-store.js
+++ /dev/null
@@ -1,457 +0,0 @@
-// |jit-test|
-load(libdir + "asm.js");
-load(libdir + "simd.js");
-load(libdir + "asserts.js");
-
-// Avoid pathological --ion-eager compile times due to bails in loops
-setJitCompilerOption('ion.warmup.trigger', 1000000);
-
-// Set to true to see more JS debugging spew
-const DEBUG = false;
-
-if (!isSimdAvailable() || typeof SIMD === 'undefined' || !isAsmJSCompilationAvailable()) {
- DEBUG && print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-const RuntimeError = WebAssembly.RuntimeError;
-
-const INT32_MAX = Math.pow(2, 31) - 1;
-const INT32_MIN = INT32_MAX + 1 | 0;
-
-try {
-
-// Load / Store
-var IMPORTS = USE_ASM + 'var H=new glob.Uint8Array(heap); var i4=glob.SIMD.Int32x4; var ci4=i4.check; var load=i4.load; var store=i4.store;';
-
-// Bad number of args
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load();} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 4, 5);} return f");
-
-// Bad type of args
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(3, 5);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, 5.0);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0.;load(H, i);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=new glob.Int32Array(heap); function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var H2=42; function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;load(H2, i)} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "var f4=glob.SIMD.Float32x4; function f(){var i=0;var vec=f4(1,2,3,4); store(H, i, vec)} return f");
-
-// Bad coercions of returned values
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return load(H, i)|0;} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){var i=0;return +load(H, i);} return f");
-
-// Literal index constants
-var buf = new ArrayBuffer(BUF_MIN);
-var SIZE_TA = BUF_MIN >> 2
-var asI32 = new Int32Array(buf);
-asI32[SIZE_TA - 4] = 4;
-asI32[SIZE_TA - 3] = 3;
-asI32[SIZE_TA - 2] = 2;
-asI32[SIZE_TA - 1] = 1;
-
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, -1);} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1) + ");} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 15) + ");} return f");
-asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f(){load(H, " + (INT32_MAX + 1 - 16) + ");} return f");
-
-assertAsmLinkFail(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 15) + "));} return f"), this, {}, buf);
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + (BUF_MIN - 16) + "));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', IMPORTS + "function f() {return ci4(load(H, " + BUF_MIN + " - 16 | 0));} return f"), this, {}, buf)(), [4, 3, 2, 1]);
-
-var CONSTANT_INDEX = 42;
-var CONSTANT_BYTE_INDEX = CONSTANT_INDEX << 2;
-
-var loadStoreCode = `
- "use asm";
-
- var H = new glob.Uint8Array(heap);
-
- var i4 = glob.SIMD.Int32x4;
- var i4load = i4.load;
- var i4store = i4.store;
- var ci4 = i4.check;
-
- var f4 = glob.SIMD.Float32x4;
- var f4load = f4.load;
- var f4store = f4.store;
- var cf4 = f4.check;
-
- function f32l(i) { i=i|0; return cf4(f4load(H, i|0)); }
- function f32lcst() { return cf4(f4load(H, ${CONSTANT_BYTE_INDEX})); }
- function f32s(i, vec) { i=i|0; vec=cf4(vec); f4store(H, i|0, vec); }
- function f32scst(vec) { vec=cf4(vec); f4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
-
- function i32l(i) { i=i|0; return ci4(i4load(H, i|0)); }
- function i32lcst() { return ci4(i4load(H, ${CONSTANT_BYTE_INDEX})); }
- function i32s(i, vec) { i=i|0; vec=ci4(vec); i4store(H, i|0, vec); }
- function i32scst(vec) { vec=ci4(vec); i4store(H, ${CONSTANT_BYTE_INDEX}, vec); }
-
- function f32lbndcheck(i) {
- i=i|0;
- if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
- if ((i|0) < 0) i = 0;
- return cf4(f4load(H, i|0));
- }
- function f32sbndcheck(i, vec) {
- i=i|0;
- vec=cf4(vec);
- if ((i|0) > ${CONSTANT_BYTE_INDEX}) i=${CONSTANT_BYTE_INDEX};
- if ((i|0) < 0) i = 0;
- return cf4(f4store(H, i|0, vec));
- }
-
- return {
- f32l: f32l,
- f32lcst: f32lcst,
- f32s: f32s,
- f32scst: f32scst,
- f32lbndcheck: f32lbndcheck,
- f32sbndcheck: f32sbndcheck,
- i32l: i32l,
- i32lcst: i32lcst,
- i32s: i32s,
- i32scst: i32scst
- }
-`;
-
-const SIZE = 0x8000;
-
-var F32 = new Float32Array(SIZE);
-var reset = function() {
- for (var i = 0; i < SIZE; i++)
- F32[i] = i + 1;
-};
-reset();
-
-var buf = F32.buffer;
-var m = asmLink(asmCompile('glob', 'ffi', 'heap', loadStoreCode), this, null, buf);
-
-function slice(TA, i, n) { return Array.prototype.slice.call(TA, i, i + n); }
-
-// Float32x4.load
-function f32l(n) { return m.f32l((n|0) << 2 | 0); };
-
-// Correct accesses
-assertEqX4(f32l(0), slice(F32, 0, 4));
-assertEqX4(f32l(1), slice(F32, 1, 4));
-assertEqX4(f32l(SIZE - 4), slice(F32, SIZE - 4, 4));
-
-assertEqX4(m.f32lcst(), slice(F32, CONSTANT_INDEX, 4));
-assertEqX4(m.f32lbndcheck(CONSTANT_BYTE_INDEX), slice(F32, CONSTANT_INDEX, 4));
-
-// OOB
-assertThrowsInstanceOf(() => f32l(-1), RuntimeError);
-assertThrowsInstanceOf(() => f32l(SIZE), RuntimeError);
-assertThrowsInstanceOf(() => f32l(SIZE - 1), RuntimeError);
-assertThrowsInstanceOf(() => f32l(SIZE - 2), RuntimeError);
-assertThrowsInstanceOf(() => f32l(SIZE - 3), RuntimeError);
-
-var code = `
- "use asm";
- var f4 = glob.SIMD.Float32x4;
- var f4l = f4.load;
- var u8 = new glob.Uint8Array(heap);
-
- function g(x) {
- x = x|0;
- // set a constraint on the size of the heap
- var ptr = 0;
- ptr = u8[0xFFFF] | 0;
- // give a precise range to x
- x = (x>>0) > 5 ? 5 : x;
- x = (x>>0) < 0 ? 0 : x;
- // ptr value gets a precise range but the bounds check shouldn't get
- // eliminated.
- return f4l(u8, 0xFFFA + x | 0);
- }
-
- return g;
-`;
-assertThrowsInstanceOf(() => asmLink(asmCompile('glob', 'ffi', 'heap', code), this, {}, new ArrayBuffer(0x10000))(0), RuntimeError);
-
-// Float32x4.store
-function f32s(n, v) { return m.f32s((n|0) << 2 | 0, v); };
-
-var vec = SIMD.Float32x4(5,6,7,8);
-var vec2 = SIMD.Float32x4(0,1,2,3);
-var vecWithNaN = SIMD.Float32x4(NaN, 2, NaN, 4);
-
-reset();
-f32s(0, vec);
-assertEqX4(vec, slice(F32, 0, 4));
-
-reset();
-f32s(0, vec2);
-assertEqX4(vec2, slice(F32, 0, 4));
-
-reset();
-f32s(4, vec);
-assertEqX4(vec, slice(F32, 4, 4));
-
-reset();
-f32s(4, vecWithNaN);
-assertEqX4(vecWithNaN, slice(F32, 4, 4));
-
-reset();
-m.f32scst(vec2);
-assertEqX4(vec2, slice(F32, CONSTANT_INDEX, 4));
-
-reset();
-m.f32sbndcheck(CONSTANT_BYTE_INDEX, vec);
-assertEqX4(vec, slice(F32, CONSTANT_INDEX, 4));
-
-// OOB
-reset();
-assertThrowsInstanceOf(() => f32s(SIZE - 3, vec), RuntimeError);
-assertThrowsInstanceOf(() => f32s(SIZE - 2, vec), RuntimeError);
-assertThrowsInstanceOf(() => f32s(SIZE - 1, vec), RuntimeError);
-assertThrowsInstanceOf(() => f32s(SIZE, vec), RuntimeError);
-for (var i = 0; i < SIZE; i++)
- assertEq(F32[i], i + 1);
-
-// Int32x4.load
-var I32 = new Int32Array(buf);
-reset = function () {
- for (var i = 0; i < SIZE; i++)
- I32[i] = i + 1;
-};
-reset();
-
-function i32(n) { return m.i32l((n|0) << 2 | 0); };
-
-// Correct accesses
-assertEqX4(i32(0), slice(I32, 0, 4));
-assertEqX4(i32(1), slice(I32, 1, 4));
-assertEqX4(i32(SIZE - 4), slice(I32, SIZE - 4, 4));
-
-assertEqX4(m.i32lcst(), slice(I32, CONSTANT_INDEX, 4));
-
-// OOB
-assertThrowsInstanceOf(() => i32(-1), RuntimeError);
-assertThrowsInstanceOf(() => i32(SIZE), RuntimeError);
-assertThrowsInstanceOf(() => i32(SIZE - 1), RuntimeError);
-assertThrowsInstanceOf(() => i32(SIZE - 2), RuntimeError);
-assertThrowsInstanceOf(() => i32(SIZE - 3), RuntimeError);
-
-// Int32x4.store
-function i32s(n, v) { return m.i32s((n|0) << 2 | 0, v); };
-
-var vec = SIMD.Int32x4(5,6,7,8);
-var vec2 = SIMD.Int32x4(0,1,2,3);
-
-reset();
-i32s(0, vec);
-assertEqX4(vec, slice(I32, 0, 4));
-
-reset();
-i32s(0, vec2);
-assertEqX4(vec2, slice(I32, 0, 4));
-
-reset();
-i32s(4, vec);
-assertEqX4(vec, slice(I32, 4, 4));
-
-reset();
-m.i32scst(vec2);
-assertEqX4(vec2, slice(I32, CONSTANT_INDEX, 4));
-
-// OOB
-reset();
-assertThrowsInstanceOf(() => i32s(SIZE - 3, vec), RuntimeError);
-assertThrowsInstanceOf(() => i32s(SIZE - 2, vec), RuntimeError);
-assertThrowsInstanceOf(() => i32s(SIZE - 1, vec), RuntimeError);
-assertThrowsInstanceOf(() => i32s(SIZE - 0, vec), RuntimeError);
-for (var i = 0; i < SIZE; i++)
- assertEq(I32[i], i + 1);
-
-// Partial loads and stores
-(function() {
-
-// Variable indexes
-function MakeCodeFor(typeName) {
- return `
- "use asm";
- var type = glob.SIMD.${typeName};
- var c = type.check;
-
- var l1 = type.load1;
- var l2 = type.load2;
-
- var s1 = type.store1;
- var s2 = type.store2;
-
- var u8 = new glob.Uint8Array(heap);
-
- function load1(i) { i=i|0; return l1(u8, i); }
- function load2(i) { i=i|0; return l2(u8, i); }
-
- function loadCst1() { return l1(u8, 41 << 2); }
- function loadCst2() { return l2(u8, 41 << 2); }
-
- function store1(i, x) { i=i|0; x=c(x); return s1(u8, i, x); }
- function store2(i, x) { i=i|0; x=c(x); return s2(u8, i, x); }
-
- function storeCst1(x) { x=c(x); return s1(u8, 41 << 2, x); }
- function storeCst2(x) { x=c(x); return s2(u8, 41 << 2, x); }
-
- return {
- load1: load1,
- load2: load2,
- loadCst1: loadCst1,
- loadCst2: loadCst2,
- store1: store1,
- store2: store2,
- storeCst1: storeCst1,
- storeCst2: storeCst2,
- }
-`;
-}
-
-var SIZE = 0x10000;
-
-function TestPartialLoads(m, typedArray, x, y, z, w) {
- // Fill array with predictable values
- for (var i = 0; i < SIZE; i += 4) {
- typedArray[i] = x(i);
- typedArray[i + 1] = y(i);
- typedArray[i + 2] = z(i);
- typedArray[i + 3] = w(i);
- }
-
- // Test correct loads
- var i = 0, j = 0; // i in elems, j in bytes
- assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
- assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
-
- j += 4;
- assertEqX4(m.load1(j), [y(i), 0, 0, 0]);
- assertEqX4(m.load2(j), [y(i), z(i), 0, 0]);
-
- j += 4;
- assertEqX4(m.load1(j), [z(i), 0, 0, 0]);
- assertEqX4(m.load2(j), [z(i), w(i), 0, 0]);
-
- j += 4;
- assertEqX4(m.load1(j), [w(i), 0, 0, 0]);
- assertEqX4(m.load2(j), [w(i), x(i+4), 0, 0]);
-
- j += 4;
- i += 4;
- assertEqX4(m.load1(j), [x(i), 0, 0, 0]);
- assertEqX4(m.load2(j), [x(i), y(i), 0, 0]);
-
- // Test loads with constant indexes (41)
- assertEqX4(m.loadCst1(), [y(40), 0, 0, 0]);
- assertEqX4(m.loadCst2(), [y(40), z(40), 0, 0]);
-
- // Test limit and OOB accesses
- assertEqX4(m.load1((SIZE - 1) << 2), [w(SIZE - 4), 0, 0, 0]);
- assertThrowsInstanceOf(() => m.load1(((SIZE - 1) << 2) + 1), RuntimeError);
-
- assertEqX4(m.load2((SIZE - 2) << 2), [z(SIZE - 4), w(SIZE - 4), 0, 0]);
- assertThrowsInstanceOf(() => m.load2(((SIZE - 2) << 2) + 1), RuntimeError);
-}
-
-// Partial stores
-function TestPartialStores(m, typedArray, typeName, x, y, z, w) {
- var val = SIMD[typeName](x, y, z, w);
-
- function Reset() {
- for (var i = 0; i < SIZE; i++)
- typedArray[i] = i + 1;
- }
- function CheckNotModified(low, high) {
- for (var i = low; i < high; i++)
- assertEq(typedArray[i], i + 1);
- }
-
- function TestStore1(i) {
- m.store1(i, val);
- CheckNotModified(0, i >> 2);
- assertEq(typedArray[i >> 2], x);
- CheckNotModified((i >> 2) + 1, SIZE);
- typedArray[i >> 2] = (i >> 2) + 1;
- }
-
- function TestStore2(i) {
- m.store2(i, val);
- CheckNotModified(0, i >> 2);
- assertEq(typedArray[i >> 2], x);
- assertEq(typedArray[(i >> 2) + 1], y);
- CheckNotModified((i >> 2) + 2, SIZE);
- typedArray[i >> 2] = (i >> 2) + 1;
- typedArray[(i >> 2) + 1] = (i >> 2) + 2;
- }
-
- function TestOOBStore(f) {
- assertThrowsInstanceOf(f, RuntimeError);
- CheckNotModified(0, SIZE);
- }
-
- Reset();
-
- TestStore1(0);
- TestStore1(1 << 2);
- TestStore1(2 << 2);
- TestStore1(3 << 2);
- TestStore1(1337 << 2);
-
- var i = (SIZE - 1) << 2;
- TestStore1(i);
- TestOOBStore(() => m.store1(i + 1, val));
- TestOOBStore(() => m.store1(-1, val));
-
- TestStore2(0);
- TestStore2(1 << 2);
- TestStore2(2 << 2);
- TestStore2(3 << 2);
- TestStore2(1337 << 2);
-
- var i = (SIZE - 2) << 2;
- TestStore2(i);
- TestOOBStore(() => m.store2(i + 1, val));
- TestOOBStore(() => m.store2(-1, val));
-
- // Constant indexes (41)
- m.storeCst1(val);
- CheckNotModified(0, 41);
- assertEq(typedArray[41], x);
- CheckNotModified(42, SIZE);
- typedArray[41] = 42;
-
- m.storeCst2(val);
- CheckNotModified(0, 41);
- assertEq(typedArray[41], x);
- assertEq(typedArray[42], y);
- CheckNotModified(43, SIZE);
- typedArray[41] = 42;
- typedArray[42] = 43;
-}
-
-var f32 = new Float32Array(SIZE);
-var mFloat32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Float32x4')), this, null, f32.buffer);
-
-TestPartialLoads(mFloat32x4, f32,
- (i) => i + 1,
- (i) => Math.fround(13.37),
- (i) => Math.fround(1/i),
- (i) => Math.fround(Math.sqrt(0x2000 - i)));
-
-TestPartialStores(mFloat32x4, f32, 'Float32x4', 42, -0, NaN, 0.1337);
-
-var i32 = new Int32Array(f32.buffer);
-var mInt32x4 = asmLink(asmCompile('glob', 'ffi', 'heap', MakeCodeFor('Int32x4')), this, null, i32.buffer);
-
-TestPartialLoads(mInt32x4, i32,
- (i) => i + 1 | 0,
- (i) => -i | 0,
- (i) => i * 2 | 0,
- (i) => 42);
-
-TestPartialStores(mInt32x4, i32, 'Int32x4', 42, -3, 13, 37);
-
-})();
-
-} catch (e) { print('stack: ', e.stack); throw e }
diff --git a/js/src/jit-test/tests/asm.js/testSIMD.js b/js/src/jit-test/tests/asm.js/testSIMD.js
deleted file mode 100644
index 29786bc52c..0000000000
--- a/js/src/jit-test/tests/asm.js/testSIMD.js
+++ /dev/null
@@ -1,1575 +0,0 @@
-load(libdir + "asm.js");
-load(libdir + "simd.js");
-load(libdir + "asserts.js");
-var heap = new ArrayBuffer(0x10000);
-
-// Avoid pathological --ion-eager compile times due to bails in loops
-setJitCompilerOption('ion.warmup.trigger', 1000000);
-
-// Set to true to see more JS debugging spew
-const DEBUG = false;
-
-if (!isSimdAvailable() || typeof SIMD === 'undefined' || !isAsmJSCompilationAvailable()) {
- DEBUG && print("won't run tests as simd extensions aren't activated yet");
- quit(0);
-}
-
-const I32 = 'var i4 = glob.SIMD.Int32x4;'
-const CI32 = 'var ci4 = i4.check;'
-const I32A = 'var i4a = i4.add;'
-const I32S = 'var i4s = i4.sub;'
-const I32M = 'var i4m = i4.mul;'
-const I32U32 = 'var i4u4 = i4.fromUint32x4Bits;'
-
-const U32 = 'var u4 = glob.SIMD.Uint32x4;'
-const CU32 = 'var cu4 = u4.check;'
-const U32A = 'var u4a = u4.add;'
-const U32S = 'var u4s = u4.sub;'
-const U32M = 'var u4m = u4.mul;'
-const U32I32 = 'var u4i4 = u4.fromInt32x4Bits;'
-
-const F32 = 'var f4 = glob.SIMD.Float32x4;'
-const CF32 = 'var cf4 = f4.check;'
-const F32A = 'var f4a = f4.add;'
-const F32S = 'var f4s = f4.sub;'
-const F32M = 'var f4m = f4.mul;'
-const F32D = 'var f4d = f4.div;'
-const FROUND = 'var f32=glob.Math.fround;'
-const B32 = 'var b4 = glob.SIMD.Bool32x4;'
-const CB32 = 'var cb4 = b4.check;'
-
-const EXTI4 = 'var e = i4.extractLane;'
-const EXTU4 = 'var e = u4.extractLane;'
-const EXTF4 = 'var e = f4.extractLane;'
-const EXTB4 = 'var e = b4.extractLane;'
-
-// anyTrue / allTrue on boolean vectors.
-const ANYB4 = 'var anyt=b4.anyTrue;'
-const ALLB4 = 'var allt=b4.allTrue;'
-
-const INT32_MAX = Math.pow(2, 31) - 1;
-const INT32_MIN = INT32_MAX + 1 | 0;
-const UINT32_MAX = Math.pow(2, 32) - 1;
-
-const assertEqFFI = {assertEq:assertEq};
-
-function CheckI4(header, code, expected) {
- // code needs to contain a local called x
- header = USE_ASM + I32 + CI32 + EXTI4 + F32 + header;
- var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return ci4(x)} return f'), this)();
- assertEqX4(observed, expected);
-}
-
-function CheckU4(header, code, expected) {
- // code needs to contain a local called x.
- header = USE_ASM + U32 + CU32 + EXTU4 + I32 + CI32 + I32U32 + header;
- var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return ci4(i4u4(x))} return f'), this)();
- // We can't return an unsigned SIMD type. Return Int32x4, convert to unsigned here.
- observed = SIMD.Uint32x4.fromInt32x4Bits(observed)
- assertEqX4(observed, expected);
-}
-
-function CheckF4(header, code, expected) {
- // code needs to contain a local called x
- header = USE_ASM + F32 + CF32 + EXTF4 + header;
- var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return cf4(x)} return f'), this)();
- assertEqX4(observed, expected.map(Math.fround));
-}
-
-function CheckB4(header, code, expected) {
- // code needs to contain a local called x
- header = USE_ASM + B32 + CB32 + header;
- var observed = asmLink(asmCompile('glob', header + ';function f() {' + code + ';return cb4(x)} return f'), this)();
- assertEqX4(observed, expected);
-}
-
-try {
-
-// 1. Constructors
-
-// 1.1 Compilation
-assertAsmTypeFail('glob', USE_ASM + "var i4 = Int32x4 ; return {}") ;
-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Int32x4 ; return {}") ;
-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.globglob.Int32x4 ; return {}") ;
-assertAsmTypeFail('glob', USE_ASM + "var i4 = glob.Math.Int32x4 ; return {}") ;
-assertAsmTypeFail('glob', USE_ASM + "var herd = glob.SIMD.ponyX4 ; return {}") ;
-
-// 1.2 Linking
-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: 42});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: Math.fround});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: 42}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: Math.fround}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: new Array}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: SIMD.Float32x4}});
-
-var [Type, int32] = [TypedObject.StructType, TypedObject.int32];
-var MyStruct = new Type({'x': int32, 'y': int32, 'z': int32, 'w': int32});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: MyStruct}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + I32 + "return {}"), {SIMD: {Int32x4: new MyStruct}});
-
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {} return f"), {SIMD:{Int32x4: SIMD.Int32x4}})(), undefined);
-
-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: 42}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: Math.fround}});
-assertAsmLinkFail(asmCompile('glob', USE_ASM + F32 + "return {}"), {SIMD: {Float32x4: new Array}});
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {} return f"), {SIMD:{Float32x4: SIMD.Float32x4}})(), undefined);
-
-// 1.3 Correctness
-// 1.3.1 Local variables declarations
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=Int32x4(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4();} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2, 3, 4.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1, 2.0, 3, 4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4a(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,2+2|0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MIN - 1) + ");} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {var x=i4(i4(1,2,3,4));} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3,4);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {var x=i4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4();} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,f32(3.),4.);} return f");
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1.,2.,3.,4.);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MIN - 1) + ");} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3," + (INT32_MAX + 1) + ");} return f"), this)(), undefined);
-
-// Places where NumLit can creep in
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; switch(i|0) {case i4(1,2,3,4): z=1; break; default: z=2; break;}} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {i=i|0; var z=0; return i * i4(1,2,3,4) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(i) {var x=i4(1,2,3,i4(4,5,6,7))} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=i4(1,2,3,f4(4,5,6,7))} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(i) {var x=f4(1,2,3,i4(4,5,6,7))} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {return +i4(1,2,3,4)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4)|0} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f() {return f32(i4(1,2,3,4))} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "function f() {return cf4(i4(1,2,3,4))} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {return +f4(1,2,3,4)} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4)|0} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + FROUND + "function f() {return f32(f4(1,2,3,4))} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + "function f() {return ci4(f4(1,2,3,4))} return f");
-
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {return i4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f() {return ci4(i4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {return f4(1,2,3,4);} return f"), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f() {return cf4(f4(1,2,3,4));} return f"), this)(), [1, 2, 3, 4]);
-
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + "function f() {i4(1,2,3,4);} return f"), this)(), undefined);
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + "function f() {f4(1,2,3,4);} return f"), this)(), undefined);
-
-// Int32x4 ctor should accept int?
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + CI32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return ci4(i4(i32[i>>2], 2, 3, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [0, 2, 3, 4]);
-// Float32x4 ctor should accept floatish (i.e. float || float? || floatish) and doublit
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + FROUND + "var h=new glob.Float32Array(heap); function f(i) {i=i|0; return cf4(f4(h[i>>2], f32(2), f32(3), f32(4)))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [NaN, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + FROUND + "function f(i) {i=i|0; return cf4(f4(f32(1) + f32(2), f32(2), f32(3), f32(4)))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + FROUND + "function f(i) {i=i|0; return cf4(f4(f32(1) + f32(2), 2.0, 3.0, 4.0))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [3, 2, 3, 4]);
-// Bool32x4 ctor should accept int?
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + B32 + CB32 + "var i32=new glob.Int32Array(heap); function f(i) {i=i|0; return cb4(b4(i32[i>>2], 2, 0, 4))} return f"), this, {}, new ArrayBuffer(0x10000))(0x20000), [false, true, false, true]);
-
-// 1.3.2 Getters - Reading values out of lanes
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1; return e(x,1) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1; return e(x + x, 1) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=1.; return e(x, 1) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + EXTF4 + "var f32=glob.Math.fround;" + I32 + "function f() {var x=f32(1); return e(x, 1) | 0;} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return x.length|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=e(i4(1,2,3,4),1); return x|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return (e(x,0) > (1>>>0)) | 0;} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,-1) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,4) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,.5) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); return e(x,x) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTF4 + "function f() {var x=i4(1,2,3,4); return e(x,0) | 0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + EXTI4 + "function f() {var x=i4(1,2,3,4); var i=0; return e(x,i) | 0;} return f");
-
-// The signMask property is no longer supported. Replaced by allTrue / anyTrue.
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=42; return x.signMask;} return f");
-assertAsmTypeFail('glob', USE_ASM + "function f() {var x=42.; return x.signMask;} return f");
-assertAsmTypeFail('glob', USE_ASM + FROUND + "function f() {var x=f32(42.); return x.signMask;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + 'function f() { var x=i4(1,2,3,4); return x.signMask | 0 } return f');
-assertAsmTypeFail('glob', USE_ASM + U32 + 'function f() { var x=u4(1,2,3,4); return x.signMask | 0 } return f');
-assertAsmTypeFail('glob', USE_ASM + F32 + FROUND + 'var Infinity = glob.Infinity; function f() { var x=f4(0,0,0,0); x=f4(f32(1), f32(-13.37), f32(42), f32(-Infinity)); return x.signMask | 0 } return f');
-
-// Check lane extraction.
-function CheckLanes(innerBody, type, expected) {
- var coerceBefore, coerceAfter, extractLane;
-
- if (type === SIMD.Int32x4) {
- coerceBefore = '';
- coerceAfter = '|0';
- extractLane = 'ei';
- } else if (type === SIMD.Uint32x4) {
- // Coerce Uint32 lanes to double so they can be legally returned.
- coerceBefore = '+';
- coerceAfter = '';
- extractLane = 'eu';
- } else if (type === SIMD.Float32x4) {
- coerceBefore = '+';
- coerceAfter = '';
- extractLane = 'ef';
- expected = expected.map(Math.fround);
- } else if (type === SIMD.Bool32x4) {
- coerceBefore = '';
- coerceAfter = '|0';
- extractLane = 'eb';
- } else throw "unexpected type in CheckLanes";
-
- for (var i = 0; i < 4; i++) {
- var lane = i;
- var laneCheckCode = `"use asm";
- var i4=glob.SIMD.Int32x4;
- var u4=glob.SIMD.Uint32x4;
- var f4=glob.SIMD.Float32x4;
- var b4=glob.SIMD.Bool32x4;
- var ei=i4.extractLane;
- var eu=u4.extractLane;
- var ef=f4.extractLane;
- var eb=b4.extractLane;
- function f() {${innerBody}; return ${coerceBefore}${extractLane}(x, ${lane})${coerceAfter} }
- return f;`;
- assertEq(asmLink(asmCompile('glob', laneCheckCode), this)(), expected[i]);
- }
-}
-function CheckLanesI4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Int32x4, expected); }
-function CheckLanesU4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Uint32x4, expected); }
-function CheckLanesF4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Float32x4, expected); }
-function CheckLanesB4(innerBody, expected) { return CheckLanes(innerBody, SIMD.Bool32x4, expected); }
-
-CheckLanesI4('var x=i4(0,0,0,0);', [0,0,0,0]);
-CheckLanesI4('var x=i4(1,2,3,4);', [1,2,3,4]);
-CheckLanesI4('var x=i4(' + INT32_MIN + ',2,3,' + INT32_MAX + ')', [INT32_MIN,2,3,INT32_MAX]);
-CheckLanesI4('var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
-CheckLanesI4('var a=1; var b=i4(9,8,7,6); var c=13.37; var x=i4(1,2,3,4); var y=i4(5,6,7,8)', [1,2,3,4]);
-CheckLanesI4('var y=i4(5,6,7,8); var x=i4(1,2,3,4)', [1,2,3,4]);
-
-CheckLanesU4('var x=u4(0,0,0,0);', [0,0,0,0]);
-CheckLanesU4('var x=u4(1,2,3,4000000000);', [1,2,3,4000000000]);
-CheckLanesU4('var x=u4(' + INT32_MIN + ',2,3,' + UINT32_MAX + ')', [INT32_MIN>>>0,2,3,UINT32_MAX]);
-CheckLanesU4('var x=u4(1,2,3,4); var y=u4(5,6,7,8)', [1,2,3,4]);
-CheckLanesU4('var a=1; var b=u4(9,8,7,6); var c=13.37; var x=u4(1,2,3,4); var y=u4(5,6,7,8)', [1,2,3,4]);
-CheckLanesU4('var y=u4(5,6,7,8); var x=u4(1,2,3,4)', [1,2,3,4]);
-
-CheckLanesF4('var x=f4(' + INT32_MAX + ', 2, 3, ' + INT32_MIN + ')', [INT32_MAX, 2, 3, INT32_MIN]);
-CheckLanesF4('var x=f4(' + (INT32_MAX + 1) + ', 2, 3, 4)', [INT32_MAX + 1, 2, 3, 4]);
-CheckLanesF4('var x=f4(1.3, 2.4, 3.5, 98.76)', [1.3, 2.4, 3.5, 98.76]);
-CheckLanesF4('var x=f4(13.37, 2., 3., -0)', [13.37, 2, 3, -0]);
-
-CheckLanesB4('var x=b4(0,0,0,0);', [0,0,0,0]);
-CheckLanesB4('var x=b4(0,1,0,0);', [0,1,0,0]);
-CheckLanesB4('var x=b4(0,2,0,0);', [0,1,0,0]);
-CheckLanesB4('var x=b4(-1,0,1,-1);', [1,0,1,1]);
-
-// 1.3.3. Variable assignments
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4();} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1.0, 2, 3, 4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2.0, 3, 4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3.0, 4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, 4.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4(1, 2, 3, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var c=4.0; x=i4(1, 2, 3, +c);} return f");
-
-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); i32[0] = x;} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + I32 + "var i32=new glob.Int32Array(heap); function f() {var x=i4(1,2,3,4); x = i32[0];} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Float32Array(heap); function f() {var x=f4(1,2,3,4); f32[0] = x;} return f");
-assertAsmTypeFail('glob', 'ffi', 'heap', USE_ASM + F32 + "var f32=new glob.Int32Array(heap); function f() {var x=f4(1,2,3,4); x = f32[0];} return f");
-
-CheckI4('', 'var x=i4(1,2,3,4); x=i4(5,6,7,8)', [5, 6, 7, 8]);
-CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(5,c|0,7,8)', [5, 6, 7, 8]);
-CheckI4('', 'var x=i4(8,7,6,5); x=i4(e(x,3)|0,e(x,2)|0,e(x,1)|0,e(x,0)|0)', [5, 6, 7, 8]);
-
-CheckU4('', 'var x=u4(1,2,3,4); x=u4(5,6,7,4000000000)', [5, 6, 7, 4000000000]);
-CheckU4('', 'var x=u4(1,2,3,4); var c=6; x=u4(5,c|0,7,8)', [5, 6, 7, 8]);
-CheckU4('', 'var x=u4(8,7,6,5); x=u4(e(x,3)|0,e(x,2)|0,e(x,1)|0,e(x,0)|0)', [5, 6, 7, 8]);
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=f4(1,2,3,c);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=f4(1.,2.,3.,c);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4.; x=f4(1,2,3,c);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4.; x=f4(1.,2.,3.,c);} return f");
-
-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(f32(5),f32(6),y,f32(8))', [5, 6, 7, 8]);
-CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5),f32(6),f32(7),f32(8))', [5, 6, 7, 8]);
-CheckF4(FROUND, 'var x=f4(1,2,3,4); x=f4(f32(5.),f32(6.),f32(7.),f32(8.))', [5, 6, 7, 8]);
-CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(5.,6.,7.,8.)', [5, 6, 7, 8]);
-CheckF4('', 'var x=f4(1.,2.,3.,4.); x=f4(1,2,3,4)', [1, 2, 3, 4]);
-CheckF4(FROUND, 'var x=f4(1.,2.,3.,4.); var y=f32(7.); x=f4(9, 4, 2, 1)', [9, 4, 2, 1]);
-CheckF4('', 'var x=f4(8.,7.,6.,5.); x=f4(e(x,3),e(x,2),e(x,1),e(x,0))', [5, 6, 7, 8]);
-
-// Optimization for all lanes from the same definition.
-CheckI4('', 'var x=i4(1,2,3,4); var c=6; x=i4(c|0,c|0,c|0,c|0)', [6, 6, 6, 6]);
-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(7.); x=f4(y,y,y,y)', [7, 7, 7, 7]);
-CheckI4('', 'var x=i4(1,2,3,4); var c=0; c=e(x,3)|0; x=i4(c,c,c,c)', [4, 4, 4, 4]);
-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(0); y=e(x,2); x=f4(y,y,y,y)', [3, 3, 3, 3]);
-CheckI4('', 'var x=i4(1,2,3,4); var c=0; var d=0; c=e(x,3)|0; d=e(x,3)|0; x=i4(c,d,d,c)', [4, 4, 4, 4]);
-CheckF4(FROUND, 'var x=f4(1,2,3,4); var y=f32(0); var z=f32(0); y=e(x,2); z=e(x,2); x=f4(y,z,y,z)', [3, 3, 3, 3]);
-
-// Uses in ternary conditionals
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; c=x?c:c;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=1?x:c;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "function f() {var x=f4(1,2,3,4); var c=4; x=1?c:x;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "function f() {var x=f4(1,2,3,4); var y=i4(1,2,3,4); x=1?x:y;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + "function f() {var x=f4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
-assertAsmTypeFail('glob', USE_ASM + B32 + I32 + "function f() {var x=b4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + "function f() {var x=u4(1,2,3,4); var y=i4(1,2,3,4); x=1?y:y;} return f");
-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + "function f() {var x=i4(1,2,3,4); var y=u4(1,2,3,4); x=1?y:y;} return f");
-
-CheckF4('', 'var x=f4(1,2,3,4); var y=f4(4,3,2,1); x=3?y:x', [4, 3, 2, 1]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=x|0; var v=f4(1,2,3,4); var w=f4(5,6,7,8); return cf4(x?w:v);} return f"), this)(1), [5,6,7,8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(v) {v=cf4(v); var w=f4(5,6,7,8); return cf4(4?w:v);} return f"), this)(SIMD.Float32x4(1,2,3,4)), [5,6,7,8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(v, x) {v=cf4(v); x=x|0; var w=f4(5,6,7,8); return cf4(x?w:v);} return f"), this)(SIMD.Float32x4(1,2,3,4), 0), [1,2,3,4]);
-
-CheckI4('', 'var x=i4(1,2,3,4); var y=i4(4,3,2,1); x=e(x,0)?y:x', [4, 3, 2, 1]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=x|0; var v=i4(1,2,3,4); var w=i4(5,6,7,8); return ci4(x?w:v);} return f"), this)(1), [5,6,7,8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(v) {v=ci4(v); var w=i4(5,6,7,8); return ci4(4?w:v);} return f"), this)(SIMD.Int32x4(1,2,3,4)), [5,6,7,8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(v, x) {v=ci4(v); x=x|0; var w=i4(5,6,7,8); return ci4(x?w:v);} return f"), this)(SIMD.Int32x4(1,2,3,4), 0), [1,2,3,4]);
-
-// Unsigned SIMD types can't be function arguments or return values.
-assertAsmTypeFail('glob', USE_ASM + U32 + CU32 + "function f(x) {x=cu4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + U32 + CU32 + "function f() {x=u4(0,0,0,0); return cu4(x);} return f");
-
-// 1.3.4 Return values
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1; return ci4(x)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1; return ci4(x + x)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {var x=1.; return ci4(x)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + "function f() {var x=f32(1.); return ci4(x)} return f");
-
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f() {var x=i4(1,2,3,4); return ci4(x)} return f"), this)(), [1,2,3,4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f() {var x=f4(1,2,3,4); return cf4(x)} return f"), this)(), [1,2,3,4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f() {var x=b4(1,2,0,4); return cb4(x)} return f"), this)(), [true,true,false,true]);
-
-// 1.3.5 Coerce and pass arguments
-// Via check
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4();} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); ci4(x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f() {ci4(1.);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + "function f() {ci4(f32(1.));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + CF32 + "function f(x) {x=cf4(x); ci4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return 1 + ci4(x) | 0;} return f");
-
-var i32x4 = SIMD.Int32x4(1, 3, 3, 7);
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x)} return f"), this)(i32x4), undefined);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return ci4(x);} return f"), this)(i32x4), [1,3,3,7]);
-
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4();} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); cf4(x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4(1);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f() {cf4(1.);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + FROUND + "function f() {cf4(f32(1.));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + F32 + CF32 + "function f(x) {x=cf4(x); cf4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return 1 + cf4(x) | 0;} return f");
-
-var f32x4 = SIMD.Float32x4(13.37, 42.42, -0, NaN);
-assertEq(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x)} return f"), this)(f32x4), undefined);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return cf4(x);} return f"), this)(f32x4), [13.37, 42.42, -0, NaN].map(Math.fround));
-
-var b32x4 = SIMD.Bool32x4(true, false, false, true);
-assertEq(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x)} return f"), this)(b32x4), undefined);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x); return cb4(x);} return f"), this)(b32x4), [true, false, false, true]);
-
-// Legacy coercions
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4();} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {x=i4(1,2,3,4);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x,y) {x=i4(y);y=+y} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return i4(x);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {return +i4(1,2,3,4)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "function f(x) {return 0|i4(1,2,3,4)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + FROUND + "function f(x) {return f32(i4(1,2,3,4))} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "function f(x) {return f4(i4(1,2,3,4))} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return f4(x);} return f");
-
-
-function assertCaught(f) {
- var caught = false;
- try {
- f.apply(null, Array.prototype.slice.call(arguments, 1));
- } catch (e) {
- DEBUG && print('Assert caught: ', e, '\n', e.stack);
- assertEq(e instanceof TypeError, true);
- caught = true;
- }
- assertEq(caught, true);
-}
-
-var f = asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + "function f(x) {x=cf4(x); return cf4(x);} return f"), this);
-assertCaught(f);
-assertCaught(f, 1);
-assertCaught(f, {});
-assertCaught(f, "I sincerely am a SIMD typed object.");
-assertCaught(f, SIMD.Int32x4(1,2,3,4));
-assertCaught(f, SIMD.Bool32x4(true, true, false, true));
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + "function f(x) {x=ci4(x); return ci4(x);} return f"), this);
-assertCaught(f);
-assertCaught(f, 1);
-assertCaught(f, {});
-assertCaught(f, "I sincerely am a SIMD typed object.");
-assertCaught(f, SIMD.Float32x4(4,3,2,1));
-assertCaught(f, SIMD.Bool32x4(true, true, false, true));
-
-var f = asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + "function f(x) {x=cb4(x); return cb4(x);} return f"), this);
-assertCaught(f);
-assertCaught(f, 1);
-assertCaught(f, {});
-assertCaught(f, "I sincerely am a SIMD typed object.");
-assertCaught(f, SIMD.Int32x4(1,2,3,4));
-assertCaught(f, SIMD.Float32x4(4,3,2,1));
-
-// 1.3.6 Globals
-// 1.3.6.1 Local globals
-// Read
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; x=g|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; x=+g;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; x=g|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; x=+g;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); x=f32(g);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CI32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); x=ci4(g);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); x=cf4(g);} return f");
-assertAsmTypeFail('glob', USE_ASM + U32 + I32 + CI32 + "var g=u4(1,2,3,4); function f() {var x=i4(1,2,3,4); x=ci4(g);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0; function f() {var x=i4(1,2,3,4); x=g|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=0.; function f() {var x=i4(1,2,3,4); x=+g;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=i4(1,2,3,4); x=f32(g);} return f");
-
-// Unsigned SIMD globals are not allowed.
-assertAsmTypeFail('glob', USE_ASM + U32 + "var g=u4(0,0,0,0); function f() {var x=u4(1,2,3,4); x=g;} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0; function f() {var x=f4(0.,0.,0.,0.); x=g|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=0.; function f() {var x=f4(0.,0.,0.,0.); x=+g;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var f32=glob.Math.fround; var g=f32(0.); function f() {var x=f4(0.,0.,0.,0.); x=f32(g);} return f");
-
-CheckI4('var x=i4(1,2,3,4)', '', [1, 2, 3, 4]);
-CheckI4('var _=42; var h=i4(5,5,5,5); var __=13.37; var x=i4(4,7,9,2);', '', [4,7,9,2]);
-
-CheckF4('var x=f4(1.,2.,3.,4.)', '', [1, 2, 3, 4]);
-CheckF4('var _=42; var h=f4(5.,5.,5.,5.); var __=13.37; var x=f4(4.,13.37,9.,-0.);', '', [4, 13.37, 9, -0]);
-CheckF4('var x=f4(1,2,3,4)', '', [1, 2, 3, 4]);
-
-CheckB4('var x=b4(1,0,3,0)', '', [true, false, true, false]);
-CheckB4('var _=42; var h=b4(5,0,5,5); var __=13.37; var x=b4(0,0,9,2);', '', [false, false, true, true]);
-
-// Write
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4; g=x|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); function f() {var x=4.; g=+x;} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var g=i4(1,2,3,4); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4; g=x|0;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); function f() {var x=4.; g=+x;} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var g=f4(1., 2., 3., 4.); var f32=glob.Math.fround; function f() {var x=f32(4.); g=f32(x);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CI32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=ci4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + I32 + CF32 + "var g=f4(1., 2., 3., 4.); function f() {var x=i4(1,2,3,4); g=cf4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=cf4(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CI32 + "var g=i4(1,2,3,4); function f() {var x=f4(1.,2.,3.,4.); g=ci4(x);} return f");
-
-CheckI4('var x=i4(0,0,0,0);', 'x=i4(1,2,3,4)', [1,2,3,4]);
-CheckF4('var x=f4(0.,0.,0.,0.);', 'x=f4(5.,3.,4.,2.)', [5,3,4,2]);
-CheckB4('var x=b4(0,0,0,0);', 'x=b4(0,0,1,1)', [false, false, true, true]);
-
-CheckI4('var x=i4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=i4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
-CheckF4('var x=f4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=f4(1,2,3,4); y=24; z=4.9; w=23.10;', [1,2,3,4]);
-CheckB4('var x=b4(0,0,0,0); var y=42; var z=3.9; var w=13.37', 'x=b4(1,0,0,0); y=24; z=4.9; w=23.10;', [true, false, false, false]);
-
-// 1.3.6.2 Imported globals
-// Read
-var Int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {return ci4(g)} return f"), this, {g: SIMD.Int32x4(1,2,3,4)})();
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 0), 1);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 1), 2);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 2), 3);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 3), 4);
-
-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Float32x4(1,2,3,4)])
- assertCaught(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {return ci4(g)} return f"), this, {g: v});
-
-// Unsigned SIMD globals are not allowed.
-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + CU32 + "var g=cu4(ffi.g); function f() {} return f");
-
-var Float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {return cf4(g)} return f"), this, {g: SIMD.Float32x4(1,2,3,4)})();
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 0), 1);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 1), 2);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 2), 3);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 3), 4);
-
-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Int32x4(1,2,3,4)])
- assertCaught(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {return cf4(g)} return f"), this, {g: v});
-
-var Bool32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {return cb4(g)} return f"), this, {g: SIMD.Bool32x4(false, false, false, true)})();
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 0), false);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 1), false);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 2), false);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 3), true);
-
-for (var v of [1, {}, "totally legit SIMD variable", SIMD.Int32x4(1,2,3,4)])
- assertCaught(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {return cb4(g)} return f"), this, {g: v});
-
-// Write
-var Int32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + I32 + CI32 + "var g=ci4(ffi.g); function f() {g=i4(4,5,6,7); return ci4(g)} return f"), this, {g: SIMD.Int32x4(1,2,3,4)})();
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 0), 4);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 1), 5);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 2), 6);
-assertEq(SIMD.Int32x4.extractLane(Int32x4, 3), 7);
-
-var Float32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + F32 + CF32 + "var g=cf4(ffi.g); function f() {g=f4(4.,5.,6.,7.); return cf4(g)} return f"), this, {g: SIMD.Float32x4(1,2,3,4)})();
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 0), 4);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 1), 5);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 2), 6);
-assertEq(SIMD.Float32x4.extractLane(Float32x4, 3), 7);
-
-var Bool32x4 = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + "var g=cb4(ffi.g); function f() {g=b4(1,1,0,0); return cb4(g)} return f"), this, {g: SIMD.Bool32x4(1,1,1,0)})();
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 0), true);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 1), true);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 2), false);
-assertEq(SIMD.Bool32x4.extractLane(Bool32x4, 3), false);
-
-// 2. SIMD operations
-// 2.1 Compilation
-assertAsmTypeFail('glob', USE_ASM + "var add = Int32x4.add; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32A + I32 + "return {}");
-assertAsmTypeFail('glob', USE_ASM + "var g = 3; var add = g.add; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.doTheHarlemShake; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var div = i4.div; return {}");
-assertAsmTypeFail('glob', USE_ASM + "var f32 = glob.Math.fround; var i4a = f32.add; return {}");
-// Operation exists, but in a different type.
-assertAsmTypeFail('glob', USE_ASM + I32 + "var func = i4.fromUint32x4; return {}");
-
-// 2.2 Linking
-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {});
-assertAsmLinkAlwaysFail(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: Math.fround});
-
-var oldInt32x4Add = SIMD.Int32x4.add;
-var code = asmCompile('glob', USE_ASM + I32 + I32A + "return {}");
-for (var v of [42, Math.fround, SIMD.Float32x4.add, function(){}, SIMD.Int32x4.mul]) {
- SIMD.Int32x4.add = v;
- assertAsmLinkFail(code, {SIMD: {Int32x4: SIMD.Int32x4}});
-}
-SIMD.Int32x4.add = oldInt32x4Add; // finally replace the add function with the original one
-assertEq(asmLink(asmCompile('glob', USE_ASM + I32 + I32A + "function f() {} return f"), {SIMD: {Int32x4: SIMD.Int32x4}})(), undefined);
-
-// 2.3. Binary arithmetic operations
-// 2.3.1 Additions
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a();} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(13, 37);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(23.10, 19.89);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 42);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); x=i4a(x, 13.37);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(1,2,3,4); var y=4; x=i4a(x, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; x=i4a(y, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + "function f() {var x=i4(0,0,0,0); var y=4; y=i4a(x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + U32 + "function f() {var x=i4(0,0,0,0); var y=u4(1,2,3,4); y=i4a(x, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=i4a(x, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=i4a(x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); y=f4a(x, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + F32A + "function f() {var x=i4(0,0,0,0); var y=f4(4,3,2,1); x=f4a(y, y);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0; y=i4a(x,x)|0} return f');
-assertAsmTypeFail('glob', USE_ASM + I32 + I32A + 'function f() {var x=i4(1,2,3,4); var y=0.; y=+i4a(x,x)} return f');
-
-CheckI4(I32A, 'var z=i4(1,2,3,4); var y=i4(0,1,0,3); var x=i4(0,0,0,0); x=i4a(z,y)', [1,3,3,7]);
-CheckI4(I32A, 'var x=i4(2,3,4,5); var y=i4(0,1,0,3); x=i4a(x,y)', [2,4,4,8]);
-CheckI4(I32A, 'var x=i4(1,2,3,4); x=i4a(x,x)', [2,4,6,8]);
-CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=i4a(x,y)', [INT32_MIN,3,3,7]);
-CheckI4(I32A, 'var x=i4(' + INT32_MAX + ',2,3,4); var y=i4(1,1,0,3); x=ci4(i4a(x,y))', [INT32_MIN,3,3,7]);
-
-CheckU4(U32A, 'var z=u4(1,2,3,4); var y=u4(0,1,0,3); var x=u4(0,0,0,0); x=u4a(z,y)', [1,3,3,7]);
-CheckU4(U32A, 'var x=u4(2,3,4,5); var y=u4(0,1,0,3); x=u4a(x,y)', [2,4,4,8]);
-CheckU4(U32A, 'var x=u4(1,2,3,4); x=u4a(x,x)', [2,4,6,8]);
-
-CheckF4(F32A, 'var x=f4(1,2,3,4); x=f4a(x,x)', [2,4,6,8]);
-CheckF4(F32A, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [5,5,8,6]);
-CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4a(x,y)', [Math.fround(13.37) + 4,5,8,6]);
-CheckF4(F32A, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4a(x,y))', [Math.fround(13.37) + 4,5,8,6]);
-
-// 2.3.2. Subtracts
-CheckI4(I32S, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4s(x,y)', [2,1,3,2]);
-CheckI4(I32S, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4s(x,y)', [4,2,0,-2]);
-CheckI4(I32S, 'var x=i4(1,2,3,4); x=i4s(x,x)', [0,0,0,0]);
-CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=i4s(x,y)', [INT32_MAX,1,3,1]);
-CheckI4(I32S, 'var x=i4(' + INT32_MIN + ',2,3,4); var y=i4(1,1,0,3); x=ci4(i4s(x,y))', [INT32_MAX,1,3,1]);
-
-CheckU4(U32S, 'var x=u4(1,2,3,4); var y=u4(-1,1,0,2); x=u4s(x,y)', [2,1,3,2]);
-CheckU4(U32S, 'var x=u4(5,4,3,2); var y=u4(1,2,3,4); x=u4s(x,y)', [4,2,0,-2>>>0]);
-CheckU4(U32S, 'var x=u4(1,2,3,4); x=u4s(x,x)', [0,0,0,0]);
-CheckU4(U32S, 'var x=u4(' + INT32_MIN + ',2,3,4); var y=u4(1,1,0,3); x=u4s(x,y)', [INT32_MAX,1,3,1]);
-CheckU4(U32S, 'var x=u4(' + INT32_MIN + ',2,3,4); var y=u4(1,1,0,3); x=cu4(u4s(x,y))', [INT32_MAX,1,3,1]);
-
-CheckF4(F32S, 'var x=f4(1,2,3,4); x=f4s(x,x)', [0,0,0,0]);
-CheckF4(F32S, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [-3,-1,-2,2]);
-CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4s(x,y)', [Math.fround(13.37) - 4,-1,-2,2]);
-CheckF4(F32S, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4s(x,y))', [Math.fround(13.37) - 4,-1,-2,2]);
-
-{
- // Bug 1216099
- let code = `
- "use asm";
- var f4 = global.SIMD.Float32x4;
- var f4sub = f4.sub;
- const zerox4 = f4(0.0, 0.0, 0.0, 0.0);
- function f() {
- var newVelx4 = f4(0.0, 0.0, 0.0, 0.0);
- var newVelTruex4 = f4(0.0,0.0,0.0,0.0);
- newVelTruex4 = f4sub(zerox4, newVelx4);
- }
- // return statement voluntarily missing
- `;
- assertAsmTypeFail('global', code);
-}
-
-// 2.3.3. Multiplications / Divisions
-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.div; function f() {} return f");
-
-CheckI4(I32M, 'var x=i4(1,2,3,4); var y=i4(-1,1,0,2); x=i4m(x,y)', [-1,2,0,8]);
-CheckI4(I32M, 'var x=i4(5,4,3,2); var y=i4(1,2,3,4); x=i4m(x,y)', [5,8,9,8]);
-CheckI4(I32M, 'var x=i4(1,2,3,4); x=i4m(x,x)', [1,4,9,16]);
-(function() {
- var m = INT32_MIN, M = INT32_MAX, imul = Math.imul;
- CheckI4(I32M, `var x=i4(${m},${m}, ${M}, ${M}); var y=i4(2,-3,4,-5); x=i4m(x,y)`,
- [imul(m, 2), imul(m, -3), imul(M, 4), imul(M, -5)]);
- CheckI4(I32M, `var x=i4(${m},${m}, ${M}, ${M}); var y=i4(${m}, ${M}, ${m}, ${M}); x=i4m(x,y)`,
- [imul(m, m), imul(m, M), imul(M, m), imul(M, M)]);
-})();
-
-CheckF4(F32M, 'var x=f4(1,2,3,4); x=f4m(x,x)', [1,4,9,16]);
-CheckF4(F32M, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [4,6,15,8]);
-CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=f4m(x,y)', [Math.fround(13.37) * 4,6,15,8]);
-CheckF4(F32M, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=cf4(f4m(x,y))', [Math.fround(13.37) * 4,6,15,8]);
-
-var f32x4 = SIMD.Float32x4(0, NaN, -0, NaN);
-var another = SIMD.Float32x4(NaN, -1, -0, NaN);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32M + CF32 + "function f(x, y) {x=cf4(x); y=cf4(y); x=f4m(x,y); return cf4(x);} return f"), this)(f32x4, another), [NaN, NaN, 0, NaN]);
-
-CheckF4(F32D, 'var x=f4(1,2,3,4); x=f4d(x,x)', [1,1,1,1]);
-CheckF4(F32D, 'var x=f4(1,2,3,4); var y=f4(4,3,5,2); x=f4d(x,y)', [1/4,2/3,3/5,2]);
-CheckF4(F32D, 'var x=f4(13.37,1,1,4); var y=f4(4,0,-0.,2); x=f4d(x,y)', [Math.fround(13.37) / 4,+Infinity,-Infinity,2]);
-
-var f32x4 = SIMD.Float32x4(0, 0, -0, NaN);
-var another = SIMD.Float32x4(0, -0, 0, 0);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + F32D + CF32 + "function f(x,y) {x=cf4(x); y=cf4(y); x=f4d(x,y); return cf4(x);} return f"), this)(f32x4, another), [NaN, NaN, NaN, NaN]);
-
-// Unary arithmetic operators
-function CheckUnaryF4(op, checkFunc, assertFunc) {
- var _ = asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + 'var op=f4.' + op + '; function f(x){x=cf4(x); return cf4(op(x)); } return f'), this);
- return function(input) {
- var simd = SIMD.Float32x4(input[0], input[1], input[2], input[3]);
-
- var exp = input.map(Math.fround).map(checkFunc).map(Math.fround);
- var obs = _(simd);
- assertEqX4(obs, exp, assertFunc);
- }
-}
-
-function CheckUnaryI4(op, checkFunc) {
- var _ = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + 'var op=i4.' + op + '; function f(x){x=ci4(x); return ci4(op(x)); } return f'), this);
- return function(input) {
- var simd = SIMD.Int32x4(input[0], input[1], input[2], input[3]);
- assertEqX4(_(simd), input.map(checkFunc).map(function(x) { return x | 0}));
- }
-}
-
-function CheckUnaryU4(op, checkFunc) {
- var _ = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32U32 + U32 + U32I32 +
- 'var op=u4.' + op + '; function f(x){x=ci4(x); return ci4(i4u4(op(u4i4(x)))); } return f'), this);
- return function(input) {
- var simd = SIMD.Int32x4(input[0], input[1], input[2], input[3]);
- var res = SIMD.Uint32x4.fromInt32x4Bits(_(simd));
- assertEqX4(res, input.map(checkFunc).map(function(x) { return x >>> 0 }));
- }
-}
-
-function CheckUnaryB4(op, checkFunc) {
- var _ = asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + 'var op=b4.' + op + '; function f(x){x=cb4(x); return cb4(op(x)); } return f'), this);
- return function(input) {
- var simd = SIMD.Bool32x4(input[0], input[1], input[2], input[3]);
- assertEqX4(_(simd), input.map(checkFunc).map(function(x) { return !!x}));
- }
-}
-
-CheckUnaryI4('neg', function(x) { return -x })([1, -2, INT32_MIN, INT32_MAX]);
-CheckUnaryI4('not', function(x) { return ~x })([1, -2, INT32_MIN, INT32_MAX]);
-
-CheckUnaryU4('neg', function(x) { return -x })([1, -2, INT32_MIN, INT32_MAX]);
-CheckUnaryU4('not', function(x) { return ~x })([1, -2, INT32_MIN, INT32_MAX]);
-
-var CheckNotB = CheckUnaryB4('not', function(x) { return !x });
-CheckNotB([true, false, true, true]);
-CheckNotB([true, true, true, true]);
-CheckNotB([false, false, false, false]);
-
-var CheckAbs = CheckUnaryF4('abs', Math.abs);
-CheckAbs([1, 42.42, 0.63, 13.37]);
-CheckAbs([NaN, -Infinity, Infinity, 0]);
-
-var CheckNegF = CheckUnaryF4('neg', function(x) { return -x });
-CheckNegF([1, 42.42, 0.63, 13.37]);
-CheckNegF([NaN, -Infinity, Infinity, 0]);
-
-var CheckSqrt = CheckUnaryF4('sqrt', function(x) { return Math.sqrt(x); });
-CheckSqrt([1, 42.42, 0.63, 13.37]);
-CheckSqrt([NaN, -Infinity, Infinity, 0]);
-
-// ReciprocalApproximation and reciprocalSqrtApproximation give approximate results
-function assertNear(a, b) {
- if (a !== a && b === b)
- throw 'Observed NaN, expected ' + b;
- if (Math.abs(a - b) > 1e-3)
- throw 'More than 1e-3 between ' + a + ' and ' + b;
-}
-var CheckRecp = CheckUnaryF4('reciprocalApproximation', function(x) { return 1 / x; }, assertNear);
-CheckRecp([1, 42.42, 0.63, 13.37]);
-CheckRecp([NaN, -Infinity, Infinity, 0]);
-
-var CheckRecp = CheckUnaryF4('reciprocalSqrtApproximation', function(x) { return 1 / Math.sqrt(x); }, assertNear);
-CheckRecp([1, 42.42, 0.63, 13.37]);
-CheckRecp([NaN, -Infinity, Infinity, 0]);
-
-// Min/Max
-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4m=i4.min; function f() {} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var f4d=i4.max; function f() {} return f");
-
-const F32MIN = 'var min = f4.min;'
-const F32MAX = 'var max = f4.max;'
-
-CheckF4(F32MIN, 'var x=f4(1,2,3,4); x=min(x,x)', [1,2,3,4]);
-CheckF4(F32MIN, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=min(x,y)', [4,2,3,2]);
-CheckF4(F32MIN + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=min(x,y)', [2310,-Infinity,3,-0]);
-
-CheckF4(F32MIN, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=min(x,y)', [0,-0,-0,-0]);
-CheckF4(F32MIN + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=min(x,y)', [NaN, NaN, NaN, 0]);
-
-CheckF4(F32MAX, 'var x=f4(1,2,3,4); x=max(x,x)', [1,2,3,4]);
-CheckF4(F32MAX, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=max(x,y)', [13.37, 3, 5, 4]);
-CheckF4(F32MAX + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=max(x,y)', [+Infinity,3,5,0]);
-
-CheckF4(F32MAX, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=max(x,y)', [0,0,0,-0]);
-CheckF4(F32MAX + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=max(x,y)', [NaN, NaN, NaN, 0]);
-
-const F32MINNUM = 'var min = f4.minNum;'
-const F32MAXNUM = 'var max = f4.maxNum;'
-
-CheckF4(F32MINNUM, 'var x=f4(1,2,3,4); x=min(x,x)', [1,2,3,4]);
-CheckF4(F32MINNUM, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=min(x,y)', [4,2,3,2]);
-CheckF4(F32MINNUM + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=min(x,y)', [2310,-Infinity,3,-0]);
-
-CheckF4(F32MINNUM, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=min(x,y)', [0,-0,-0,-0]);
-CheckF4(F32MINNUM + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=min(x,y)', [NaN, 0, 0, 0]);
-
-CheckF4(F32MAXNUM, 'var x=f4(1,2,3,4); x=max(x,x)', [1,2,3,4]);
-CheckF4(F32MAXNUM, 'var x=f4(13.37,2,3,4); var y=f4(4,3,5,2); x=max(x,y)', [13.37, 3, 5, 4]);
-CheckF4(F32MAXNUM + FROUND + 'var Infinity = glob.Infinity;', 'var x=f4(0,0,0,0); var y=f4(2310,3,5,0); x=f4(f32(+Infinity),f32(-Infinity),f32(3),f32(-0.)); x=max(x,y)', [+Infinity,3,5,0]);
-
-CheckF4(F32MAXNUM, 'var x=f4(0,0,-0,-0); var y=f4(0,-0,0,-0); x=max(x,y)', [0,0,0,-0]);
-CheckF4(F32MAXNUM + FROUND + 'var NaN = glob.NaN;', 'var x=f4(0,0,0,0); var y=f4(0,0,0,0); var n=f32(0); n=f32(NaN); x=f4(n,0.,n,0.); y=f4(n,n,0.,0.); x=max(x,y)', [NaN, 0, 0, 0]);
-
-// ReplaceLane
-const RLF = 'var r = f4.replaceLane;';
-
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + "function f() {var x = f4(1,2,3,4); x = r(x, 0, 1);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + "function f() {var x = f4(1,2,3,4); x = r(x, 0, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(x, 4, f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(x, f32(0), f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(1, 0, f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(1, 0., f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); x = r(f32(1), 0, f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); var l = 0; x = r(x, l, f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + RLF + FROUND + "function f() {var x = f4(1,2,3,4); var y = i4(1,2,3,4); x = r(y, 0, f32(1));} return f");
-
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 0, f32(13.37));', [Math.fround(13.37), 2, 3, 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 1, f32(13.37));', [1, Math.fround(13.37), 3, 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 2, f32(13.37));', [1, 2, Math.fround(13.37), 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, f32(13.37));', [1, 2, 3, Math.fround(13.37)]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, f32(13.37) + f32(6.63));', [1, 2, 3, Math.fround(Math.fround(13.37) + Math.fround(6.63))]);
-
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 0, 13.37);', [Math.fround(13.37), 2, 3, 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 1, 13.37);', [1, Math.fround(13.37), 3, 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 2, 13.37);', [1, 2, Math.fround(13.37), 4]);
-CheckF4(RLF + FROUND, 'var x = f4(1,2,3,4); x = r(x, 3, 13.37);', [1, 2, 3, Math.fround(13.37)]);
-
-const RLI = 'var r = i4.replaceLane;';
-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 0, 42);', [42, 2, 3, 4]);
-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 1, 42);', [1, 42, 3, 4]);
-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 2, 42);', [1, 2, 42, 4]);
-CheckI4(RLI, 'var x = i4(1,2,3,4); x = r(x, 3, 42);', [1, 2, 3, 42]);
-
-const RLU = 'var r = u4.replaceLane;';
-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 0, 42);', [42, 2, 3, 4]);
-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 1, 42);', [1, 42, 3, 4]);
-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 2, 42);', [1, 2, 42, 4]);
-CheckU4(RLU, 'var x = u4(1,2,3,4); x = r(x, 3, 42);', [1, 2, 3, 42]);
-
-const RLB = 'var r = b4.replaceLane;';
-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 0, 0);', [false, true, false, false]);
-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 1, 0);', [true, false, false, false]);
-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 2, 2);', [true, true, true, false]);
-CheckB4(RLB, 'var x = b4(1,1,0,0); x = r(x, 3, 1);', [true, true, false, true]);
-
-// Comparisons
-// Comparison operators produce Bool32x4 vectors.
-const T = true;
-const F = false;
-
-const EQI32 = 'var eq = i4.equal';
-const NEI32 = 'var ne = i4.notEqual';
-const LTI32 = 'var lt = i4.lessThan;';
-const LEI32 = 'var le = i4.lessThanOrEqual';
-const GTI32 = 'var gt = i4.greaterThan;';
-const GEI32 = 'var ge = i4.greaterThanOrEqual';
-
-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=eq(a,b)', [F, F, F, F]);
-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=eq(a,b)', [F, F, F, F]);
-CheckB4(I32+EQI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=eq(a,b)', [T, F, F, F]);
-
-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=ne(a,b)', [T, T, T, T]);
-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=ne(a,b)', [T, T, T, T]);
-CheckB4(I32+NEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=ne(a,b)', [F, T, T, T]);
-
-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=lt(a,b)', [F, F, F, F]);
-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=lt(a,b)', [T, T, T, T]);
-CheckB4(I32+LTI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=lt(a,b)', [F, T, T, F]);
-
-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=le(a,b)', [F, F, F, F]);
-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=le(a,b)', [T, T, T, T]);
-CheckB4(I32+LEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=le(a,b)', [T, T, T, F]);
-
-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=gt(a,b)', [T, T, T, T]);
-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=gt(a,b)', [F, F, F, F]);
-CheckB4(I32+GTI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=gt(a,b)', [F, F, F, T]);
-
-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(1,2,3,4); var b=i4(-1,1,0,2); x=ge(a,b)', [T, T, T, T]);
-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(-1,1,0,2); var b=i4(1,2,3,4); x=ge(a,b)', [F, F, F, F]);
-CheckB4(I32+GEI32, 'var x=b4(0,0,0,0); var a=i4(1,0,3,4); var b=i4(1,1,7,0); x=ge(a,b)', [T, F, F, T]);
-
-const EQU32 = 'var eq = u4.equal';
-const NEU32 = 'var ne = u4.notEqual';
-const LTU32 = 'var lt = u4.lessThan;';
-const LEU32 = 'var le = u4.lessThanOrEqual';
-const GTU32 = 'var gt = u4.greaterThan;';
-const GEU32 = 'var ge = u4.greaterThanOrEqual';
-
-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=eq(a,b)', [F, F, F, F]);
-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=eq(a,b)', [F, F, F, F]);
-CheckB4(U32+EQU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=eq(a,b)', [T, F, F, F]);
-
-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=ne(a,b)', [T, T, T, T]);
-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=ne(a,b)', [T, T, T, T]);
-CheckB4(U32+NEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=ne(a,b)', [F, T, T, T]);
-
-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=lt(a,b)', [T, F, F, F]);
-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=lt(a,b)', [F, T, T, T]);
-CheckB4(U32+LTU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=lt(a,b)', [F, T, T, F]);
-
-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=le(a,b)', [T, F, F, F]);
-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=le(a,b)', [F, T, T, T]);
-CheckB4(U32+LEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=le(a,b)', [T, T, T, F]);
-
-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=gt(a,b)', [F, T, T, T]);
-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=gt(a,b)', [T, F, F, F]);
-CheckB4(U32+GTU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=gt(a,b)', [F, F, F, T]);
-
-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(1,2,3,4); var b=u4(-1,1,0,2); x=ge(a,b)', [F, T, T, T]);
-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(-1,1,0,2); var b=u4(1,2,3,4); x=ge(a,b)', [T, F, F, F]);
-CheckB4(U32+GEU32, 'var x=b4(0,0,0,0); var a=u4(1,0,3,4); var b=u4(1,1,7,0); x=ge(a,b)', [T, F, F, T]);
-
-const LTF32 = 'var lt=f4.lessThan;';
-const LEF32 = 'var le=f4.lessThanOrEqual;';
-const GTF32 = 'var gt=f4.greaterThan;';
-const GEF32 = 'var ge=f4.greaterThanOrEqual;';
-const EQF32 = 'var eq=f4.equal;';
-const NEF32 = 'var ne=f4.notEqual;';
-
-assertAsmTypeFail('glob', USE_ASM + F32 + "var lt=f4.lessThan; function f() {var x=f4(1,2,3,4); var y=f4(5,6,7,8); x=lt(x,y);} return f");
-
-CheckB4(F32+LTF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=lt(y,z)', [F, F, F, F]);
-CheckB4(F32+LTF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=lt(y,z)', [T, T, T, T]);
-CheckB4(F32+LTF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=lt(y,z)', [F, T, T, F]);
-CheckB4(F32+LTF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=lt(y,z);', [F, F, F, F]);
-
-CheckB4(F32+LEF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=le(y,z)', [F, F, F, F]);
-CheckB4(F32+LEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=le(y,z)', [T, T, T, T]);
-CheckB4(F32+LEF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=le(y,z)', [T, T, T, F]);
-CheckB4(F32+LEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=le(y,z);', [T, T, F, F]);
-
-CheckB4(F32+EQF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=eq(y,z)', [F, F, F, F]);
-CheckB4(F32+EQF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=eq(y,z)', [F, F, F, F]);
-CheckB4(F32+EQF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=eq(y,z)', [T, F, F, F]);
-CheckB4(F32+EQF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=eq(y,z);', [T, T, F, F]);
-
-CheckB4(F32+NEF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=ne(y,z)', [T, T, T, T]);
-CheckB4(F32+NEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=ne(y,z)', [T, T, T, T]);
-CheckB4(F32+NEF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=ne(y,z)', [F, T, T, T]);
-CheckB4(F32+NEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=ne(y,z);', [F, F, T, T]);
-
-CheckB4(F32+GTF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=gt(y,z)', [T, T, T, T]);
-CheckB4(F32+GTF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=gt(y,z)', [F, F, F, F]);
-CheckB4(F32+GTF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=gt(y,z)', [F, F, F, T]);
-CheckB4(F32+GTF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=gt(y,z);', [F, F, F, F]);
-
-CheckB4(F32+GEF32, 'var y=f4(1,2,3,4); var z=f4(-1,1,0,2); var x=b4(0,0,0,0); x=ge(y,z)', [T, T, T, T]);
-CheckB4(F32+GEF32, 'var y=f4(-1,1,0,2); var z=f4(1,2,3,4); var x=b4(0,0,0,0); x=ge(y,z)', [F, F, F, F]);
-CheckB4(F32+GEF32, 'var y=f4(1,0,3,4); var z=f4(1,1,7,0); var x=b4(0,0,0,0); x=ge(y,z)', [T, F, F, T]);
-CheckB4(F32+GEF32 + 'const nan = glob.NaN; const fround=glob.Math.fround', 'var y=f4(0,0,0,0); var z=f4(0,0,0,0); var x=b4(0,0,0,0); y=f4(fround(0.0),fround(-0.0),fround(0.0),fround(nan)); z=f4(fround(-0.0),fround(0.0),fround(nan),fround(0.0)); x=ge(y,z);', [T, T, F, F]);
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LTI32 + B32 + ANYB4 + 'function f(x){x=ci4(x); var y=i4(-1,0,4,5); var b=b4(0,0,0,0); b=lt(x,y); return anyt(b)|0;} return f'), this);
-assertEq(f(SIMD.Int32x4(1,2,3,4)), 1);
-assertEq(f(SIMD.Int32x4(1,2,4,5)), 0);
-assertEq(f(SIMD.Int32x4(1,2,3,5)), 1);
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LTI32 + B32 + ALLB4 + 'function f(x){x=ci4(x); var y=i4(-1,0,4,5); var b=b4(0,0,0,0); b=lt(x,y); return allt(b)|0;} return f'), this);
-assertEq(f(SIMD.Int32x4(-2,-2,3,4)), 1);
-assertEq(f(SIMD.Int32x4(1,2,4,5)), 0);
-assertEq(f(SIMD.Int32x4(1,2,3,5)), 0);
-
-// Conversions operators
-const CVTIF = 'var cvt=f4.fromInt32x4;';
-const CVTFI = 'var cvt=i4.fromFloat32x4;';
-const CVTUF = 'var cvt=f4.fromUint32x4;';
-const CVTFU = 'var cvt=u4.fromFloat32x4;';
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromInt32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromUint32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + U32 + "var cvt=u4.fromInt32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + U32 + "var cvt=u4.fromUint32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var cvt=f4.fromFloat32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIF + "function f() {var x=i4(1,2,3,4); x=cvt(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIF + "function f() {var x=f4(1,2,3,4); x=cvt(x);} return f");
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + CF32 + CI32 + CVTIF + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(x); return cf4(y);} return f'), this);
-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4]);
-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, Math.fround(INT32_MIN), Math.fround(INT32_MAX), -1]);
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + U32 + U32I32 + F32 + CF32 + CI32 + CVTUF +
- 'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(u4i4(x)); return cf4(y);} return f'), this);
-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4]);
-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, Math.fround(INT32_MAX+1), Math.fround(INT32_MAX), Math.fround(UINT32_MAX)]);
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + CVTFI + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); y=cvt(x); return ci4(y);} return f'), this);
-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4]);
-// Test that INT32_MIN (exactly representable as an float32) and the first
-// integer representable as an float32 can be converted.
-assertEqX4(f(SIMD.Float32x4(INT32_MIN, INT32_MAX - 64, -0, 0)), [INT32_MIN, INT32_MAX - 64, 0, 0].map(Math.fround));
-// Test boundaries: first integer less than INT32_MIN and representable as a float32
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(INT32_MIN - 129, 0, 0, 0)), RangeError);
-// INT_MAX + 1
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Math.pow(2, 31), 0, 0, 0)), RangeError);
-// Special values
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(NaN, 0, 0, 0)), RangeError);
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Infinity, 0, 0, 0)), RangeError);
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-Infinity, 0, 0, 0)), RangeError);
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + U32 + I32U32 + F32 + CF32 + CVTFU +
- 'function f(x){x=cf4(x); var y=u4(0,0,0,0); y=cvt(x); return ci4(i4u4(y));} return f'), this);
-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4]);
-// TODO: Test negative numbers > -1. They should truncate to 0. See https://github.com/tc39/ecmascript_simd/issues/315
-assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(f(SIMD.Float32x4(0xffffff00, INT32_MAX+1, -0, 0))),
- [0xffffff00, INT32_MAX+1, 0, 0].map(Math.fround));
-// Test boundaries: -1 or less.
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-1, 0, 0, 0)), RangeError);
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Math.pow(2, 32), 0, 0, 0)), RangeError);
-// Special values
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(NaN, 0, 0, 0)), RangeError);
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(Infinity, 0, 0, 0)), RangeError);
-assertThrowsInstanceOf(() => f(SIMD.Float32x4(-Infinity, 0, 0, 0)), RangeError);
-
-// Cast operators
-const CVTIFB = 'var cvt=f4.fromInt32x4Bits;';
-const CVTFIB = 'var cvt=i4.fromFloat32x4Bits;';
-
-var cast = (function() {
- var i32 = new Int32Array(1);
- var f32 = new Float32Array(i32.buffer);
-
- function fromInt32Bits(x) {
- i32[0] = x;
- return f32[0];
- }
-
- function fromFloat32Bits(x) {
- f32[0] = x;
- return i32[0];
- }
-
- return {
- fromInt32Bits,
- fromFloat32Bits
- }
-})();
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "var cvt=i4.fromInt32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + F32 + "var cvt=f4.fromFloat32x4; return {}");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIFB + "function f() {var x=i4(1,2,3,4); x=cvt(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CVTIFB + "function f() {var x=f4(1,2,3,4); x=cvt(x);} return f");
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + CVTIFB + CF32 + CI32 + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); y=cvt(x); return cf4(y);} return f'), this);
-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromInt32Bits));
-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, INT32_MIN, INT32_MAX, -1].map(cast.fromInt32Bits));
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + F32 + F32A + CVTIFB + CF32 + CI32 + 'function f(x){x=ci4(x); var y=f4(0,0,0,0); var z=f4(1,1,1,1); y=cvt(x); y=f4a(y, z); return cf4(y)} return f'), this);
-assertEqX4(f(SIMD.Int32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromInt32Bits).map((x) => x+1));
-assertEqX4(f(SIMD.Int32x4(0,INT32_MIN,INT32_MAX,-1)), [0, INT32_MIN, INT32_MAX, -1].map(cast.fromInt32Bits).map((x) => x+1));
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + CVTFIB + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); y=cvt(x); return ci4(y);} return f'), this);
-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromFloat32Bits));
-assertEqX4(f(SIMD.Float32x4(-0,NaN,+Infinity,-Infinity)), [-0, NaN, +Infinity, -Infinity].map(cast.fromFloat32Bits));
-
-var f = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + F32 + CF32 + I32A + CVTFIB + 'function f(x){x=cf4(x); var y=i4(0,0,0,0); var z=i4(1,1,1,1); y=cvt(x); y=i4a(y,z); return ci4(y);} return f'), this);
-assertEqX4(f(SIMD.Float32x4(1,2,3,4)), [1, 2, 3, 4].map(cast.fromFloat32Bits).map((x) => x+1));
-assertEqX4(f(SIMD.Float32x4(-0,NaN,+Infinity,-Infinity)), [-0, NaN, +Infinity, -Infinity].map(cast.fromFloat32Bits).map((x) => x+1));
-
-// Bitwise ops
-const ANDI32 = 'var andd=i4.and;';
-const ORI32 = 'var orr=i4.or;';
-const XORI32 = 'var xorr=i4.xor;';
-
-CheckI4(ANDI32, 'var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=andd(x,y)', [42 & 2, 1337 & 4, -1 & 7, 13 & 15]);
-CheckI4(ORI32, ' var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=orr(x,y)', [42 | 2, 1337 | 4, -1 | 7, 13 | 15]);
-CheckI4(XORI32, 'var x=i4(42,1337,-1,13); var y=i4(2, 4, 7, 15); x=xorr(x,y)', [42 ^ 2, 1337 ^ 4, -1 ^ 7, 13 ^ 15]);
-
-const ANDU32 = 'var andd=u4.and;';
-const ORU32 = 'var orr=u4.or;';
-const XORU32 = 'var xorr=u4.xor;';
-
-CheckU4(ANDU32, 'var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=andd(x,y)', [42 & 2, 1337 & 4, (-1 & 7) >>> 0, 13 & 15]);
-CheckU4(ORU32, ' var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=orr(x,y)', [42 | 2, 1337 | 4, (-1 | 7) >>> 0, 13 | 15]);
-CheckU4(XORU32, 'var x=u4(42,1337,-1,13); var y=u4(2, 4, 7, 15); x=xorr(x,y)', [42 ^ 2, 1337 ^ 4, (-1 ^ 7) >>> 0, 13 ^ 15]);
-
-const ANDB32 = 'var andd=b4.and;';
-const ORB32 = 'var orr=b4.or;';
-const XORB32 = 'var xorr=b4.xor;';
-
-CheckB4(ANDB32, 'var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=andd(x,y)', [true, false, false, false]);
-CheckB4(ORB32, ' var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=orr(x,y)', [true, true, true, false]);
-CheckB4(XORB32, 'var x=b4(1,0,1,0); var y=b4(1,1,0,0); x=xorr(x,y)', [false, true, true, false]);
-
-// No bitwise ops on Float32x4.
-const ANDF32 = 'var andd=f4.and;';
-const ORF32 = 'var orr=f4.or;';
-const XORF32 = 'var xorr=f4.xor;';
-const NOTF32 = 'var nott=f4.not;';
-
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + ANDF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=andd(x,y);} return f');
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + ORF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=orr(x,y);} return f');
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + XORF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); var y=f4(19.89, 2.4, 8.15, 16.36); x=xorr(x,y);} return f');
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + NOTF32 + 'function f() {var x=f4(42, 13.37,-1.42, 23.10); x=nott(x);} return f');
-
-// Logical ops
-const LSHI = 'var lsh=i4.shiftLeftByScalar;'
-const RSHI = 'var rsh=i4.shiftRightByScalar;'
-
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + FROUND + LSHI + "function f() {var x=f4(1,2,3,4); return ci4(lsh(x,f32(42)));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + F32 + FROUND + LSHI + "function f() {var x=f4(1,2,3,4); return ci4(lsh(x,42));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + LSHI + "function f() {var x=i4(1,2,3,4); return ci4(lsh(x,42.0));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + CI32 + FROUND + LSHI + "function f() {var x=i4(1,2,3,4); return ci4(lsh(x,f32(42)));} return f");
-
-var input = 'i4(0, 1, ' + INT32_MIN + ', ' + INT32_MAX + ')';
-var vinput = [0, 1, INT32_MIN, INT32_MAX];
-
-function Lsh(i) { return function(x) { return (x << (i & 31)) | 0 } }
-function Rsh(i) { return function(x) { return (x >> (i & 31)) | 0 } }
-
-var asmLsh = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + LSHI + 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(lsh(v, x+y))} return f;'), this)
-var asmRsh = asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + RSHI + 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(rsh(v, x+y))} return f;'), this)
-
-for (var i = 1; i < 64; i++) {
- CheckI4(LSHI, 'var x=' + input + '; x=lsh(x, ' + i + ')', vinput.map(Lsh(i)));
- CheckI4(RSHI, 'var x=' + input + '; x=rsh(x, ' + i + ')', vinput.map(Rsh(i)));
-
- assertEqX4(asmLsh(i, 3), vinput.map(Lsh(i + 3)));
- assertEqX4(asmRsh(i, 3), vinput.map(Rsh(i + 3)));
-}
-
-// Same thing for Uint32x4.
-const LSHU = 'var lsh=u4.shiftLeftByScalar;'
-const RSHU = 'var rsh=u4.shiftRightByScalar;'
-
-input = 'u4(0, 1, 0x80008000, ' + INT32_MAX + ')';
-vinput = [0, 1, 0x80008000, INT32_MAX];
-
-function uLsh(i) { return function(x) { return (x << (i & 31)) >>> 0 } }
-function uRsh(i) { return function(x) { return (x >>> (i & 31)) } }
-
-// Need to bitcast to Int32x4 before returning result.
-asmLsh = asmLink(asmCompile('glob', USE_ASM + U32 + CU32 + LSHU + I32 + CI32 + I32U32 +
- 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(i4u4(lsh(v, x+y)));} return f;'), this)
-asmRsh = asmLink(asmCompile('glob', USE_ASM + U32 + CU32 + RSHU + I32 + CI32 + I32U32 +
- 'function f(x, y){x=x|0;y=y|0; var v=' + input + ';return ci4(i4u4(rsh(v, x+y)));} return f;'), this)
-
-for (var i = 1; i < 64; i++) {
- // Constant shifts.
- CheckU4(LSHU, 'var x=' + input + '; x=lsh(x, ' + i + ')', vinput.map(uLsh(i)));
- CheckU4(RSHU, 'var x=' + input + '; x=rsh(x, ' + i + ')', vinput.map(uRsh(i)));
-
- // Dynamically computed shifts. The asm function returns a Int32x4.
- assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(asmLsh(i, 3)), vinput.map(uLsh(i + 3)));
- assertEqX4(SIMD.Uint32x4.fromInt32x4Bits(asmRsh(i, 3)), vinput.map(uRsh(i + 3)));
-}
-
-// Select
-const I32SEL = 'var i4sel = i4.select;'
-const U32SEL = 'var u4sel = u4.select;'
-const F32SEL = 'var f4sel = f4.select;'
-
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var x=f4(1,2,3,4); return ci4(i4sel(x,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=f4(1,2,3,4); var x=i4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=f4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=i4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=i4(1,2,3,4); var x=i4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); return ci4(i4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=i4(1,2,3,4); var y=f4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,2,3,4); var x=i4(1,2,3,4); var y=b4(5,6,7,8); return ci4(i4sel(m,x,y));} return f");
-
-assertAsmTypeFail('glob', USE_ASM + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); return cf4(f4sel(x,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); var x=i4(1,2,3,4); return cf4(f4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=f4(1,2,3,4); var x=f4(1,2,3,4); return cf4(f4sel(m,x,x));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=i4(1,2,3,4); var x=f4(1,2,3,4); var y=i4(5,6,7,8); return cf4(f4sel(m,x,y));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + CF32 + F32SEL + "function f() {var m=i4(1,2,3,4); var x=i4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y));} return f");
-
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,0,0,0); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 6, 7, 8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(1,1,1,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,1,0,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 2, 7, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + CI32 + I32SEL + "function f() {var m=b4(0,0,1,1); var x=i4(1,2,3,4); var y=i4(5,6,7,8); return ci4(i4sel(m,x,y)); } return f"), this)(), [5, 6, 3, 4]);
-
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,0,0,0); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 6, 7, 8]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(1,1,1,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,1,0,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 2, 7, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + B32 + F32 + CF32 + F32SEL + "function f() {var m=b4(0,0,1,1); var x=f4(1,2,3,4); var y=f4(5,6,7,8); return cf4(f4sel(m,x,y)); } return f"), this)(), [5, 6, 3, 4]);
-
-CheckU4(B32 + U32SEL, "var m=b4(0,0,0,0); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 6, 7, 8]);
-CheckU4(B32 + U32SEL, "var m=b4(1,1,1,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [1, 2, 3, 4]);
-CheckU4(B32 + U32SEL, "var m=b4(0,1,0,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 2, 7, 4]);
-CheckU4(B32 + U32SEL, "var m=b4(0,0,1,1); var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=u4sel(m,x,y);", [5, 6, 3, 4]);
-
-// Splat
-const I32SPLAT = 'var splat=i4.splat;'
-const U32SPLAT = 'var splat=u4.splat;'
-const F32SPLAT = 'var splat=f4.splat;'
-const B32SPLAT = 'var splat=b4.splat;'
-
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); var p=f4(1.,2.,3.,4.); p=splat(f32(1));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(1, 2)} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat()} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(m);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + "function f() {var m=i4(1,2,3,4); m=splat(1.0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + I32SPLAT + FROUND + "function f() {var m=i4(1,2,3,4); m=splat(f32(1.0));} return f");
-
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32SPLAT + 'function f(){return ci4(splat(42));} return f'), this)(), [42, 42, 42, 42]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + B32SPLAT + 'function f(){return cb4(splat(42));} return f'), this)(), [true, true, true, true]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + B32 + CB32 + B32SPLAT + 'function f(){return cb4(splat(0));} return f'), this)(), [false, false, false, false]);
-CheckU4(B32 + U32SPLAT, "var x=u4(1,2,3,4); x=splat(0);", [0, 0, 0, 0]);
-CheckU4(B32 + U32SPLAT, "var x=u4(1,2,3,4); x=splat(0xaabbccdd);", [0xaabbccdd, 0xaabbccdd, 0xaabbccdd, 0xaabbccdd]);
-
-const l33t = Math.fround(13.37);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1)));} return f'), this)(), [1, 1, 1, 1]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(1.0));} return f'), this)(), [1, 1, 1, 1]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1 >>> 0)));} return f'), this)(), [1, 1, 1, 1]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(13.37)));} return f'), this)(), [l33t, l33t, l33t, l33t]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(13.37));} return f'), this)(), [l33t, l33t, l33t, l33t]);
-
-var i32view = new Int32Array(heap);
-var f32view = new Float32Array(heap);
-i32view[0] = 42;
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + I32 + CI32 + I32SPLAT + 'var i32=new glob.Int32Array(heap); function f(){return ci4(splat(i32[0]));} return f'), this, {}, heap)(), [42, 42, 42, 42]);
-f32view[0] = 42;
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + F32SPLAT + 'var f32=new glob.Float32Array(heap); function f(){return cf4(splat(f32[0]));} return f'), this, {}, heap)(), [42, 42, 42, 42]);
-assertEqX4(asmLink(asmCompile('glob', 'ffi', 'heap', USE_ASM + F32 + CF32 + F32SPLAT + FROUND + 'function f(){return cf4(splat(f32(1) + f32(2)));} return f'), this, {}, heap)(), [3, 3, 3, 3]);
-
-// Dead code
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + 'function f(){var x=i4(1,2,3,4); return ci4(x); x=i4(5,6,7,8); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + EXTI4 + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); c=e(x,0)|0; return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32A + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); x=i4a(x,x); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
-assertEqX4(asmLink(asmCompile('glob', USE_ASM + I32 + CI32 + I32S + 'function f(){var x=i4(1,2,3,4); var c=0; return ci4(x); x=i4s(x,x); return ci4(x);} return f'), this)(), [1, 2, 3, 4]);
-
-// Swizzle
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, -1, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 4, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 0.0, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); x=swizzle(x, 0, 0, 0, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var swizzle=i4.swizzle; function f() {var x=i4(1,2,3,4); var y=42; x=swizzle(x, 0, 0, 0, y);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var swizzle=i4.swizzle; function f() {var x=f4(1,2,3,4); x=swizzle(x, 0, 0, 0, 0);} return f");
-
-function swizzle(arr, lanes) {
- return [arr[lanes[0]], arr[lanes[1]], arr[lanes[2]], arr[lanes[3]]];
-}
-
-var before = Date.now();
-for (var i = 0; i < Math.pow(4, 4); i++) {
- var lanes = [i & 3, (i >> 2) & 3, (i >> 4) & 3, (i >> 6) & 3];
- CheckI4('var swizzle=i4.swizzle;', 'var x=i4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
- CheckU4('var swizzle=u4.swizzle;', 'var x=u4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
- CheckF4('var swizzle=f4.swizzle;', 'var x=f4(1,2,3,4); x=swizzle(x, ' + lanes.join(',') + ')', swizzle([1,2,3,4], lanes));
-}
-DEBUG && print('time for checking all swizzles:', Date.now() - before);
-
-// Shuffle
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, -1, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 8, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 0.0, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, 0, 0, 0, 0, 0);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); x=shuffle(x, y, 0, 0, 0, x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var shuffle=i4.shuffle; function f() {var x=i4(1,2,3,4); var y=i4(1,2,3,4); var z=42; x=shuffle(x, y, 0, 0, 0, z);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + F32 + "var shuffle=i4.shuffle; function f() {var x=f4(1,2,3,4); x=shuffle(x, x, 0, 0, 0, 0);} return f");
-
-function shuffle(lhs, rhs, lanes) {
- return [(lanes[0] < 4 ? lhs : rhs)[lanes[0] % 4],
- (lanes[1] < 4 ? lhs : rhs)[lanes[1] % 4],
- (lanes[2] < 4 ? lhs : rhs)[lanes[2] % 4],
- (lanes[3] < 4 ? lhs : rhs)[lanes[3] % 4]];
-}
-
-before = Date.now();
-
-const LANE_SELECTORS = [
- // Four of lhs or four of rhs, equivalent to swizzle
- [0, 1, 2, 3],
- [4, 5, 6, 7],
- [0, 2, 3, 1],
- [4, 7, 4, 6],
- // One of lhs, three of rhs
- [0, 4, 5, 6],
- [4, 0, 5, 6],
- [4, 5, 0, 6],
- [4, 5, 6, 0],
- // Two of lhs, two of rhs
- // in one shufps
- [1, 2, 4, 5],
- [4, 5, 1, 2],
- // in two shufps
- [7, 0, 5, 2],
- [0, 7, 5, 2],
- [0, 7, 2, 5],
- [7, 0, 2, 5],
- // Three of lhs, one of rhs
- [7, 0, 1, 2],
- [0, 7, 1, 2],
- [0, 1, 7, 2],
- [0, 1, 2, 7],
- // Impl-specific special cases for swizzle
- [2, 3, 2, 3],
- [0, 1, 0, 1],
- [0, 0, 1, 1],
- [2, 2, 3, 3],
- // Impl-specific special cases for shuffle (case and swapped case)
- [2, 3, 6, 7], [6, 7, 2, 3],
- [0, 1, 4, 5], [4, 5, 0, 1],
- [0, 4, 1, 5], [4, 0, 5, 1],
- [2, 6, 3, 7], [6, 2, 7, 3],
- [4, 1, 2, 3], [0, 5, 6, 7],
- // Insert one element from rhs into lhs keeping other elements unchanged
- [7, 1, 2, 3],
- [0, 7, 2, 3],
- [0, 1, 7, 2],
- // These are effectively vector selects
- [0, 5, 2, 3],
- [0, 1, 6, 3],
- [4, 5, 2, 3],
- [4, 1, 6, 3]
-];
-
-for (var lanes of LANE_SELECTORS) {
- CheckI4('var shuffle=i4.shuffle;', 'var x=i4(1,2,3,4); var y=i4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
- CheckU4('var shuffle=u4.shuffle;', 'var x=u4(1,2,3,4); var y=u4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
- CheckF4('var shuffle=f4.shuffle;', 'var x=f4(1,2,3,4); var y=f4(5,6,7,8); x=shuffle(x, y, ' + lanes.join(',') + ')', shuffle([1,2,3,4], [5,6,7,8], lanes));
-}
-DEBUG && print('time for checking all shuffles:', Date.now() - before);
-
-// 3. Function calls
-// 3.1. No math builtins
-assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); return +fround(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); return +sin(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.ceil; function f() {var x=i4(1,2,3,4); return +ceil(x);} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); return +pow(1.0, x);} return f");
-
-assertAsmTypeFail('glob', USE_ASM + I32 + "var fround=glob.Math.fround; function f() {var x=i4(1,2,3,4); x=i4(fround(3));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var sin=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(sin(3.0));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var ceil=glob.Math.sin; function f() {var x=i4(1,2,3,4); x=i4(ceil(3.0));} return f");
-assertAsmTypeFail('glob', USE_ASM + I32 + "var pow=glob.Math.pow; function f() {var x=i4(1,2,3,4); x=i4(pow(1.0, 2.0));} return f");
-
-// 3.2. FFI calls
-// Can't pass SIMD arguments to FFI
-assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); func(x);} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + "var func=ffi.func; function f() {var x=u4(1,2,3,4); func(x);} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); func(x);} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + "var func=ffi.func; function f() {var x=b4(1,2,3,4); func(x);} return f");
-
-// Can't have FFI return SIMD values
-assertAsmTypeFail('glob', 'ffi', USE_ASM + I32 + "var func=ffi.func; function f() {var x=i4(1,2,3,4); x=i4(func());} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + U32 + "var func=ffi.func; function f() {var x=u4(1,2,3,4); x=i4(func());} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + F32 + "var func=ffi.func; function f() {var x=f4(1,2,3,4); x=f4(func());} return f");
-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + "var func=ffi.func; function f() {var x=b4(1,2,3,4); x=b4(func());} return f");
-
-// 3.3 Internal calls
-// asm.js -> asm.js
-// Retrieving values from asm.js
-var code = USE_ASM + I32 + CI32 + I32A + EXTI4 + `
- var check = ffi.check;
-
- function g() {
- var i = 0;
- var y = i4(0,0,0,0);
- var tmp = i4(0,0,0,0); var z = i4(1,1,1,1);
- var w = i4(5,5,5,5);
- for (; (i|0) < 30; i = i + 1 |0)
- y = i4a(z, y);
- y = i4a(w, y);
- check(e(y,0) | 0, e(y,1) | 0, e(y,2) | 0, e(y,3) | 0);
- return ci4(y);
- }
-
- function f(x) {
- x = ci4(x);
- var y = i4(0,0,0,0);
- y = ci4(g());
- check(e(y,0) | 0, e(y,1) | 0, e(y,2) | 0, e(y,3) | 0);
- return ci4(x);
- }
- return f;
-`;
-
-var v4 = SIMD.Int32x4(1,2,3,4);
-function check(x, y, z, w) {
- assertEq(x, 35);
- assertEq(y, 35);
- assertEq(z, 35);
- assertEq(w, 35);
-}
-var ffi = {check};
-assertEqX4(asmLink(asmCompile('glob', 'ffi', code), this, ffi)(v4), [1,2,3,4]);
-
-// Passing arguments from asm.js to asm.js
-var code = USE_ASM + I32 + CI32 + I32A + EXTI4 + `
- var assertEq = ffi.assertEq;
-
- function internal([args]) {
- [coerc]
- assertEq(e([last],0) | 0, [i] | 0);
- assertEq(e([last],1) | 0, [i] + 1 |0);
- assertEq(e([last],2) | 0, [i] + 2 |0);
- assertEq(e([last],3) | 0, [i] + 3 |0);
- }
-
- function external() {
- [decls]
- internal([args]);
- }
- return external;
-`;
-
-var ffi = {assertEq};
-var args = '';
-var decls = '';
-var coerc = '';
-for (var i = 1; i < 10; ++i) {
- var j = i;
- args += ((i > 1) ? ', ':'') + 'x' + i;
- decls += 'var x' + i + ' = i4(' + j++ + ', ' + j++ + ', ' + j++ + ', ' + j++ + ');\n';
- coerc += 'x' + i + ' = ci4(x' + i + ');\n';
- last = 'x' + i;
- var c = code.replace(/\[args\]/g, args)
- .replace(/\[last\]/g, last)
- .replace(/\[decls\]/i, decls)
- .replace(/\[coerc\]/i, coerc)
- .replace(/\[i\]/g, i);
- asmLink(asmCompile('glob', 'ffi', c), this, ffi)();
-}
-
-// Bug 1240524
-assertAsmTypeFail(USE_ASM + B32 + 'var x = b4(0, 0, 0, 0); frd(x);');
-
-// Passing boolean results to extern functions.
-// Verify that these functions are typed correctly.
-function isone(x) { return (x===1)|0 }
-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + ANYB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(anyt(i)|0)|0; } return f'), this, {isone:isone});
-assertEq(f(SIMD.Bool32x4(0,0,1,0)), 1)
-assertEq(f(SIMD.Bool32x4(0,0,0,0)), 0)
-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + ANYB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(anyt(i))|0; } return f');
-
-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + ALLB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(allt(i)|0)|0; } return f'), this, {isone:isone});
-assertEq(f(SIMD.Bool32x4(1,1,1,1)), 1)
-assertEq(f(SIMD.Bool32x4(0,1,0,0)), 0)
-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + ALLB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(allt(i))|0; } return f');
-
-var f = asmLink(asmCompile('glob', 'ffi', USE_ASM + B32 + CB32 + EXTB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(e(i,2)|0)|0; } return f'), this, {isone:isone});
-assertEq(f(SIMD.Bool32x4(1,1,1,1)), 1)
-assertEq(f(SIMD.Bool32x4(0,1,0,0)), 0)
-assertAsmTypeFail('glob', 'ffi', USE_ASM + B32 + CB32 + EXTB4 + 'var isone=ffi.isone; function f(i) { i=cb4(i); return isone(e(i,2))|0; } return f');
-
-// Stress-test for register spilling code and stack depth checks
-var code = `
- "use asm";
- var i4 = glob.SIMD.Int32x4;
- var i4a = i4.add;
- var e = i4.extractLane;
- var assertEq = ffi.assertEq;
- function g() {
- var x = i4(1,2,3,4);
- var y = i4(2,3,4,5);
- var z = i4(0,0,0,0);
- z = i4a(x, y);
- assertEq(e(z,0) | 0, 3);
- assertEq(e(z,1) | 0, 5);
- assertEq(e(z,2) | 0, 7);
- assertEq(e(z,3) | 0, 9);
- }
- return g
-`
-asmLink(asmCompile('glob', 'ffi', code), this, assertEqFFI)();
-
-(function() {
- var code = `
- "use asm";
- var i4 = glob.SIMD.Int32x4;
- var i4a = i4.add;
- var e = i4.extractLane;
- var assertEq = ffi.assertEq;
- var one = ffi.one;
-
- // Function call with arguments on the stack (1 on x64, 3 on x86)
- function h(x1, x2, x3, x4, x5, x6, x7) {
- x1=x1|0
- x2=x2|0
- x3=x3|0
- x4=x4|0
- x5=x5|0
- x6=x6|0
- x7=x7|0
- return x1 + x2 |0
- }
-
- function g() {
- var x = i4(1,2,3,4);
- var y = i4(2,3,4,5);
- var z = i4(0,0,0,0);
- var w = 1;
- z = i4a(x, y);
- w = w + (one() | 0) | 0;
- assertEq(e(z,0) | 0, 3);
- assertEq(e(z,1) | 0, 5);
- assertEq(e(z,2) | 0, 7);
- assertEq(e(z,3) | 0, 9);
- h(1, 2, 3, 4, 42, 42, 42)|0
- return w | 0;
- }
- return g
- `;
-
- asmLink(asmCompile('glob', 'ffi', code), this, {assertEq: assertEq, one: () => 1})();
-})();
-
-// Function calls with mixed arguments on the stack (SIMD and scalar). In the
-// worst case (x64), we have 6 int arg registers and 8 float registers.
-(function() {
- var code = `
- "use asm";
- var i4 = glob.SIMD.Int32x4;
- var e = i4.extractLane;
- var ci4 = i4.check;
- function h(
- // In registers:
- gpr1, gpr2, gpr3, gpr4, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8,
- // On the stack:
- sint1, ssimd1, sdouble1, ssimd2, sint2, sint3, sint4, ssimd3, sdouble2
- )
- {
- gpr1=gpr1|0;
- gpr2=gpr2|0;
- gpr3=gpr3|0;
- gpr4=gpr4|0;
-
- xmm1=+xmm1;
- xmm2=+xmm2;
- xmm3=+xmm3;
- xmm4=+xmm4;
- xmm5=+xmm5;
- xmm6=+xmm6;
- xmm7=+xmm7;
- xmm8=+xmm8;
-
- sint1=sint1|0;
- ssimd1=ci4(ssimd1);
- sdouble1=+sdouble1;
- ssimd2=ci4(ssimd2);
- sint2=sint2|0;
- sint3=sint3|0;
- sint4=sint4|0;
- ssimd3=ci4(ssimd3);
- sdouble2=+sdouble2;
-
- return (e(ssimd1,0)|0) + (e(ssimd2,1)|0) + (e(ssimd3,2)|0) + sint2 + gpr3 | 0;
- }
-
- function g() {
- var simd1 = i4(1,2,3,4);
- var simd2 = i4(5,6,7,8);
- var simd3 = i4(9,10,11,12);
- return h(1, 2, 3, 4,
- 1., 2., 3., 4., 5., 6., 7., 8.,
- 5, simd1, 9., simd2, 6, 7, 8, simd3, 10.) | 0;
- }
- return g
- `;
-
- assertEq(asmLink(asmCompile('glob', 'ffi', code), this)(), 1 + 6 + 11 + 6 + 3);
-})();
-
-// Check that the interrupt callback doesn't erase high components of simd
-// registers:
-
-// WARNING: must be the last test in this file
-(function() {
- var iters = 2000000;
- var code = `
- "use asm";
- var i4 = glob.SIMD.Int32x4;
- var i4a = i4.add;
- var ci4 = i4.check;
- function _() {
- var i = 0;
- var n = i4(0,0,0,0);
- var one = i4(1,1,1,1);
- for (; (i>>>0) < ` + iters + `; i=(i+1)>>>0) {
- n = i4a(n, one);
- }
- return ci4(n);
- }
- return _;`;
- // This test relies on the fact that setting the timeout will call the
- // interrupt callback at fixed intervals, even before the timeout.
- timeout(1000);
- var x4 = asmLink(asmCompile('glob', code), this)();
- assertEq(SIMD.Int32x4.extractLane(x4,0), iters);
- assertEq(SIMD.Int32x4.extractLane(x4,1), iters);
- assertEq(SIMD.Int32x4.extractLane(x4,2), iters);
- assertEq(SIMD.Int32x4.extractLane(x4,3), iters);
-})();
-
-} catch(e) {
- print('Stack:', e.stack)
- print('Error:', e)
- throw e;
-}
diff --git a/js/src/jit-test/tests/asm.js/testZOOB.js b/js/src/jit-test/tests/asm.js/testZOOB.js
index ae59611d6e..9874f5c1e1 100644
--- a/js/src/jit-test/tests/asm.js/testZOOB.js
+++ b/js/src/jit-test/tests/asm.js/testZOOB.js
@@ -95,106 +95,6 @@ function assertEqX4(observed, expected) {
assertEq(observed.w, expected.w);
}
-function testSimdX4(ctor, shift, scale, disp, simdName, simdCtor) {
- var arr = new ctor(ab);
-
- var c = asmCompile('glob', 'imp', 'b',
- USE_ASM +
- 'var arr=new glob.' + ctor.name + '(b); ' +
- 'var SIMD_' + simdName + ' = glob.SIMD.' + simdName + '; ' +
- 'var SIMD_' + simdName + '_check = SIMD_' + simdName + '.check; ' +
- 'var SIMD_' + simdName + '_load = SIMD_' + simdName + '.load; ' +
- 'var SIMD_' + simdName + '_load2 = SIMD_' + simdName + '.load2; ' +
- 'var SIMD_' + simdName + '_load1 = SIMD_' + simdName + '.load1; ' +
- 'var SIMD_' + simdName + '_store = SIMD_' + simdName + '.store; ' +
- 'var SIMD_' + simdName + '_store2 = SIMD_' + simdName + '.store2; ' +
- 'var SIMD_' + simdName + '_store1 = SIMD_' + simdName + '.store1; ' +
- 'function load(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
- 'function load2(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
- 'function load1(i) {i=i|0; return SIMD_' + simdName + '_check(SIMD_' + simdName + '_load1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ')) } ' +
- 'function store(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
- 'function store2(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store2(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
- 'function store1(i,j) {i=i|0;j=SIMD_' + simdName + '_check(j); SIMD_' + simdName + '_store1(arr, ((i<<' + scale + ')+' + disp + ')>>' + shift + ', j) } ' +
- 'return { load: load, load2: load2, load1: load1, store: store, store2 : store2, store1 : store1 }');
- var f = asmLink(c, this, null, ab);
-
- const RuntimeError = WebAssembly.RuntimeError;
-
- for (var i of indices) {
- var index = ((i<<scale)+disp)>>shift;
-
- var v, v2, v1;
- var t = false, t2 = false, t1 = false;
- try { v = simdCtor.load(arr, index); }
- catch (e) {
- assertEq(e instanceof RangeError, true);
- t = true;
- }
- try { v2 = simdCtor.load2(arr, index); }
- catch (e) {
- assertEq(e instanceof RangeError, true);
- t2 = true;
- }
- try { v1 = simdCtor.load1(arr, index); }
- catch (e) {
- assertEq(e instanceof RangeError, true);
- t1 = true;
- }
-
- // Loads
- var l, l2, l1;
- var r = false, r2 = false, r1 = false;
- try { l = f.load(i); }
- catch (e) {
- assertEq(e instanceof RuntimeError, true);
- r = true;
- }
- try { l2 = f.load2(i); }
- catch (e) {
- assertEq(e instanceof RuntimeError, true);
- r2 = true;
- }
- try { l1 = f.load1(i); }
- catch (e) {
- assertEq(e instanceof RuntimeError, true);
- r1 = true;
- }
- assertEq(t, r);
- assertEq(t2, r2);
- assertEq(t1, r1);
- if (!t) assertEqX4(v, l);
- if (!t2) assertEqX4(v2, l2);
- if (!t1) assertEqX4(v1, l1);
-
- // Stores
- if (!t) {
- simdCtor.store(arr, index, simdCtor.neg(v));
- f.store(i, v);
- assertEqX4(simdCtor.load(arr, index), v);
- } else
- assertThrowsInstanceOf(() => f.store(i, simdCtor()), RuntimeError);
- if (!t2) {
- simdCtor.store2(arr, index, simdCtor.neg(v2));
- f.store2(i, v2);
- assertEqX4(simdCtor.load2(arr, index), v2);
- } else
- assertThrowsInstanceOf(() => f.store2(i, simdCtor()), RuntimeError);
- if (!t1) {
- simdCtor.store1(arr, index, simdCtor.neg(v1));
- f.store1(i, v1);
- assertEqX4(simdCtor.load1(arr, index), v1);
- } else
- assertThrowsInstanceOf(() => f.store1(i, simdCtor()), RuntimeError);
- }
-}
-
-function testFloat32x4(ctor, shift, scale, disp) {
- testSimdX4(ctor, shift, scale, disp, 'Float32x4', SIMD.Float32x4);
-}
-function testInt32x4(ctor, shift, scale, disp) {
- testSimdX4(ctor, shift, scale, disp, 'Int32x4', SIMD.Int32x4);
-}
-
function test(tester, ctor, shift) {
var arr = new ctor(ab);
for (var i = 0; i < arr.length; i++)
@@ -218,15 +118,3 @@ test(testInt, Int32Array, 2);
test(testInt, Uint32Array, 2);
test(testFloat32, Float32Array, 2);
test(testFloat64, Float64Array, 3);
-if (typeof SIMD !== 'undefined' && isSimdAvailable()) {
- // Avoid pathological --ion-eager compile times due to bails in loops
- setJitCompilerOption('ion.warmup.trigger', 1000000);
-
- // Use a fresh ArrayBuffer so prepareForAsmJS can allocated a guard page
- // which SIMD.js needs. Since the original ArrayBuffer was prepared for
- // asm.js that didn't use SIMD.js, it has no guard page (on 32-bit).
- ab = new ArrayBuffer(BUF_MIN);
-
- test(testInt32x4, Uint8Array, 0);
- test(testFloat32x4, Uint8Array, 0);
-}
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
index ce27e4de19..4bcaadee47 100644
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -1959,7 +1959,6 @@ jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
case Bailout_NonStringInput:
case Bailout_NonSymbolInput:
case Bailout_NonBigIntInput:
- case Bailout_UnexpectedSimdInput:
case Bailout_NonSharedTypedArrayInput:
case Bailout_Debugger:
case Bailout_UninitializedThis:
diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
index 600b56d096..fa02c24374 100644
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -15,7 +15,6 @@
#include "jstypes.h"
#include "builtin/Eval.h"
-#include "builtin/SIMD.h"
#include "gc/Policy.h"
#include "jit/BaselineDebugModeOSR.h"
#include "jit/BaselineJIT.h"
@@ -5457,68 +5456,6 @@ TryAttachFunCallStub(JSContext* cx, ICCall_Fallback* stub, HandleScript script,
return true;
}
-// Check if target is a native SIMD operation which returns a SIMD type.
-// If so, set res to a template object matching the SIMD type produced and return true.
-static bool
-GetTemplateObjectForSimd(JSContext* cx, JSFunction* target, MutableHandleObject res)
-{
- const JSJitInfo* jitInfo = target->jitInfo();
- if (!jitInfo || jitInfo->type() != JSJitInfo::InlinableNative)
- return false;
-
- // Check if this is a native inlinable SIMD operation.
- SimdType ctrlType;
- switch (jitInfo->inlinableNative) {
- case InlinableNative::SimdInt8x16: ctrlType = SimdType::Int8x16; break;
- case InlinableNative::SimdUint8x16: ctrlType = SimdType::Uint8x16; break;
- case InlinableNative::SimdInt16x8: ctrlType = SimdType::Int16x8; break;
- case InlinableNative::SimdUint16x8: ctrlType = SimdType::Uint16x8; break;
- case InlinableNative::SimdInt32x4: ctrlType = SimdType::Int32x4; break;
- case InlinableNative::SimdUint32x4: ctrlType = SimdType::Uint32x4; break;
- case InlinableNative::SimdFloat32x4: ctrlType = SimdType::Float32x4; break;
- case InlinableNative::SimdBool8x16: ctrlType = SimdType::Bool8x16; break;
- case InlinableNative::SimdBool16x8: ctrlType = SimdType::Bool16x8; break;
- case InlinableNative::SimdBool32x4: ctrlType = SimdType::Bool32x4; break;
- // This is not an inlinable SIMD operation.
- default: return false;
- }
-
- // The controlling type is not necessarily the return type.
- // Check the actual operation.
- SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
- SimdType retType;
-
- switch(simdOp) {
- case SimdOperation::Fn_allTrue:
- case SimdOperation::Fn_anyTrue:
- case SimdOperation::Fn_extractLane:
- // These operations return a scalar. No template object needed.
- return false;
-
- case SimdOperation::Fn_lessThan:
- case SimdOperation::Fn_lessThanOrEqual:
- case SimdOperation::Fn_equal:
- case SimdOperation::Fn_notEqual:
- case SimdOperation::Fn_greaterThan:
- case SimdOperation::Fn_greaterThanOrEqual:
- // These operations return a boolean vector with the same shape as the
- // controlling type.
- retType = GetBooleanSimdType(ctrlType);
- break;
-
- default:
- // All other operations return the controlling type.
- retType = ctrlType;
- break;
- }
-
- // Create a template object based on retType.
- RootedGlobalObject global(cx, cx->global());
- Rooted<SimdTypeDescr*> descr(cx, GlobalObject::getOrCreateSimdTypeDescr(cx, global, retType));
- res.set(cx->compartment()->jitCompartment()->getSimdTemplateObjectFor(cx, descr));
- return true;
-}
-
static void
EnsureArrayGroupAnalyzed(JSContext* cx, JSObject* obj)
{
@@ -5623,9 +5560,6 @@ GetTemplateObjectForNative(JSContext* cx, HandleFunction target, const CallArgs&
return !!res;
}
- if (JitSupportsSimd() && GetTemplateObjectForSimd(cx, target, res))
- return !!res;
-
return true;
}
@@ -5639,12 +5573,6 @@ GetTemplateObjectForClassHook(JSContext* cx, JSNative hook, CallArgs& args,
return !!templateObject;
}
- if (hook == SimdTypeDescr::call && JitSupportsSimd()) {
- Rooted<SimdTypeDescr*> descr(cx, &args.callee().as<SimdTypeDescr>());
- templateObject.set(cx->compartment()->jitCompartment()->getSimdTemplateObjectFor(cx, descr));
- return !!templateObject;
- }
-
return true;
}
diff --git a/js/src/jit/BaselineInspector.cpp b/js/src/jit/BaselineInspector.cpp
index d3b0fb71b5..4c22a1839c 100644
--- a/js/src/jit/BaselineInspector.cpp
+++ b/js/src/jit/BaselineInspector.cpp
@@ -624,25 +624,6 @@ BaselineInspector::getTemplateObjectForClassHook(jsbytecode* pc, const Class* cl
return nullptr;
}
-JSObject*
-BaselineInspector::getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType)
-{
- if (!hasBaselineScript())
- return nullptr;
-
- const ICEntry& entry = icEntryFromPC(pc);
- for (ICStub* stub = entry.firstStub(); stub; stub = stub->next()) {
- if (stub->isCall_ClassHook() && stub->toCall_ClassHook()->clasp() == &SimdTypeDescr::class_) {
- JSObject* templateObj = stub->toCall_ClassHook()->templateObject();
- InlineTypedObject& typedObj = templateObj->as<InlineTypedObject>();
- if (typedObj.typeDescr().as<SimdTypeDescr>().type() == simdType)
- return templateObj;
- }
- }
-
- return nullptr;
-}
-
LexicalEnvironmentObject*
BaselineInspector::templateNamedLambdaObject()
{
diff --git a/js/src/jit/BaselineInspector.h b/js/src/jit/BaselineInspector.h
index d7c4096238..556f28ee5b 100644
--- a/js/src/jit/BaselineInspector.h
+++ b/js/src/jit/BaselineInspector.h
@@ -117,7 +117,6 @@ class BaselineInspector
JSObject* getTemplateObject(jsbytecode* pc);
JSObject* getTemplateObjectForNative(jsbytecode* pc, Native native);
JSObject* getTemplateObjectForClassHook(jsbytecode* pc, const Class* clasp);
- JSObject* getTemplateObjectForSimdCtor(jsbytecode* pc, SimdType simdType);
// Sometimes the group a template object will have is known, even if the
// object itself isn't.
diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index 0459592448..d427f5f7f4 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -163,7 +163,6 @@ CodeGenerator::CodeGenerator(MIRGenerator* gen, LIRGraph* graph, MacroAssembler*
: CodeGeneratorSpecific(gen, graph, masm)
, ionScriptLabels_(gen->alloc())
, scriptCounts_(nullptr)
- , simdRefreshTemplatesDuringLink_(0)
{
}
@@ -5758,128 +5757,6 @@ CodeGenerator::visitNewTypedObject(LNewTypedObject* lir)
masm.bind(ool->rejoin());
}
-void
-CodeGenerator::visitSimdBox(LSimdBox* lir)
-{
- FloatRegister in = ToFloatRegister(lir->input());
- Register object = ToRegister(lir->output());
- Register temp = ToRegister(lir->temp());
- InlineTypedObject* templateObject = lir->mir()->templateObject();
- gc::InitialHeap initialHeap = lir->mir()->initialHeap();
- MIRType type = lir->mir()->input()->type();
-
- registerSimdTemplate(lir->mir()->simdType());
-
- MOZ_ASSERT(lir->safepoint()->liveRegs().has(in), "Save the input register across oolCallVM");
- OutOfLineCode* ool = oolCallVM(NewTypedObjectInfo, lir,
- ArgList(ImmGCPtr(templateObject), Imm32(initialHeap)),
- StoreRegisterTo(object));
-
- masm.createGCObject(object, temp, templateObject, initialHeap, ool->entry());
- masm.bind(ool->rejoin());
-
- Address objectData(object, InlineTypedObject::offsetOfDataStart());
- switch (type) {
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- masm.storeUnalignedSimd128Int(in, objectData);
- break;
- case MIRType::Float32x4:
- masm.storeUnalignedSimd128Float(in, objectData);
- break;
- default:
- MOZ_CRASH("Unknown SIMD kind when generating code for SimdBox.");
- }
-}
-
-void
-CodeGenerator::registerSimdTemplate(SimdType simdType)
-{
- simdRefreshTemplatesDuringLink_ |= 1 << uint32_t(simdType);
-}
-
-void
-CodeGenerator::captureSimdTemplate(JSContext* cx)
-{
- JitCompartment* jitCompartment = cx->compartment()->jitCompartment();
- while (simdRefreshTemplatesDuringLink_) {
- uint32_t typeIndex = mozilla::CountTrailingZeroes32(simdRefreshTemplatesDuringLink_);
- simdRefreshTemplatesDuringLink_ ^= 1 << typeIndex;
- SimdType type = SimdType(typeIndex);
-
- // Note: the weak-reference on the template object should not have been
- // garbage collected. It is either registered by IonBuilder, or verified
- // before using it in the EagerSimdUnbox phase.
- jitCompartment->registerSimdTemplateObjectFor(type);
- }
-}
-
-void
-CodeGenerator::visitSimdUnbox(LSimdUnbox* lir)
-{
- Register object = ToRegister(lir->input());
- FloatRegister simd = ToFloatRegister(lir->output());
- Register temp = ToRegister(lir->temp());
- Label bail;
-
- // obj->group()
- masm.loadPtr(Address(object, JSObject::offsetOfGroup()), temp);
-
- // Guard that the object has the same representation as the one produced for
- // SIMD value-type.
- Address clasp(temp, ObjectGroup::offsetOfClasp());
- static_assert(!SimdTypeDescr::Opaque, "SIMD objects are transparent");
- masm.branchPtr(Assembler::NotEqual, clasp, ImmPtr(&InlineTransparentTypedObject::class_),
- &bail);
-
- // obj->type()->typeDescr()
- // The previous class pointer comparison implies that the addendumKind is
- // Addendum_TypeDescr.
- masm.loadPtr(Address(temp, ObjectGroup::offsetOfAddendum()), temp);
-
- // Check for the /Kind/ reserved slot of the TypeDescr. This is an Int32
- // Value which is equivalent to the object class check.
- static_assert(JS_DESCR_SLOT_KIND < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
- Address typeDescrKind(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_KIND));
- masm.assertTestInt32(Assembler::Equal, typeDescrKind,
- "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_KIND).isInt32())");
- masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrKind), Imm32(js::type::Simd), &bail);
-
- SimdType type = lir->mir()->simdType();
-
- // Check if the SimdTypeDescr /Type/ match the specialization of this
- // MSimdUnbox instruction.
- static_assert(JS_DESCR_SLOT_TYPE < NativeObject::MAX_FIXED_SLOTS, "Load from fixed slots");
- Address typeDescrType(temp, NativeObject::getFixedSlotOffset(JS_DESCR_SLOT_TYPE));
- masm.assertTestInt32(Assembler::Equal, typeDescrType,
- "MOZ_ASSERT(obj->type()->typeDescr()->getReservedSlot(JS_DESCR_SLOT_TYPE).isInt32())");
- masm.branch32(Assembler::NotEqual, masm.ToPayload(typeDescrType), Imm32(int32_t(type)), &bail);
-
- // Load the value from the data of the InlineTypedObject.
- Address objectData(object, InlineTypedObject::offsetOfDataStart());
- switch (lir->mir()->type()) {
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- masm.loadUnalignedSimd128Int(objectData, simd);
- break;
- case MIRType::Float32x4:
- masm.loadUnalignedSimd128Float(objectData, simd);
- break;
- default:
- MOZ_CRASH("The impossible happened!");
- }
-
- bailoutFrom(&bail, lir->snapshot());
-}
-
typedef js::NamedLambdaObject* (*NewNamedLambdaObjectFn)(JSContext*, HandleFunction, gc::InitialHeap);
static const VMFunction NewNamedLambdaObjectInfo =
FunctionInfo<NewNamedLambdaObjectFn>(NamedLambdaObject::createTemplateObject,
@@ -9548,12 +9425,6 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
RootedScript script(cx, gen->info().script());
OptimizationLevel optimizationLevel = gen->optimizationInfo().level();
- // Capture the SIMD template objects which are used during the
- // compilation. This iterates over the template objects, using read-barriers
- // to let the GC know that the generated code relies on these template
- // objects.
- captureSimdTemplate(cx);
-
// We finished the new IonScript. Invalidate the current active IonScript,
// so we can replace it with this new (probably higher optimized) version.
if (script->hasIonScript()) {
@@ -10821,7 +10692,6 @@ CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir)
const MLoadUnboxedScalar* mir = lir->mir();
Scalar::Type readType = mir->readType();
- unsigned numElems = mir->numElems();
int width = Scalar::byteSize(mir->storageType());
bool canonicalizeDouble = mir->canonicalizeDoubles();
@@ -10829,11 +10699,11 @@ CodeGenerator::visitLoadUnboxedScalar(LLoadUnboxedScalar* lir)
Label fail;
if (lir->index()->isConstant()) {
Address source(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
- masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
+ masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
} else {
BaseIndex source(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
mir->offsetAdjustment());
- masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble, numElems);
+ masm.loadFromTypedArray(readType, source, out, temp, &fail, canonicalizeDouble);
}
if (fail.used())
@@ -10884,13 +10754,12 @@ CodeGenerator::visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole* lir)
template <typename T>
static inline void
StoreToTypedArray(MacroAssembler& masm, Scalar::Type writeType, const LAllocation* value,
- const T& dest, unsigned numElems = 0)
+ const T& dest)
{
- if (Scalar::isSimdType(writeType) ||
- writeType == Scalar::Float32 ||
+ if (writeType == Scalar::Float32 ||
writeType == Scalar::Float64)
{
- masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest, numElems);
+ masm.storeToTypedFloatArray(writeType, ToFloatRegister(value), dest);
} else {
if (value->isConstant())
masm.storeToTypedIntArray(writeType, Imm32(ToInt32(value)), dest);
@@ -10908,17 +10777,16 @@ CodeGenerator::visitStoreUnboxedScalar(LStoreUnboxedScalar* lir)
const MStoreUnboxedScalar* mir = lir->mir();
Scalar::Type writeType = mir->writeType();
- unsigned numElems = mir->numElems();
int width = Scalar::byteSize(mir->storageType());
if (lir->index()->isConstant()) {
Address dest(elements, ToInt32(lir->index()) * width + mir->offsetAdjustment());
- StoreToTypedArray(masm, writeType, value, dest, numElems);
+ StoreToTypedArray(masm, writeType, value, dest);
} else {
BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width),
mir->offsetAdjustment());
- StoreToTypedArray(masm, writeType, value, dest, numElems);
+ StoreToTypedArray(masm, writeType, value, dest);
}
}
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 64fe9378b8..44804794a2 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -202,8 +202,6 @@ class CodeGenerator final : public CodeGeneratorSpecific
void visitNewObject(LNewObject* lir);
void visitOutOfLineNewObject(OutOfLineNewObject* ool);
void visitNewTypedObject(LNewTypedObject* lir);
- void visitSimdBox(LSimdBox* lir);
- void visitSimdUnbox(LSimdUnbox* lir);
void visitNewNamedLambdaObject(LNewNamedLambdaObject* lir);
void visitNewCallObject(LNewCallObject* lir);
void visitNewSingletonCallObject(LNewSingletonCallObject* lir);
@@ -581,22 +579,6 @@ class CodeGenerator final : public CodeGeneratorSpecific
#if defined(JS_ION_PERF)
PerfSpewer perfSpewer_;
#endif
-
- // This integer is a bit mask of all SimdTypeDescr::Type indexes. When a
- // MSimdBox instruction is encoded, it might have either been created by
- // IonBuilder, or by the Eager Simd Unbox phase.
- //
- // As the template objects are weak references, the JitCompartment is using
- // Read Barriers, but such barrier cannot be used during the compilation. To
- // work around this issue, the barriers are captured during
- // CodeGenerator::link.
- //
- // Instead of saving the pointers, we just save the index of the Read
- // Barriered objects in a bit mask.
- uint32_t simdRefreshTemplatesDuringLink_;
-
- void registerSimdTemplate(SimdType simdType);
- void captureSimdTemplate(JSContext* cx);
};
} // namespace jit
diff --git a/js/src/jit/EagerSimdUnbox.cpp b/js/src/jit/EagerSimdUnbox.cpp
deleted file mode 100644
index 0e6e2f2fdb..0000000000
--- a/js/src/jit/EagerSimdUnbox.cpp
+++ /dev/null
@@ -1,127 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-#include "jit/EagerSimdUnbox.h"
-
-#include "jit/MIR.h"
-#include "jit/MIRGenerator.h"
-#include "jit/MIRGraph.h"
-
-namespace js {
-namespace jit {
-
-// Do not optimize any Phi instruction which has conflicting Unbox operations,
-// as this might imply some intended polymorphism.
-static bool
-CanUnboxSimdPhi(const JitCompartment* jitCompartment, MPhi* phi, SimdType unboxType)
-{
- MOZ_ASSERT(phi->type() == MIRType::Object);
-
- // If we are unboxing, we are more than likely to have boxed this SIMD type
- // once in baseline, otherwise, we cannot create a MSimdBox as we have no
- // template object to use.
- if (!jitCompartment->maybeGetSimdTemplateObjectFor(unboxType))
- return false;
-
- MResumePoint* entry = phi->block()->entryResumePoint();
- MIRType mirType = SimdTypeToMIRType(unboxType);
- for (MUseIterator i(phi->usesBegin()), e(phi->usesEnd()); i != e; i++) {
- // If we cannot recover the Simd object at the entry of the basic block,
- // then we would have to box the content anyways.
- if ((*i)->consumer() == entry && !entry->isRecoverableOperand(*i))
- return false;
-
- if (!(*i)->consumer()->isDefinition())
- continue;
-
- MDefinition* def = (*i)->consumer()->toDefinition();
- if (def->isSimdUnbox() && def->toSimdUnbox()->type() != mirType)
- return false;
- }
-
- return true;
-}
-
-static void
-UnboxSimdPhi(const JitCompartment* jitCompartment, MIRGraph& graph, MPhi* phi, SimdType unboxType)
-{
- TempAllocator& alloc = graph.alloc();
-
- // Unbox and replace all operands.
- for (size_t i = 0, e = phi->numOperands(); i < e; i++) {
- MDefinition* op = phi->getOperand(i);
- MSimdUnbox* unbox = MSimdUnbox::New(alloc, op, unboxType);
- op->block()->insertAtEnd(unbox);
- phi->replaceOperand(i, unbox);
- }
-
- // Change the MIRType of the Phi.
- MIRType mirType = SimdTypeToMIRType(unboxType);
- phi->setResultType(mirType);
-
- MBasicBlock* phiBlock = phi->block();
- MInstruction* atRecover = phiBlock->safeInsertTop(nullptr, MBasicBlock::IgnoreRecover);
- MInstruction* at = phiBlock->safeInsertTop(atRecover);
-
- // Note, we capture the uses-list now, as new instructions are not visited.
- MUseIterator i(phi->usesBegin()), e(phi->usesEnd());
-
- // Add a MSimdBox, and replace all the Phi uses with it.
- JSObject* templateObject = jitCompartment->maybeGetSimdTemplateObjectFor(unboxType);
- InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
- MSimdBox* recoverBox = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
- recoverBox->setRecoveredOnBailout();
- phiBlock->insertBefore(atRecover, recoverBox);
-
- MSimdBox* box = nullptr;
- while (i != e) {
- MUse* use = *i++;
- MNode* ins = use->consumer();
-
- if ((ins->isDefinition() && ins->toDefinition()->isRecoveredOnBailout()) ||
- (ins->isResumePoint() && ins->toResumePoint()->isRecoverableOperand(use)))
- {
- use->replaceProducer(recoverBox);
- continue;
- }
-
- if (!box) {
- box = MSimdBox::New(alloc, nullptr, phi, inlineTypedObject, unboxType, gc::DefaultHeap);
- phiBlock->insertBefore(at, box);
- }
-
- use->replaceProducer(box);
- }
-}
-
-bool
-EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph)
-{
- const JitCompartment* jitCompartment = GetJitContext()->compartment->jitCompartment();
- for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
- if (mir->shouldCancel("Eager Simd Unbox"))
- return false;
-
- for (MInstructionReverseIterator ins = block->rbegin(); ins != block->rend(); ins++) {
- if (!ins->isSimdUnbox())
- continue;
-
- MSimdUnbox* unbox = ins->toSimdUnbox();
- if (!unbox->input()->isPhi())
- continue;
-
- MPhi* phi = unbox->input()->toPhi();
- if (!CanUnboxSimdPhi(jitCompartment, phi, unbox->simdType()))
- continue;
-
- UnboxSimdPhi(jitCompartment, graph, phi, unbox->simdType());
- }
- }
-
- return true;
-}
-
-} /* namespace jit */
-} /* namespace js */
diff --git a/js/src/jit/EagerSimdUnbox.h b/js/src/jit/EagerSimdUnbox.h
deleted file mode 100644
index 685a3d962b..0000000000
--- a/js/src/jit/EagerSimdUnbox.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
- * This Source Code Form is subject to the terms of the Mozilla Public
- * License, v. 2.0. If a copy of the MPL was not distributed with this
- * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
-
-// This file declares eager SIMD unboxing.
-#ifndef jit_EagerSimdUnbox_h
-#define jit_EagerSimdUnbox_h
-
-#include "mozilla/Attributes.h"
-
-namespace js {
-namespace jit {
-
-class MIRGenerator;
-class MIRGraph;
-
-MOZ_MUST_USE bool
-EagerSimdUnbox(MIRGenerator* mir, MIRGraph& graph);
-
-} // namespace jit
-} // namespace js
-
-#endif /* jit_EagerSimdUnbox_h */
diff --git a/js/src/jit/InlinableNatives.h b/js/src/jit/InlinableNatives.h
index 561eb4b2db..65d1693cc3 100644
--- a/js/src/jit/InlinableNatives.h
+++ b/js/src/jit/InlinableNatives.h
@@ -88,17 +88,6 @@
\
_(ObjectCreate) \
\
- _(SimdInt32x4) \
- _(SimdUint32x4) \
- _(SimdInt16x8) \
- _(SimdUint16x8) \
- _(SimdInt8x16) \
- _(SimdUint8x16) \
- _(SimdFloat32x4) \
- _(SimdBool32x4) \
- _(SimdBool16x8) \
- _(SimdBool8x16) \
- \
_(TestBailout) \
_(TestAssertFloat32) \
_(TestAssertRecoveredOnBailout) \
diff --git a/js/src/jit/Ion.cpp b/js/src/jit/Ion.cpp
index 1a8fa40dd3..1a949fae62 100644
--- a/js/src/jit/Ion.cpp
+++ b/js/src/jit/Ion.cpp
@@ -22,7 +22,6 @@
#include "jit/BaselineInspector.h"
#include "jit/BaselineJIT.h"
#include "jit/CodeGenerator.h"
-#include "jit/EagerSimdUnbox.h"
#include "jit/EdgeCaseAnalysis.h"
#include "jit/EffectiveAddressAnalysis.h"
#include "jit/FlowAliasAnalysis.h"
@@ -678,11 +677,6 @@ JitCompartment::sweep(FreeOp* fop, JSCompartment* compartment)
if (regExpTesterStub_ && !IsMarkedUnbarriered(rt, &regExpTesterStub_))
regExpTesterStub_ = nullptr;
-
- for (ReadBarrieredObject& obj : simdTemplateObjects_) {
- if (obj && IsAboutToBeFinalized(&obj))
- obj.set(nullptr);
- }
}
void
@@ -1600,17 +1594,6 @@ OptimizeMIR(MIRGenerator* mir)
return false;
}
- if (!JitOptions.disableRecoverIns && mir->optimizationInfo().eagerSimdUnboxEnabled()) {
- AutoTraceLog log(logger, TraceLogger_EagerSimdUnbox);
- if (!EagerSimdUnbox(mir, graph))
- return false;
- gs.spewPass("Eager Simd Unbox");
- AssertGraphCoherency(graph);
-
- if (mir->shouldCancel("Eager Simd Unbox"))
- return false;
- }
-
if (mir->optimizationInfo().amaEnabled()) {
AutoTraceLog log(logger, TraceLogger_AlignmentMaskAnalysis);
AlignmentMaskAnalysis ama(graph);
diff --git a/js/src/jit/IonBuilder.cpp b/js/src/jit/IonBuilder.cpp
index a440bfa598..ae3776c888 100644
--- a/js/src/jit/IonBuilder.cpp
+++ b/js/src/jit/IonBuilder.cpp
@@ -9163,11 +9163,6 @@ IonBuilder::getElemTryTypedObject(bool* emitted, MDefinition* obj, MDefinition*
return true;
switch (elemPrediction.kind()) {
- case type::Simd:
- // FIXME (bug 894105): load into a MIRType::float32x4 etc
- trackOptimizationOutcome(TrackedOutcome::GenericFailure);
- return true;
-
case type::Struct:
case type::Array:
return getElemTryComplexElemOfTypedObject(emitted,
@@ -10277,11 +10272,6 @@ IonBuilder::setElemTryTypedObject(bool* emitted, MDefinition* obj,
return true;
switch (elemPrediction.kind()) {
- case type::Simd:
- // FIXME (bug 894105): store a MIRType::float32x4 etc
- trackOptimizationOutcome(TrackedOutcome::GenericFailure);
- return true;
-
case type::Reference:
return setElemTryReferenceElemOfTypedObject(emitted, obj, index,
objPrediction, value, elemPrediction);
@@ -11897,10 +11887,6 @@ IonBuilder::getPropTryTypedObject(bool* emitted,
return true;
switch (fieldPrediction.kind()) {
- case type::Simd:
- // FIXME (bug 894104): load into a MIRType::float32x4 etc
- return true;
-
case type::Struct:
case type::Array:
return getPropTryComplexPropOfTypedObject(emitted,
@@ -12966,10 +12952,6 @@ IonBuilder::setPropTryTypedObject(bool* emitted, MDefinition* obj,
return true;
switch (fieldPrediction.kind()) {
- case type::Simd:
- // FIXME (bug 894104): store into a MIRType::float32x4 etc
- return true;
-
case type::Reference:
return setPropTryReferencePropOfTypedObject(emitted, obj, fieldOffset,
value, fieldPrediction, name);
diff --git a/js/src/jit/IonBuilder.h b/js/src/jit/IonBuilder.h
index 6f3fc027f6..96cfd821ca 100644
--- a/js/src/jit/IonBuilder.h
+++ b/js/src/jit/IonBuilder.h
@@ -914,52 +914,6 @@ class IonBuilder
InliningStatus inlineSetTypedObjectOffset(CallInfo& callInfo);
InliningStatus inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* target);
- // SIMD intrinsics and natives.
- InliningStatus inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* target);
-
- // SIMD helpers.
- bool canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
- InlineTypedObject** templateObj);
- MDefinition* unboxSimd(MDefinition* ins, SimdType type);
- IonBuilder::InliningStatus boxSimd(CallInfo& callInfo, MDefinition* ins,
- InlineTypedObject* templateObj);
- MDefinition* convertToBooleanSimdLane(MDefinition* scalar);
-
- InliningStatus inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type);
-
- InliningStatus inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
- MSimdBinaryArith::Operation op, SimdType type);
- InliningStatus inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
- MSimdBinaryBitwise::Operation op, SimdType type);
- InliningStatus inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
- MSimdBinarySaturating::Operation op, SimdType type);
- InliningStatus inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
- SimdType type);
- InliningStatus inlineSimdComp(CallInfo& callInfo, JSNative native,
- MSimdBinaryComp::Operation op, SimdType type);
- InliningStatus inlineSimdUnary(CallInfo& callInfo, JSNative native,
- MSimdUnaryArith::Operation op, SimdType type);
- InliningStatus inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type);
- InliningStatus inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type);
- InliningStatus inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type);
- InliningStatus inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
- unsigned numVectors);
- InliningStatus inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type);
- InliningStatus inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast,
- SimdType from, SimdType to);
- InliningStatus inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type);
-
- MOZ_MUST_USE bool prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType,
- MInstruction** elements, MDefinition** index,
- Scalar::Type* arrayType);
- InliningStatus inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type,
- unsigned numElems);
- InliningStatus inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type,
- unsigned numElems);
-
- InliningStatus inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
- SimdType type);
-
// Utility intrinsics.
InliningStatus inlineIsCallable(CallInfo& callInfo);
InliningStatus inlineIsConstructor(CallInfo& callInfo);
diff --git a/js/src/jit/IonOptimizationLevels.cpp b/js/src/jit/IonOptimizationLevels.cpp
index fa4cca4160..f5cfb4ebaf 100644
--- a/js/src/jit/IonOptimizationLevels.cpp
+++ b/js/src/jit/IonOptimizationLevels.cpp
@@ -24,7 +24,6 @@ OptimizationInfo::initNormalOptimizationInfo()
autoTruncate_ = true;
eaa_ = true;
- eagerSimdUnbox_ = true;
edgeCaseAnalysis_ = true;
eliminateRedundantChecks_ = true;
inlineInterpreted_ = true;
@@ -66,7 +65,6 @@ OptimizationInfo::initWasmOptimizationInfo()
ama_ = true;
autoTruncate_ = false;
- eagerSimdUnbox_ = false; // wasm has no boxing / unboxing.
edgeCaseAnalysis_ = false;
eliminateRedundantChecks_ = false;
scalarReplacement_ = false; // wasm has no objects.
diff --git a/js/src/jit/IonOptimizationLevels.h b/js/src/jit/IonOptimizationLevels.h
index 811a2d43eb..37ed713dc8 100644
--- a/js/src/jit/IonOptimizationLevels.h
+++ b/js/src/jit/IonOptimizationLevels.h
@@ -65,9 +65,6 @@ class OptimizationInfo
// Toggles whether native scripts get inlined.
bool inlineNative_;
- // Toggles whether eager unboxing of SIMD is used.
- bool eagerSimdUnbox_;
-
// Toggles whether global value numbering is used.
bool gvn_;
@@ -169,10 +166,6 @@ class OptimizationInfo
uint32_t compilerWarmUpThreshold(JSScript* script, jsbytecode* pc = nullptr) const;
- bool eagerSimdUnboxEnabled() const {
- return eagerSimdUnbox_ && !JitOptions.disableEagerSimdUnbox;
- }
-
bool gvnEnabled() const {
return gvn_ && !JitOptions.disableGvn;
}
diff --git a/js/src/jit/IonTypes.h b/js/src/jit/IonTypes.h
index 50b09cc30e..50d612ac8e 100644
--- a/js/src/jit/IonTypes.h
+++ b/js/src/jit/IonTypes.h
@@ -105,9 +105,6 @@ enum BailoutKind
Bailout_NonSymbolInput,
Bailout_NonBigIntInput,
- // SIMD Unbox expects a given type, bails out if it doesn't match.
- Bailout_UnexpectedSimdInput,
-
// Atomic operations require shared memory, bail out if the typed array
// maps unshared memory.
Bailout_NonSharedTypedArrayInput,
@@ -215,8 +212,6 @@ BailoutKindString(BailoutKind kind)
return "Bailout_NonSymbolInput";
case Bailout_NonBigIntInput:
return "Bailout_NonBigIntInput";
- case Bailout_UnexpectedSimdInput:
- return "Bailout_UnexpectedSimdInput";
case Bailout_NonSharedTypedArrayInput:
return "Bailout_NonSharedTypedArrayInput";
case Bailout_Debugger:
@@ -261,6 +256,19 @@ static const uint32_t VECTOR_SCALE_BITS = 3;
static const uint32_t VECTOR_SCALE_SHIFT = ELEMENT_TYPE_BITS + ELEMENT_TYPE_SHIFT;
static const uint32_t VECTOR_SCALE_MASK = (1 << VECTOR_SCALE_BITS) - 1;
+// The integer SIMD types have a lot of operations that do the exact same thing
+// for signed and unsigned integer types. Sometimes it is simpler to treat
+// signed and unsigned integer SIMD types as the same type, using a SimdSign to
+// distinguish the few cases where there is a difference.
+enum class SimdSign {
+ // Signedness is not applicable to this type. (i.e., Float or Bool).
+ NotApplicable,
+ // Treat as an unsigned integer with a range 0 .. 2^N-1.
+ Unsigned,
+ // Treat as a signed integer in two's complement encoding.
+ Signed,
+};
+
class SimdConstant {
public:
enum Type {
@@ -451,39 +459,6 @@ IsSimdType(MIRType type)
return ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK) != 0;
}
-// Returns the number of vector elements (hereby called "length") for a given
-// SIMD kind. It is the Y part of the name "Foo x Y".
-static inline unsigned
-SimdTypeToLength(MIRType type)
-{
- MOZ_ASSERT(IsSimdType(type));
- return 1 << ((unsigned(type) >> VECTOR_SCALE_SHIFT) & VECTOR_SCALE_MASK);
-}
-
-// Get the type of the individual lanes in a SIMD type.
-// For example, Int32x4 -> Int32, Float32x4 -> Float32 etc.
-static inline MIRType
-SimdTypeToLaneType(MIRType type)
-{
- MOZ_ASSERT(IsSimdType(type));
- static_assert(unsigned(MIRType::Last) <= ELEMENT_TYPE_MASK,
- "ELEMENT_TYPE_MASK should be larger than the last MIRType");
- return MIRType((unsigned(type) >> ELEMENT_TYPE_SHIFT) & ELEMENT_TYPE_MASK);
-}
-
-// Get the type expected when inserting a lane into a SIMD type.
-// This is the argument type expected by the MSimdValue constructors as well as
-// MSimdSplat and MSimdInsertElement.
-static inline MIRType
-SimdTypeToLaneArgumentType(MIRType type)
-{
- MIRType laneType = SimdTypeToLaneType(type);
-
- // Boolean lanes should be pre-converted to an Int32 with the values 0 or -1.
- // All other lane types are inserted directly.
- return laneType == MIRType::Boolean ? MIRType::Int32 : laneType;
-}
-
static inline MIRType
MIRTypeFromValueType(JSValueType type)
{
@@ -671,24 +646,6 @@ IsNullOrUndefined(MIRType type)
}
static inline bool
-IsFloatingPointSimdType(MIRType type)
-{
- return type == MIRType::Float32x4;
-}
-
-static inline bool
-IsIntegerSimdType(MIRType type)
-{
- return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Int32;
-}
-
-static inline bool
-IsBooleanSimdType(MIRType type)
-{
- return IsSimdType(type) && SimdTypeToLaneType(type) == MIRType::Boolean;
-}
-
-static inline bool
IsMagicType(MIRType type)
{
return type == MIRType::MagicHole ||
@@ -716,21 +673,13 @@ ScalarTypeToMIRType(Scalar::Type type)
return MIRType::Float32;
case Scalar::Float64:
return MIRType::Double;
- case Scalar::Float32x4:
- return MIRType::Float32x4;
- case Scalar::Int8x16:
- return MIRType::Int8x16;
- case Scalar::Int16x8:
- return MIRType::Int16x8;
- case Scalar::Int32x4:
- return MIRType::Int32x4;
case Scalar::BigInt64:
case Scalar::BigUint64:
MOZ_CRASH("NYI");
case Scalar::MaxTypedArrayViewType:
break;
}
- MOZ_CRASH("unexpected SIMD kind");
+ MOZ_CRASH("unexpected kind");
}
static inline const char*
diff --git a/js/src/jit/JitCompartment.h b/js/src/jit/JitCompartment.h
index f6282330e0..a6cbd86719 100644
--- a/js/src/jit/JitCompartment.h
+++ b/js/src/jit/JitCompartment.h
@@ -461,38 +461,12 @@ class JitCompartment
JitCode* regExpSearcherStub_;
JitCode* regExpTesterStub_;
- mozilla::EnumeratedArray<SimdType, SimdType::Count, ReadBarrieredObject> simdTemplateObjects_;
-
JitCode* generateStringConcatStub(JSContext* cx);
JitCode* generateRegExpMatcherStub(JSContext* cx);
JitCode* generateRegExpSearcherStub(JSContext* cx);
JitCode* generateRegExpTesterStub(JSContext* cx);
public:
- JSObject* getSimdTemplateObjectFor(JSContext* cx, Handle<SimdTypeDescr*> descr) {
- ReadBarrieredObject& tpl = simdTemplateObjects_[descr->type()];
- if (!tpl)
- tpl.set(TypedObject::createZeroed(cx, descr, 0, gc::TenuredHeap));
- return tpl.get();
- }
-
- JSObject* maybeGetSimdTemplateObjectFor(SimdType type) const {
- const ReadBarrieredObject& tpl = simdTemplateObjects_[type];
-
- // This function is used by Eager Simd Unbox phase, so we cannot use the
- // read barrier. For more information, see the comment above
- // CodeGenerator::simdRefreshTemplatesDuringLink_ .
- return tpl.unbarrieredGet();
- }
-
- // This function is used to call the read barrier, to mark the SIMD template
- // type as used. This function can only be called from the main thread.
- void registerSimdTemplateObjectFor(SimdType type) {
- ReadBarrieredObject& tpl = simdTemplateObjects_[type];
- MOZ_ASSERT(tpl.unbarrieredGet());
- tpl.get();
- }
-
JitCode* getStubCode(uint32_t key) {
ICStubCodeMap::AddPtr p = stubCodes_->lookupForAdd(key);
if (p)
diff --git a/js/src/jit/JitFrameIterator.h b/js/src/jit/JitFrameIterator.h
index b5b217b4e1..abe2dbb231 100644
--- a/js/src/jit/JitFrameIterator.h
+++ b/js/src/jit/JitFrameIterator.h
@@ -388,7 +388,6 @@ struct MaybeReadFallback
class RResumePoint;
-class RSimdBox;
// Reads frame information in snapshot-encoding order (that is, outermost frame
// to innermost frame).
@@ -450,7 +449,6 @@ class SnapshotIterator
void warnUnreadableAllocation();
private:
- friend class RSimdBox;
const FloatRegisters::RegisterContent* floatAllocationPointer(const RValueAllocation& a) const;
public:
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index daae3d53b5..78f24bb803 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -83,9 +83,6 @@ DefaultJitOptions::DefaultJitOptions()
// Toggles whether Effective Address Analysis is globally disabled.
SET_DEFAULT(disableEaa, false);
- // Toggle whether eager simd unboxing is globally disabled.
- SET_DEFAULT(disableEagerSimdUnbox, false);
-
// Toggles whether Edge Case Analysis is gobally disabled.
SET_DEFAULT(disableEdgeCaseAnalysis, false);
diff --git a/js/src/jit/JitOptions.h b/js/src/jit/JitOptions.h
index 6abb894cb9..0fbfc99829 100644
--- a/js/src/jit/JitOptions.h
+++ b/js/src/jit/JitOptions.h
@@ -49,7 +49,6 @@ struct DefaultJitOptions
bool disableInlineBacktracking;
bool disableAma;
bool disableEaa;
- bool disableEagerSimdUnbox;
bool disableEdgeCaseAnalysis;
bool disableFlowAA;
bool disableGvn;
diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h
index e9143a6f41..bee3b3ebea 100644
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -622,15 +622,6 @@ class LDefinition
case MIRType::Int64:
return LDefinition::GENERAL;
#endif
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- return LDefinition::SIMD128INT;
- case MIRType::Float32x4:
- return LDefinition::SIMD128FLOAT;
default:
MOZ_CRASH("unexpected type");
}
diff --git a/js/src/jit/Lowering.cpp b/js/src/jit/Lowering.cpp
index d315c618e7..11e7348547 100644
--- a/js/src/jit/Lowering.cpp
+++ b/js/src/jit/Lowering.cpp
@@ -3410,8 +3410,7 @@ LIRGenerator::visitLoadUnboxedScalar(MLoadUnboxedScalar* ins)
const LUse elements = useRegister(ins->elements());
const LAllocation index = useRegisterOrConstant(ins->index());
- MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()) ||
- ins->type() == MIRType::Boolean);
+ MOZ_ASSERT(IsNumberType(ins->type()) || ins->type() == MIRType::Boolean);
// We need a temp register for Uint32Array with known double result.
LDefinition tempDef = LDefinition::BogusTemp();
@@ -3502,12 +3501,7 @@ LIRGenerator::visitStoreUnboxedScalar(MStoreUnboxedScalar* ins)
MOZ_ASSERT(IsValidElementsType(ins->elements(), ins->offsetAdjustment()));
MOZ_ASSERT(ins->index()->type() == MIRType::Int32);
- if (ins->isSimdWrite()) {
- MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32x4, ins->value()->type() == MIRType::Float32x4);
- MOZ_ASSERT_IF(ins->writeType() == Scalar::Int8x16, ins->value()->type() == MIRType::Int8x16);
- MOZ_ASSERT_IF(ins->writeType() == Scalar::Int16x8, ins->value()->type() == MIRType::Int16x8);
- MOZ_ASSERT_IF(ins->writeType() == Scalar::Int32x4, ins->value()->type() == MIRType::Int32x4);
- } else if (ins->isFloatWrite()) {
+ if (ins->isFloatWrite()) {
MOZ_ASSERT_IF(ins->writeType() == Scalar::Float32, ins->value()->type() == MIRType::Float32);
MOZ_ASSERT_IF(ins->writeType() == Scalar::Float64, ins->value()->type() == MIRType::Double);
} else {
@@ -4284,7 +4278,7 @@ LIRGenerator::visitWasmParameter(MWasmParameter* ins)
#endif
);
} else {
- MOZ_ASSERT(IsNumberType(ins->type()) || IsSimdType(ins->type()));
+ MOZ_ASSERT(IsNumberType(ins->type()));
defineFixed(new(alloc()) LWasmParameter, ins, LArgument(abi.offsetFromArgBase()));
}
}
@@ -4310,8 +4304,6 @@ LIRGenerator::visitWasmReturn(MWasmReturn* ins)
lir->setOperand(0, useFixed(rval, ReturnFloat32Reg));
else if (rval->type() == MIRType::Double)
lir->setOperand(0, useFixed(rval, ReturnDoubleReg));
- else if (IsSimdType(rval->type()))
- lir->setOperand(0, useFixed(rval, ReturnSimd128Reg));
else if (rval->type() == MIRType::Int32)
lir->setOperand(0, useFixed(rval, ReturnReg));
else
@@ -4341,7 +4333,7 @@ LIRGenerator::visitWasmStackArg(MWasmStackArg* ins)
{
if (ins->arg()->type() == MIRType::Int64) {
add(new(alloc()) LWasmStackArgI64(useInt64RegisterOrConstantAtStart(ins->arg())), ins);
- } else if (IsFloatingPointType(ins->arg()->type()) || IsSimdType(ins->arg()->type())) {
+ } else if (IsFloatingPointType(ins->arg()->type())) {
MOZ_ASSERT(!ins->arg()->isEmittedAtUses());
add(new(alloc()) LWasmStackArg(useRegisterAtStart(ins->arg())), ins);
} else {
@@ -4456,217 +4448,6 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck* ins)
}
void
-LIRGenerator::visitSimdBox(MSimdBox* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->input()->type()));
- LUse in = useRegister(ins->input());
- LSimdBox* lir = new(alloc()) LSimdBox(in, temp());
- define(lir, ins);
- assignSafepoint(lir, ins);
-}
-
-void
-LIRGenerator::visitSimdUnbox(MSimdUnbox* ins)
-{
- MOZ_ASSERT(ins->input()->type() == MIRType::Object);
- MOZ_ASSERT(IsSimdType(ins->type()));
- LUse in = useRegister(ins->input());
- LSimdUnbox* lir = new(alloc()) LSimdUnbox(in, temp());
- assignSnapshot(lir, Bailout_UnexpectedSimdInput);
- define(lir, ins);
-}
-
-void
-LIRGenerator::visitSimdConstant(MSimdConstant* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- switch (ins->type()) {
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- define(new(alloc()) LSimd128Int(), ins);
- break;
- case MIRType::Float32x4:
- define(new(alloc()) LSimd128Float(), ins);
- break;
- default:
- MOZ_CRASH("Unknown SIMD kind when generating constant");
- }
-}
-
-void
-LIRGenerator::visitSimdConvert(MSimdConvert* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
- MDefinition* input = ins->input();
- LUse use = useRegister(input);
- if (ins->type() == MIRType::Int32x4) {
- MOZ_ASSERT(input->type() == MIRType::Float32x4);
- switch (ins->signedness()) {
- case SimdSign::Signed: {
- LFloat32x4ToInt32x4* lir = new(alloc()) LFloat32x4ToInt32x4(use, temp());
- if (!gen->compilingWasm())
- assignSnapshot(lir, Bailout_BoundsCheck);
- define(lir, ins);
- break;
- }
- case SimdSign::Unsigned: {
- LFloat32x4ToUint32x4* lir =
- new (alloc()) LFloat32x4ToUint32x4(use, temp(), temp(LDefinition::SIMD128INT));
- if (!gen->compilingWasm())
- assignSnapshot(lir, Bailout_BoundsCheck);
- define(lir, ins);
- break;
- }
- default:
- MOZ_CRASH("Unexpected SimdConvert sign");
- }
- } else if (ins->type() == MIRType::Float32x4) {
- MOZ_ASSERT(input->type() == MIRType::Int32x4);
- MOZ_ASSERT(ins->signedness() == SimdSign::Signed, "Unexpected SimdConvert sign");
- define(new(alloc()) LInt32x4ToFloat32x4(use), ins);
- } else {
- MOZ_CRASH("Unknown SIMD kind when generating constant");
- }
-}
-
-void
-LIRGenerator::visitSimdReinterpretCast(MSimdReinterpretCast* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()) && IsSimdType(ins->input()->type()));
- MDefinition* input = ins->input();
- LUse use = useRegisterAtStart(input);
- // :TODO: (Bug 1132894) We have to allocate a different register as redefine
- // and/or defineReuseInput are not yet capable of reusing the same register
- // with a different register type.
- define(new(alloc()) LSimdReinterpretCast(use), ins);
-}
-
-void
-LIRGenerator::visitSimdAllTrue(MSimdAllTrue* ins)
-{
- MDefinition* input = ins->input();
- MOZ_ASSERT(IsBooleanSimdType(input->type()));
-
- LUse use = useRegisterAtStart(input);
- define(new(alloc()) LSimdAllTrue(use), ins);
-}
-
-void
-LIRGenerator::visitSimdAnyTrue(MSimdAnyTrue* ins)
-{
- MDefinition* input = ins->input();
- MOZ_ASSERT(IsBooleanSimdType(input->type()));
-
- LUse use = useRegisterAtStart(input);
- define(new(alloc()) LSimdAnyTrue(use), ins);
-}
-
-void
-LIRGenerator::visitSimdUnaryArith(MSimdUnaryArith* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->input()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- // Cannot be at start, as the ouput is used as a temporary to store values.
- LUse in = use(ins->input());
-
- switch (ins->type()) {
- case MIRType::Int8x16:
- case MIRType::Bool8x16:
- define(new (alloc()) LSimdUnaryArithIx16(in), ins);
- break;
- case MIRType::Int16x8:
- case MIRType::Bool16x8:
- define(new (alloc()) LSimdUnaryArithIx8(in), ins);
- break;
- case MIRType::Int32x4:
- case MIRType::Bool32x4:
- define(new (alloc()) LSimdUnaryArithIx4(in), ins);
- break;
- case MIRType::Float32x4:
- define(new (alloc()) LSimdUnaryArithFx4(in), ins);
- break;
- default:
- MOZ_CRASH("Unknown SIMD kind for unary operation");
- }
-}
-
-void
-LIRGenerator::visitSimdBinaryComp(MSimdBinaryComp* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
- MOZ_ASSERT(IsBooleanSimdType(ins->type()));
-
- if (ShouldReorderCommutative(ins->lhs(), ins->rhs(), ins))
- ins->reverse();
-
- switch (ins->specialization()) {
- case MIRType::Int8x16: {
- MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
- LSimdBinaryCompIx16* add = new (alloc()) LSimdBinaryCompIx16();
- lowerForFPU(add, ins, ins->lhs(), ins->rhs());
- return;
- }
- case MIRType::Int16x8: {
- MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
- LSimdBinaryCompIx8* add = new (alloc()) LSimdBinaryCompIx8();
- lowerForFPU(add, ins, ins->lhs(), ins->rhs());
- return;
- }
- case MIRType::Int32x4: {
- MOZ_ASSERT(ins->signedness() == SimdSign::Signed);
- LSimdBinaryCompIx4* add = new (alloc()) LSimdBinaryCompIx4();
- lowerForCompIx4(add, ins, ins->lhs(), ins->rhs());
- return;
- }
- case MIRType::Float32x4: {
- MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
- LSimdBinaryCompFx4* add = new (alloc()) LSimdBinaryCompFx4();
- lowerForCompFx4(add, ins, ins->lhs(), ins->rhs());
- return;
- }
- default:
- MOZ_CRASH("Unknown compare type when comparing values");
- }
-}
-
-void
-LIRGenerator::visitSimdBinaryBitwise(MSimdBinaryBitwise* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- MDefinition* lhs = ins->lhs();
- MDefinition* rhs = ins->rhs();
- ReorderCommutative(&lhs, &rhs, ins);
- LSimdBinaryBitwise* lir = new(alloc()) LSimdBinaryBitwise;
- lowerForFPU(lir, ins, lhs, rhs);
-}
-
-void
-LIRGenerator::visitSimdShift(MSimdShift* ins)
-{
- MOZ_ASSERT(IsIntegerSimdType(ins->type()));
- MOZ_ASSERT(ins->lhs()->type() == ins->type());
- MOZ_ASSERT(ins->rhs()->type() == MIRType::Int32);
-
- LUse vector = useRegisterAtStart(ins->lhs());
- LAllocation value = useRegisterOrConstant(ins->rhs());
- // We need a temp register to mask the shift amount, but not if the shift
- // amount is a constant.
- LDefinition tempReg = value.isConstant() ? LDefinition::BogusTemp() : temp();
- LSimdShift* lir = new(alloc()) LSimdShift(vector, value, tempReg);
- defineReuseInput(lir, ins, 0);
-}
-
-void
LIRGenerator::visitLexicalCheck(MLexicalCheck* ins)
{
MDefinition* input = ins->input();
diff --git a/js/src/jit/Lowering.h b/js/src/jit/Lowering.h
index d0e00fb82f..9217ed9dcd 100644
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -304,17 +304,6 @@ class LIRGenerator : public LIRGeneratorSpecific
void visitGetDOMProperty(MGetDOMProperty* ins);
void visitGetDOMMember(MGetDOMMember* ins);
void visitRecompileCheck(MRecompileCheck* ins);
- void visitSimdBox(MSimdBox* ins);
- void visitSimdUnbox(MSimdUnbox* ins);
- void visitSimdUnaryArith(MSimdUnaryArith* ins);
- void visitSimdBinaryComp(MSimdBinaryComp* ins);
- void visitSimdBinaryBitwise(MSimdBinaryBitwise* ins);
- void visitSimdShift(MSimdShift* ins);
- void visitSimdConstant(MSimdConstant* ins);
- void visitSimdConvert(MSimdConvert* ins);
- void visitSimdReinterpretCast(MSimdReinterpretCast* ins);
- void visitSimdAllTrue(MSimdAllTrue* ins);
- void visitSimdAnyTrue(MSimdAnyTrue* ins);
void visitPhi(MPhi* ins);
void visitBeta(MBeta* ins);
void visitObjectState(MObjectState* ins);
diff --git a/js/src/jit/MCallOptimize.cpp b/js/src/jit/MCallOptimize.cpp
index 064c7ee7d2..0f78ef3f2f 100644
--- a/js/src/jit/MCallOptimize.cpp
+++ b/js/src/jit/MCallOptimize.cpp
@@ -10,7 +10,6 @@
#include "jsstr.h"
#include "builtin/AtomicsObject.h"
-#include "builtin/SIMD.h"
#include "builtin/TestingFunctions.h"
#include "builtin/TypedObject.h"
#include "builtin/intl/Collator.h"
@@ -233,28 +232,6 @@ IonBuilder::inlineNativeCall(CallInfo& callInfo, JSFunction* target)
case InlinableNative::ObjectCreate:
return inlineObjectCreate(callInfo);
- // SIMD natives.
- case InlinableNative::SimdInt32x4:
- return inlineSimd(callInfo, target, SimdType::Int32x4);
- case InlinableNative::SimdUint32x4:
- return inlineSimd(callInfo, target, SimdType::Uint32x4);
- case InlinableNative::SimdInt16x8:
- return inlineSimd(callInfo, target, SimdType::Int16x8);
- case InlinableNative::SimdUint16x8:
- return inlineSimd(callInfo, target, SimdType::Uint16x8);
- case InlinableNative::SimdInt8x16:
- return inlineSimd(callInfo, target, SimdType::Int8x16);
- case InlinableNative::SimdUint8x16:
- return inlineSimd(callInfo, target, SimdType::Uint8x16);
- case InlinableNative::SimdFloat32x4:
- return inlineSimd(callInfo, target, SimdType::Float32x4);
- case InlinableNative::SimdBool32x4:
- return inlineSimd(callInfo, target, SimdType::Bool32x4);
- case InlinableNative::SimdBool16x8:
- return inlineSimd(callInfo, target, SimdType::Bool16x8);
- case InlinableNative::SimdBool8x16:
- return inlineSimd(callInfo, target, SimdType::Bool8x16);
-
// Testing functions.
case InlinableNative::TestBailout:
return inlineBailout(callInfo);
@@ -430,9 +407,6 @@ IonBuilder::inlineNonFunctionCall(CallInfo& callInfo, JSObject* target)
if (callInfo.constructing() && target->constructHook() == TypedObject::construct)
return inlineConstructTypedObject(callInfo, &target->as<TypeDescr>());
- if (!callInfo.constructing() && target->callHook() == SimdTypeDescr::call)
- return inlineConstructSimdObject(callInfo, &target->as<SimdTypeDescr>());
-
return InliningStatus_NotInlined;
}
@@ -3384,767 +3358,6 @@ IonBuilder::inlineConstructTypedObject(CallInfo& callInfo, TypeDescr* descr)
return InliningStatus_Inlined;
}
-// Main entry point for SIMD inlining.
-// When the controlling simdType is an integer type, sign indicates whether the lanes should
-// be treated as signed or unsigned integers.
-IonBuilder::InliningStatus
-IonBuilder::inlineSimd(CallInfo& callInfo, JSFunction* target, SimdType type)
-{
- if (!JitSupportsSimd()) {
- trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
- return InliningStatus_NotInlined;
- }
-
- JSNative native = target->native();
- const JSJitInfo* jitInfo = target->jitInfo();
- MOZ_ASSERT(jitInfo && jitInfo->type() == JSJitInfo::InlinableNative);
- SimdOperation simdOp = SimdOperation(jitInfo->nativeOp);
-
- switch(simdOp) {
- case SimdOperation::Constructor:
- // SIMD constructor calls are handled via inlineNonFunctionCall(), so
- // they won't show up here where target is required to be a JSFunction.
- // See also inlineConstructSimdObject().
- MOZ_CRASH("SIMD constructor call not expected.");
- case SimdOperation::Fn_check:
- return inlineSimdCheck(callInfo, native, type);
- case SimdOperation::Fn_splat:
- return inlineSimdSplat(callInfo, native, type);
- case SimdOperation::Fn_extractLane:
- return inlineSimdExtractLane(callInfo, native, type);
- case SimdOperation::Fn_replaceLane:
- return inlineSimdReplaceLane(callInfo, native, type);
- case SimdOperation::Fn_select:
- return inlineSimdSelect(callInfo, native, type);
- case SimdOperation::Fn_swizzle:
- return inlineSimdShuffle(callInfo, native, type, 1);
- case SimdOperation::Fn_shuffle:
- return inlineSimdShuffle(callInfo, native, type, 2);
-
- // Unary arithmetic.
- case SimdOperation::Fn_abs:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::abs, type);
- case SimdOperation::Fn_neg:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::neg, type);
- case SimdOperation::Fn_not:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::not_, type);
- case SimdOperation::Fn_reciprocalApproximation:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalApproximation,
- type);
- case SimdOperation::Fn_reciprocalSqrtApproximation:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::reciprocalSqrtApproximation,
- type);
- case SimdOperation::Fn_sqrt:
- return inlineSimdUnary(callInfo, native, MSimdUnaryArith::sqrt, type);
-
- // Binary arithmetic.
- case SimdOperation::Fn_add:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_add, type);
- case SimdOperation::Fn_sub:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_sub, type);
- case SimdOperation::Fn_mul:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_mul, type);
- case SimdOperation::Fn_div:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_div, type);
- case SimdOperation::Fn_max:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_max, type);
- case SimdOperation::Fn_min:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_min, type);
- case SimdOperation::Fn_maxNum:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_maxNum, type);
- case SimdOperation::Fn_minNum:
- return inlineSimdBinaryArith(callInfo, native, MSimdBinaryArith::Op_minNum, type);
-
- // Binary saturating.
- case SimdOperation::Fn_addSaturate:
- return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::add, type);
- case SimdOperation::Fn_subSaturate:
- return inlineSimdBinarySaturating(callInfo, native, MSimdBinarySaturating::sub, type);
-
- // Binary bitwise.
- case SimdOperation::Fn_and:
- return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::and_, type);
- case SimdOperation::Fn_or:
- return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::or_, type);
- case SimdOperation::Fn_xor:
- return inlineSimdBinaryBitwise(callInfo, native, MSimdBinaryBitwise::xor_, type);
-
- // Shifts.
- case SimdOperation::Fn_shiftLeftByScalar:
- return inlineSimdShift(callInfo, native, MSimdShift::lsh, type);
- case SimdOperation::Fn_shiftRightByScalar:
- return inlineSimdShift(callInfo, native, MSimdShift::rshForSign(GetSimdSign(type)), type);
-
- // Boolean unary.
- case SimdOperation::Fn_allTrue:
- return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */true, native, type);
- case SimdOperation::Fn_anyTrue:
- return inlineSimdAnyAllTrue(callInfo, /* IsAllTrue= */false, native, type);
-
- // Comparisons.
- case SimdOperation::Fn_lessThan:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThan, type);
- case SimdOperation::Fn_lessThanOrEqual:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::lessThanOrEqual, type);
- case SimdOperation::Fn_equal:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::equal, type);
- case SimdOperation::Fn_notEqual:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::notEqual, type);
- case SimdOperation::Fn_greaterThan:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThan, type);
- case SimdOperation::Fn_greaterThanOrEqual:
- return inlineSimdComp(callInfo, native, MSimdBinaryComp::greaterThanOrEqual, type);
-
- // Int <-> Float conversions.
- case SimdOperation::Fn_fromInt32x4:
- return inlineSimdConvert(callInfo, native, false, SimdType::Int32x4, type);
- case SimdOperation::Fn_fromUint32x4:
- return inlineSimdConvert(callInfo, native, false, SimdType::Uint32x4, type);
- case SimdOperation::Fn_fromFloat32x4:
- return inlineSimdConvert(callInfo, native, false, SimdType::Float32x4, type);
-
- // Load/store.
- case SimdOperation::Fn_load:
- return inlineSimdLoad(callInfo, native, type, GetSimdLanes(type));
- case SimdOperation::Fn_load1:
- return inlineSimdLoad(callInfo, native, type, 1);
- case SimdOperation::Fn_load2:
- return inlineSimdLoad(callInfo, native, type, 2);
- case SimdOperation::Fn_load3:
- return inlineSimdLoad(callInfo, native, type, 3);
- case SimdOperation::Fn_store:
- return inlineSimdStore(callInfo, native, type, GetSimdLanes(type));
- case SimdOperation::Fn_store1:
- return inlineSimdStore(callInfo, native, type, 1);
- case SimdOperation::Fn_store2:
- return inlineSimdStore(callInfo, native, type, 2);
- case SimdOperation::Fn_store3:
- return inlineSimdStore(callInfo, native, type, 3);
-
- // Bitcasts. One for each type with a memory representation.
- case SimdOperation::Fn_fromInt32x4Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Int32x4, type);
- case SimdOperation::Fn_fromUint32x4Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Uint32x4, type);
- case SimdOperation::Fn_fromInt16x8Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Int16x8, type);
- case SimdOperation::Fn_fromUint16x8Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Uint16x8, type);
- case SimdOperation::Fn_fromInt8x16Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Int8x16, type);
- case SimdOperation::Fn_fromUint8x16Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Uint8x16, type);
- case SimdOperation::Fn_fromFloat32x4Bits:
- return inlineSimdConvert(callInfo, native, true, SimdType::Float32x4, type);
- case SimdOperation::Fn_fromFloat64x2Bits:
- return InliningStatus_NotInlined;
- }
-
- MOZ_CRASH("Unexpected SIMD opcode");
-}
-
-// The representation of boolean SIMD vectors is the same as the corresponding
-// integer SIMD vectors with -1 lanes meaning true and 0 lanes meaning false.
-//
-// Functions that set the value of a boolean vector lane work by applying
-// ToBoolean on the input argument, so they accept any argument type, just like
-// the MNot and MTest instructions.
-//
-// Convert any scalar value into an appropriate SIMD lane value: An Int32 value
-// that is either 0 for false or -1 for true.
-MDefinition*
-IonBuilder::convertToBooleanSimdLane(MDefinition* scalar)
-{
- MSub* result;
-
- if (scalar->type() == MIRType::Boolean) {
- // The input scalar is already a boolean with the int32 values 0 / 1.
- // Compute result = 0 - scalar.
- result = MSub::New(alloc(), constant(Int32Value(0)), scalar);
- } else {
- // For any other type, let MNot handle the conversion to boolean.
- // Compute result = !scalar - 1.
- MNot* inv = MNot::New(alloc(), scalar);
- current->add(inv);
- result = MSub::New(alloc(), inv, constant(Int32Value(1)));
- }
-
- result->setInt32Specialization();
- current->add(result);
- return result;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineConstructSimdObject(CallInfo& callInfo, SimdTypeDescr* descr)
-{
- if (!JitSupportsSimd()) {
- trackOptimizationOutcome(TrackedOutcome::NoSimdJitSupport);
- return InliningStatus_NotInlined;
- }
-
- // Generic constructor of SIMD valuesX4.
- MIRType simdType;
- if (!MaybeSimdTypeToMIRType(descr->type(), &simdType)) {
- trackOptimizationOutcome(TrackedOutcome::SimdTypeNotOptimized);
- return InliningStatus_NotInlined;
- }
-
- // Take the templateObject out of Baseline ICs, such that we can box
- // SIMD value type in the same kind of objects.
- MOZ_ASSERT(size_t(descr->size(descr->type())) < InlineTypedObject::MaximumSize);
- MOZ_ASSERT(descr->getClass() == &SimdTypeDescr::class_,
- "getTemplateObjectForSimdCtor needs an update");
-
- JSObject* templateObject = inspector->getTemplateObjectForSimdCtor(pc, descr->type());
- if (!templateObject)
- return InliningStatus_NotInlined;
-
- // The previous assertion ensures this will never fail if we were able to
- // allocate a templateObject in Baseline.
- InlineTypedObject* inlineTypedObject = &templateObject->as<InlineTypedObject>();
- MOZ_ASSERT(&inlineTypedObject->typeDescr() == descr);
-
- // When there are missing arguments, provide a default value
- // containing the coercion of 'undefined' to the right type.
- MConstant* defVal = nullptr;
- MIRType laneType = SimdTypeToLaneType(simdType);
- unsigned lanes = SimdTypeToLength(simdType);
- if (lanes != 4 || callInfo.argc() < lanes) {
- if (laneType == MIRType::Int32 || laneType == MIRType::Boolean) {
- // The default lane for a boolean vector is |false|, but
- // |MSimdSplat|, |MSimdValueX4|, and |MSimdInsertElement| all
- // require an Int32 argument with the value 0 or 01 to initialize a
- // boolean lane. See also convertToBooleanSimdLane() which is
- // idempotent with a 0 argument after constant folding.
- defVal = constant(Int32Value(0));
- } else if (laneType == MIRType::Double) {
- defVal = constant(DoubleNaNValue());
- } else {
- MOZ_ASSERT(laneType == MIRType::Float32);
- defVal = MConstant::NewFloat32(alloc(), JS::GenericNaN());
- current->add(defVal);
- }
- }
-
- MInstruction *values = nullptr;
-
- // Use the MSimdValueX4 constructor for X4 vectors.
- if (lanes == 4) {
- MDefinition* lane[4];
- for (unsigned i = 0; i < 4; i++)
- lane[i] = callInfo.getArgWithDefault(i, defVal);
-
- // Convert boolean lanes into Int32 0 / -1.
- if (laneType == MIRType::Boolean) {
- for (unsigned i = 0; i < 4; i++)
- lane[i] = convertToBooleanSimdLane(lane[i]);
- }
-
- values = MSimdValueX4::New(alloc(), simdType, lane[0], lane[1], lane[2], lane[3]);
- current->add(values);
- } else {
- // For general constructor calls, start from splat(defVal), insert one
- // lane at a time.
- values = MSimdSplat::New(alloc(), defVal, simdType);
- current->add(values);
-
- // Stop early if constructor doesn't have enough arguments. These lanes
- // then get the default value.
- if (callInfo.argc() < lanes)
- lanes = callInfo.argc();
-
- for (unsigned i = 0; i < lanes; i++) {
- MDefinition* lane = callInfo.getArg(i);
- if (laneType == MIRType::Boolean)
- lane = convertToBooleanSimdLane(lane);
- values = MSimdInsertElement::New(alloc(), values, lane, i);
- current->add(values);
- }
- }
-
- MSimdBox* obj = MSimdBox::New(alloc(), constraints(), values, inlineTypedObject, descr->type(),
- inlineTypedObject->group()->initialHeap(constraints()));
- current->add(obj);
- current->push(obj);
-
- callInfo.setImplicitlyUsedUnchecked();
- return InliningStatus_Inlined;
-}
-
-bool
-IonBuilder::canInlineSimd(CallInfo& callInfo, JSNative native, unsigned numArgs,
- InlineTypedObject** templateObj)
-{
- if (callInfo.argc() != numArgs)
- return false;
-
- JSObject* templateObject = inspector->getTemplateObjectForNative(pc, native);
- if (!templateObject)
- return false;
-
- *templateObj = &templateObject->as<InlineTypedObject>();
- return true;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdCheck(CallInfo& callInfo, JSNative native, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 1, &templateObj))
- return InliningStatus_NotInlined;
-
- // Unboxing checks the SIMD object type and throws a TypeError if it doesn't
- // match type.
- MDefinition *arg = unboxSimd(callInfo.getArg(0), type);
-
- // Create an unbox/box pair, expecting the box to be optimized away if
- // anyone use the return value from this check() call. This is what you want
- // for code like this:
- //
- // function f(x) {
- // x = Int32x4.check(x)
- // for(...) {
- // y = Int32x4.add(x, ...)
- // }
- //
- // The unboxing of x happens as early as possible, and only once.
- return boxSimd(callInfo, arg, templateObj);
-}
-
-// Given a value or object, insert a dynamic check that this is a SIMD object of
-// the required SimdType, and unbox it into the corresponding SIMD MIRType.
-//
-// This represents the standard type checking that all the SIMD operations
-// perform on their arguments.
-MDefinition*
-IonBuilder::unboxSimd(MDefinition* ins, SimdType type)
-{
- // Trivial optimization: If ins is a MSimdBox of the same SIMD type, there
- // is no way the unboxing could fail, and we can skip it altogether.
- // This is the same thing MSimdUnbox::foldsTo() does, but we can save the
- // memory allocation here.
- if (ins->isSimdBox()) {
- MSimdBox* box = ins->toSimdBox();
- if (box->simdType() == type) {
- MDefinition* value = box->input();
- MOZ_ASSERT(value->type() == SimdTypeToMIRType(type));
- return value;
- }
- }
-
- MSimdUnbox* unbox = MSimdUnbox::New(alloc(), ins, type);
- current->add(unbox);
- return unbox;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::boxSimd(CallInfo& callInfo, MDefinition* ins, InlineTypedObject* templateObj)
-{
- SimdType simdType = templateObj->typeDescr().as<SimdTypeDescr>().type();
- MSimdBox* obj = MSimdBox::New(alloc(), constraints(), ins, templateObj, simdType,
- templateObj->group()->initialHeap(constraints()));
-
- // In some cases, ins has already been added to current.
- if (!ins->block() && ins->isInstruction())
- current->add(ins->toInstruction());
- current->add(obj);
- current->push(obj);
-
- callInfo.setImplicitlyUsedUnchecked();
- return InliningStatus_Inlined;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdBinaryArith(CallInfo& callInfo, JSNative native,
- MSimdBinaryArith::Operation op, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
- MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
-
- auto* ins = MSimdBinaryArith::AddLegalized(alloc(), current, lhs, rhs, op);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdBinaryBitwise(CallInfo& callInfo, JSNative native,
- MSimdBinaryBitwise::Operation op, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
- MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
-
- auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-// Inline a binary SIMD operation where both arguments are SIMD types.
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdBinarySaturating(CallInfo& callInfo, JSNative native,
- MSimdBinarySaturating::Operation op, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
- MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
-
- MSimdBinarySaturating* ins =
- MSimdBinarySaturating::New(alloc(), lhs, rhs, op, GetSimdSign(type));
- return boxSimd(callInfo, ins, templateObj);
-}
-
-// Inline a SIMD shiftByScalar operation.
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdShift(CallInfo& callInfo, JSNative native, MSimdShift::Operation op,
- SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* vec = unboxSimd(callInfo.getArg(0), type);
-
- MInstruction* ins = MSimdShift::AddLegalized(alloc(), current, vec, callInfo.getArg(1), op);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdComp(CallInfo& callInfo, JSNative native, MSimdBinaryComp::Operation op,
- SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* lhs = unboxSimd(callInfo.getArg(0), type);
- MDefinition* rhs = unboxSimd(callInfo.getArg(1), type);
- MInstruction* ins =
- MSimdBinaryComp::AddLegalized(alloc(), current, lhs, rhs, op, GetSimdSign(type));
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdUnary(CallInfo& callInfo, JSNative native, MSimdUnaryArith::Operation op,
- SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 1, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
-
- MSimdUnaryArith* ins = MSimdUnaryArith::New(alloc(), arg, op);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdSplat(CallInfo& callInfo, JSNative native, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 1, &templateObj))
- return InliningStatus_NotInlined;
-
- MIRType mirType = SimdTypeToMIRType(type);
- MDefinition* arg = callInfo.getArg(0);
-
- // Convert to 0 / -1 before splatting a boolean lane.
- if (SimdTypeToLaneType(mirType) == MIRType::Boolean)
- arg = convertToBooleanSimdLane(arg);
-
- MSimdSplat* ins = MSimdSplat::New(alloc(), arg, mirType);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdExtractLane(CallInfo& callInfo, JSNative native, SimdType type)
-{
- // extractLane() returns a scalar, so don't use canInlineSimd() which looks
- // for a template object.
- if (callInfo.argc() != 2 || callInfo.constructing()) {
- trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
- return InliningStatus_NotInlined;
- }
-
- // Lane index.
- MDefinition* arg = callInfo.getArg(1);
- if (!arg->isConstant() || arg->type() != MIRType::Int32)
- return InliningStatus_NotInlined;
- unsigned lane = arg->toConstant()->toInt32();
- if (lane >= GetSimdLanes(type))
- return InliningStatus_NotInlined;
-
- // Original vector.
- MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
- MIRType vecType = orig->type();
- MIRType laneType = SimdTypeToLaneType(vecType);
- SimdSign sign = GetSimdSign(type);
-
- // An Uint32 lane can't be represented in MIRType::Int32. Get it as a double.
- if (type == SimdType::Uint32x4)
- laneType = MIRType::Double;
-
- MSimdExtractElement* ins =
- MSimdExtractElement::New(alloc(), orig, laneType, lane, sign);
- current->add(ins);
- current->push(ins);
- callInfo.setImplicitlyUsedUnchecked();
- return InliningStatus_Inlined;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdReplaceLane(CallInfo& callInfo, JSNative native, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 3, &templateObj))
- return InliningStatus_NotInlined;
-
- // Lane index.
- MDefinition* arg = callInfo.getArg(1);
- if (!arg->isConstant() || arg->type() != MIRType::Int32)
- return InliningStatus_NotInlined;
-
- unsigned lane = arg->toConstant()->toInt32();
- if (lane >= GetSimdLanes(type))
- return InliningStatus_NotInlined;
-
- // Original vector.
- MDefinition* orig = unboxSimd(callInfo.getArg(0), type);
- MIRType vecType = orig->type();
-
- // Convert to 0 / -1 before inserting a boolean lane.
- MDefinition* value = callInfo.getArg(2);
- if (SimdTypeToLaneType(vecType) == MIRType::Boolean)
- value = convertToBooleanSimdLane(value);
-
- MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), orig, value, lane);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-// Inline a SIMD conversion or bitcast. When isCast==false, one of the types
-// must be floating point and the other integer. In this case, sign indicates if
-// the integer lanes should be treated as signed or unsigned integers.
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdConvert(CallInfo& callInfo, JSNative native, bool isCast, SimdType fromType,
- SimdType toType)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 1, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* arg = unboxSimd(callInfo.getArg(0), fromType);
- MIRType mirType = SimdTypeToMIRType(toType);
-
- MInstruction* ins;
- if (isCast) {
- // Signed/Unsigned doesn't matter for bitcasts.
- ins = MSimdReinterpretCast::New(alloc(), arg, mirType);
- } else {
- // Exactly one of fromType, toType must be an integer type.
- SimdSign sign = GetSimdSign(fromType);
- if (sign == SimdSign::NotApplicable)
- sign = GetSimdSign(toType);
-
- // Possibly expand into multiple instructions.
- ins = MSimdConvert::AddLegalized(alloc(), current, arg, mirType, sign);
- }
-
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdSelect(CallInfo& callInfo, JSNative native, SimdType type)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 3, &templateObj))
- return InliningStatus_NotInlined;
-
- MDefinition* mask = unboxSimd(callInfo.getArg(0), GetBooleanSimdType(type));
- MDefinition* tval = unboxSimd(callInfo.getArg(1), type);
- MDefinition* fval = unboxSimd(callInfo.getArg(2), type);
-
- MSimdSelect* ins = MSimdSelect::New(alloc(), mask, tval, fval);
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdShuffle(CallInfo& callInfo, JSNative native, SimdType type,
- unsigned numVectors)
-{
- unsigned numLanes = GetSimdLanes(type);
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, numVectors + numLanes, &templateObj))
- return InliningStatus_NotInlined;
-
- MIRType mirType = SimdTypeToMIRType(type);
-
- MSimdGeneralShuffle* ins = MSimdGeneralShuffle::New(alloc(), numVectors, numLanes, mirType);
-
- if (!ins->init(alloc()))
- return InliningStatus_Error;
-
- for (unsigned i = 0; i < numVectors; i++)
- ins->setVector(i, unboxSimd(callInfo.getArg(i), type));
- for (size_t i = 0; i < numLanes; i++)
- ins->setLane(i, callInfo.getArg(numVectors + i));
-
- return boxSimd(callInfo, ins, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdAnyAllTrue(CallInfo& callInfo, bool IsAllTrue, JSNative native,
- SimdType type)
-{
- // anyTrue() / allTrue() return a scalar, so don't use canInlineSimd() which looks
- // for a template object.
- if (callInfo.argc() != 1 || callInfo.constructing()) {
- trackOptimizationOutcome(TrackedOutcome::CantInlineNativeBadForm);
- return InliningStatus_NotInlined;
- }
-
- MDefinition* arg = unboxSimd(callInfo.getArg(0), type);
-
- MUnaryInstruction* ins;
- if (IsAllTrue)
- ins = MSimdAllTrue::New(alloc(), arg, MIRType::Boolean);
- else
- ins = MSimdAnyTrue::New(alloc(), arg, MIRType::Boolean);
-
- current->add(ins);
- current->push(ins);
- callInfo.setImplicitlyUsedUnchecked();
- return InliningStatus_Inlined;
-}
-
-// Get the typed array element type corresponding to the lanes in a SIMD vector type.
-// This only applies to SIMD types that can be loaded and stored to a typed array.
-static Scalar::Type
-SimdTypeToArrayElementType(SimdType type)
-{
- switch (type) {
- case SimdType::Float32x4: return Scalar::Float32x4;
- case SimdType::Int8x16:
- case SimdType::Uint8x16: return Scalar::Int8x16;
- case SimdType::Int16x8:
- case SimdType::Uint16x8: return Scalar::Int16x8;
- case SimdType::Int32x4:
- case SimdType::Uint32x4: return Scalar::Int32x4;
- default: MOZ_CRASH("unexpected simd type");
- }
-}
-
-bool
-IonBuilder::prepareForSimdLoadStore(CallInfo& callInfo, Scalar::Type simdType, MInstruction** elements,
- MDefinition** index, Scalar::Type* arrayType)
-{
- MDefinition* array = callInfo.getArg(0);
- *index = callInfo.getArg(1);
-
- if (!ElementAccessIsTypedArray(constraints(), array, *index, arrayType))
- return false;
-
- MInstruction* indexAsInt32 = MToInt32::New(alloc(), *index);
- current->add(indexAsInt32);
- *index = indexAsInt32;
-
- MDefinition* indexForBoundsCheck = *index;
-
- // Artificially make sure the index is in bounds by adding the difference
- // number of slots needed (e.g. reading from Float32Array we need to make
- // sure to be in bounds for 4 slots, so add 3, etc.).
- MOZ_ASSERT(Scalar::byteSize(simdType) % Scalar::byteSize(*arrayType) == 0);
- int32_t suppSlotsNeeded = Scalar::byteSize(simdType) / Scalar::byteSize(*arrayType) - 1;
- if (suppSlotsNeeded) {
- MConstant* suppSlots = constant(Int32Value(suppSlotsNeeded));
- MAdd* addedIndex = MAdd::New(alloc(), *index, suppSlots);
- // We're fine even with the add overflows, as long as the generated code
- // for the bounds check uses an unsigned comparison.
- addedIndex->setInt32Specialization();
- current->add(addedIndex);
- indexForBoundsCheck = addedIndex;
- }
-
- MInstruction* length;
- addTypedArrayLengthAndData(array, SkipBoundsCheck, index, &length, elements);
-
- // It can be that the index is out of bounds, while the added index for the
- // bounds check is in bounds, so we actually need two bounds checks here.
- MInstruction* positiveCheck = MBoundsCheck::New(alloc(), *index, length);
- current->add(positiveCheck);
-
- MInstruction* fullCheck = MBoundsCheck::New(alloc(), indexForBoundsCheck, length);
- current->add(fullCheck);
- return true;
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdLoad(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 2, &templateObj))
- return InliningStatus_NotInlined;
-
- Scalar::Type elemType = SimdTypeToArrayElementType(type);
-
- MDefinition* index = nullptr;
- MInstruction* elements = nullptr;
- Scalar::Type arrayType;
- if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
- return InliningStatus_NotInlined;
-
- MLoadUnboxedScalar* load = MLoadUnboxedScalar::New(alloc(), elements, index, arrayType);
- load->setResultType(SimdTypeToMIRType(type));
- load->setSimdRead(elemType, numElems);
-
- return boxSimd(callInfo, load, templateObj);
-}
-
-IonBuilder::InliningStatus
-IonBuilder::inlineSimdStore(CallInfo& callInfo, JSNative native, SimdType type, unsigned numElems)
-{
- InlineTypedObject* templateObj = nullptr;
- if (!canInlineSimd(callInfo, native, 3, &templateObj))
- return InliningStatus_NotInlined;
-
- Scalar::Type elemType = SimdTypeToArrayElementType(type);
-
- MDefinition* index = nullptr;
- MInstruction* elements = nullptr;
- Scalar::Type arrayType;
- if (!prepareForSimdLoadStore(callInfo, elemType, &elements, &index, &arrayType))
- return InliningStatus_NotInlined;
-
- MDefinition* valueToWrite = unboxSimd(callInfo.getArg(2), type);
- MStoreUnboxedScalar* store = MStoreUnboxedScalar::New(alloc(), elements, index,
- valueToWrite, arrayType,
- MStoreUnboxedScalar::TruncateInput);
- store->setSimdWrite(elemType, numElems);
-
- current->add(store);
- // Produce the original boxed value as our return value.
- // This is unlikely to be used, so don't bother reboxing valueToWrite.
- current->push(callInfo.getArg(2));
-
- callInfo.setImplicitlyUsedUnchecked();
-
- if (!resumeAfter(store))
- return InliningStatus_Error;
-
- return InliningStatus_Inlined;
-}
-
-// Note that SIMD.cpp provides its own JSJitInfo objects for SIMD.foo.* functions.
-// The Simd* objects defined here represent SIMD.foo() constructor calls.
-// They are encoded with .nativeOp = 0. That is the sub-opcode within the SIMD type.
-static_assert(uint16_t(SimdOperation::Constructor) == 0, "Constructor opcode must be 0");
-
#define ADD_NATIVE(native) const JSJitInfo JitInfo_##native { \
{ nullptr }, { uint16_t(InlinableNative::native) }, { 0 }, JSJitInfo::InlinableNative };
INLINABLE_NATIVE_LIST(ADD_NATIVE)
diff --git a/js/src/jit/MIR.cpp b/js/src/jit/MIR.cpp
index 2264bed4f2..34d9e07bf9 100644
--- a/js/src/jit/MIR.cpp
+++ b/js/src/jit/MIR.cpp
@@ -1235,530 +1235,6 @@ MConstant::valueToBoolean(bool* res) const
}
}
-MDefinition*
-MSimdValueX4::foldsTo(TempAllocator& alloc)
-{
-#ifdef DEBUG
- MIRType laneType = SimdTypeToLaneArgumentType(type());
-#endif
- bool allConstants = true;
- bool allSame = true;
-
- for (size_t i = 0; i < 4; ++i) {
- MDefinition* op = getOperand(i);
- MOZ_ASSERT(op->type() == laneType);
- if (!op->isConstant())
- allConstants = false;
- if (i > 0 && op != getOperand(i - 1))
- allSame = false;
- }
-
- if (!allConstants && !allSame)
- return this;
-
- if (allConstants) {
- SimdConstant cst;
- switch (type()) {
- case MIRType::Bool32x4: {
- int32_t a[4];
- for (size_t i = 0; i < 4; ++i)
- a[i] = getOperand(i)->toConstant()->valueToBooleanInfallible() ? -1 : 0;
- cst = SimdConstant::CreateX4(a);
- break;
- }
- case MIRType::Int32x4: {
- int32_t a[4];
- for (size_t i = 0; i < 4; ++i)
- a[i] = getOperand(i)->toConstant()->toInt32();
- cst = SimdConstant::CreateX4(a);
- break;
- }
- case MIRType::Float32x4: {
- float a[4];
- for (size_t i = 0; i < 4; ++i)
- a[i] = getOperand(i)->toConstant()->numberToDouble();
- cst = SimdConstant::CreateX4(a);
- break;
- }
- default: MOZ_CRASH("unexpected type in MSimdValueX4::foldsTo");
- }
-
- return MSimdConstant::New(alloc, cst, type());
- }
-
- MOZ_ASSERT(allSame);
- return MSimdSplat::New(alloc, getOperand(0), type());
-}
-
-MDefinition*
-MSimdSplat::foldsTo(TempAllocator& alloc)
-{
-#ifdef DEBUG
- MIRType laneType = SimdTypeToLaneArgumentType(type());
-#endif
- MDefinition* op = getOperand(0);
- if (!op->isConstant())
- return this;
- MOZ_ASSERT(op->type() == laneType);
-
- SimdConstant cst;
- switch (type()) {
- case MIRType::Bool8x16: {
- int8_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
- cst = SimdConstant::SplatX16(v);
- break;
- }
- case MIRType::Bool16x8: {
- int16_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
- cst = SimdConstant::SplatX8(v);
- break;
- }
- case MIRType::Bool32x4: {
- int32_t v = op->toConstant()->valueToBooleanInfallible() ? -1 : 0;
- cst = SimdConstant::SplatX4(v);
- break;
- }
- case MIRType::Int8x16: {
- int32_t v = op->toConstant()->toInt32();
- cst = SimdConstant::SplatX16(v);
- break;
- }
- case MIRType::Int16x8: {
- int32_t v = op->toConstant()->toInt32();
- cst = SimdConstant::SplatX8(v);
- break;
- }
- case MIRType::Int32x4: {
- int32_t v = op->toConstant()->toInt32();
- cst = SimdConstant::SplatX4(v);
- break;
- }
- case MIRType::Float32x4: {
- float v = op->toConstant()->numberToDouble();
- cst = SimdConstant::SplatX4(v);
- break;
- }
- default: MOZ_CRASH("unexpected type in MSimdSplat::foldsTo");
- }
-
- return MSimdConstant::New(alloc, cst, type());
-}
-
-MDefinition*
-MSimdUnbox::foldsTo(TempAllocator& alloc)
-{
- MDefinition* in = input();
-
- if (in->isSimdBox()) {
- MSimdBox* box = in->toSimdBox();
- // If the operand is a MSimdBox, then we just reuse the operand of the
- // MSimdBox as long as the type corresponds to what we are supposed to
- // unbox.
- in = box->input();
- if (box->simdType() != simdType())
- return this;
- MOZ_ASSERT(in->type() == type());
- return in;
- }
-
- return this;
-}
-
-MDefinition*
-MSimdSwizzle::foldsTo(TempAllocator& alloc)
-{
- if (lanesMatch(0, 1, 2, 3))
- return input();
- return this;
-}
-
-MDefinition*
-MSimdGeneralShuffle::foldsTo(TempAllocator& alloc)
-{
- FixedList<uint8_t> lanes;
- if (!lanes.init(alloc, numLanes()))
- return this;
-
- for (size_t i = 0; i < numLanes(); i++) {
- if (!lane(i)->isConstant() || lane(i)->type() != MIRType::Int32)
- return this;
- int32_t temp = lane(i)->toConstant()->toInt32();
- if (temp < 0 || unsigned(temp) >= numLanes() * numVectors())
- return this;
- lanes[i] = uint8_t(temp);
- }
-
- if (numVectors() == 1)
- return MSimdSwizzle::New(alloc, vector(0), lanes.data());
-
- MOZ_ASSERT(numVectors() == 2);
- return MSimdShuffle::New(alloc, vector(0), vector(1), lanes.data());
-}
-
-MInstruction*
-MSimdConvert::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
- MIRType toType, SimdSign sign, wasm::TrapOffset trapOffset)
-{
- MIRType fromType = obj->type();
-
- if (SupportsUint32x4FloatConversions || sign != SimdSign::Unsigned) {
- MInstruction* ins = New(alloc, obj, toType, sign, trapOffset);
- addTo->add(ins);
- return ins;
- }
-
- // This architecture can't do Uint32x4 <-> Float32x4 conversions (Hi SSE!)
- MOZ_ASSERT(sign == SimdSign::Unsigned);
- if (fromType == MIRType::Int32x4 && toType == MIRType::Float32x4) {
- // Converting Uint32x4 -> Float32x4. This algorithm is from LLVM.
- //
- // Split the input number into high and low parts:
- //
- // uint32_t hi = x >> 16;
- // uint32_t lo = x & 0xffff;
- //
- // Insert these parts as the low mantissa bits in a float32 number with
- // the corresponding exponent:
- //
- // float fhi = (bits-as-float)(hi | 0x53000000); // 0x1.0p39f + hi*2^16
- // float flo = (bits-as-float)(lo | 0x4b000000); // 0x1.0p23f + lo
- //
- // Subtract the bias from the hi part:
- //
- // fhi -= (0x1.0p39 + 0x1.0p23) // hi*2^16 - 0x1.0p23
- //
- // And finally combine:
- //
- // result = flo + fhi // lo + hi*2^16.
-
- // Compute hi = obj >> 16 (lane-wise unsigned shift).
- MInstruction* c16 = MConstant::New(alloc, Int32Value(16));
- addTo->add(c16);
- MInstruction* hi = MSimdShift::AddLegalized(alloc, addTo, obj, c16, MSimdShift::ursh);
-
- // Compute lo = obj & 0xffff (lane-wise).
- MInstruction* m16 =
- MSimdConstant::New(alloc, SimdConstant::SplatX4(0xffff), MIRType::Int32x4);
- addTo->add(m16);
- MInstruction* lo = MSimdBinaryBitwise::New(alloc, obj, m16, MSimdBinaryBitwise::and_);
- addTo->add(lo);
-
- // Mix in the exponents.
- MInstruction* exphi =
- MSimdConstant::New(alloc, SimdConstant::SplatX4(0x53000000), MIRType::Int32x4);
- addTo->add(exphi);
- MInstruction* mhi = MSimdBinaryBitwise::New(alloc, hi, exphi, MSimdBinaryBitwise::or_);
- addTo->add(mhi);
- MInstruction* explo =
- MSimdConstant::New(alloc, SimdConstant::SplatX4(0x4b000000), MIRType::Int32x4);
- addTo->add(explo);
- MInstruction* mlo = MSimdBinaryBitwise::New(alloc, lo, explo, MSimdBinaryBitwise::or_);
- addTo->add(mlo);
-
- // Bit-cast both to Float32x4.
- MInstruction* fhi = MSimdReinterpretCast::New(alloc, mhi, MIRType::Float32x4);
- addTo->add(fhi);
- MInstruction* flo = MSimdReinterpretCast::New(alloc, mlo, MIRType::Float32x4);
- addTo->add(flo);
-
- // Subtract out the bias: 0x1.0p39f + 0x1.0p23f.
- // MSVC doesn't support the hexadecimal float syntax.
- const float BiasValue = 549755813888.f + 8388608.f;
- MInstruction* bias =
- MSimdConstant::New(alloc, SimdConstant::SplatX4(BiasValue), MIRType::Float32x4);
- addTo->add(bias);
- MInstruction* fhi_debiased =
- MSimdBinaryArith::AddLegalized(alloc, addTo, fhi, bias, MSimdBinaryArith::Op_sub);
-
- // Compute the final result.
- return MSimdBinaryArith::AddLegalized(alloc, addTo, fhi_debiased, flo,
- MSimdBinaryArith::Op_add);
- }
-
- if (fromType == MIRType::Float32x4 && toType == MIRType::Int32x4) {
- // The Float32x4 -> Uint32x4 conversion can throw if the input is out of
- // range. This is handled by the LFloat32x4ToUint32x4 expansion.
- MInstruction* ins = New(alloc, obj, toType, sign, trapOffset);
- addTo->add(ins);
- return ins;
- }
-
- MOZ_CRASH("Unhandled SIMD type conversion");
-}
-
-MInstruction*
-MSimdBinaryComp::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op, SimdSign sign)
-{
- MOZ_ASSERT(left->type() == right->type());
- MIRType opType = left->type();
- MOZ_ASSERT(IsSimdType(opType));
- bool IsEquality = op == equal || op == notEqual;
-
- // Check if this is an unsupported unsigned compare that needs to be biased.
- // If so, put the bias vector in `bias`.
- if (sign == SimdSign::Unsigned && !IsEquality) {
- MInstruction* bias = nullptr;
-
- // This is an order comparison of Uint32x4 vectors which are not supported on this target.
- // Simply offset |left| and |right| by INT_MIN, then do a signed comparison.
- if (!SupportsUint32x4Compares && opType == MIRType::Int32x4)
- bias = MSimdConstant::New(alloc, SimdConstant::SplatX4(int32_t(0x80000000)), opType);
- else if (!SupportsUint16x8Compares && opType == MIRType::Int16x8)
- bias = MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x8000)), opType);
- if (!SupportsUint8x16Compares && opType == MIRType::Int8x16)
- bias = MSimdConstant::New(alloc, SimdConstant::SplatX16(int8_t(0x80)), opType);
-
- if (bias) {
- addTo->add(bias);
-
- // Add the bias.
- MInstruction* bleft =
- MSimdBinaryArith::AddLegalized(alloc, addTo, left, bias, MSimdBinaryArith::Op_add);
- MInstruction* bright =
- MSimdBinaryArith::AddLegalized(alloc, addTo, right, bias, MSimdBinaryArith::Op_add);
-
- // Do the equivalent signed comparison.
- MInstruction* result =
- MSimdBinaryComp::New(alloc, bleft, bright, op, SimdSign::Signed);
- addTo->add(result);
-
- return result;
- }
- }
-
- if (sign == SimdSign::Unsigned &&
- ((!SupportsUint32x4Compares && opType == MIRType::Int32x4) ||
- (!SupportsUint16x8Compares && opType == MIRType::Int16x8) ||
- (!SupportsUint8x16Compares && opType == MIRType::Int8x16))) {
- // The sign doesn't matter for equality tests. Flip it to make the
- // backend assertions happy.
- MOZ_ASSERT(IsEquality);
- sign = SimdSign::Signed;
- }
-
- // This is a legal operation already. Just create the instruction requested.
- MInstruction* result = MSimdBinaryComp::New(alloc, left, right, op, sign);
- addTo->add(result);
- return result;
-}
-
-MInstruction*
-MSimdBinaryArith::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op)
-{
- MOZ_ASSERT(left->type() == right->type());
- MIRType opType = left->type();
- MOZ_ASSERT(IsSimdType(opType));
-
- // SSE does not have 8x16 multiply instructions.
- if (opType == MIRType::Int8x16 && op == Op_mul) {
- // Express the multiply in terms of Int16x8 multiplies by handling the
- // even and odd lanes separately.
-
- MInstruction* wideL = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
- addTo->add(wideL);
- MInstruction* wideR = MSimdReinterpretCast::New(alloc, right, MIRType::Int16x8);
- addTo->add(wideR);
-
- // wideL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
- // wideR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
-
- // Shift the odd lanes down to the low bits of the 16x8 vectors.
- MInstruction* eight = MConstant::New(alloc, Int32Value(8));
- addTo->add(eight);
- MInstruction* evenL = wideL;
- MInstruction* evenR = wideR;
- MInstruction* oddL =
- MSimdShift::AddLegalized(alloc, addTo, wideL, eight, MSimdShift::ursh);
- MInstruction* oddR =
- MSimdShift::AddLegalized(alloc, addTo, wideR, eight, MSimdShift::ursh);
-
- // evenL = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
- // evenR = bbaa bbaa bbaa bbaa bbaa bbaa bbaa bbaa
- // oddL = 00yy 00yy 00yy 00yy 00yy 00yy 00yy 00yy
- // oddR = 00bb 00bb 00bb 00bb 00bb 00bb 00bb 00bb
-
- // Now do two 16x8 multiplications. We can use the low bits of each.
- MInstruction* even = MSimdBinaryArith::AddLegalized(alloc, addTo, evenL, evenR, Op_mul);
- MInstruction* odd = MSimdBinaryArith::AddLegalized(alloc, addTo, oddL, oddR, Op_mul);
-
- // even = ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP ~~PP
- // odd = ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ ~~QQ
-
- MInstruction* mask =
- MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0x00ff)), MIRType::Int16x8);
- addTo->add(mask);
- even = MSimdBinaryBitwise::New(alloc, even, mask, MSimdBinaryBitwise::and_);
- addTo->add(even);
- odd = MSimdShift::AddLegalized(alloc, addTo, odd, eight, MSimdShift::lsh);
-
- // even = 00PP 00PP 00PP 00PP 00PP 00PP 00PP 00PP
- // odd = QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00 QQ00
-
- // Combine:
- MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
- addTo->add(result);
- result = MSimdReinterpretCast::New(alloc, result, opType);
- addTo->add(result);
- return result;
- }
-
- // This is a legal operation already. Just create the instruction requested.
- MInstruction* result = MSimdBinaryArith::New(alloc, left, right, op);
- addTo->add(result);
- return result;
-}
-
-MInstruction*
-MSimdShift::AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op)
-{
- MIRType opType = left->type();
- MOZ_ASSERT(IsIntegerSimdType(opType));
-
- // SSE does not provide 8x16 shift instructions.
- if (opType == MIRType::Int8x16) {
- // Express the shift in terms of Int16x8 shifts by splitting into even
- // and odd lanes, place 8-bit lanes into the high bits of Int16x8
- // vectors `even` and `odd`. Shift, mask, combine.
- //
- // wide = Int16x8.fromInt8x16Bits(left);
- // shiftBy = right & 7
- // mask = Int16x8.splat(0xff00);
- //
- MInstruction* wide = MSimdReinterpretCast::New(alloc, left, MIRType::Int16x8);
- addTo->add(wide);
-
- // wide = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
-
- MInstruction* shiftMask = MConstant::New(alloc, Int32Value(7));
- addTo->add(shiftMask);
- MBinaryBitwiseInstruction* shiftBy = MBitAnd::New(alloc, right, shiftMask);
- shiftBy->setInt32Specialization();
- addTo->add(shiftBy);
-
- // Move the even 8x16 lanes into the high bits of the 16x8 lanes.
- MInstruction* eight = MConstant::New(alloc, Int32Value(8));
- addTo->add(eight);
- MInstruction* even = MSimdShift::AddLegalized(alloc, addTo, wide, eight, lsh);
-
- // Leave the odd lanes in place.
- MInstruction* odd = wide;
-
- // even = xx00 xx00 xx00 xx00 xx00 xx00 xx00 xx00
- // odd = yyxx yyxx yyxx yyxx yyxx yyxx yyxx yyxx
-
- MInstruction* mask =
- MSimdConstant::New(alloc, SimdConstant::SplatX8(int16_t(0xff00)), MIRType::Int16x8);
- addTo->add(mask);
-
- // Left-shift: Clear the low bits in `odd` before shifting.
- if (op == lsh) {
- odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
- addTo->add(odd);
- // odd = yy00 yy00 yy00 yy00 yy00 yy00 yy00 yy00
- }
-
- // Do the real shift twice: once for the even lanes, once for the odd
- // lanes. This is a recursive call, but with a different type.
- even = MSimdShift::AddLegalized(alloc, addTo, even, shiftBy, op);
- odd = MSimdShift::AddLegalized(alloc, addTo, odd, shiftBy, op);
-
- // even = XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~ XX~~
- // odd = YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~ YY~~
-
- // Right-shift: Clear the low bits in `odd` after shifting.
- if (op != lsh) {
- odd = MSimdBinaryBitwise::New(alloc, odd, mask, MSimdBinaryBitwise::and_);
- addTo->add(odd);
- // odd = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
- }
-
- // Move the even lanes back to their original place.
- even = MSimdShift::AddLegalized(alloc, addTo, even, eight, ursh);
-
- // Now, `odd` contains the odd lanes properly shifted, and `even`
- // contains the even lanes properly shifted:
- //
- // even = 00XX 00XX 00XX 00XX 00XX 00XX 00XX 00XX
- // odd = YY00 YY00 YY00 YY00 YY00 YY00 YY00 YY00
- //
- // Combine:
- MInstruction* result = MSimdBinaryBitwise::New(alloc, even, odd, MSimdBinaryBitwise::or_);
- addTo->add(result);
- result = MSimdReinterpretCast::New(alloc, result, opType);
- addTo->add(result);
- return result;
- }
-
- // This is a legal operation already. Just create the instruction requested.
- MInstruction* result = MSimdShift::New(alloc, left, right, op);
- addTo->add(result);
- return result;
-}
-
-template <typename T>
-static void
-PrintOpcodeOperation(T* mir, GenericPrinter& out)
-{
- mir->MDefinition::printOpcode(out);
- out.printf(" (%s)", T::OperationName(mir->operation()));
-}
-
-void
-MSimdBinaryArith::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-void
-MSimdBinarySaturating::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-void
-MSimdBinaryBitwise::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-void
-MSimdUnaryArith::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-void
-MSimdBinaryComp::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-void
-MSimdShift::printOpcode(GenericPrinter& out) const
-{
- PrintOpcodeOperation(this, out);
-}
-
-void
-MSimdInsertElement::printOpcode(GenericPrinter& out) const
-{
- MDefinition::printOpcode(out);
- out.printf(" (lane %u)", lane());
-}
-
-void
-MSimdBox::printOpcode(GenericPrinter& out) const
-{
- MDefinition::printOpcode(out);
- out.printf(" (%s%s)", SimdTypeToString(simdType()),
- initialHeap() == gc::TenuredHeap ? ", tenured" : "");
-}
-
-void
-MSimdUnbox::printOpcode(GenericPrinter& out) const
-{
- MDefinition::printOpcode(out);
- out.printf(" (%s)", SimdTypeToString(simdType()));
-}
-
void
MControlInstruction::printOpcode(GenericPrinter& out) const
{
diff --git a/js/src/jit/MIR.h b/js/src/jit/MIR.h
index 72c5214845..532f404b51 100644
--- a/js/src/jit/MIR.h
+++ b/js/src/jit/MIR.h
@@ -15,7 +15,6 @@
#include "mozilla/Attributes.h"
#include "mozilla/MacroForEach.h"
-#include "builtin/SIMD.h"
#include "jit/AtomicOp.h"
#include "jit/BaselineIC.h"
#include "jit/FixedList.h"
@@ -80,63 +79,6 @@ MIRType MIRTypeFromValue(const js::Value& vp)
return MIRTypeFromValueType(vp.extractNonDoubleType());
}
-// If simdType is one of the SIMD types suported by Ion, set mirType to the
-// corresponding MIRType, and return true.
-//
-// If simdType is not suported by Ion, return false.
-static inline MOZ_MUST_USE
-bool MaybeSimdTypeToMIRType(SimdType type, MIRType* mirType)
-{
- switch (type) {
- case SimdType::Uint32x4:
- case SimdType::Int32x4: *mirType = MIRType::Int32x4; return true;
- case SimdType::Uint16x8:
- case SimdType::Int16x8: *mirType = MIRType::Int16x8; return true;
- case SimdType::Uint8x16:
- case SimdType::Int8x16: *mirType = MIRType::Int8x16; return true;
- case SimdType::Float32x4: *mirType = MIRType::Float32x4; return true;
- case SimdType::Bool32x4: *mirType = MIRType::Bool32x4; return true;
- case SimdType::Bool16x8: *mirType = MIRType::Bool16x8; return true;
- case SimdType::Bool8x16: *mirType = MIRType::Bool8x16; return true;
- default: return false;
- }
-}
-
-// Convert a SimdType to the corresponding MIRType, or crash.
-//
-// Note that this is not an injective mapping: SimdType has signed and unsigned
-// integer types that map to the same MIRType.
-static inline
-MIRType SimdTypeToMIRType(SimdType type)
-{
- MIRType ret = MIRType::None;
- JS_ALWAYS_TRUE(MaybeSimdTypeToMIRType(type, &ret));
- return ret;
-}
-
-static inline
-SimdType MIRTypeToSimdType(MIRType type)
-{
- switch (type) {
- case MIRType::Int32x4: return SimdType::Int32x4;
- case MIRType::Int16x8: return SimdType::Int16x8;
- case MIRType::Int8x16: return SimdType::Int8x16;
- case MIRType::Float32x4: return SimdType::Float32x4;
- case MIRType::Bool32x4: return SimdType::Bool32x4;
- case MIRType::Bool16x8: return SimdType::Bool16x8;
- case MIRType::Bool8x16: return SimdType::Bool8x16;
- default: break;
- }
- MOZ_CRASH("unhandled MIRType");
-}
-
-// Get the boolean MIRType with the same shape as type.
-static inline
-MIRType MIRTypeToBooleanSimdType(MIRType type)
-{
- return SimdTypeToMIRType(GetBooleanSimdType(MIRTypeToSimdType(type)));
-}
-
#define MIR_FLAG_LIST(_) \
_(InWorklist) \
_(EmittedAtUses) \
@@ -1708,1036 +1650,6 @@ class MConstant : public MNullaryInstruction
bool appendRoots(MRootList& roots) const override;
};
-// Generic constructor of SIMD valuesX4.
-class MSimdValueX4
- : public MQuaternaryInstruction,
- public Mix4Policy<SimdScalarPolicy<0>, SimdScalarPolicy<1>,
- SimdScalarPolicy<2>, SimdScalarPolicy<3> >::Data
-{
- protected:
- MSimdValueX4(MIRType type, MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w)
- : MQuaternaryInstruction(x, y, z, w)
- {
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT(SimdTypeToLength(type) == 4);
-
- setMovable();
- setResultType(type);
- }
-
- public:
- INSTRUCTION_HEADER(SimdValueX4)
- TRIVIAL_NEW_WRAPPERS
-
- bool canConsumeFloat32(MUse* use) const override {
- return SimdTypeToLaneType(type()) == MIRType::Float32;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
-
- MDefinition* foldsTo(TempAllocator& alloc) override;
-
- ALLOW_CLONE(MSimdValueX4)
-};
-
-// Generic constructor of SIMD values with identical lanes.
-class MSimdSplat
- : public MUnaryInstruction,
- public SimdScalarPolicy<0>::Data
-{
- protected:
- MSimdSplat(MDefinition* v, MIRType type)
- : MUnaryInstruction(v)
- {
- MOZ_ASSERT(IsSimdType(type));
- setMovable();
- setResultType(type);
- }
-
- public:
- INSTRUCTION_HEADER(SimdSplat)
- TRIVIAL_NEW_WRAPPERS
-
- bool canConsumeFloat32(MUse* use) const override {
- return SimdTypeToLaneType(type()) == MIRType::Float32;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
-
- MDefinition* foldsTo(TempAllocator& alloc) override;
-
- ALLOW_CLONE(MSimdSplat)
-};
-
-// A constant SIMD value.
-class MSimdConstant
- : public MNullaryInstruction
-{
- SimdConstant value_;
-
- protected:
- MSimdConstant(const SimdConstant& v, MIRType type) : value_(v) {
- MOZ_ASSERT(IsSimdType(type));
- setMovable();
- setResultType(type);
- }
-
- public:
- INSTRUCTION_HEADER(SimdConstant)
- TRIVIAL_NEW_WRAPPERS
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!ins->isSimdConstant())
- return false;
- // Bool32x4 and Int32x4 share the same underlying SimdConstant representation.
- if (type() != ins->type())
- return false;
- return value() == ins->toSimdConstant()->value();
- }
-
- const SimdConstant& value() const {
- return value_;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- ALLOW_CLONE(MSimdConstant)
-};
-
-// Converts all lanes of a given vector into the type of another vector
-class MSimdConvert
- : public MUnaryInstruction,
- public SimdPolicy<0>::Data
-{
- // When either fromType or toType is an integer vector, should it be treated
- // as signed or unsigned. Note that we don't support int-int conversions -
- // use MSimdReinterpretCast for that.
- SimdSign sign_;
- wasm::TrapOffset trapOffset_;
-
- MSimdConvert(MDefinition* obj, MIRType toType, SimdSign sign, wasm::TrapOffset trapOffset)
- : MUnaryInstruction(obj), sign_(sign), trapOffset_(trapOffset)
- {
- MIRType fromType = obj->type();
- MOZ_ASSERT(IsSimdType(fromType));
- MOZ_ASSERT(IsSimdType(toType));
- // All conversions are int <-> float, so signedness is required.
- MOZ_ASSERT(sign != SimdSign::NotApplicable);
-
- setResultType(toType);
- specialization_ = fromType; // expects fromType as input
-
- setMovable();
- if (IsFloatingPointSimdType(fromType) && IsIntegerSimdType(toType)) {
- // Does the extra range check => do not remove
- setGuard();
- }
- }
-
- static MSimdConvert* New(TempAllocator& alloc, MDefinition* obj, MIRType toType, SimdSign sign,
- wasm::TrapOffset trapOffset)
- {
- return new (alloc) MSimdConvert(obj, toType, sign, trapOffset);
- }
-
- public:
- INSTRUCTION_HEADER(SimdConvert)
-
- // Create a MSimdConvert instruction and add it to the basic block.
- // Possibly create and add an equivalent sequence of instructions instead if
- // the current target doesn't support the requested conversion directly.
- // Return the inserted MInstruction that computes the converted value.
- static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* obj,
- MIRType toType, SimdSign sign,
- wasm::TrapOffset trapOffset = wasm::TrapOffset());
-
- SimdSign signedness() const {
- return sign_;
- }
- wasm::TrapOffset trapOffset() const {
- return trapOffset_;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
- bool congruentTo(const MDefinition* ins) const override {
- if (!congruentIfOperandsEqual(ins))
- return false;
- const MSimdConvert* other = ins->toSimdConvert();
- return sign_ == other->sign_;
- }
- ALLOW_CLONE(MSimdConvert)
-};
-
-// Casts bits of a vector input to another SIMD type (doesn't generate code).
-class MSimdReinterpretCast
- : public MUnaryInstruction,
- public SimdPolicy<0>::Data
-{
- MSimdReinterpretCast(MDefinition* obj, MIRType toType)
- : MUnaryInstruction(obj)
- {
- MIRType fromType = obj->type();
- MOZ_ASSERT(IsSimdType(fromType));
- MOZ_ASSERT(IsSimdType(toType));
- setMovable();
- setResultType(toType);
- specialization_ = fromType; // expects fromType as input
- }
-
- public:
- INSTRUCTION_HEADER(SimdReinterpretCast)
- TRIVIAL_NEW_WRAPPERS
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
- ALLOW_CLONE(MSimdReinterpretCast)
-};
-
-// Extracts a lane element from a given vector type, given by its lane symbol.
-//
-// For integer SIMD types, a SimdSign must be provided so the lane value can be
-// converted to a scalar correctly.
-class MSimdExtractElement
- : public MUnaryInstruction,
- public SimdPolicy<0>::Data
-{
- protected:
- unsigned lane_;
- SimdSign sign_;
-
- MSimdExtractElement(MDefinition* obj, MIRType laneType, unsigned lane, SimdSign sign)
- : MUnaryInstruction(obj), lane_(lane), sign_(sign)
- {
- MIRType vecType = obj->type();
- MOZ_ASSERT(IsSimdType(vecType));
- MOZ_ASSERT(lane < SimdTypeToLength(vecType));
- MOZ_ASSERT(!IsSimdType(laneType));
- MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(vecType),
- "Signedness must be specified for integer SIMD extractLanes");
- // The resulting type should match the lane type.
- // Allow extracting boolean lanes directly into an Int32 (for wasm).
- // Allow extracting Uint32 lanes into a double.
- //
- // We also allow extracting Uint32 lanes into a MIRType::Int32. This is
- // equivalent to extracting the Uint32 lane to a double and then
- // applying MTruncateToInt32, but it bypasses the conversion to/from
- // double.
- MOZ_ASSERT(SimdTypeToLaneType(vecType) == laneType ||
- (IsBooleanSimdType(vecType) && laneType == MIRType::Int32) ||
- (vecType == MIRType::Int32x4 && laneType == MIRType::Double &&
- sign == SimdSign::Unsigned));
-
- setMovable();
- specialization_ = vecType;
- setResultType(laneType);
- }
-
- public:
- INSTRUCTION_HEADER(SimdExtractElement)
- TRIVIAL_NEW_WRAPPERS
-
- unsigned lane() const {
- return lane_;
- }
-
- SimdSign signedness() const {
- return sign_;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
- bool congruentTo(const MDefinition* ins) const override {
- if (!ins->isSimdExtractElement())
- return false;
- const MSimdExtractElement* other = ins->toSimdExtractElement();
- if (other->lane_ != lane_ || other->sign_ != sign_)
- return false;
- return congruentIfOperandsEqual(other);
- }
- ALLOW_CLONE(MSimdExtractElement)
-};
-
-// Replaces the datum in the given lane by a scalar value of the same type.
-class MSimdInsertElement
- : public MBinaryInstruction,
- public MixPolicy< SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
-{
- private:
- unsigned lane_;
-
- MSimdInsertElement(MDefinition* vec, MDefinition* val, unsigned lane)
- : MBinaryInstruction(vec, val), lane_(lane)
- {
- MIRType type = vec->type();
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT(lane < SimdTypeToLength(type));
- setMovable();
- setResultType(type);
- }
-
- public:
- INSTRUCTION_HEADER(SimdInsertElement)
- TRIVIAL_NEW_WRAPPERS
- NAMED_OPERANDS((0, vector), (1, value))
-
- unsigned lane() const {
- return lane_;
- }
-
- bool canConsumeFloat32(MUse* use) const override {
- return use == getUseFor(1) && SimdTypeToLaneType(type()) == MIRType::Float32;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- return binaryCongruentTo(ins) && lane_ == ins->toSimdInsertElement()->lane();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdInsertElement)
-};
-
-// Returns true if all lanes are true.
-class MSimdAllTrue
- : public MUnaryInstruction,
- public SimdPolicy<0>::Data
-{
- protected:
- explicit MSimdAllTrue(MDefinition* obj, MIRType result)
- : MUnaryInstruction(obj)
- {
- MIRType simdType = obj->type();
- MOZ_ASSERT(IsBooleanSimdType(simdType));
- MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
- setResultType(result);
- specialization_ = simdType;
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdAllTrue)
- TRIVIAL_NEW_WRAPPERS
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
- ALLOW_CLONE(MSimdAllTrue)
-};
-
-// Returns true if any lane is true.
-class MSimdAnyTrue
- : public MUnaryInstruction,
- public SimdPolicy<0>::Data
-{
- protected:
- explicit MSimdAnyTrue(MDefinition* obj, MIRType result)
- : MUnaryInstruction(obj)
- {
- MIRType simdType = obj->type();
- MOZ_ASSERT(IsBooleanSimdType(simdType));
- MOZ_ASSERT(result == MIRType::Boolean || result == MIRType::Int32);
- setResultType(result);
- specialization_ = simdType;
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdAnyTrue)
- TRIVIAL_NEW_WRAPPERS
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
-
- ALLOW_CLONE(MSimdAnyTrue)
-};
-
-// Base for the MSimdSwizzle and MSimdShuffle classes.
-class MSimdShuffleBase
-{
- protected:
- // As of now, there are at most 16 lanes. For each lane, we need to know
- // which input we choose and which of the lanes we choose.
- mozilla::Array<uint8_t, 16> lane_;
- uint32_t arity_;
-
- MSimdShuffleBase(const uint8_t lanes[], MIRType type)
- {
- arity_ = SimdTypeToLength(type);
- for (unsigned i = 0; i < arity_; i++)
- lane_[i] = lanes[i];
- }
-
- bool sameLanes(const MSimdShuffleBase* other) const {
- return arity_ == other->arity_ &&
- memcmp(&lane_[0], &other->lane_[0], arity_) == 0;
- }
-
- public:
- unsigned numLanes() const {
- return arity_;
- }
-
- unsigned lane(unsigned i) const {
- MOZ_ASSERT(i < arity_);
- return lane_[i];
- }
-
- bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
- return arity_ == 4 && lane(0) == x && lane(1) == y && lane(2) == z &&
- lane(3) == w;
- }
-};
-
-// Applies a swizzle operation to the input, putting the input lanes as
-// indicated in the output register's lanes. This implements the SIMD.js
-// "swizzle" function, that takes one vector and an array of lane indexes.
-class MSimdSwizzle
- : public MUnaryInstruction,
- public MSimdShuffleBase,
- public NoTypePolicy::Data
-{
- protected:
- MSimdSwizzle(MDefinition* obj, const uint8_t lanes[])
- : MUnaryInstruction(obj), MSimdShuffleBase(lanes, obj->type())
- {
- for (unsigned i = 0; i < arity_; i++)
- MOZ_ASSERT(lane(i) < arity_);
- setResultType(obj->type());
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdSwizzle)
- TRIVIAL_NEW_WRAPPERS
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!ins->isSimdSwizzle())
- return false;
- const MSimdSwizzle* other = ins->toSimdSwizzle();
- return sameLanes(other) && congruentIfOperandsEqual(other);
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- MDefinition* foldsTo(TempAllocator& alloc) override;
-
- ALLOW_CLONE(MSimdSwizzle)
-};
-
-// A "general shuffle" is a swizzle or a shuffle with non-constant lane
-// indices. This is the one that Ion inlines and it can be folded into a
-// MSimdSwizzle/MSimdShuffle if lane indices are constant. Performance of
-// general swizzle/shuffle does not really matter, as we expect to get
-// constant indices most of the time.
-class MSimdGeneralShuffle :
- public MVariadicInstruction,
- public SimdShufflePolicy::Data
-{
- unsigned numVectors_;
- unsigned numLanes_;
-
- protected:
- MSimdGeneralShuffle(unsigned numVectors, unsigned numLanes, MIRType type)
- : numVectors_(numVectors), numLanes_(numLanes)
- {
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT(SimdTypeToLength(type) == numLanes_);
-
- setResultType(type);
- specialization_ = type;
- setGuard(); // throws if lane index is out of bounds
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdGeneralShuffle);
- TRIVIAL_NEW_WRAPPERS
-
- MOZ_MUST_USE bool init(TempAllocator& alloc) {
- return MVariadicInstruction::init(alloc, numVectors_ + numLanes_);
- }
- void setVector(unsigned i, MDefinition* vec) {
- MOZ_ASSERT(i < numVectors_);
- initOperand(i, vec);
- }
- void setLane(unsigned i, MDefinition* laneIndex) {
- MOZ_ASSERT(i < numLanes_);
- initOperand(numVectors_ + i, laneIndex);
- }
-
- unsigned numVectors() const {
- return numVectors_;
- }
- unsigned numLanes() const {
- return numLanes_;
- }
- MDefinition* vector(unsigned i) const {
- MOZ_ASSERT(i < numVectors_);
- return getOperand(i);
- }
- MDefinition* lane(unsigned i) const {
- MOZ_ASSERT(i < numLanes_);
- return getOperand(numVectors_ + i);
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!ins->isSimdGeneralShuffle())
- return false;
- const MSimdGeneralShuffle* other = ins->toSimdGeneralShuffle();
- return numVectors_ == other->numVectors() &&
- numLanes_ == other->numLanes() &&
- congruentIfOperandsEqual(other);
- }
-
- MDefinition* foldsTo(TempAllocator& alloc) override;
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-};
-
-// Applies a shuffle operation to the inputs. The lane indexes select a source
-// lane from the concatenation of the two input vectors.
-class MSimdShuffle
- : public MBinaryInstruction,
- public MSimdShuffleBase,
- public NoTypePolicy::Data
-{
- MSimdShuffle(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[])
- : MBinaryInstruction(lhs, rhs), MSimdShuffleBase(lanes, lhs->type())
- {
- MOZ_ASSERT(IsSimdType(lhs->type()));
- MOZ_ASSERT(IsSimdType(rhs->type()));
- MOZ_ASSERT(lhs->type() == rhs->type());
- for (unsigned i = 0; i < arity_; i++)
- MOZ_ASSERT(lane(i) < 2 * arity_);
- setResultType(lhs->type());
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdShuffle)
-
- static MInstruction* New(TempAllocator& alloc, MDefinition* lhs, MDefinition* rhs,
- const uint8_t lanes[])
- {
- unsigned arity = SimdTypeToLength(lhs->type());
-
- // Swap operands so that new lanes come from LHS in majority.
- // In the balanced case, swap operands if needs be, in order to be able
- // to do only one vshufps on x86.
- unsigned lanesFromLHS = 0;
- for (unsigned i = 0; i < arity; i++) {
- if (lanes[i] < arity)
- lanesFromLHS++;
- }
-
- if (lanesFromLHS < arity / 2 ||
- (arity == 4 && lanesFromLHS == 2 && lanes[0] >= 4 && lanes[1] >= 4)) {
- mozilla::Array<uint8_t, 16> newLanes;
- for (unsigned i = 0; i < arity; i++)
- newLanes[i] = (lanes[i] + arity) % (2 * arity);
- return New(alloc, rhs, lhs, &newLanes[0]);
- }
-
- // If all lanes come from the same vector, just use swizzle instead.
- if (lanesFromLHS == arity)
- return MSimdSwizzle::New(alloc, lhs, lanes);
-
- return new(alloc) MSimdShuffle(lhs, rhs, lanes);
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!ins->isSimdShuffle())
- return false;
- const MSimdShuffle* other = ins->toSimdShuffle();
- return sameLanes(other) && binaryCongruentTo(other);
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- ALLOW_CLONE(MSimdShuffle)
-};
-
-class MSimdUnaryArith
- : public MUnaryInstruction,
- public SimdSameAsReturnedTypePolicy<0>::Data
-{
- public:
- enum Operation {
-#define OP_LIST_(OP) OP,
- FOREACH_FLOAT_SIMD_UNOP(OP_LIST_)
- neg,
- not_
-#undef OP_LIST_
- };
-
- static const char* OperationName(Operation op) {
- switch (op) {
- case abs: return "abs";
- case neg: return "neg";
- case not_: return "not";
- case reciprocalApproximation: return "reciprocalApproximation";
- case reciprocalSqrtApproximation: return "reciprocalSqrtApproximation";
- case sqrt: return "sqrt";
- }
- MOZ_CRASH("unexpected operation");
- }
-
- private:
- Operation operation_;
-
- MSimdUnaryArith(MDefinition* def, Operation op)
- : MUnaryInstruction(def), operation_(op)
- {
- MIRType type = def->type();
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT_IF(IsIntegerSimdType(type), op == neg || op == not_);
- setResultType(type);
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdUnaryArith)
- TRIVIAL_NEW_WRAPPERS
-
- Operation operation() const { return operation_; }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins) && ins->toSimdUnaryArith()->operation() == operation();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdUnaryArith);
-};
-
-// Compares each value of a SIMD vector to each corresponding lane's value of
-// another SIMD vector, and returns a boolean vector containing the results of
-// the comparison: all bits are set to 1 if the comparison is true, 0 otherwise.
-// When comparing integer vectors, a SimdSign must be provided to request signed
-// or unsigned comparison.
-class MSimdBinaryComp
- : public MBinaryInstruction,
- public SimdAllPolicy::Data
-{
- public:
- enum Operation {
-#define NAME_(x) x,
- FOREACH_COMP_SIMD_OP(NAME_)
-#undef NAME_
- };
-
- static const char* OperationName(Operation op) {
- switch (op) {
-#define NAME_(x) case x: return #x;
- FOREACH_COMP_SIMD_OP(NAME_)
-#undef NAME_
- }
- MOZ_CRASH("unexpected operation");
- }
-
- private:
- Operation operation_;
- SimdSign sign_;
-
- MSimdBinaryComp(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
- : MBinaryInstruction(left, right), operation_(op), sign_(sign)
- {
- MOZ_ASSERT(left->type() == right->type());
- MIRType opType = left->type();
- MOZ_ASSERT(IsSimdType(opType));
- MOZ_ASSERT((sign != SimdSign::NotApplicable) == IsIntegerSimdType(opType),
- "Signedness must be specified for integer SIMD compares");
- setResultType(MIRTypeToBooleanSimdType(opType));
- specialization_ = opType;
- setMovable();
- if (op == equal || op == notEqual)
- setCommutative();
- }
-
- static MSimdBinaryComp* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
- Operation op, SimdSign sign)
- {
- return new (alloc) MSimdBinaryComp(left, right, op, sign);
- }
-
- public:
- INSTRUCTION_HEADER(SimdBinaryComp)
-
- // Create a MSimdBinaryComp or an equivalent sequence of instructions
- // supported by the current target.
- // Add all instructions to the basic block |addTo|.
- static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op, SimdSign sign);
-
- AliasSet getAliasSet() const override
- {
- return AliasSet::None();
- }
-
- Operation operation() const { return operation_; }
- SimdSign signedness() const { return sign_; }
- MIRType specialization() const { return specialization_; }
-
- // Swap the operands and reverse the comparison predicate.
- void reverse() {
- switch (operation()) {
- case greaterThan: operation_ = lessThan; break;
- case greaterThanOrEqual: operation_ = lessThanOrEqual; break;
- case lessThan: operation_ = greaterThan; break;
- case lessThanOrEqual: operation_ = greaterThanOrEqual; break;
- case equal:
- case notEqual:
- break;
- default: MOZ_CRASH("Unexpected compare operation");
- }
- swapOperands();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!binaryCongruentTo(ins))
- return false;
- const MSimdBinaryComp* other = ins->toSimdBinaryComp();
- return specialization_ == other->specialization() &&
- operation_ == other->operation() &&
- sign_ == other->signedness();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdBinaryComp)
-};
-
-class MSimdBinaryArith
- : public MBinaryInstruction,
- public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
-{
- public:
- enum Operation {
-#define OP_LIST_(OP) Op_##OP,
- FOREACH_NUMERIC_SIMD_BINOP(OP_LIST_)
- FOREACH_FLOAT_SIMD_BINOP(OP_LIST_)
-#undef OP_LIST_
- };
-
- static const char* OperationName(Operation op) {
- switch (op) {
-#define OP_CASE_LIST_(OP) case Op_##OP: return #OP;
- FOREACH_NUMERIC_SIMD_BINOP(OP_CASE_LIST_)
- FOREACH_FLOAT_SIMD_BINOP(OP_CASE_LIST_)
-#undef OP_CASE_LIST_
- }
- MOZ_CRASH("unexpected operation");
- }
-
- private:
- Operation operation_;
-
- MSimdBinaryArith(MDefinition* left, MDefinition* right, Operation op)
- : MBinaryInstruction(left, right), operation_(op)
- {
- MOZ_ASSERT(left->type() == right->type());
- MIRType type = left->type();
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT_IF(IsIntegerSimdType(type), op == Op_add || op == Op_sub || op == Op_mul);
- setResultType(type);
- setMovable();
- if (op == Op_add || op == Op_mul || op == Op_min || op == Op_max)
- setCommutative();
- }
-
- static MSimdBinaryArith* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
- Operation op)
- {
- return new (alloc) MSimdBinaryArith(left, right, op);
- }
-
- public:
- INSTRUCTION_HEADER(SimdBinaryArith)
-
- // Create an MSimdBinaryArith instruction and add it to the basic block. Possibly
- // create and add an equivalent sequence of instructions instead if the
- // current target doesn't support the requested shift operation directly.
- static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op);
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- Operation operation() const { return operation_; }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!binaryCongruentTo(ins))
- return false;
- return operation_ == ins->toSimdBinaryArith()->operation();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdBinaryArith)
-};
-
-class MSimdBinarySaturating
- : public MBinaryInstruction,
- public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1>>::Data
-{
- public:
- enum Operation
- {
- add,
- sub,
- };
-
- static const char* OperationName(Operation op)
- {
- switch (op) {
- case add:
- return "add";
- case sub:
- return "sub";
- }
- MOZ_CRASH("unexpected operation");
- }
-
- private:
- Operation operation_;
- SimdSign sign_;
-
- MSimdBinarySaturating(MDefinition* left, MDefinition* right, Operation op, SimdSign sign)
- : MBinaryInstruction(left, right)
- , operation_(op)
- , sign_(sign)
- {
- MOZ_ASSERT(left->type() == right->type());
- MIRType type = left->type();
- MOZ_ASSERT(type == MIRType::Int8x16 || type == MIRType::Int16x8);
- setResultType(type);
- setMovable();
- if (op == add)
- setCommutative();
- }
-
- public:
- INSTRUCTION_HEADER(SimdBinarySaturating)
- TRIVIAL_NEW_WRAPPERS
-
- AliasSet getAliasSet() const override { return AliasSet::None(); }
-
- Operation operation() const { return operation_; }
- SimdSign signedness() const { return sign_; }
-
- bool congruentTo(const MDefinition* ins) const override
- {
- if (!binaryCongruentTo(ins))
- return false;
- return operation_ == ins->toSimdBinarySaturating()->operation() &&
- sign_ == ins->toSimdBinarySaturating()->signedness();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdBinarySaturating)
-};
-
-class MSimdBinaryBitwise
- : public MBinaryInstruction,
- public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >::Data
-{
- public:
- enum Operation {
- and_,
- or_,
- xor_
- };
-
- static const char* OperationName(Operation op) {
- switch (op) {
- case and_: return "and";
- case or_: return "or";
- case xor_: return "xor";
- }
- MOZ_CRASH("unexpected operation");
- }
-
- private:
- Operation operation_;
-
- MSimdBinaryBitwise(MDefinition* left, MDefinition* right, Operation op)
- : MBinaryInstruction(left, right), operation_(op)
- {
- MOZ_ASSERT(left->type() == right->type());
- MIRType type = left->type();
- MOZ_ASSERT(IsSimdType(type));
- setResultType(type);
- setMovable();
- setCommutative();
- }
-
- public:
- INSTRUCTION_HEADER(SimdBinaryBitwise)
- TRIVIAL_NEW_WRAPPERS
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- Operation operation() const { return operation_; }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!binaryCongruentTo(ins))
- return false;
- return operation_ == ins->toSimdBinaryBitwise()->operation();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- ALLOW_CLONE(MSimdBinaryBitwise)
-};
-
-class MSimdShift
- : public MBinaryInstruction,
- public MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >::Data
-{
- public:
- enum Operation {
- lsh,
- rsh,
- ursh
- };
-
- private:
- Operation operation_;
-
- MSimdShift(MDefinition* left, MDefinition* right, Operation op)
- : MBinaryInstruction(left, right), operation_(op)
- {
- MIRType type = left->type();
- MOZ_ASSERT(IsIntegerSimdType(type));
- setResultType(type);
- setMovable();
- }
-
- static MSimdShift* New(TempAllocator& alloc, MDefinition* left, MDefinition* right,
- Operation op)
- {
- return new (alloc) MSimdShift(left, right, op);
- }
-
- public:
- INSTRUCTION_HEADER(SimdShift)
-
- // Create an MSimdShift instruction and add it to the basic block. Possibly
- // create and add an equivalent sequence of instructions instead if the
- // current target doesn't support the requested shift operation directly.
- // Return the inserted MInstruction that computes the shifted value.
- static MInstruction* AddLegalized(TempAllocator& alloc, MBasicBlock* addTo, MDefinition* left,
- MDefinition* right, Operation op);
-
- // Get the relevant right shift operation given the signedness of a type.
- static Operation rshForSign(SimdSign sign) {
- return sign == SimdSign::Unsigned ? ursh : rsh;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- Operation operation() const { return operation_; }
-
- static const char* OperationName(Operation op) {
- switch (op) {
- case lsh: return "lsh";
- case rsh: return "rsh-arithmetic";
- case ursh: return "rsh-logical";
- }
- MOZ_CRASH("unexpected operation");
- }
-
- void printOpcode(GenericPrinter& out) const override;
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!binaryCongruentTo(ins))
- return false;
- return operation_ == ins->toSimdShift()->operation();
- }
-
- ALLOW_CLONE(MSimdShift)
-};
-
-class MSimdSelect
- : public MTernaryInstruction,
- public SimdSelectPolicy::Data
-{
- MSimdSelect(MDefinition* mask, MDefinition* lhs, MDefinition* rhs)
- : MTernaryInstruction(mask, lhs, rhs)
- {
- MOZ_ASSERT(IsBooleanSimdType(mask->type()));
- MOZ_ASSERT(lhs->type() == lhs->type());
- MIRType type = lhs->type();
- MOZ_ASSERT(IsSimdType(type));
- setResultType(type);
- specialization_ = type;
- setMovable();
- }
-
- public:
- INSTRUCTION_HEADER(SimdSelect)
- TRIVIAL_NEW_WRAPPERS
- NAMED_OPERANDS((0, mask))
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- return congruentIfOperandsEqual(ins);
- }
-
- ALLOW_CLONE(MSimdSelect)
-};
-
// Deep clone a constant JSObject.
class MCloneLiteral
: public MUnaryInstruction,
@@ -3585,117 +2497,6 @@ class MTypedObjectDescr
}
};
-// Generic way for constructing a SIMD object in IonMonkey, this instruction
-// takes as argument a SIMD instruction and returns a new SIMD object which
-// corresponds to the MIRType of its operand.
-class MSimdBox
- : public MUnaryInstruction,
- public NoTypePolicy::Data
-{
- protected:
- CompilerGCPointer<InlineTypedObject*> templateObject_;
- SimdType simdType_;
- gc::InitialHeap initialHeap_;
-
- MSimdBox(CompilerConstraintList* constraints,
- MDefinition* op,
- InlineTypedObject* templateObject,
- SimdType simdType,
- gc::InitialHeap initialHeap)
- : MUnaryInstruction(op),
- templateObject_(templateObject),
- simdType_(simdType),
- initialHeap_(initialHeap)
- {
- MOZ_ASSERT(IsSimdType(op->type()));
- setMovable();
- setResultType(MIRType::Object);
- if (constraints)
- setResultTypeSet(MakeSingletonTypeSet(constraints, templateObject));
- }
-
- public:
- INSTRUCTION_HEADER(SimdBox)
- TRIVIAL_NEW_WRAPPERS
-
- InlineTypedObject* templateObject() const {
- return templateObject_;
- }
-
- SimdType simdType() const {
- return simdType_;
- }
-
- gc::InitialHeap initialHeap() const {
- return initialHeap_;
- }
-
- bool congruentTo(const MDefinition* ins) const override {
- if (!congruentIfOperandsEqual(ins))
- return false;
- const MSimdBox* box = ins->toSimdBox();
- if (box->simdType() != simdType())
- return false;
- MOZ_ASSERT(box->templateObject() == templateObject());
- if (box->initialHeap() != initialHeap())
- return false;
- return true;
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- void printOpcode(GenericPrinter& out) const override;
- MOZ_MUST_USE bool writeRecoverData(CompactBufferWriter& writer) const override;
- bool canRecoverOnBailout() const override {
- return true;
- }
-
- bool appendRoots(MRootList& roots) const override {
- return roots.append(templateObject_);
- }
-};
-
-class MSimdUnbox
- : public MUnaryInstruction,
- public SingleObjectPolicy::Data
-{
- protected:
- SimdType simdType_;
-
- MSimdUnbox(MDefinition* op, SimdType simdType)
- : MUnaryInstruction(op),
- simdType_(simdType)
- {
- MIRType type = SimdTypeToMIRType(simdType);
- MOZ_ASSERT(IsSimdType(type));
- setGuard();
- setMovable();
- setResultType(type);
- }
-
- public:
- INSTRUCTION_HEADER(SimdUnbox)
- TRIVIAL_NEW_WRAPPERS
- ALLOW_CLONE(MSimdUnbox)
-
- SimdType simdType() const { return simdType_; }
-
- MDefinition* foldsTo(TempAllocator& alloc) override;
- bool congruentTo(const MDefinition* ins) const override {
- if (!congruentIfOperandsEqual(ins))
- return false;
- return ins->toSimdUnbox()->simdType() == simdType();
- }
-
- AliasSet getAliasSet() const override {
- return AliasSet::None();
- }
-
- void printOpcode(GenericPrinter& out) const override;
-};
-
// Creates a new derived type object. At runtime, this is just a call
// to `BinaryBlock::createDerived()`. That is, the MIR itself does not
// compile to particularly optimized code. However, using a distinct
@@ -10041,7 +8842,6 @@ class MLoadUnboxedScalar
{
Scalar::Type storageType_;
Scalar::Type readType_;
- unsigned numElems_; // used only for SIMD
bool requiresBarrier_;
int32_t offsetAdjustment_;
bool canonicalizeDoubles_;
@@ -10052,7 +8852,6 @@ class MLoadUnboxedScalar
: MBinaryInstruction(elements, index),
storageType_(storageType),
readType_(storageType),
- numElems_(1),
requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
offsetAdjustment_(offsetAdjustment),
canonicalizeDoubles_(canonicalizeDoubles)
@@ -10072,13 +8871,6 @@ class MLoadUnboxedScalar
TRIVIAL_NEW_WRAPPERS
NAMED_OPERANDS((0, elements), (1, index))
- void setSimdRead(Scalar::Type type, unsigned numElems) {
- readType_ = type;
- numElems_ = numElems;
- }
- unsigned numElems() const {
- return numElems_;
- }
Scalar::Type readType() const {
return readType_;
}
@@ -10120,8 +8912,6 @@ class MLoadUnboxedScalar
return false;
if (readType_ != other->readType_)
return false;
- if (numElems_ != other->numElems_)
- return false;
if (offsetAdjustment() != other->offsetAdjustment())
return false;
if (canonicalizeDoubles() != other->canonicalizeDoubles())
@@ -10263,7 +9053,7 @@ class StoreUnboxedScalarBase
explicit StoreUnboxedScalarBase(Scalar::Type writeType)
: writeType_(writeType)
{
- MOZ_ASSERT(isIntegerWrite() || isFloatWrite() || isSimdWrite());
+ MOZ_ASSERT(isIntegerWrite() || isFloatWrite());
}
public:
@@ -10289,9 +9079,6 @@ class StoreUnboxedScalarBase
return writeType_ == Scalar::Float32 ||
writeType_ == Scalar::Float64;
}
- bool isSimdWrite() const {
- return Scalar::isSimdType(writeType());
- }
};
// Store an unboxed scalar value to a typed array or other object.
@@ -10314,7 +9101,6 @@ class MStoreUnboxedScalar
bool requiresBarrier_;
int32_t offsetAdjustment_;
- unsigned numElems_; // used only for SIMD
MStoreUnboxedScalar(MDefinition* elements, MDefinition* index, MDefinition* value,
Scalar::Type storageType, TruncateInputKind truncateInput,
@@ -10325,8 +9111,7 @@ class MStoreUnboxedScalar
storageType_(storageType),
truncateInput_(truncateInput),
requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
- offsetAdjustment_(offsetAdjustment),
- numElems_(1)
+ offsetAdjustment_(offsetAdjustment)
{
if (requiresBarrier_)
setGuard(); // Not removable or movable
@@ -10342,14 +9127,6 @@ class MStoreUnboxedScalar
TRIVIAL_NEW_WRAPPERS
NAMED_OPERANDS((0, elements), (1, index), (2, value))
- void setSimdWrite(Scalar::Type writeType, unsigned numElems) {
- MOZ_ASSERT(Scalar::isSimdType(writeType));
- setWriteType(writeType);
- numElems_ = numElems;
- }
- unsigned numElems() const {
- return numElems_;
- }
Scalar::Type storageType() const {
return storageType_;
}
@@ -10401,8 +9178,6 @@ class MStoreTypedArrayElementHole
NAMED_OPERANDS((0, elements), (1, length), (2, index), (3, value))
Scalar::Type arrayType() const {
- MOZ_ASSERT(!Scalar::isSimdType(writeType()),
- "arrayType == writeType iff the write type isn't SIMD");
return writeType();
}
AliasSet getAliasSet() const override {
@@ -13832,7 +12607,6 @@ class MAsmJSMemoryAccess
needsBoundsCheck_(true)
{
MOZ_ASSERT(accessType != Scalar::Uint8Clamped);
- MOZ_ASSERT(!Scalar::isSimdType(accessType));
}
uint32_t offset() const { return offset_; }
@@ -13998,7 +12772,7 @@ class MWasmLoadGlobalVar : public MNullaryInstruction
MWasmLoadGlobalVar(MIRType type, unsigned globalDataOffset, bool isConstant)
: globalDataOffset_(globalDataOffset), isConstant_(isConstant)
{
- MOZ_ASSERT(IsNumberType(type) || IsSimdType(type));
+ MOZ_ASSERT(IsNumberType(type));
setResultType(type);
setMovable();
}
diff --git a/js/src/jit/MIRGenerator.h b/js/src/jit/MIRGenerator.h
index 080205abcf..53e1155be9 100644
--- a/js/src/jit/MIRGenerator.h
+++ b/js/src/jit/MIRGenerator.h
@@ -147,9 +147,6 @@ class MIRGenerator
bool performsCall() const {
return performsCall_;
}
- // Traverses the graph to find if there's any SIMD instruction. Costful but
- // the value is cached, so don't worry about calling it several times.
- bool usesSimd();
bool modifiesFrameArguments() const {
return modifiesFrameArguments_;
@@ -180,8 +177,6 @@ class MIRGenerator
uint32_t wasmMaxStackArgBytes_;
bool performsCall_;
- bool usesSimd_;
- bool cachedUsesSimd_;
// Keep track of whether frame arguments are modified during execution.
// RegAlloc needs to know this as spilling values back to their register
diff --git a/js/src/jit/MIRGraph.cpp b/js/src/jit/MIRGraph.cpp
index 286e859d39..3cba074096 100644
--- a/js/src/jit/MIRGraph.cpp
+++ b/js/src/jit/MIRGraph.cpp
@@ -32,8 +32,6 @@ MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOpti
cancelBuild_(false),
wasmMaxStackArgBytes_(0),
performsCall_(false),
- usesSimd_(false),
- cachedUsesSimd_(false),
modifiesFrameArguments_(false),
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
@@ -44,37 +42,6 @@ MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOpti
{ }
bool
-MIRGenerator::usesSimd()
-{
- if (cachedUsesSimd_)
- return usesSimd_;
-
- cachedUsesSimd_ = true;
- for (ReversePostorderIterator block = graph_->rpoBegin(),
- end = graph_->rpoEnd();
- block != end;
- block++)
- {
- // It's fine to use MInstructionIterator here because we don't have to
- // worry about Phis, since any reachable phi (or phi cycle) will have at
- // least one instruction as an input.
- for (MInstructionIterator inst = block->begin(); inst != block->end(); inst++) {
- // Instructions that have SIMD inputs but not a SIMD type are fine
- // to ignore, as their inputs are also reached at some point. By
- // induction, at least one instruction with a SIMD type is reached
- // at some point.
- if (IsSimdType(inst->type())) {
- MOZ_ASSERT(SupportsSimd);
- usesSimd_ = true;
- return true;
- }
- }
- }
- usesSimd_ = false;
- return false;
-}
-
-bool
MIRGenerator::abortFmt(const char* message, va_list ap)
{
JitSpewVA(JitSpew_IonAbort, message, ap);
diff --git a/js/src/jit/MOpcodes.h b/js/src/jit/MOpcodes.h
index 54c65aff90..aa2dda77a2 100644
--- a/js/src/jit/MOpcodes.h
+++ b/js/src/jit/MOpcodes.h
@@ -11,27 +11,6 @@ namespace jit {
#define MIR_OPCODE_LIST(_) \
_(Constant) \
- _(SimdBox) \
- _(SimdUnbox) \
- _(SimdValueX4) \
- _(SimdSplat) \
- _(SimdConstant) \
- _(SimdConvert) \
- _(SimdReinterpretCast) \
- _(SimdExtractElement) \
- _(SimdInsertElement) \
- _(SimdSwizzle) \
- _(SimdGeneralShuffle) \
- _(SimdShuffle) \
- _(SimdUnaryArith) \
- _(SimdBinaryComp) \
- _(SimdBinaryArith) \
- _(SimdBinarySaturating) \
- _(SimdBinaryBitwise) \
- _(SimdShift) \
- _(SimdSelect) \
- _(SimdAllTrue) \
- _(SimdAnyTrue) \
_(CloneLiteral) \
_(Parameter) \
_(Callee) \
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 07f7b6fdcd..6721dd4419 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -240,8 +240,7 @@ template void MacroAssembler::guardTypeSetMightBeIncomplete(const TemporaryTypeS
template<typename S, typename T>
static void
-StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest,
- unsigned numElems)
+StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, const T& dest)
{
switch (arrayType) {
case Scalar::Float32:
@@ -250,48 +249,6 @@ StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, cons
case Scalar::Float64:
masm.storeDouble(value, dest);
break;
- case Scalar::Float32x4:
- switch (numElems) {
- case 1:
- masm.storeFloat32(value, dest);
- break;
- case 2:
- masm.storeDouble(value, dest);
- break;
- case 3:
- masm.storeFloat32x3(value, dest);
- break;
- case 4:
- masm.storeUnalignedSimd128Float(value, dest);
- break;
- default: MOZ_CRASH("unexpected number of elements in simd write");
- }
- break;
- case Scalar::Int32x4:
- switch (numElems) {
- case 1:
- masm.storeInt32x1(value, dest);
- break;
- case 2:
- masm.storeInt32x2(value, dest);
- break;
- case 3:
- masm.storeInt32x3(value, dest);
- break;
- case 4:
- masm.storeUnalignedSimd128Int(value, dest);
- break;
- default: MOZ_CRASH("unexpected number of elements in simd write");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(numElems == 16, "unexpected partial store");
- masm.storeUnalignedSimd128Int(value, dest);
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(numElems == 8, "unexpected partial store");
- masm.storeUnalignedSimd128Int(value, dest);
- break;
default:
MOZ_CRASH("Invalid typed array type");
}
@@ -299,21 +256,21 @@ StoreToTypedFloatArray(MacroAssembler& masm, int arrayType, const S& value, cons
void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
- const BaseIndex& dest, unsigned numElems)
+ const BaseIndex& dest)
{
- StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
+ StoreToTypedFloatArray(*this, arrayType, value, dest);
}
void
MacroAssembler::storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value,
- const Address& dest, unsigned numElems)
+ const Address& dest)
{
- StoreToTypedFloatArray(*this, arrayType, value, dest, numElems);
+ StoreToTypedFloatArray(*this, arrayType, value, dest);
}
template<typename T>
void
MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp,
- Label* fail, bool canonicalizeDoubles, unsigned numElems)
+ Label* fail, bool canonicalizeDoubles)
{
switch (arrayType) {
case Scalar::Int8:
@@ -359,59 +316,15 @@ MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegi
if (canonicalizeDoubles)
canonicalizeDouble(dest.fpu());
break;
- case Scalar::Int32x4:
- switch (numElems) {
- case 1:
- loadInt32x1(src, dest.fpu());
- break;
- case 2:
- loadInt32x2(src, dest.fpu());
- break;
- case 3:
- loadInt32x3(src, dest.fpu());
- break;
- case 4:
- loadUnalignedSimd128Int(src, dest.fpu());
- break;
- default: MOZ_CRASH("unexpected number of elements in SIMD load");
- }
- break;
- case Scalar::Float32x4:
- switch (numElems) {
- case 1:
- loadFloat32(src, dest.fpu());
- break;
- case 2:
- loadDouble(src, dest.fpu());
- break;
- case 3:
- loadFloat32x3(src, dest.fpu());
- break;
- case 4:
- loadUnalignedSimd128Float(src, dest.fpu());
- break;
- default: MOZ_CRASH("unexpected number of elements in SIMD load");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(numElems == 16, "unexpected partial load");
- loadUnalignedSimd128Int(src, dest.fpu());
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(numElems == 8, "unexpected partial load");
- loadUnalignedSimd128Int(src, dest.fpu());
- break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const Address& src, AnyRegister dest,
- Register temp, Label* fail, bool canonicalizeDoubles,
- unsigned numElems);
+ Register temp, Label* fail, bool canonicalizeDoubles);
template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex& src, AnyRegister dest,
- Register temp, Label* fail, bool canonicalizeDoubles,
- unsigned numElems);
+ Register temp, Label* fail, bool canonicalizeDoubles);
template<typename T>
void
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 173a39014c..f681456396 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1290,9 +1290,6 @@ class MacroAssembler : public MacroAssemblerSpecific
inline void canonicalizeFloat(FloatRegister reg);
inline void canonicalizeFloatIfDeterministic(FloatRegister reg);
- inline void canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
- DEFINED_ON(x86_shared);
-
public:
// ========================================================================
// Memory access primitives.
@@ -1603,7 +1600,7 @@ class MacroAssembler : public MacroAssemblerSpecific
template<typename T>
void loadFromTypedArray(Scalar::Type arrayType, const T& src, AnyRegister dest, Register temp, Label* fail,
- bool canonicalizeDoubles = true, unsigned numElems = 0);
+ bool canonicalizeDoubles = true);
template<typename T>
void loadFromTypedArray(Scalar::Type arrayType, const T& src, const ValueOperand& dest, bool allowDouble,
@@ -1630,10 +1627,8 @@ class MacroAssembler : public MacroAssemblerSpecific
}
}
- void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
- unsigned numElems = 0);
- void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
- unsigned numElems = 0);
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest);
+ void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest);
// Load a property from an UnboxedPlainObject or UnboxedArrayObject.
template <typename T>
diff --git a/js/src/jit/RangeAnalysis.cpp b/js/src/jit/RangeAnalysis.cpp
index 52c737e677..7b35349ab2 100644
--- a/js/src/jit/RangeAnalysis.cpp
+++ b/js/src/jit/RangeAnalysis.cpp
@@ -1785,10 +1785,6 @@ GetTypedArrayRange(TempAllocator& alloc, Scalar::Type type)
case Scalar::Int64:
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
diff --git a/js/src/jit/Recover.cpp b/js/src/jit/Recover.cpp
index 0d6882f52c..28601fa7cd 100644
--- a/js/src/jit/Recover.cpp
+++ b/js/src/jit/Recover.cpp
@@ -14,7 +14,6 @@
#include "jsstr.h"
#include "builtin/RegExp.h"
-#include "builtin/SIMD.h"
#include "builtin/TypedObject.h"
#include "gc/Heap.h"
@@ -1448,79 +1447,6 @@ RLambda::recover(JSContext* cx, SnapshotIterator& iter) const
}
bool
-MSimdBox::writeRecoverData(CompactBufferWriter& writer) const
-{
- MOZ_ASSERT(canRecoverOnBailout());
- writer.writeUnsigned(uint32_t(RInstruction::Recover_SimdBox));
- static_assert(unsigned(SimdType::Count) < 0x100, "assuming SimdType fits in 8 bits");
- writer.writeByte(uint8_t(simdType()));
- return true;
-}
-
-RSimdBox::RSimdBox(CompactBufferReader& reader)
-{
- type_ = reader.readByte();
-}
-
-bool
-RSimdBox::recover(JSContext* cx, SnapshotIterator& iter) const
-{
- JSObject* resultObject = nullptr;
- RValueAllocation a = iter.readAllocation();
- MOZ_ASSERT(iter.allocationReadable(a));
- MOZ_ASSERT_IF(a.mode() == RValueAllocation::ANY_FLOAT_REG, a.fpuReg().isSimd128());
- const FloatRegisters::RegisterContent* raw = iter.floatAllocationPointer(a);
- switch (SimdType(type_)) {
- case SimdType::Bool8x16:
- resultObject = js::CreateSimd<Bool8x16>(cx, (const Bool8x16::Elem*) raw);
- break;
- case SimdType::Int8x16:
- resultObject = js::CreateSimd<Int8x16>(cx, (const Int8x16::Elem*) raw);
- break;
- case SimdType::Uint8x16:
- resultObject = js::CreateSimd<Uint8x16>(cx, (const Uint8x16::Elem*) raw);
- break;
- case SimdType::Bool16x8:
- resultObject = js::CreateSimd<Bool16x8>(cx, (const Bool16x8::Elem*) raw);
- break;
- case SimdType::Int16x8:
- resultObject = js::CreateSimd<Int16x8>(cx, (const Int16x8::Elem*) raw);
- break;
- case SimdType::Uint16x8:
- resultObject = js::CreateSimd<Uint16x8>(cx, (const Uint16x8::Elem*) raw);
- break;
- case SimdType::Bool32x4:
- resultObject = js::CreateSimd<Bool32x4>(cx, (const Bool32x4::Elem*) raw);
- break;
- case SimdType::Int32x4:
- resultObject = js::CreateSimd<Int32x4>(cx, (const Int32x4::Elem*) raw);
- break;
- case SimdType::Uint32x4:
- resultObject = js::CreateSimd<Uint32x4>(cx, (const Uint32x4::Elem*) raw);
- break;
- case SimdType::Float32x4:
- resultObject = js::CreateSimd<Float32x4>(cx, (const Float32x4::Elem*) raw);
- break;
- case SimdType::Float64x2:
- MOZ_CRASH("NYI, RSimdBox of Float64x2");
- break;
- case SimdType::Bool64x2:
- MOZ_CRASH("NYI, RSimdBox of Bool64x2");
- break;
- case SimdType::Count:
- MOZ_CRASH("RSimdBox of Count is unreachable");
- }
-
- if (!resultObject)
- return false;
-
- RootedValue result(cx);
- result.setObject(*resultObject);
- iter.storeInstructionResult(result);
- return true;
-}
-
-bool
MObjectState::writeRecoverData(CompactBufferWriter& writer) const
{
MOZ_ASSERT(canRecoverOnBailout());
diff --git a/js/src/jit/Recover.h b/js/src/jit/Recover.h
index 41e64a9259..0d88e61ed6 100644
--- a/js/src/jit/Recover.h
+++ b/js/src/jit/Recover.h
@@ -104,7 +104,6 @@ namespace jit {
_(NewDerivedTypedObject) \
_(CreateThisWithTemplate) \
_(Lambda) \
- _(SimdBox) \
_(ObjectState) \
_(ArrayState) \
_(AtomicIsLockFree) \
@@ -616,17 +615,6 @@ class RLambda final : public RInstruction
MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
};
-class RSimdBox final : public RInstruction
-{
- private:
- uint8_t type_;
-
- public:
- RINSTRUCTION_HEADER_NUM_OP_(SimdBox, 1)
-
- MOZ_MUST_USE bool recover(JSContext* cx, SnapshotIterator& iter) const override;
-};
-
class RObjectState final : public RInstruction
{
private:
diff --git a/js/src/jit/TypePolicy.cpp b/js/src/jit/TypePolicy.cpp
index 1222cdd2b2..59ac9e2c21 100644
--- a/js/src/jit/TypePolicy.cpp
+++ b/js/src/jit/TypePolicy.cpp
@@ -582,45 +582,6 @@ template bool NoFloatPolicyAfter<2>::adjustInputs(TempAllocator& alloc, MInstruc
template <unsigned Op>
bool
-SimdScalarPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
- MIRType laneType = SimdTypeToLaneType(ins->type());
-
- MDefinition* in = ins->getOperand(Op);
-
- // A vector with boolean lanes requires Int32 inputs that have already been
- // converted to 0/-1.
- // We can't insert a MIRType::Boolean lane directly - it requires conversion.
- if (laneType == MIRType::Boolean) {
- MOZ_ASSERT(in->type() == MIRType::Int32, "Boolean SIMD vector requires Int32 lanes.");
- return true;
- }
-
- if (in->type() == laneType)
- return true;
-
- MInstruction* replace;
- if (laneType == MIRType::Int32) {
- replace = MTruncateToInt32::New(alloc, in);
- } else {
- MOZ_ASSERT(laneType == MIRType::Float32);
- replace = MToFloat32::New(alloc, in);
- }
-
- ins->block()->insertBefore(ins, replace);
- ins->replaceOperand(Op, replace);
-
- return replace->typePolicy()->adjustInputs(alloc, replace);
-}
-
-template bool SimdScalarPolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
-template bool SimdScalarPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
-template bool SimdScalarPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
-template bool SimdScalarPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
-
-template <unsigned Op>
-bool
BoxPolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
{
MDefinition* in = ins->getOperand(Op);
@@ -802,75 +763,6 @@ template bool ObjectPolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruc
template bool ObjectPolicy<2>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
template bool ObjectPolicy<3>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
-template <unsigned Op>
-bool
-SimdSameAsReturnedTypePolicy<Op>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- MOZ_ASSERT(ins->type() == ins->getOperand(Op)->type());
- return true;
-}
-
-template bool
-SimdSameAsReturnedTypePolicy<0>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
-template bool
-SimdSameAsReturnedTypePolicy<1>::staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
-
-bool
-SimdAllPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- for (unsigned i = 0, e = ins->numOperands(); i < e; i++)
- MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
- return true;
-}
-
-template <unsigned Op>
-bool
-SimdPolicy<Op>::adjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- MOZ_ASSERT(ins->typePolicySpecialization() == ins->getOperand(Op)->type());
- return true;
-}
-
-template bool
-SimdPolicy<0>::adjustInputs(TempAllocator& alloc, MInstruction* ins);
-
-bool
-SimdShufflePolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- MSimdGeneralShuffle* s = ins->toSimdGeneralShuffle();
-
- for (unsigned i = 0; i < s->numVectors(); i++)
- MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
-
- // Next inputs are the lanes, which need to be int32
- for (unsigned i = 0; i < s->numLanes(); i++) {
- MDefinition* in = ins->getOperand(s->numVectors() + i);
- if (in->type() == MIRType::Int32)
- continue;
-
- MInstruction* replace = MToInt32::New(alloc, in, MacroAssembler::IntConversion_NumbersOnly);
- ins->block()->insertBefore(ins, replace);
- ins->replaceOperand(s->numVectors() + i, replace);
- if (!replace->typePolicy()->adjustInputs(alloc, replace))
- return false;
- }
-
- return true;
-}
-
-bool
-SimdSelectPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
-{
- // First input is the mask, which has to be a boolean.
- MOZ_ASSERT(IsBooleanSimdType(ins->getOperand(0)->type()));
-
- // Next inputs are the two vectors of a particular type.
- for (unsigned i = 1; i < 3; i++)
- MOZ_ASSERT(ins->getOperand(i)->type() == ins->typePolicySpecialization());
-
- return true;
-}
-
bool
CallPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
{
@@ -928,13 +820,6 @@ StoreUnboxedScalarPolicy::adjustValueInput(TempAllocator& alloc, MInstruction* i
Scalar::Type writeType, MDefinition* value,
int valueOperand)
{
- // Storing a SIMD value requires a valueOperand that has already been
- // SimdUnboxed. See IonBuilder::inlineSimdStore(()
- if (Scalar::isSimdType(writeType)) {
- MOZ_ASSERT(IsSimdType(value->type()));
- return true;
- }
-
MDefinition* curValue = value;
// First, ensure the value is int32, boolean, double or Value.
// The conversion is based on TypedArrayObjectTemplate::setElementTail.
@@ -1196,9 +1081,6 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(FilterTypeSetPolicy) \
_(InstanceOfPolicy) \
_(PowPolicy) \
- _(SimdAllPolicy) \
- _(SimdSelectPolicy) \
- _(SimdShufflePolicy) \
_(StoreTypedArrayElementStaticPolicy) \
_(StoreTypedArrayHolePolicy) \
_(StoreUnboxedScalarPolicy) \
@@ -1236,7 +1118,6 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2>, IntPolicy<3>>) \
_(Mix4Policy<ObjectPolicy<0>, IntPolicy<1>, TruncateToInt32Policy<2>, TruncateToInt32Policy<3> >) \
_(Mix3Policy<ObjectPolicy<0>, CacheIdPolicy<1>, NoFloatPolicy<2>>) \
- _(Mix4Policy<SimdScalarPolicy<0>, SimdScalarPolicy<1>, SimdScalarPolicy<2>, SimdScalarPolicy<3> >) \
_(MixPolicy<BoxPolicy<0>, ObjectPolicy<1> >) \
_(MixPolicy<ConvertToStringPolicy<0>, ConvertToStringPolicy<1> >) \
_(MixPolicy<ConvertToStringPolicy<0>, ObjectPolicy<1> >) \
@@ -1254,8 +1135,6 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(MixPolicy<ObjectPolicy<0>, StringPolicy<1> >) \
_(MixPolicy<ObjectPolicy<0>, ConvertToStringPolicy<2> >) \
_(MixPolicy<ObjectPolicy<1>, ConvertToStringPolicy<0> >) \
- _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdSameAsReturnedTypePolicy<1> >) \
- _(MixPolicy<SimdSameAsReturnedTypePolicy<0>, SimdScalarPolicy<1> >) \
_(MixPolicy<StringPolicy<0>, IntPolicy<1> >) \
_(MixPolicy<StringPolicy<0>, StringPolicy<1> >) \
_(MixPolicy<BoxPolicy<0>, BoxPolicy<1> >) \
@@ -1265,9 +1144,6 @@ FilterTypeSetPolicy::adjustInputs(TempAllocator& alloc, MInstruction* ins)
_(ObjectPolicy<0>) \
_(ObjectPolicy<1>) \
_(ObjectPolicy<3>) \
- _(SimdPolicy<0>) \
- _(SimdSameAsReturnedTypePolicy<0>) \
- _(SimdScalarPolicy<0>) \
_(StringPolicy<0>)
diff --git a/js/src/jit/TypePolicy.h b/js/src/jit/TypePolicy.h
index f5d8d7a59e..0b9d2b37ef 100644
--- a/js/src/jit/TypePolicy.h
+++ b/js/src/jit/TypePolicy.h
@@ -318,61 +318,6 @@ class ObjectPolicy final : public TypePolicy
// a primitive, we use ValueToNonNullObject.
typedef ObjectPolicy<0> SingleObjectPolicy;
-// Convert an operand to have a type identical to the scalar type of the
-// returned type of the instruction.
-template <unsigned Op>
-class SimdScalarPolicy final : public TypePolicy
-{
- public:
- EMPTY_DATA_;
- static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* def);
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* def) override {
- return staticAdjustInputs(alloc, def);
- }
-};
-
-class SimdAllPolicy final : public TypePolicy
-{
- public:
- SPECIALIZATION_DATA_;
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
-};
-
-template <unsigned Op>
-class SimdPolicy final : public TypePolicy
-{
- public:
- SPECIALIZATION_DATA_;
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
-};
-
-class SimdSelectPolicy final : public TypePolicy
-{
- public:
- SPECIALIZATION_DATA_;
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
-};
-
-class SimdShufflePolicy final : public TypePolicy
-{
- public:
- SPECIALIZATION_DATA_;
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override;
-};
-
-// SIMD value-type policy, use the returned type of the instruction to determine
-// how to unbox its operand.
-template <unsigned Op>
-class SimdSameAsReturnedTypePolicy final : public TypePolicy
-{
- public:
- EMPTY_DATA_;
- static MOZ_MUST_USE bool staticAdjustInputs(TempAllocator& alloc, MInstruction* ins);
- virtual MOZ_MUST_USE bool adjustInputs(TempAllocator& alloc, MInstruction* ins) override {
- return staticAdjustInputs(alloc, ins);
- }
-};
-
template <unsigned Op>
class BoxPolicy final : public TypePolicy
{
diff --git a/js/src/jit/TypedObjectPrediction.cpp b/js/src/jit/TypedObjectPrediction.cpp
index fe968e5eeb..f6d6138bb5 100644
--- a/js/src/jit/TypedObjectPrediction.cpp
+++ b/js/src/jit/TypedObjectPrediction.cpp
@@ -118,7 +118,6 @@ TypedObjectPrediction::ofArrayKind() const
switch (kind()) {
case type::Scalar:
case type::Reference:
- case type::Simd:
case type::Struct:
return false;
@@ -206,12 +205,6 @@ TypedObjectPrediction::referenceType() const
return extractType<ReferenceTypeDescr>();
}
-SimdType
-TypedObjectPrediction::simdType() const
-{
- return descr().as<SimdTypeDescr>().type();
-}
-
bool
TypedObjectPrediction::hasKnownArrayLength(int32_t* length) const
{
diff --git a/js/src/jit/TypedObjectPrediction.h b/js/src/jit/TypedObjectPrediction.h
index 3a042b7712..9bf9ae6e8a 100644
--- a/js/src/jit/TypedObjectPrediction.h
+++ b/js/src/jit/TypedObjectPrediction.h
@@ -163,11 +163,10 @@ class TypedObjectPrediction {
//////////////////////////////////////////////////////////////////////
// Simple operations
//
- // Only valid when |kind()| is Scalar, Reference, or Simd (as appropriate).
+ // Only valid when |kind()| is Scalar or Reference (as appropriate).
ScalarTypeDescr::Type scalarType() const;
ReferenceTypeDescr::Type referenceType() const;
- SimdType simdType() const;
///////////////////////////////////////////////////////////////////////////
// Queries valid only for arrays.
diff --git a/js/src/jit/arm/Lowering-arm.h b/js/src/jit/arm/Lowering-arm.h
index 137ab32d2a..9b8bbf5de5 100644
--- a/js/src/jit/arm/Lowering-arm.h
+++ b/js/src/jit/arm/Lowering-arm.h
@@ -63,17 +63,6 @@ class LIRGeneratorARM : public LIRGeneratorShared
void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
MDefinition* lhs, MDefinition* rhs);
- void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
- void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
-
void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs);
void lowerTruncateDToInt32(MTruncateToInt32* ins);
diff --git a/js/src/jit/arm/MacroAssembler-arm.h b/js/src/jit/arm/MacroAssembler-arm.h
index aaf92539a3..cb200f0609 100644
--- a/js/src/jit/arm/MacroAssembler-arm.h
+++ b/js/src/jit/arm/MacroAssembler-arm.h
@@ -1046,34 +1046,6 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
void loadPrivate(const Address& address, Register dest);
- void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
- void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
diff --git a/js/src/jit/arm64/Lowering-arm64.h b/js/src/jit/arm64/Lowering-arm64.h
index d9c3c49388..5f89accce1 100644
--- a/js/src/jit/arm64/Lowering-arm64.h
+++ b/js/src/jit/arm64/Lowering-arm64.h
@@ -63,18 +63,6 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
MDefinition* lhs, MDefinition* rhs);
- void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
-
- void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
-
void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs);
void lowerTruncateDToInt32(MTruncateToInt32* ins);
diff --git a/js/src/jit/arm64/MacroAssembler-arm64.h b/js/src/jit/arm64/MacroAssembler-arm64.h
index d91db1ecbe..05284f03fc 100644
--- a/js/src/jit/arm64/MacroAssembler-arm64.h
+++ b/js/src/jit/arm64/MacroAssembler-arm64.h
@@ -907,39 +907,6 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
storePtr(src.reg, address);
}
- // SIMD.
- void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Int(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Int(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
-
- void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Float(FloatRegister src, const Address& addr) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Float(FloatRegister src, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister dest, const Address& addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister dest, const BaseIndex& addr) { MOZ_CRASH("NYI"); }
-
// StackPointer manipulation.
template <typename T> void addToStackPtr(T t);
template <typename T> void addStackPtrTo(T t);
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.h b/js/src/jit/mips-shared/Lowering-mips-shared.h
index 577baebc22..ebcaa186f4 100644
--- a/js/src/jit/mips-shared/Lowering-mips-shared.h
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.h
@@ -50,17 +50,6 @@ class LIRGeneratorMIPSShared : public LIRGeneratorShared
void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir,
MDefinition* lhs, MDefinition* rhs);
- void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
- void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs)
- {
- return lowerForFPU(ins, mir, lhs, rhs);
- }
-
void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs);
void lowerDivI(MDiv* div);
diff --git a/js/src/jit/mips32/MacroAssembler-mips32.h b/js/src/jit/mips32/MacroAssembler-mips32.h
index 5ae77384c5..c51b9277c4 100644
--- a/js/src/jit/mips32/MacroAssembler-mips32.h
+++ b/js/src/jit/mips32/MacroAssembler-mips32.h
@@ -866,34 +866,6 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
void loadPrivate(const Address& address, Register dest);
- void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
- void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
diff --git a/js/src/jit/mips64/MacroAssembler-mips64.h b/js/src/jit/mips64/MacroAssembler-mips64.h
index 3a84b7c0c9..8594a41a1e 100644
--- a/js/src/jit/mips64/MacroAssembler-mips64.h
+++ b/js/src/jit/mips64/MacroAssembler-mips64.h
@@ -879,34 +879,6 @@ class MacroAssemblerMIPS64Compat : public MacroAssemblerMIPS64
void loadPrivate(const Address& address, Register dest);
- void loadInt32x1(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x1(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x2(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadInt32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x1(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x2(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const Address& dest) { MOZ_CRASH("NYI"); }
- void storeInt32x3(FloatRegister src, const BaseIndex& dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Int(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Int(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
- void loadFloat32x3(const Address& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadFloat32x3(const BaseIndex& src, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadAlignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeAlignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const Address& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void loadUnalignedSimd128Float(const BaseIndex& addr, FloatRegister dest) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, Address addr) { MOZ_CRASH("NYI"); }
- void storeUnalignedSimd128Float(FloatRegister src, BaseIndex addr) { MOZ_CRASH("NYI"); }
-
void loadDouble(const Address& addr, FloatRegister dest);
void loadDouble(const BaseIndex& src, FloatRegister dest);
void loadUnalignedDouble(const wasm::MemoryAccessDesc& access, const BaseIndex& src,
diff --git a/js/src/jit/none/Lowering-none.h b/js/src/jit/none/Lowering-none.h
index 6a52ac36d8..64f85e4573 100644
--- a/js/src/jit/none/Lowering-none.h
+++ b/js/src/jit/none/Lowering-none.h
@@ -45,14 +45,6 @@ class LIRGeneratorNone : public LIRGeneratorShared
void lowerForMulInt64(LMulI64*, MMul*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
template <typename T>
void lowerForShiftInt64(T, MDefinition*, MDefinition*, MDefinition* v = nullptr) { MOZ_CRASH(); }
- void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs) {
- MOZ_CRASH();
- }
- void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs) {
- MOZ_CRASH();
- }
void lowerForBitAndAndBranch(LBitAndAndBranch*, MInstruction*,
MDefinition*, MDefinition*) {
MOZ_CRASH();
diff --git a/js/src/jit/none/MacroAssembler-none.h b/js/src/jit/none/MacroAssembler-none.h
index f71439a6cc..bb32adcdc1 100644
--- a/js/src/jit/none/MacroAssembler-none.h
+++ b/js/src/jit/none/MacroAssembler-none.h
@@ -261,19 +261,10 @@ class MacroAssemblerNone : public Assembler
template <typename T, typename S> void move64(T, S) { MOZ_CRASH(); }
template <typename T> CodeOffset movWithPatch(T, Register) { MOZ_CRASH(); }
- template <typename T> void loadInt32x1(T, FloatRegister dest) { MOZ_CRASH(); }
- template <typename T> void loadInt32x2(T, FloatRegister dest) { MOZ_CRASH(); }
- template <typename T> void loadInt32x3(T, FloatRegister dest) { MOZ_CRASH(); }
- template <typename T> void loadFloat32x3(T, FloatRegister dest) { MOZ_CRASH(); }
-
template <typename T> void loadPtr(T, Register) { MOZ_CRASH(); }
template <typename T> void load32(T, Register) { MOZ_CRASH(); }
template <typename T> void loadFloat32(T, FloatRegister) { MOZ_CRASH(); }
template <typename T> void loadDouble(T, FloatRegister) { MOZ_CRASH(); }
- template <typename T> void loadAlignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
- template <typename T> void loadUnalignedSimd128Int(T, FloatRegister) { MOZ_CRASH(); }
- template <typename T> void loadAlignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
- template <typename T> void loadUnalignedSimd128Float(T, FloatRegister) { MOZ_CRASH(); }
template <typename T> void loadPrivate(T, Register) { MOZ_CRASH(); }
template <typename T> void load8SignExtend(T, Register) { MOZ_CRASH(); }
template <typename T> void load8ZeroExtend(T, Register) { MOZ_CRASH(); }
@@ -286,16 +277,8 @@ class MacroAssemblerNone : public Assembler
template <typename T, typename S> void store32_NoSecondScratch(T, S) { MOZ_CRASH(); }
template <typename T, typename S> void storeFloat32(T, S) { MOZ_CRASH(); }
template <typename T, typename S> void storeDouble(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeAlignedSimd128Int(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeUnalignedSimd128Int(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeAlignedSimd128Float(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeUnalignedSimd128Float(T, S) { MOZ_CRASH(); }
template <typename T, typename S> void store8(T, S) { MOZ_CRASH(); }
template <typename T, typename S> void store16(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeInt32x1(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeInt32x2(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeInt32x3(T, S) { MOZ_CRASH(); }
- template <typename T, typename S> void storeFloat32x3(T, S) { MOZ_CRASH(); }
template <typename T, typename S> void store64(T, S) { MOZ_CRASH(); }
template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index f18cbb9e1d..308d9f1a12 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -705,7 +705,6 @@ class MemoryAccessDesc
uint32_t offset_;
uint32_t align_;
Scalar::Type type_;
- unsigned numSimdElems_;
jit::MemoryBarrierBits barrierBefore_;
jit::MemoryBarrierBits barrierAfter_;
mozilla::Maybe<wasm::TrapOffset> trapOffset_;
@@ -719,32 +718,23 @@ class MemoryAccessDesc
: offset_(offset),
align_(align),
type_(type),
- numSimdElems_(numSimdElems),
barrierBefore_(barrierBefore),
barrierAfter_(barrierAfter),
trapOffset_(trapOffset)
{
- MOZ_ASSERT(Scalar::isSimdType(type) == (numSimdElems > 0));
MOZ_ASSERT(mozilla::IsPowerOfTwo(align));
- MOZ_ASSERT_IF(isSimd(), hasTrap());
MOZ_ASSERT_IF(isAtomic(), hasTrap());
}
uint32_t offset() const { return offset_; }
uint32_t align() const { return align_; }
Scalar::Type type() const { return type_; }
- unsigned byteSize() const {
- return Scalar::isSimdType(type())
- ? Scalar::scalarByteSize(type()) * numSimdElems()
- : Scalar::byteSize(type());
- }
- unsigned numSimdElems() const { MOZ_ASSERT(isSimd()); return numSimdElems_; }
+ unsigned byteSize() const { return Scalar::byteSize(type()); }
jit::MemoryBarrierBits barrierBefore() const { return barrierBefore_; }
jit::MemoryBarrierBits barrierAfter() const { return barrierAfter_; }
bool hasTrap() const { return !!trapOffset_; }
TrapOffset trapOffset() const { return *trapOffset_; }
bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
- bool isSimd() const { return Scalar::isSimdType(type_); }
bool isPlainAsmJS() const { return !hasTrap(); }
void clearOffset() { offset_ = 0; }
diff --git a/js/src/jit/shared/CodeGenerator-shared-inl.h b/js/src/jit/shared/CodeGenerator-shared-inl.h
index 00ca8b3d63..7325449ed1 100644
--- a/js/src/jit/shared/CodeGenerator-shared-inl.h
+++ b/js/src/jit/shared/CodeGenerator-shared-inl.h
@@ -391,10 +391,6 @@ CodeGeneratorShared::verifyHeapAccessDisassembly(uint32_t begin, uint32_t end, b
break;
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
op = OtherOperand(ToFloatRegister(alloc).encoding());
break;
case Scalar::Uint8Clamped:
diff --git a/js/src/jit/shared/CodeGenerator-shared.cpp b/js/src/jit/shared/CodeGenerator-shared.cpp
index 3701f24872..cdacf9ead5 100644
--- a/js/src/jit/shared/CodeGenerator-shared.cpp
+++ b/js/src/jit/shared/CodeGenerator-shared.cpp
@@ -85,17 +85,10 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph, Mac
MOZ_ASSERT(graph->argumentSlotCount() == 0);
frameDepth_ += gen->wasmMaxStackArgBytes();
- if (gen->usesSimd()) {
- // If the function uses any SIMD then we may need to insert padding
- // so that local slots are aligned for SIMD.
- frameInitialAdjustment_ = ComputeByteAlignment(sizeof(wasm::Frame),
- WasmStackAlignment);
- frameDepth_ += frameInitialAdjustment_;
- // Keep the stack aligned. Some SIMD sequences build values on the
- // stack and need the stack aligned.
- frameDepth_ += ComputeByteAlignment(sizeof(wasm::Frame) + frameDepth_,
- WasmStackAlignment);
- } else if (gen->performsCall()) {
+ static_assert(!SupportsSimd, "we need padding so that local slots are SIMD-aligned and "
+ "the stack must be kept SIMD-aligned too.");
+
+ if (gen->performsCall()) {
// An MWasmCall does not align the stack pointer at calls sites but
// instead relies on the a priori stack adjustment. This must be the
// last adjustment of frameDepth_.
diff --git a/js/src/jit/shared/LIR-shared.h b/js/src/jit/shared/LIR-shared.h
index 69782f8061..437c3fa7f3 100644
--- a/js/src/jit/shared/LIR-shared.h
+++ b/js/src/jit/shared/LIR-shared.h
@@ -152,634 +152,6 @@ class LMoveGroup : public LInstructionHelper<0, 0, 0>
}
};
-
-// Constructs a SIMD object (value type) based on the MIRType of its input.
-class LSimdBox : public LInstructionHelper<1, 1, 1>
-{
- public:
- LIR_HEADER(SimdBox)
-
- explicit LSimdBox(const LAllocation& simd, const LDefinition& temp)
- {
- setOperand(0, simd);
- setTemp(0, temp);
- }
-
- const LDefinition* temp() {
- return getTemp(0);
- }
-
- MSimdBox* mir() const {
- return mir_->toSimdBox();
- }
-};
-
-class LSimdUnbox : public LInstructionHelper<1, 1, 1>
-{
- public:
- LIR_HEADER(SimdUnbox)
-
- LSimdUnbox(const LAllocation& obj, const LDefinition& temp)
- {
- setOperand(0, obj);
- setTemp(0, temp);
- }
-
- const LDefinition* temp() {
- return getTemp(0);
- }
-
- MSimdUnbox* mir() const {
- return mir_->toSimdUnbox();
- }
-};
-
-// Constructs a SIMD value with 16 equal components (int8x16).
-class LSimdSplatX16 : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdSplatX16)
- explicit LSimdSplatX16(const LAllocation& v)
- {
- setOperand(0, v);
- }
-
- MSimdSplat* mir() const {
- return mir_->toSimdSplat();
- }
-};
-
-// Constructs a SIMD value with 8 equal components (int16x8).
-class LSimdSplatX8 : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdSplatX8)
- explicit LSimdSplatX8(const LAllocation& v)
- {
- setOperand(0, v);
- }
-
- MSimdSplat* mir() const {
- return mir_->toSimdSplat();
- }
-};
-
-// Constructs a SIMD value with 4 equal components (e.g. int32x4, float32x4).
-class LSimdSplatX4 : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdSplatX4)
- explicit LSimdSplatX4(const LAllocation& v)
- {
- setOperand(0, v);
- }
-
- MSimdSplat* mir() const {
- return mir_->toSimdSplat();
- }
-};
-
-// Reinterpret the bits of a SIMD value with a different type.
-class LSimdReinterpretCast : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdReinterpretCast)
- explicit LSimdReinterpretCast(const LAllocation& v)
- {
- setOperand(0, v);
- }
-
- MSimdReinterpretCast* mir() const {
- return mir_->toSimdReinterpretCast();
- }
-};
-
-class LSimdExtractElementBase : public LInstructionHelper<1, 1, 0>
-{
- protected:
- explicit LSimdExtractElementBase(const LAllocation& base) {
- setOperand(0, base);
- }
-
- public:
- const LAllocation* getBase() {
- return getOperand(0);
- }
- MSimdExtractElement* mir() const {
- return mir_->toSimdExtractElement();
- }
-};
-
-// Extracts an element from a given SIMD bool32x4 lane.
-class LSimdExtractElementB : public LSimdExtractElementBase
-{
- public:
- LIR_HEADER(SimdExtractElementB);
- explicit LSimdExtractElementB(const LAllocation& base)
- : LSimdExtractElementBase(base)
- {}
-};
-
-// Extracts an element from a given SIMD int32x4 lane.
-class LSimdExtractElementI : public LSimdExtractElementBase
-{
- public:
- LIR_HEADER(SimdExtractElementI);
- explicit LSimdExtractElementI(const LAllocation& base)
- : LSimdExtractElementBase(base)
- {}
-};
-
-// Extracts an element from a given SIMD float32x4 lane.
-class LSimdExtractElementF : public LSimdExtractElementBase
-{
- public:
- LIR_HEADER(SimdExtractElementF);
- explicit LSimdExtractElementF(const LAllocation& base)
- : LSimdExtractElementBase(base)
- {}
-};
-
-// Extracts an element from an Uint32x4 SIMD vector, converts to double.
-class LSimdExtractElementU2D : public LInstructionHelper<1, 1, 1>
-{
- public:
- LIR_HEADER(SimdExtractElementU2D);
- explicit LSimdExtractElementU2D(const LAllocation& base, const LDefinition& temp) {
- setOperand(0, base);
- setTemp(0, temp);
- }
- MSimdExtractElement* mir() const {
- return mir_->toSimdExtractElement();
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
-};
-
-
-class LSimdInsertElementBase : public LInstructionHelper<1, 2, 0>
-{
- protected:
- LSimdInsertElementBase(const LAllocation& vec, const LAllocation& val)
- {
- setOperand(0, vec);
- setOperand(1, val);
- }
-
- public:
- const LAllocation* vector() {
- return getOperand(0);
- }
- const LAllocation* value() {
- return getOperand(1);
- }
- unsigned lane() const {
- return mir_->toSimdInsertElement()->lane();
- }
- unsigned length() const {
- return SimdTypeToLength(mir_->toSimdInsertElement()->type());
- }
-};
-
-// Replace an element from a given SIMD integer or boolean lane with a given value.
-// The value inserted into a boolean lane should be 0 or -1.
-class LSimdInsertElementI : public LSimdInsertElementBase
-{
- public:
- LIR_HEADER(SimdInsertElementI);
- LSimdInsertElementI(const LAllocation& vec, const LAllocation& val)
- : LSimdInsertElementBase(vec, val)
- {}
-};
-
-// Replace an element from a given SIMD float32x4 lane with a given value.
-class LSimdInsertElementF : public LSimdInsertElementBase
-{
- public:
- LIR_HEADER(SimdInsertElementF);
- LSimdInsertElementF(const LAllocation& vec, const LAllocation& val)
- : LSimdInsertElementBase(vec, val)
- {}
-};
-
-// Base class for both int32x4 and float32x4 shuffle instructions.
-class LSimdSwizzleBase : public LInstructionHelper<1, 1, 1>
-{
- public:
- explicit LSimdSwizzleBase(const LAllocation& base)
- {
- setOperand(0, base);
- }
-
- const LAllocation* getBase() {
- return getOperand(0);
- }
-
- unsigned numLanes() const { return mir_->toSimdSwizzle()->numLanes(); }
- uint32_t lane(unsigned i) const { return mir_->toSimdSwizzle()->lane(i); }
-
- bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
- return mir_->toSimdSwizzle()->lanesMatch(x, y, z, w);
- }
-};
-
-// Shuffles a int32x4 into another int32x4 vector.
-class LSimdSwizzleI : public LSimdSwizzleBase
-{
- public:
- LIR_HEADER(SimdSwizzleI);
- explicit LSimdSwizzleI(const LAllocation& base) : LSimdSwizzleBase(base)
- {}
-};
-// Shuffles a float32x4 into another float32x4 vector.
-class LSimdSwizzleF : public LSimdSwizzleBase
-{
- public:
- LIR_HEADER(SimdSwizzleF);
- explicit LSimdSwizzleF(const LAllocation& base) : LSimdSwizzleBase(base)
- {}
-};
-
-class LSimdGeneralShuffleBase : public LVariadicInstruction<1, 1>
-{
- public:
- explicit LSimdGeneralShuffleBase(const LDefinition& temp) {
- setTemp(0, temp);
- }
- const LAllocation* vector(unsigned i) {
- MOZ_ASSERT(i < mir()->numVectors());
- return getOperand(i);
- }
- const LAllocation* lane(unsigned i) {
- MOZ_ASSERT(i < mir()->numLanes());
- return getOperand(mir()->numVectors() + i);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
- MSimdGeneralShuffle* mir() const {
- return mir_->toSimdGeneralShuffle();
- }
-};
-
-class LSimdGeneralShuffleI : public LSimdGeneralShuffleBase
-{
- public:
- LIR_HEADER(SimdGeneralShuffleI);
- explicit LSimdGeneralShuffleI(const LDefinition& temp)
- : LSimdGeneralShuffleBase(temp)
- {}
-};
-
-class LSimdGeneralShuffleF : public LSimdGeneralShuffleBase
-{
- public:
- LIR_HEADER(SimdGeneralShuffleF);
- explicit LSimdGeneralShuffleF(const LDefinition& temp)
- : LSimdGeneralShuffleBase(temp)
- {}
-};
-
-// Base class for both int32x4 and float32x4 shuffle instructions.
-class LSimdShuffleX4 : public LInstructionHelper<1, 2, 1>
-{
- public:
- LIR_HEADER(SimdShuffleX4);
- LSimdShuffleX4()
- {}
-
- const LAllocation* lhs() {
- return getOperand(0);
- }
- const LAllocation* rhs() {
- return getOperand(1);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
-
- uint32_t lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
-
- bool lanesMatch(uint32_t x, uint32_t y, uint32_t z, uint32_t w) const {
- return mir_->toSimdShuffle()->lanesMatch(x, y, z, w);
- }
-};
-
-// Remaining shuffles (8x16, 16x8).
-class LSimdShuffle : public LInstructionHelper<1, 2, 1>
-{
- public:
- LIR_HEADER(SimdShuffle);
- LSimdShuffle()
- {}
-
- const LAllocation* lhs() {
- return getOperand(0);
- }
- const LAllocation* rhs() {
- return getOperand(1);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
-
- unsigned numLanes() const { return mir_->toSimdShuffle()->numLanes(); }
- unsigned lane(unsigned i) const { return mir_->toSimdShuffle()->lane(i); }
-};
-
-// Binary SIMD comparison operation between two SIMD operands
-class LSimdBinaryComp: public LInstructionHelper<1, 2, 0>
-{
- protected:
- LSimdBinaryComp() {}
-
-public:
- const LAllocation* lhs() {
- return getOperand(0);
- }
- const LAllocation* rhs() {
- return getOperand(1);
- }
- MSimdBinaryComp::Operation operation() const {
- return mir_->toSimdBinaryComp()->operation();
- }
- const char* extraName() const {
- return MSimdBinaryComp::OperationName(operation());
- }
-};
-
-// Binary SIMD comparison operation between two Int8x16 operands.
-class LSimdBinaryCompIx16 : public LSimdBinaryComp
-{
- public:
- LIR_HEADER(SimdBinaryCompIx16);
- LSimdBinaryCompIx16() : LSimdBinaryComp() {}
-};
-
-// Binary SIMD comparison operation between two Int16x8 operands.
-class LSimdBinaryCompIx8 : public LSimdBinaryComp
-{
- public:
- LIR_HEADER(SimdBinaryCompIx8);
- LSimdBinaryCompIx8() : LSimdBinaryComp() {}
-};
-
-// Binary SIMD comparison operation between two Int32x4 operands.
-class LSimdBinaryCompIx4 : public LSimdBinaryComp
-{
- public:
- LIR_HEADER(SimdBinaryCompIx4);
- LSimdBinaryCompIx4() : LSimdBinaryComp() {}
-};
-
-// Binary SIMD comparison operation between two Float32x4 operands
-class LSimdBinaryCompFx4 : public LSimdBinaryComp
-{
- public:
- LIR_HEADER(SimdBinaryCompFx4);
- LSimdBinaryCompFx4() : LSimdBinaryComp() {}
-};
-
-// Binary SIMD arithmetic operation between two SIMD operands
-class LSimdBinaryArith : public LInstructionHelper<1, 2, 1>
-{
- public:
- LSimdBinaryArith() {}
-
- const LAllocation* lhs() {
- return this->getOperand(0);
- }
- const LAllocation* rhs() {
- return this->getOperand(1);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
-
- MSimdBinaryArith::Operation operation() const {
- return this->mir_->toSimdBinaryArith()->operation();
- }
- const char* extraName() const {
- return MSimdBinaryArith::OperationName(operation());
- }
-};
-
-// Binary SIMD arithmetic operation between two Int8x16 operands
-class LSimdBinaryArithIx16 : public LSimdBinaryArith
-{
- public:
- LIR_HEADER(SimdBinaryArithIx16);
- LSimdBinaryArithIx16() : LSimdBinaryArith() {}
-};
-
-// Binary SIMD arithmetic operation between two Int16x8 operands
-class LSimdBinaryArithIx8 : public LSimdBinaryArith
-{
- public:
- LIR_HEADER(SimdBinaryArithIx8);
- LSimdBinaryArithIx8() : LSimdBinaryArith() {}
-};
-
-// Binary SIMD arithmetic operation between two Int32x4 operands
-class LSimdBinaryArithIx4 : public LSimdBinaryArith
-{
- public:
- LIR_HEADER(SimdBinaryArithIx4);
- LSimdBinaryArithIx4() : LSimdBinaryArith() {}
-};
-
-// Binary SIMD arithmetic operation between two Float32x4 operands
-class LSimdBinaryArithFx4 : public LSimdBinaryArith
-{
- public:
- LIR_HEADER(SimdBinaryArithFx4);
- LSimdBinaryArithFx4() : LSimdBinaryArith() {}
-};
-
-// Binary SIMD saturating arithmetic operation between two SIMD operands
-class LSimdBinarySaturating : public LInstructionHelper<1, 2, 0>
-{
- public:
- LIR_HEADER(SimdBinarySaturating);
- LSimdBinarySaturating() {}
-
- const LAllocation* lhs() {
- return this->getOperand(0);
- }
- const LAllocation* rhs() {
- return this->getOperand(1);
- }
-
- MSimdBinarySaturating::Operation operation() const {
- return this->mir_->toSimdBinarySaturating()->operation();
- }
- SimdSign signedness() const {
- return this->mir_->toSimdBinarySaturating()->signedness();
- }
- MIRType type() const {
- return mir_->type();
- }
- const char* extraName() const {
- return MSimdBinarySaturating::OperationName(operation());
- }
-};
-
-// Unary SIMD arithmetic operation on a SIMD operand
-class LSimdUnaryArith : public LInstructionHelper<1, 1, 0>
-{
- public:
- explicit LSimdUnaryArith(const LAllocation& in) {
- setOperand(0, in);
- }
- MSimdUnaryArith::Operation operation() const {
- return mir_->toSimdUnaryArith()->operation();
- }
-};
-
-// Unary SIMD arithmetic operation on a Int8x16 operand
-class LSimdUnaryArithIx16 : public LSimdUnaryArith
-{
- public:
- LIR_HEADER(SimdUnaryArithIx16);
- explicit LSimdUnaryArithIx16(const LAllocation& in) : LSimdUnaryArith(in) {}
-};
-
-// Unary SIMD arithmetic operation on a Int16x8 operand
-class LSimdUnaryArithIx8 : public LSimdUnaryArith
-{
- public:
- LIR_HEADER(SimdUnaryArithIx8);
- explicit LSimdUnaryArithIx8(const LAllocation& in) : LSimdUnaryArith(in) {}
-};
-
-// Unary SIMD arithmetic operation on a Int32x4 operand
-class LSimdUnaryArithIx4 : public LSimdUnaryArith
-{
- public:
- LIR_HEADER(SimdUnaryArithIx4);
- explicit LSimdUnaryArithIx4(const LAllocation& in) : LSimdUnaryArith(in) {}
-};
-
-// Unary SIMD arithmetic operation on a Float32x4 operand
-class LSimdUnaryArithFx4 : public LSimdUnaryArith
-{
- public:
- LIR_HEADER(SimdUnaryArithFx4);
- explicit LSimdUnaryArithFx4(const LAllocation& in) : LSimdUnaryArith(in) {}
-};
-
-// Binary SIMD bitwise operation between two 128-bit operands.
-class LSimdBinaryBitwise : public LInstructionHelper<1, 2, 0>
-{
- public:
- LIR_HEADER(SimdBinaryBitwise);
- const LAllocation* lhs() {
- return getOperand(0);
- }
- const LAllocation* rhs() {
- return getOperand(1);
- }
- MSimdBinaryBitwise::Operation operation() const {
- return mir_->toSimdBinaryBitwise()->operation();
- }
- const char* extraName() const {
- return MSimdBinaryBitwise::OperationName(operation());
- }
- MIRType type() const {
- return mir_->type();
- }
-};
-
-// Shift a SIMD vector by a scalar amount.
-// The temp register is only required if the shift amount is a dynamical
-// value. If it is a constant, use a BogusTemp instead.
-class LSimdShift : public LInstructionHelper<1, 2, 1>
-{
- public:
- LIR_HEADER(SimdShift)
- LSimdShift(const LAllocation& vec, const LAllocation& val, const LDefinition& temp) {
- setOperand(0, vec);
- setOperand(1, val);
- setTemp(0, temp);
- }
- const LAllocation* vector() {
- return getOperand(0);
- }
- const LAllocation* value() {
- return getOperand(1);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
- MSimdShift::Operation operation() const {
- return mir_->toSimdShift()->operation();
- }
- const char* extraName() const {
- return MSimdShift::OperationName(operation());
- }
- MSimdShift* mir() const {
- return mir_->toSimdShift();
- }
- MIRType type() const {
- return mir_->type();
- }
-};
-
-// SIMD selection of lanes from two int32x4 or float32x4 arguments based on a
-// int32x4 argument.
-class LSimdSelect : public LInstructionHelper<1, 3, 1>
-{
- public:
- LIR_HEADER(SimdSelect);
- const LAllocation* mask() {
- return getOperand(0);
- }
- const LAllocation* lhs() {
- return getOperand(1);
- }
- const LAllocation* rhs() {
- return getOperand(2);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
- MSimdSelect* mir() const {
- return mir_->toSimdSelect();
- }
-};
-
-class LSimdAnyTrue : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdAnyTrue)
- explicit LSimdAnyTrue(const LAllocation& input) {
- setOperand(0, input);
- }
- const LAllocation* vector() {
- return getOperand(0);
- }
- MSimdAnyTrue* mir() const {
- return mir_->toSimdAnyTrue();
- }
-};
-
-class LSimdAllTrue : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(SimdAllTrue)
- explicit LSimdAllTrue(const LAllocation& input) {
- setOperand(0, input);
- }
- const LAllocation* vector() {
- return getOperand(0);
- }
- MSimdAllTrue* mir() const {
- return mir_->toSimdAllTrue();
- }
-};
-
-
// Constant 32-bit integer.
class LInteger : public LInstructionHelper<1, 0, 0>
{
@@ -885,27 +257,6 @@ class LFloat32 : public LInstructionHelper<1, 0, 0>
}
};
-// Constant 128-bit SIMD integer vector (8x16, 16x8, 32x4).
-// Also used for Bool32x4, Bool16x8, etc.
-class LSimd128Int : public LInstructionHelper<1, 0, 0>
-{
- public:
- LIR_HEADER(Simd128Int);
-
- explicit LSimd128Int() {}
- const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
-};
-
-// Constant 128-bit SIMD floating point vector (32x4, 64x2).
-class LSimd128Float : public LInstructionHelper<1, 0, 0>
-{
- public:
- LIR_HEADER(Simd128Float);
-
- explicit LSimd128Float() {}
- const SimdConstant& getValue() const { return mir_->toSimdConstant()->value(); }
-};
-
// A constant Value.
class LValue : public LInstructionHelper<BOX_PIECES, 0, 0>
{
@@ -4506,54 +3857,6 @@ class LValueToObjectOrNull : public LInstructionHelper<1, BOX_PIECES, 0>
}
};
-class LInt32x4ToFloat32x4 : public LInstructionHelper<1, 1, 0>
-{
- public:
- LIR_HEADER(Int32x4ToFloat32x4);
- explicit LInt32x4ToFloat32x4(const LAllocation& input) {
- setOperand(0, input);
- }
-};
-
-class LFloat32x4ToInt32x4 : public LInstructionHelper<1, 1, 1>
-{
- public:
- LIR_HEADER(Float32x4ToInt32x4);
- explicit LFloat32x4ToInt32x4(const LAllocation& input, const LDefinition& temp) {
- setOperand(0, input);
- setTemp(0, temp);
- }
- const LDefinition* temp() {
- return getTemp(0);
- }
- const MSimdConvert* mir() const {
- return mir_->toSimdConvert();
- }
-};
-
-// Float32x4 to Uint32x4 needs one GPR temp and one FloatReg temp.
-class LFloat32x4ToUint32x4 : public LInstructionHelper<1, 1, 2>
-{
- public:
- LIR_HEADER(Float32x4ToUint32x4);
- explicit LFloat32x4ToUint32x4(const LAllocation& input, const LDefinition& tempR,
- const LDefinition& tempF)
- {
- setOperand(0, input);
- setTemp(0, tempR);
- setTemp(1, tempF);
- }
- const LDefinition* tempR() {
- return getTemp(0);
- }
- const LDefinition* tempF() {
- return getTemp(1);
- }
- const MSimdConvert* mir() const {
- return mir_->toSimdConvert();
- }
-};
-
// Double raised to a half power.
class LPowHalfD : public LInstructionHelper<1, 1, 0>
{
diff --git a/js/src/jit/shared/LOpcodes-shared.h b/js/src/jit/shared/LOpcodes-shared.h
index 396765fbd0..10c6be0b36 100644
--- a/js/src/jit/shared/LOpcodes-shared.h
+++ b/js/src/jit/shared/LOpcodes-shared.h
@@ -17,44 +17,6 @@
_(Pointer) \
_(Double) \
_(Float32) \
- _(SimdBox) \
- _(SimdUnbox) \
- _(SimdSplatX16) \
- _(SimdSplatX8) \
- _(SimdSplatX4) \
- _(Simd128Int) \
- _(Simd128Float) \
- _(SimdAllTrue) \
- _(SimdAnyTrue) \
- _(SimdReinterpretCast) \
- _(SimdExtractElementI) \
- _(SimdExtractElementU2D) \
- _(SimdExtractElementB) \
- _(SimdExtractElementF) \
- _(SimdInsertElementI) \
- _(SimdInsertElementF) \
- _(SimdGeneralShuffleI) \
- _(SimdGeneralShuffleF) \
- _(SimdSwizzleI) \
- _(SimdSwizzleF) \
- _(SimdShuffle) \
- _(SimdShuffleX4) \
- _(SimdUnaryArithIx16) \
- _(SimdUnaryArithIx8) \
- _(SimdUnaryArithIx4) \
- _(SimdUnaryArithFx4) \
- _(SimdBinaryCompIx16) \
- _(SimdBinaryCompIx8) \
- _(SimdBinaryCompIx4) \
- _(SimdBinaryCompFx4) \
- _(SimdBinaryArithIx16) \
- _(SimdBinaryArithIx8) \
- _(SimdBinaryArithIx4) \
- _(SimdBinaryArithFx4) \
- _(SimdBinarySaturating) \
- _(SimdBinaryBitwise) \
- _(SimdShift) \
- _(SimdSelect) \
_(Value) \
_(CloneLiteral) \
_(Parameter) \
@@ -215,9 +177,6 @@
_(DoubleToString) \
_(ValueToString) \
_(ValueToObjectOrNull) \
- _(Int32x4ToFloat32x4) \
- _(Float32x4ToInt32x4) \
- _(Float32x4ToUint32x4) \
_(Start) \
_(NaNToZero) \
_(OsrEntry) \
diff --git a/js/src/jit/shared/Lowering-shared-inl.h b/js/src/jit/shared/Lowering-shared-inl.h
index cdbba9ef84..75869595b5 100644
--- a/js/src/jit/shared/Lowering-shared-inl.h
+++ b/js/src/jit/shared/Lowering-shared-inl.h
@@ -333,9 +333,6 @@ IsCompatibleLIRCoercion(MIRType to, MIRType from)
(from == MIRType::Int32 || from == MIRType::Boolean)) {
return true;
}
- // SIMD types can be coerced with from*Bits operators.
- if (IsSimdType(to) && IsSimdType(from))
- return true;
return false;
}
diff --git a/js/src/jit/shared/Lowering-shared.h b/js/src/jit/shared/Lowering-shared.h
index 64a32bd1f2..c6b0d8e3c2 100644
--- a/js/src/jit/shared/Lowering-shared.h
+++ b/js/src/jit/shared/Lowering-shared.h
@@ -271,22 +271,6 @@ class LIRGeneratorShared : public MDefinitionVisitor
static bool allowStaticTypedArrayAccesses() {
return false;
}
-
- // Provide NYI default implementations of the SIMD visitor functions.
- // Many targets don't implement SIMD at all, and we don't want to duplicate
- // these stubs in the specific sub-classes.
- // Some SIMD visitors are implemented in LIRGenerator in Lowering.cpp. These
- // shared implementations are not included here.
- void visitSimdInsertElement(MSimdInsertElement*) override { MOZ_CRASH("NYI"); }
- void visitSimdExtractElement(MSimdExtractElement*) override { MOZ_CRASH("NYI"); }
- void visitSimdBinaryArith(MSimdBinaryArith*) override { MOZ_CRASH("NYI"); }
- void visitSimdSelect(MSimdSelect*) override { MOZ_CRASH("NYI"); }
- void visitSimdSplat(MSimdSplat*) override { MOZ_CRASH("NYI"); }
- void visitSimdValueX4(MSimdValueX4*) override { MOZ_CRASH("NYI"); }
- void visitSimdBinarySaturating(MSimdBinarySaturating*) override { MOZ_CRASH("NYI"); }
- void visitSimdSwizzle(MSimdSwizzle*) override { MOZ_CRASH("NYI"); }
- void visitSimdShuffle(MSimdShuffle*) override { MOZ_CRASH("NYI"); }
- void visitSimdGeneralShuffle(MSimdGeneralShuffle*) override { MOZ_CRASH("NYI"); }
};
} // namespace jit
diff --git a/js/src/jit/x64/Assembler-x64.h b/js/src/jit/x64/Assembler-x64.h
index 509b5b5450..fa89237d9c 100644
--- a/js/src/jit/x64/Assembler-x64.h
+++ b/js/src/jit/x64/Assembler-x64.h
@@ -225,7 +225,9 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
-static constexpr bool SupportsSimd = true;
+// XXX: As of Issue #2307 this is false and will no longer compile with true.
+// This should eventually be removed.
+static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 3f4052f51d..ef0502a489 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -416,8 +416,6 @@ CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocat
Operand dstAddr)
{
if (value->isConstant()) {
- MOZ_ASSERT(!access.isSimd());
-
masm.memoryBarrier(access.barrierBefore());
const MConstant* mir = value->toConstant();
@@ -440,10 +438,6 @@ CodeGeneratorX64::wasmStore(const wasm::MemoryAccessDesc& access, const LAllocat
case Scalar::Int64:
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::Uint8Clamped:
case Scalar::BigInt64:
case Scalar::BigUint64:
@@ -530,7 +524,6 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
const LDefinition* out = ins->output();
Scalar::Type accessType = mir->access().type();
- MOZ_ASSERT(!Scalar::isSimdType(accessType));
Operand srcAddr = ptr->isBogus()
? Operand(HeapReg, mir->offset())
@@ -552,7 +545,6 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
const LAllocation* value = ins->value();
Scalar::Type accessType = mir->access().type();
- MOZ_ASSERT(!Scalar::isSimdType(accessType));
canonicalizeIfDeterministic(accessType, value);
diff --git a/js/src/jit/x64/LOpcodes-x64.h b/js/src/jit/x64/LOpcodes-x64.h
index a0f4359b35..97fc4dfaed 100644
--- a/js/src/jit/x64/LOpcodes-x64.h
+++ b/js/src/jit/x64/LOpcodes-x64.h
@@ -14,8 +14,6 @@
_(UDivOrModI64) \
_(WasmTruncateToInt64) \
_(Int64ToFloatingPoint) \
- _(SimdValueInt32x4) \
- _(SimdValueFloat32x4) \
_(UDivOrMod) \
_(UDivOrModConstant)
diff --git a/js/src/jit/x64/Lowering-x64.cpp b/js/src/jit/x64/Lowering-x64.cpp
index 4aebe05af2..070d5ebd9f 100644
--- a/js/src/jit/x64/Lowering-x64.cpp
+++ b/js/src/jit/x64/Lowering-x64.cpp
@@ -233,10 +233,6 @@ LIRGeneratorX64::visitWasmStore(MWasmStore* ins)
break;
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
valueAlloc = useRegisterAtStart(value);
break;
case Scalar::BigInt64:
@@ -279,10 +275,6 @@ LIRGeneratorX64::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
break;
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
lir = new(alloc()) LAsmJSStoreHeap(useRegisterOrZeroAtStart(base),
useRegisterAtStart(ins->value()));
break;
diff --git a/js/src/jit/x64/MacroAssembler-x64.cpp b/js/src/jit/x64/MacroAssembler-x64.cpp
index 1fab669d2b..6374dd6c64 100644
--- a/js/src/jit/x64/MacroAssembler-x64.cpp
+++ b/js/src/jit/x64/MacroAssembler-x64.cpp
@@ -683,34 +683,6 @@ MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
case Scalar::Float64:
loadDouble(srcAddr, out.fpu());
break;
- case Scalar::Float32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movss zeroes out the high lanes.
- case 1: loadFloat32(srcAddr, out.fpu()); break;
- // See comment above, which also applies to movsd.
- case 2: loadDouble(srcAddr, out.fpu()); break;
- case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movd zeroes out the high lanes.
- case 1: vmovd(srcAddr, out.fpu()); break;
- // See comment above, which also applies to movq.
- case 2: vmovq(srcAddr, out.fpu()); break;
- case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
- loadUnalignedSimd128Int(srcAddr, out.fpu());
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
- loadUnalignedSimd128Int(srcAddr, out.fpu());
- break;
case Scalar::Int64:
MOZ_CRASH("int64 loads must use load64");
case Scalar::BigInt64:
@@ -728,7 +700,6 @@ void
MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
{
MOZ_ASSERT(!access.isAtomic());
- MOZ_ASSERT(!access.isSimd());
size_t loadOffset = size();
switch (access.type()) {
@@ -756,10 +727,6 @@ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAdd
break;
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
MOZ_CRASH("non-int64 loads should use load()");
case Scalar::BigInt64:
case Scalar::BigUint64:
@@ -798,34 +765,6 @@ MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister valu
case Scalar::Float64:
storeUncanonicalizedDouble(value.fpu(), dstAddr);
break;
- case Scalar::Float32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movss zeroes out the high lanes.
- case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
- // See comment above, which also applies to movsd.
- case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
- case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movd zeroes out the high lanes.
- case 1: vmovd(value.fpu(), dstAddr); break;
- // See comment above, which also applies to movq.
- case 2: vmovq(value.fpu(), dstAddr); break;
- case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
- storeUnalignedSimd128Int(value.fpu(), dstAddr);
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
- storeUnalignedSimd128Int(value.fpu(), dstAddr);
- break;
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::Uint8Clamped:
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index 7e28891a84..704674a4cd 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -299,16 +299,11 @@ CodeGeneratorX86Shared::visitWasmStackArg(LWasmStackArg* ins)
case MIRType::Float32:
masm.storeFloat32(ToFloatRegister(ins->arg()), dst);
return;
- // StackPointer is SIMD-aligned and ABIArgGenerator guarantees
- // stack offsets are SIMD-aligned.
case MIRType::Int32x4:
case MIRType::Bool32x4:
- masm.storeAlignedSimd128Int(ToFloatRegister(ins->arg()), dst);
- return;
case MIRType::Float32x4:
- masm.storeAlignedSimd128Float(ToFloatRegister(ins->arg()), dst);
- return;
- default: break;
+ default:
+ break;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected mir type in WasmStackArg");
}
@@ -399,10 +394,6 @@ CodeGeneratorX86Shared::visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTyp
{
switch (ool->viewType()) {
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::BigInt64:
case Scalar::BigUint64:
case Scalar::MaxTypedArrayViewType:
@@ -2431,873 +2422,6 @@ CodeGeneratorX86Shared::visitNegF(LNegF* ins)
}
void
-CodeGeneratorX86Shared::visitSimd128Int(LSimd128Int* ins)
-{
- const LDefinition* out = ins->getDef(0);
- masm.loadConstantSimd128Int(ins->getValue(), ToFloatRegister(out));
-}
-
-void
-CodeGeneratorX86Shared::visitSimd128Float(LSimd128Float* ins)
-{
- const LDefinition* out = ins->getDef(0);
- masm.loadConstantSimd128Float(ins->getValue(), ToFloatRegister(out));
-}
-
-void
-CodeGeneratorX86Shared::visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4* ins)
-{
- FloatRegister in = ToFloatRegister(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
- masm.convertInt32x4ToFloat32x4(in, out);
-}
-
-void
-CodeGeneratorX86Shared::visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins)
-{
- FloatRegister in = ToFloatRegister(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
- Register temp = ToRegister(ins->temp());
-
- auto* ool = new(alloc()) OutOfLineSimdFloatToIntCheck(temp, in, ins, ins->mir()->trapOffset());
- addOutOfLineCode(ool, ins->mir());
-
- masm.checkedConvertFloat32x4ToInt32x4(in, out, temp, ool->entry(), ool->rejoin());
-}
-
-void
-CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool)
-{
- Label onConversionError;
-
- masm.oolConvertFloat32x4ToInt32x4(ool->input(), ool->temp(), ool->rejoin(), &onConversionError);
- if (gen->compilingWasm()) {
- masm.bindLater(&onConversionError, trap(ool, wasm::Trap::ImpreciseSimdConversion));
- } else {
- masm.bind(&onConversionError);
- bailout(ool->ins()->snapshot());
- }
-}
-
-// Convert Float32x4 to Uint32x4.
-// If any input lane value is out of range or NaN, bail out.
-void
-CodeGeneratorX86Shared::visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins)
-{
- FloatRegister in = ToFloatRegister(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
- Register temp = ToRegister(ins->tempR());
- FloatRegister tempF = ToFloatRegister(ins->tempF());
-
- Label failed;
- masm.checkedConvertFloat32x4ToUint32x4(in, out, temp, tempF, &failed);
-
- Label ok;
- masm.jump(&ok);
- masm.bind(&failed);
- if (gen->compilingWasm())
- masm.j(Assembler::NotEqual, trap(ins->mir(), wasm::Trap::ImpreciseSimdConversion));
- else
- bailout(ins->snapshot());
- masm.bind(&ok);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdValueInt32x4(LSimdValueInt32x4* ins)
-{
- MOZ_ASSERT(ins->mir()->type() == MIRType::Int32x4 || ins->mir()->type() == MIRType::Bool32x4);
- masm.createInt32x4(ToRegister(ins->getOperand(0)),
- ToRegister(ins->getOperand(1)),
- ToRegister(ins->getOperand(2)),
- ToRegister(ins->getOperand(3)),
- ToFloatRegister(ins->output())
- );
-}
-
-void
-CodeGeneratorX86Shared::visitSimdValueFloat32x4(LSimdValueFloat32x4* ins)
-{
- MOZ_ASSERT(ins->mir()->type() == MIRType::Float32x4);
-
- FloatRegister r0 = ToFloatRegister(ins->getOperand(0));
- FloatRegister r1 = ToFloatRegister(ins->getOperand(1));
- FloatRegister r2 = ToFloatRegister(ins->getOperand(2));
- FloatRegister r3 = ToFloatRegister(ins->getOperand(3));
- FloatRegister tmp = ToFloatRegister(ins->getTemp(0));
- FloatRegister output = ToFloatRegister(ins->output());
-
- masm.createFloat32x4(r0, r1, r2, r3, tmp, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSplatX16(LSimdSplatX16* ins)
-{
- MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 16);
- Register input = ToRegister(ins->getOperand(0));
- FloatRegister output = ToFloatRegister(ins->output());
- masm.splatX16(input, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSplatX8(LSimdSplatX8* ins)
-{
- MOZ_ASSERT(SimdTypeToLength(ins->mir()->type()) == 8);
- Register input = ToRegister(ins->getOperand(0));
- FloatRegister output = ToFloatRegister(ins->output());
- masm.splatX8(input, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSplatX4(LSimdSplatX4* ins)
-{
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdSplat* mir = ins->mir();
- MOZ_ASSERT(IsSimdType(mir->type()));
- JS_STATIC_ASSERT(sizeof(float) == sizeof(int32_t));
-
- if (mir->type() == MIRType::Float32x4)
- masm.splatX4(ToFloatRegister(ins->getOperand(0)), output);
- else
- masm.splatX4(ToRegister(ins->getOperand(0)), output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdReinterpretCast(LSimdReinterpretCast* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- FloatRegister output = ToFloatRegister(ins->output());
- bool isIntLaneType = IsIntegerSimdType(ins->mir()->type());
- masm.reinterpretSimd(isIntLaneType, input, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdExtractElementB(LSimdExtractElementB* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- Register output = ToRegister(ins->output());
- MSimdExtractElement* mir = ins->mir();
- unsigned numLanes = SimdTypeToLength(mir->specialization());
- masm.extractLaneSimdBool(input, output, numLanes, mir->lane());
-}
-
-void
-CodeGeneratorX86Shared::visitSimdExtractElementI(LSimdExtractElementI* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- Register output = ToRegister(ins->output());
- MSimdExtractElement* mir = ins->mir();
- unsigned numLanes = SimdTypeToLength(mir->specialization());
- switch (numLanes) {
- case 4:
- masm.extractLaneInt32x4(input, output, mir->lane());
- break;
- case 8:
- masm.extractLaneInt16x8(input, output, mir->lane(), mir->signedness());
- break;
- case 16:
- masm.extractLaneInt8x16(input, output, mir->lane(), mir->signedness());
- break;
- default:
- MOZ_CRASH("Unhandled SIMD length");
- }
-}
-
-void
-CodeGeneratorX86Shared::visitSimdExtractElementU2D(LSimdExtractElementU2D* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- FloatRegister output = ToFloatRegister(ins->output());
- Register temp = ToRegister(ins->temp());
- MSimdExtractElement* mir = ins->mir();
- MOZ_ASSERT(mir->specialization() == MIRType::Int32x4);
- masm.extractLaneInt32x4(input, temp, mir->lane());
- masm.convertUInt32ToDouble(temp, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdExtractElementF(LSimdExtractElementF* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- FloatRegister output = ToFloatRegister(ins->output());
-
- unsigned lane = ins->mir()->lane();
- bool canonicalize = !gen->compilingWasm();
- masm.extractLaneFloat32x4(input, output, lane, canonicalize);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdInsertElementI(LSimdInsertElementI* ins)
-{
- FloatRegister input = ToFloatRegister(ins->vector());
- Register value = ToRegister(ins->value());
- FloatRegister output = ToFloatRegister(ins->output());
- MOZ_ASSERT(input == output); // defineReuseInput(0)
- unsigned lane = ins->lane();
- unsigned length = ins->length();
-
- masm.insertLaneSimdInt(input, value, output, lane, length);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdInsertElementF(LSimdInsertElementF* ins)
-{
- FloatRegister input = ToFloatRegister(ins->vector());
- FloatRegister value = ToFloatRegister(ins->value());
- FloatRegister output = ToFloatRegister(ins->output());
- MOZ_ASSERT(input == output); // defineReuseInput(0)
- masm.insertLaneFloat32x4(input, value, output, ins->lane());
-}
-
-void
-CodeGeneratorX86Shared::visitSimdAllTrue(LSimdAllTrue* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- Register output = ToRegister(ins->output());
-
- masm.allTrueSimdBool(input, output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdAnyTrue(LSimdAnyTrue* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- Register output = ToRegister(ins->output());
-
- masm.anyTrueSimdBool(input, output);
-}
-
-// XXX note for reviewer: this is SIMD.js only, no need to keep it for wasm.
-template <class T, class Reg> void
-CodeGeneratorX86Shared::visitSimdGeneralShuffle(LSimdGeneralShuffleBase* ins, Reg tempRegister)
-{
- MSimdGeneralShuffle* mir = ins->mir();
- unsigned numVectors = mir->numVectors();
-
- Register laneTemp = ToRegister(ins->temp());
-
- // This won't generate fast code, but it's fine because we expect users
- // to have used constant indices (and thus MSimdGeneralShuffle to be fold
- // into MSimdSwizzle/MSimdShuffle, which are fast).
-
- // We need stack space for the numVectors inputs and for the output vector.
- unsigned stackSpace = Simd128DataSize * (numVectors + 1);
- masm.reserveStack(stackSpace);
-
- for (unsigned i = 0; i < numVectors; i++) {
- masm.storeAlignedVector<T>(ToFloatRegister(ins->vector(i)),
- Address(StackPointer, Simd128DataSize * (1 + i)));
- }
-
- Label bail;
- const Scale laneScale = ScaleFromElemWidth(sizeof(T));
-
- for (size_t i = 0; i < mir->numLanes(); i++) {
- Operand lane = ToOperand(ins->lane(i));
-
- masm.cmp32(lane, Imm32(numVectors * mir->numLanes() - 1));
- masm.j(Assembler::Above, &bail);
-
- if (lane.kind() == Operand::REG) {
- masm.loadScalar<T>(Operand(StackPointer, ToRegister(ins->lane(i)), laneScale, Simd128DataSize),
- tempRegister);
- } else {
- masm.load32(lane, laneTemp);
- masm.loadScalar<T>(Operand(StackPointer, laneTemp, laneScale, Simd128DataSize), tempRegister);
- }
-
- masm.storeScalar<T>(tempRegister, Address(StackPointer, i * sizeof(T)));
- }
-
- FloatRegister output = ToFloatRegister(ins->output());
- masm.loadAlignedVector<T>(Address(StackPointer, 0), output);
-
- Label join;
- masm.jump(&join);
-
- {
- masm.bind(&bail);
- masm.freeStack(stackSpace);
- bailout(ins->snapshot());
- }
-
- masm.bind(&join);
- masm.setFramePushed(masm.framePushed() + stackSpace);
- masm.freeStack(stackSpace);
-}
-
-// XXX SIMD.js only
-void
-CodeGeneratorX86Shared::visitSimdGeneralShuffleI(LSimdGeneralShuffleI* ins)
-{
- switch (ins->mir()->type()) {
- case MIRType::Int8x16:
- return visitSimdGeneralShuffle<int8_t, Register>(ins, ToRegister(ins->temp()));
- case MIRType::Int16x8:
- return visitSimdGeneralShuffle<int16_t, Register>(ins, ToRegister(ins->temp()));
- case MIRType::Int32x4:
- return visitSimdGeneralShuffle<int32_t, Register>(ins, ToRegister(ins->temp()));
- default:
- MOZ_CRASH("unsupported type for general shuffle");
- }
-}
-void
-CodeGeneratorX86Shared::visitSimdGeneralShuffleF(LSimdGeneralShuffleF* ins)
-{
- ScratchFloat32Scope scratch(masm);
- visitSimdGeneralShuffle<float, FloatRegister>(ins, scratch);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSwizzleI(LSimdSwizzleI* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- FloatRegister output = ToFloatRegister(ins->output());
- const unsigned numLanes = ins->numLanes();
-
- switch (numLanes) {
- case 4: {
- unsigned lanes[4];
- for (unsigned i = 0; i < 4; i++)
- lanes[i] = ins->lane(i);
- masm.swizzleInt32x4(input, output, lanes);
- return;
- }
- }
-
- // In the general case, use pshufb if it is available. Convert to a
- // byte-wise swizzle.
- const unsigned bytesPerLane = 16 / numLanes;
- int8_t lanes[16];
- for (unsigned i = 0; i < numLanes; i++) {
- for (unsigned b = 0; b < bytesPerLane; b++) {
- lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
- }
- }
-
- Maybe<Register> maybeTemp;
- if (!ins->getTemp(0)->isBogusTemp())
- maybeTemp.emplace(ToRegister(ins->getTemp(0)));
-
- masm.swizzleInt8x16(input, output, maybeTemp, lanes);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSwizzleF(LSimdSwizzleF* ins)
-{
- FloatRegister input = ToFloatRegister(ins->input());
- FloatRegister output = ToFloatRegister(ins->output());
- MOZ_ASSERT(ins->numLanes() == 4);
-
- unsigned lanes[4];
- for (unsigned i = 0; i < 4; i++)
- lanes[i] = ins->lane(i);
- masm.swizzleFloat32x4(input, output, lanes);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdShuffle(LSimdShuffle* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- FloatRegister rhs = ToFloatRegister(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
- const unsigned numLanes = ins->numLanes();
- const unsigned bytesPerLane = 16 / numLanes;
-
- // Convert the shuffle to a byte-wise shuffle.
- uint8_t lanes[16];
- for (unsigned i = 0; i < numLanes; i++) {
- for (unsigned b = 0; b < bytesPerLane; b++) {
- lanes[i * bytesPerLane + b] = ins->lane(i) * bytesPerLane + b;
- }
- }
-
- Maybe<FloatRegister> maybeFloatTemp;
- Maybe<Register> maybeTemp;
- if (AssemblerX86Shared::HasSSSE3())
- maybeFloatTemp.emplace(ToFloatRegister(ins->temp()));
- else
- maybeTemp.emplace(ToRegister(ins->temp()));
-
- masm.shuffleInt8x16(lhs, rhs, output, maybeFloatTemp, maybeTemp, lanes);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdShuffleX4(LSimdShuffleX4* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister out = ToFloatRegister(ins->output());
-
- unsigned lanes[4];
- for (unsigned i = 0; i < 4; i++)
- lanes[i] = ins->lane(i);
- Maybe<FloatRegister> maybeTemp;
- if (!ins->temp()->isBogusTemp())
- maybeTemp.emplace(ToFloatRegister(ins->temp()));
- masm.shuffleX4(lhs, rhs, out, maybeTemp, lanes);
-}
-
-static inline Assembler::Condition
-ToCondition(MSimdBinaryComp::Operation op)
-{
- switch (op) {
- case MSimdBinaryComp::greaterThan: return Assembler::GreaterThan;
- case MSimdBinaryComp::equal: return Assembler::Equal;
- case MSimdBinaryComp::lessThan: return Assembler::LessThan;
- case MSimdBinaryComp::notEqual: return Assembler::NotEqual;
- case MSimdBinaryComp::greaterThanOrEqual: return Assembler::GreaterThanOrEqual;
- case MSimdBinaryComp::lessThanOrEqual: return Assembler::LessThanOrEqual;
- }
-
- MOZ_CRASH("unexpected cond");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryCompIx16(LSimdBinaryCompIx16* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
- MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
-
- masm.compareInt8x16(lhs, rhs, ToCondition(ins->operation()), output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryCompIx8(LSimdBinaryCompIx8* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
- MOZ_ASSERT_IF(!Assembler::HasAVX(), output == lhs);
-
- masm.compareInt16x8(lhs, rhs, ToCondition(ins->operation()), output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryCompIx4(LSimdBinaryCompIx4* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- MOZ_ASSERT(ToFloatRegister(ins->output()) == lhs);
-
- masm.compareInt32x4(lhs, rhs, ToCondition(ins->operation()), lhs);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryCompFx4(LSimdBinaryCompFx4* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- masm.compareFloat32x4(lhs, rhs, ToCondition(ins->operation()), output);
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryArithIx16(LSimdBinaryArithIx16* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdBinaryArith::Operation op = ins->operation();
- switch (op) {
- case MSimdBinaryArith::Op_add:
- masm.addInt8x16(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_sub:
- masm.subInt8x16(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_mul:
- // 8x16 mul is a valid operation, but not supported in SSE or AVX.
- // The operation is synthesized from 16x8 multiplies by
- // MSimdBinaryArith::AddLegalized().
- break;
- case MSimdBinaryArith::Op_div:
- case MSimdBinaryArith::Op_max:
- case MSimdBinaryArith::Op_min:
- case MSimdBinaryArith::Op_minNum:
- case MSimdBinaryArith::Op_maxNum:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryArithIx8(LSimdBinaryArithIx8* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdBinaryArith::Operation op = ins->operation();
- switch (op) {
- case MSimdBinaryArith::Op_add:
- masm.addInt16x8(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_sub:
- masm.subInt16x8(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_mul:
- masm.mulInt16x8(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_div:
- case MSimdBinaryArith::Op_max:
- case MSimdBinaryArith::Op_min:
- case MSimdBinaryArith::Op_minNum:
- case MSimdBinaryArith::Op_maxNum:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryArithIx4(LSimdBinaryArithIx4* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdBinaryArith::Operation op = ins->operation();
- switch (op) {
- case MSimdBinaryArith::Op_add:
- masm.addInt32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_sub:
- masm.subInt32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_mul: {
- Maybe<FloatRegister> maybeTemp;
- if (!AssemblerX86Shared::HasSSE41())
- maybeTemp.emplace(ToFloatRegister(ins->getTemp(0)));
- masm.mulInt32x4(lhs, rhs, maybeTemp, output);
- return;
- }
- case MSimdBinaryArith::Op_div:
- // x86 doesn't have SIMD i32 div.
- break;
- case MSimdBinaryArith::Op_max:
- // we can do max with a single instruction only if we have SSE4.1
- // using the PMAXSD instruction.
- break;
- case MSimdBinaryArith::Op_min:
- // we can do max with a single instruction only if we have SSE4.1
- // using the PMINSD instruction.
- break;
- case MSimdBinaryArith::Op_minNum:
- case MSimdBinaryArith::Op_maxNum:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryArithFx4(LSimdBinaryArithFx4* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdBinaryArith::Operation op = ins->operation();
- switch (op) {
- case MSimdBinaryArith::Op_add:
- masm.addFloat32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_sub:
- masm.subFloat32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_mul:
- masm.mulFloat32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_div:
- masm.divFloat32x4(lhs, rhs, output);
- return;
- case MSimdBinaryArith::Op_max: {
- masm.maxFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
- return;
- }
- case MSimdBinaryArith::Op_min: {
- masm.minFloat32x4(lhs, rhs, output);
- return;
- }
- case MSimdBinaryArith::Op_minNum: {
- masm.minNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
- return;
- }
- case MSimdBinaryArith::Op_maxNum: {
- masm.maxNumFloat32x4(lhs, rhs, ToFloatRegister(ins->temp()), output);
- return;
- }
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinarySaturating(LSimdBinarySaturating* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- SimdSign sign = ins->signedness();
- MOZ_ASSERT(sign != SimdSign::NotApplicable);
-
- switch (ins->type()) {
- case MIRType::Int8x16:
- switch (ins->operation()) {
- case MSimdBinarySaturating::add:
- masm.addSatInt8x16(lhs, rhs, sign, output);
- return;
- case MSimdBinarySaturating::sub:
- masm.subSatInt8x16(lhs, rhs, sign, output);
- return;
- }
- break;
-
- case MIRType::Int16x8:
- switch (ins->operation()) {
- case MSimdBinarySaturating::add:
- masm.addSatInt16x8(lhs, rhs, sign, output);
- return;
- case MSimdBinarySaturating::sub:
- masm.subSatInt16x8(lhs, rhs, sign, output);
- return;
- }
- break;
-
- default:
- break;
- }
- MOZ_CRASH("unsupported type for SIMD saturating arithmetic");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdUnaryArithIx16(LSimdUnaryArithIx16* ins)
-{
- Operand in = ToOperand(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
-
- switch (ins->operation()) {
- case MSimdUnaryArith::neg:
- masm.negInt8x16(in, out);
- return;
- case MSimdUnaryArith::not_:
- masm.notInt8x16(in, out);;
- return;
- case MSimdUnaryArith::abs:
- case MSimdUnaryArith::reciprocalApproximation:
- case MSimdUnaryArith::reciprocalSqrtApproximation:
- case MSimdUnaryArith::sqrt:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdUnaryArithIx8(LSimdUnaryArithIx8* ins)
-{
- Operand in = ToOperand(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
-
- switch (ins->operation()) {
- case MSimdUnaryArith::neg:
- masm.negInt16x8(in, out);
- return;
- case MSimdUnaryArith::not_:
- masm.notInt16x8(in, out);
- return;
- case MSimdUnaryArith::abs:
- case MSimdUnaryArith::reciprocalApproximation:
- case MSimdUnaryArith::reciprocalSqrtApproximation:
- case MSimdUnaryArith::sqrt:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdUnaryArithIx4(LSimdUnaryArithIx4* ins)
-{
- Operand in = ToOperand(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
-
- switch (ins->operation()) {
- case MSimdUnaryArith::neg:
- masm.negInt32x4(in, out);
- return;
- case MSimdUnaryArith::not_:
- masm.notInt32x4(in, out);
- return;
- case MSimdUnaryArith::abs:
- case MSimdUnaryArith::reciprocalApproximation:
- case MSimdUnaryArith::reciprocalSqrtApproximation:
- case MSimdUnaryArith::sqrt:
- break;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdUnaryArithFx4(LSimdUnaryArithFx4* ins)
-{
- Operand in = ToOperand(ins->input());
- FloatRegister out = ToFloatRegister(ins->output());
-
- switch (ins->operation()) {
- case MSimdUnaryArith::abs:
- masm.absFloat32x4(in, out);
- return;
- case MSimdUnaryArith::neg:
- masm.negFloat32x4(in, out);
- return;
- case MSimdUnaryArith::not_:
- masm.notFloat32x4(in, out);
- return;
- case MSimdUnaryArith::reciprocalApproximation:
- masm.packedRcpApproximationFloat32x4(in, out);
- return;
- case MSimdUnaryArith::reciprocalSqrtApproximation:
- masm.packedRcpSqrtApproximationFloat32x4(in, out);
- return;
- case MSimdUnaryArith::sqrt:
- masm.packedSqrtFloat32x4(in, out);
- return;
- }
- MOZ_CRASH("unexpected SIMD op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdBinaryBitwise(LSimdBinaryBitwise* ins)
-{
- FloatRegister lhs = ToFloatRegister(ins->lhs());
- Operand rhs = ToOperand(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
-
- MSimdBinaryBitwise::Operation op = ins->operation();
- switch (op) {
- case MSimdBinaryBitwise::and_:
- if (ins->type() == MIRType::Float32x4)
- masm.bitwiseAndFloat32x4(lhs, rhs, output);
- else
- masm.bitwiseAndSimdInt(lhs, rhs, output);
- return;
- case MSimdBinaryBitwise::or_:
- if (ins->type() == MIRType::Float32x4)
- masm.bitwiseOrFloat32x4(lhs, rhs, output);
- else
- masm.bitwiseOrSimdInt(lhs, rhs, output);
- return;
- case MSimdBinaryBitwise::xor_:
- if (ins->type() == MIRType::Float32x4)
- masm.bitwiseXorFloat32x4(lhs, rhs, output);
- else
- masm.bitwiseXorSimdInt(lhs, rhs, output);
- return;
- }
- MOZ_CRASH("unexpected SIMD bitwise op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdShift(LSimdShift* ins)
-{
- FloatRegister out = ToFloatRegister(ins->output());
- MOZ_ASSERT(ToFloatRegister(ins->vector()) == out); // defineReuseInput(0);
-
- // Note that SSE doesn't have instructions for shifting 8x16 vectors.
- // These shifts are synthesized by the MSimdShift::AddLegalized() function.
- const LAllocation* val = ins->value();
- if (val->isConstant()) {
- MOZ_ASSERT(ins->temp()->isBogusTemp());
- Imm32 count(uint32_t(ToInt32(val)));
- switch (ins->type()) {
- case MIRType::Int16x8:
- switch (ins->operation()) {
- case MSimdShift::lsh:
- masm.packedLeftShiftByScalarInt16x8(count, out);
- return;
- case MSimdShift::rsh:
- masm.packedRightShiftByScalarInt16x8(count, out);
- return;
- case MSimdShift::ursh:
- masm.packedUnsignedRightShiftByScalarInt16x8(count, out);
- return;
- }
- break;
- case MIRType::Int32x4:
- switch (ins->operation()) {
- case MSimdShift::lsh:
- masm.packedLeftShiftByScalarInt32x4(count, out);
- return;
- case MSimdShift::rsh:
- masm.packedRightShiftByScalarInt32x4(count, out);
- return;
- case MSimdShift::ursh:
- masm.packedUnsignedRightShiftByScalarInt32x4(count, out);
- return;
- }
- break;
- default:
- MOZ_CRASH("unsupported type for SIMD shifts");
- }
- MOZ_CRASH("unexpected SIMD bitwise op");
- }
-
- Register temp = ToRegister(ins->temp());
- Register count = ToRegister(val);
-
- switch (ins->type()) {
- case MIRType::Int16x8:
- switch (ins->operation()) {
- case MSimdShift::lsh:
- masm.packedLeftShiftByScalarInt16x8(out, count, temp, out);
- return;
- case MSimdShift::rsh:
- masm.packedRightShiftByScalarInt16x8(out, count, temp, out);
- return;
- case MSimdShift::ursh:
- masm.packedUnsignedRightShiftByScalarInt16x8(out, count, temp, out);
- return;
- }
- break;
- case MIRType::Int32x4:
- switch (ins->operation()) {
- case MSimdShift::lsh:
- masm.packedLeftShiftByScalarInt32x4(out, count, temp, out);
- return;
- case MSimdShift::rsh:
- masm.packedRightShiftByScalarInt32x4(out, count, temp, out);
- return;
- case MSimdShift::ursh:
- masm.packedUnsignedRightShiftByScalarInt32x4(out, count, temp, out);
- return;
- }
- break;
- default:
- MOZ_CRASH("unsupported type for SIMD shifts");
- }
- MOZ_CRASH("unexpected SIMD bitwise op");
-}
-
-void
-CodeGeneratorX86Shared::visitSimdSelect(LSimdSelect* ins)
-{
- FloatRegister mask = ToFloatRegister(ins->mask());
- FloatRegister onTrue = ToFloatRegister(ins->lhs());
- FloatRegister onFalse = ToFloatRegister(ins->rhs());
- FloatRegister output = ToFloatRegister(ins->output());
- FloatRegister temp = ToFloatRegister(ins->temp());
-
- MSimdSelect* mir = ins->mir();
- unsigned lanes = SimdTypeToLength(mir->type());
- if (lanes == 4)
- masm.selectX4(mask, onTrue, onFalse, temp, output);
- else
- masm.selectSimd128(mask, onTrue, onFalse, temp, output);
-}
-
-void
CodeGeneratorX86Shared::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir)
{
Register elements = ToRegister(lir->elements());
@@ -3717,15 +2841,6 @@ CodeGeneratorX86Shared::canonicalizeIfDeterministic(Scalar::Type type, const LAl
masm.canonicalizeDoubleIfDeterministic(in);
break;
}
- case Scalar::Float32x4: {
- FloatRegister in = ToFloatRegister(value);
- MOZ_ASSERT(in.isSimd128());
- FloatRegister scratch = in != xmm0.asSimd128() ? xmm0 : xmm1;
- masm.push(scratch);
- masm.canonicalizeFloat32x4(in, scratch);
- masm.pop(scratch);
- break;
- }
default: {
// Other types don't need canonicalization.
break;
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
index 4b0664fb63..d990f662c4 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -49,31 +49,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
}
};
- // Additional bounds check for vector Float to Int conversion, when the
- // undefined pattern is seen. Might imply a bailout.
- class OutOfLineSimdFloatToIntCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
- {
- Register temp_;
- FloatRegister input_;
- LInstruction* ins_;
- wasm::TrapOffset trapOffset_;
-
- public:
- OutOfLineSimdFloatToIntCheck(Register temp, FloatRegister input, LInstruction *ins,
- wasm::TrapOffset trapOffset)
- : temp_(temp), input_(input), ins_(ins), trapOffset_(trapOffset)
- {}
-
- Register temp() const { return temp_; }
- FloatRegister input() const { return input_; }
- LInstruction* ins() const { return ins_; }
- wasm::TrapOffset trapOffset() const { return trapOffset_; }
-
- void accept(CodeGeneratorX86Shared* codegen) {
- codegen->visitOutOfLineSimdFloatToIntCheck(this);
- }
- };
-
public:
NonAssertingLabel deoptLabel_;
@@ -258,51 +233,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
void visitOutOfLineWasmTruncateCheck(OutOfLineWasmTruncateCheck* ool);
- // SIMD operators
- void visitSimdValueInt32x4(LSimdValueInt32x4* lir);
- void visitSimdValueFloat32x4(LSimdValueFloat32x4* lir);
- void visitSimdSplatX16(LSimdSplatX16* lir);
- void visitSimdSplatX8(LSimdSplatX8* lir);
- void visitSimdSplatX4(LSimdSplatX4* lir);
- void visitSimd128Int(LSimd128Int* ins);
- void visitSimd128Float(LSimd128Float* ins);
- void visitInt32x4ToFloat32x4(LInt32x4ToFloat32x4* ins);
- void visitFloat32x4ToInt32x4(LFloat32x4ToInt32x4* ins);
- void visitFloat32x4ToUint32x4(LFloat32x4ToUint32x4* ins);
- void visitSimdReinterpretCast(LSimdReinterpretCast* lir);
- void visitSimdExtractElementB(LSimdExtractElementB* lir);
- void visitSimdExtractElementI(LSimdExtractElementI* lir);
- void visitSimdExtractElementU2D(LSimdExtractElementU2D* lir);
- void visitSimdExtractElementF(LSimdExtractElementF* lir);
- void visitSimdInsertElementI(LSimdInsertElementI* lir);
- void visitSimdInsertElementF(LSimdInsertElementF* lir);
- void visitSimdSwizzleI(LSimdSwizzleI* lir);
- void visitSimdSwizzleF(LSimdSwizzleF* lir);
- void visitSimdShuffleX4(LSimdShuffleX4* lir);
- void visitSimdShuffle(LSimdShuffle* lir);
- void visitSimdUnaryArithIx16(LSimdUnaryArithIx16* lir);
- void visitSimdUnaryArithIx8(LSimdUnaryArithIx8* lir);
- void visitSimdUnaryArithIx4(LSimdUnaryArithIx4* lir);
- void visitSimdUnaryArithFx4(LSimdUnaryArithFx4* lir);
- void visitSimdBinaryCompIx16(LSimdBinaryCompIx16* lir);
- void visitSimdBinaryCompIx8(LSimdBinaryCompIx8* lir);
- void visitSimdBinaryCompIx4(LSimdBinaryCompIx4* lir);
- void visitSimdBinaryCompFx4(LSimdBinaryCompFx4* lir);
- void visitSimdBinaryArithIx16(LSimdBinaryArithIx16* lir);
- void visitSimdBinaryArithIx8(LSimdBinaryArithIx8* lir);
- void visitSimdBinaryArithIx4(LSimdBinaryArithIx4* lir);
- void visitSimdBinaryArithFx4(LSimdBinaryArithFx4* lir);
- void visitSimdBinarySaturating(LSimdBinarySaturating* lir);
- void visitSimdBinaryBitwise(LSimdBinaryBitwise* lir);
- void visitSimdShift(LSimdShift* lir);
- void visitSimdSelect(LSimdSelect* ins);
- void visitSimdAllTrue(LSimdAllTrue* ins);
- void visitSimdAnyTrue(LSimdAnyTrue* ins);
-
- template <class T, class Reg> void visitSimdGeneralShuffle(LSimdGeneralShuffleBase* lir, Reg temp);
- void visitSimdGeneralShuffleI(LSimdGeneralShuffleI* lir);
- void visitSimdGeneralShuffleF(LSimdGeneralShuffleF* lir);
-
// Out of line visitors.
void visitOutOfLineBailout(OutOfLineBailout* ool);
void visitOutOfLineUndoALUOperation(OutOfLineUndoALUOperation* ool);
@@ -310,7 +240,6 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
void visitModOverflowCheck(ModOverflowCheck* ool);
void visitReturnZero(ReturnZero* ool);
void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
- void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
void generateInvalidateEpilogue();
// Generating a result.
diff --git a/js/src/jit/x86-shared/LIR-x86-shared.h b/js/src/jit/x86-shared/LIR-x86-shared.h
index 7bd7414118..415e9f774a 100644
--- a/js/src/jit/x86-shared/LIR-x86-shared.h
+++ b/js/src/jit/x86-shared/LIR-x86-shared.h
@@ -354,47 +354,6 @@ class LMulI : public LBinaryMath<0, 1>
}
};
-// Constructs an int32x4 SIMD value.
-class LSimdValueInt32x4 : public LInstructionHelper<1, 4, 0>
-{
- public:
- LIR_HEADER(SimdValueInt32x4)
- LSimdValueInt32x4(const LAllocation& x, const LAllocation& y,
- const LAllocation& z, const LAllocation& w)
- {
- setOperand(0, x);
- setOperand(1, y);
- setOperand(2, z);
- setOperand(3, w);
- }
-
- MSimdValueX4* mir() const {
- return mir_->toSimdValueX4();
- }
-};
-
-// Constructs a float32x4 SIMD value, optimized for x86 family
-class LSimdValueFloat32x4 : public LInstructionHelper<1, 4, 1>
-{
- public:
- LIR_HEADER(SimdValueFloat32x4)
- LSimdValueFloat32x4(const LAllocation& x, const LAllocation& y,
- const LAllocation& z, const LAllocation& w,
- const LDefinition& copyY)
- {
- setOperand(0, x);
- setOperand(1, y);
- setOperand(2, z);
- setOperand(3, w);
-
- setTemp(0, copyY);
- }
-
- MSimdValueX4* mir() const {
- return mir_->toSimdValueX4();
- }
-};
-
class LInt64ToFloatingPoint : public LInstructionHelper<1, INT64_PIECES, 1>
{
public:
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.cpp b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
index 0a9a9cc55d..161f3634e9 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.cpp
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.cpp
@@ -157,31 +157,6 @@ template void LIRGeneratorX86Shared::lowerForFPU(LInstructionHelper<1, 2, 1>* in
MDefinition* lhs, MDefinition* rhs);
void
-LIRGeneratorX86Shared::lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
-{
- lowerForALU(ins, mir, lhs, rhs);
-}
-
-void
-LIRGeneratorX86Shared::lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir, MDefinition* lhs, MDefinition* rhs)
-{
- // Swap the operands around to fit the instructions that x86 actually has.
- // We do this here, before register allocation, so that we don't need
- // temporaries and copying afterwards.
- switch (mir->operation()) {
- case MSimdBinaryComp::greaterThan:
- case MSimdBinaryComp::greaterThanOrEqual:
- mir->reverse();
- Swap(lhs, rhs);
- break;
- default:
- break;
- }
-
- lowerForFPU(ins, mir, lhs, rhs);
-}
-
-void
LIRGeneratorX86Shared::lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs)
{
@@ -643,356 +618,6 @@ LIRGeneratorX86Shared::lowerAtomicTypedArrayElementBinop(MAtomicTypedArrayElemen
}
void
-LIRGeneratorX86Shared::visitSimdInsertElement(MSimdInsertElement* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- LUse vec = useRegisterAtStart(ins->vector());
- LUse val = useRegister(ins->value());
- switch (ins->type()) {
- case MIRType::Int8x16:
- case MIRType::Bool8x16:
- // When SSE 4.1 is not available, we need to go via the stack.
- // This requires the value to be inserted to be in %eax-%edx.
- // Pick %ebx since other instructions use %eax or %ecx hard-wired.
-#if defined(JS_CODEGEN_X86)
- if (!AssemblerX86Shared::HasSSE41())
- val = useFixed(ins->value(), ebx);
-#endif
- defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
- break;
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- defineReuseInput(new(alloc()) LSimdInsertElementI(vec, val), ins, 0);
- break;
- case MIRType::Float32x4:
- defineReuseInput(new(alloc()) LSimdInsertElementF(vec, val), ins, 0);
- break;
- default:
- MOZ_CRASH("Unknown SIMD kind when generating constant");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdExtractElement(MSimdExtractElement* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->input()->type()));
- MOZ_ASSERT(!IsSimdType(ins->type()));
-
- switch (ins->input()->type()) {
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4: {
- MOZ_ASSERT(ins->signedness() != SimdSign::NotApplicable);
- LUse use = useRegisterAtStart(ins->input());
- if (ins->type() == MIRType::Double) {
- // Extract an Uint32 lane into a double.
- MOZ_ASSERT(ins->signedness() == SimdSign::Unsigned);
- define(new (alloc()) LSimdExtractElementU2D(use, temp()), ins);
- } else {
- auto* lir = new (alloc()) LSimdExtractElementI(use);
-#if defined(JS_CODEGEN_X86)
- // On x86 (32-bit), we may need to use movsbl or movzbl instructions
- // to sign or zero extend the extracted lane to 32 bits. The 8-bit
- // version of these instructions require a source register that is
- // %al, %bl, %cl, or %dl.
- // Fix it to %ebx since we can't express that constraint better.
- if (ins->input()->type() == MIRType::Int8x16) {
- defineFixed(lir, ins, LAllocation(AnyRegister(ebx)));
- return;
- }
-#endif
- define(lir, ins);
- }
- break;
- }
- case MIRType::Float32x4: {
- MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
- LUse use = useRegisterAtStart(ins->input());
- define(new(alloc()) LSimdExtractElementF(use), ins);
- break;
- }
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4: {
- MOZ_ASSERT(ins->signedness() == SimdSign::NotApplicable);
- LUse use = useRegisterAtStart(ins->input());
- define(new(alloc()) LSimdExtractElementB(use), ins);
- break;
- }
- default:
- MOZ_CRASH("Unknown SIMD kind when extracting element");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdBinaryArith(MSimdBinaryArith* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- MDefinition* lhs = ins->lhs();
- MDefinition* rhs = ins->rhs();
-
- if (ins->isCommutative())
- ReorderCommutative(&lhs, &rhs, ins);
-
- switch (ins->type()) {
- case MIRType::Int8x16: {
- LSimdBinaryArithIx16* lir = new (alloc()) LSimdBinaryArithIx16();
- lir->setTemp(0, LDefinition::BogusTemp());
- lowerForFPU(lir, ins, lhs, rhs);
- return;
- }
-
- case MIRType::Int16x8: {
- LSimdBinaryArithIx8* lir = new (alloc()) LSimdBinaryArithIx8();
- lir->setTemp(0, LDefinition::BogusTemp());
- lowerForFPU(lir, ins, lhs, rhs);
- return;
- }
-
- case MIRType::Int32x4: {
- LSimdBinaryArithIx4* lir = new (alloc()) LSimdBinaryArithIx4();
- bool needsTemp =
- ins->operation() == MSimdBinaryArith::Op_mul && !MacroAssembler::HasSSE41();
- lir->setTemp(0, needsTemp ? temp(LDefinition::SIMD128INT) : LDefinition::BogusTemp());
- lowerForFPU(lir, ins, lhs, rhs);
- return;
- }
-
- case MIRType::Float32x4: {
- LSimdBinaryArithFx4* lir = new (alloc()) LSimdBinaryArithFx4();
-
- bool needsTemp = ins->operation() == MSimdBinaryArith::Op_max ||
- ins->operation() == MSimdBinaryArith::Op_minNum ||
- ins->operation() == MSimdBinaryArith::Op_maxNum;
- lir->setTemp(0,
- needsTemp ? temp(LDefinition::SIMD128FLOAT) : LDefinition::BogusTemp());
- lowerForFPU(lir, ins, lhs, rhs);
- return;
- }
-
- default:
- MOZ_CRASH("unknown simd type on binary arith operation");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdBinarySaturating(MSimdBinarySaturating* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- MDefinition* lhs = ins->lhs();
- MDefinition* rhs = ins->rhs();
-
- if (ins->isCommutative())
- ReorderCommutative(&lhs, &rhs, ins);
-
- LSimdBinarySaturating* lir = new (alloc()) LSimdBinarySaturating();
- lowerForFPU(lir, ins, lhs, rhs);
-}
-
-void
-LIRGeneratorX86Shared::visitSimdSelect(MSimdSelect* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- LSimdSelect* lins = new(alloc()) LSimdSelect;
- MDefinition* r0 = ins->getOperand(0);
- MDefinition* r1 = ins->getOperand(1);
- MDefinition* r2 = ins->getOperand(2);
-
- lins->setOperand(0, useRegister(r0));
- lins->setOperand(1, useRegister(r1));
- lins->setOperand(2, useRegister(r2));
- lins->setTemp(0, temp(LDefinition::SIMD128FLOAT));
-
- define(lins, ins);
-}
-
-void
-LIRGeneratorX86Shared::visitSimdSplat(MSimdSplat* ins)
-{
- LAllocation x = useRegisterAtStart(ins->getOperand(0));
-
- switch (ins->type()) {
- case MIRType::Int8x16:
- define(new (alloc()) LSimdSplatX16(x), ins);
- break;
- case MIRType::Int16x8:
- define(new (alloc()) LSimdSplatX8(x), ins);
- break;
- case MIRType::Int32x4:
- case MIRType::Float32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- // Use the SplatX4 instruction for all boolean splats. Since the input
- // value is a 32-bit int that is either 0 or -1, the X4 splat gives
- // the right result for all boolean geometries.
- // For floats, (Non-AVX) codegen actually wants the input and the output
- // to be in the same register, but we can't currently use
- // defineReuseInput because they have different types (scalar vs
- // vector), so a spill slot for one may not be suitable for the other.
- define(new (alloc()) LSimdSplatX4(x), ins);
- break;
- default:
- MOZ_CRASH("Unknown SIMD kind");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdValueX4(MSimdValueX4* ins)
-{
- switch (ins->type()) {
- case MIRType::Float32x4: {
- // Ideally, x would be used at start and reused for the output, however
- // register allocation currently doesn't permit us to tie together two
- // virtual registers with different types.
- LAllocation x = useRegister(ins->getOperand(0));
- LAllocation y = useRegister(ins->getOperand(1));
- LAllocation z = useRegister(ins->getOperand(2));
- LAllocation w = useRegister(ins->getOperand(3));
- LDefinition t = temp(LDefinition::SIMD128FLOAT);
- define(new (alloc()) LSimdValueFloat32x4(x, y, z, w, t), ins);
- break;
- }
- case MIRType::Bool32x4:
- case MIRType::Int32x4: {
- // No defineReuseInput => useAtStart for everyone.
- LAllocation x = useRegisterAtStart(ins->getOperand(0));
- LAllocation y = useRegisterAtStart(ins->getOperand(1));
- LAllocation z = useRegisterAtStart(ins->getOperand(2));
- LAllocation w = useRegisterAtStart(ins->getOperand(3));
- define(new(alloc()) LSimdValueInt32x4(x, y, z, w), ins);
- break;
- }
- default:
- MOZ_CRASH("Unknown SIMD kind");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdSwizzle(MSimdSwizzle* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->input()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- if (IsIntegerSimdType(ins->input()->type())) {
- LUse use = useRegisterAtStart(ins->input());
- LSimdSwizzleI* lir = new (alloc()) LSimdSwizzleI(use);
- define(lir, ins);
- // We need a GPR temp register for pre-SSSE3 codegen (no vpshufb).
- if (Assembler::HasSSSE3()) {
- lir->setTemp(0, LDefinition::BogusTemp());
- } else {
- // The temp must be a GPR usable with 8-bit loads and stores.
-#if defined(JS_CODEGEN_X86)
- lir->setTemp(0, tempFixed(ebx));
-#else
- lir->setTemp(0, temp());
-#endif
- }
- } else if (ins->input()->type() == MIRType::Float32x4) {
- LUse use = useRegisterAtStart(ins->input());
- LSimdSwizzleF* lir = new (alloc()) LSimdSwizzleF(use);
- define(lir, ins);
- lir->setTemp(0, LDefinition::BogusTemp());
- } else {
- MOZ_CRASH("Unknown SIMD kind when getting lane");
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdShuffle(MSimdShuffle* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->lhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->rhs()->type()));
- MOZ_ASSERT(IsSimdType(ins->type()));
- if (ins->type() == MIRType::Int32x4 || ins->type() == MIRType::Float32x4) {
- bool zFromLHS = ins->lane(2) < 4;
- bool wFromLHS = ins->lane(3) < 4;
- uint32_t lanesFromLHS = (ins->lane(0) < 4) + (ins->lane(1) < 4) + zFromLHS + wFromLHS;
-
- LSimdShuffleX4* lir = new (alloc()) LSimdShuffleX4();
- lowerForFPU(lir, ins, ins->lhs(), ins->rhs());
-
- // See codegen for requirements details.
- LDefinition temp =
- (lanesFromLHS == 3) ? tempCopy(ins->rhs(), 1) : LDefinition::BogusTemp();
- lir->setTemp(0, temp);
- } else {
- MOZ_ASSERT(ins->type() == MIRType::Int8x16 || ins->type() == MIRType::Int16x8);
- LSimdShuffle* lir = new (alloc()) LSimdShuffle();
- lir->setOperand(0, useRegister(ins->lhs()));
- lir->setOperand(1, useRegister(ins->rhs()));
- define(lir, ins);
- // We need a GPR temp register for pre-SSSE3 codegen, and an SSE temp
- // when using pshufb.
- if (Assembler::HasSSSE3()) {
- lir->setTemp(0, temp(LDefinition::SIMD128INT));
- } else {
- // The temp must be a GPR usable with 8-bit loads and stores.
-#if defined(JS_CODEGEN_X86)
- lir->setTemp(0, tempFixed(ebx));
-#else
- lir->setTemp(0, temp());
-#endif
- }
- }
-}
-
-void
-LIRGeneratorX86Shared::visitSimdGeneralShuffle(MSimdGeneralShuffle* ins)
-{
- MOZ_ASSERT(IsSimdType(ins->type()));
-
- LSimdGeneralShuffleBase* lir;
- if (IsIntegerSimdType(ins->type())) {
-#if defined(JS_CODEGEN_X86)
- // The temp register must be usable with 8-bit load and store
- // instructions, so one of %eax-%edx.
- LDefinition t;
- if (ins->type() == MIRType::Int8x16)
- t = tempFixed(ebx);
- else
- t = temp();
-#else
- LDefinition t = temp();
-#endif
- lir = new (alloc()) LSimdGeneralShuffleI(t);
- } else if (ins->type() == MIRType::Float32x4) {
- lir = new (alloc()) LSimdGeneralShuffleF(temp());
- } else {
- MOZ_CRASH("Unknown SIMD kind when doing a shuffle");
- }
-
- if (!lir->init(alloc(), ins->numVectors() + ins->numLanes()))
- return;
-
- for (unsigned i = 0; i < ins->numVectors(); i++) {
- MOZ_ASSERT(IsSimdType(ins->vector(i)->type()));
- lir->setOperand(i, useRegister(ins->vector(i)));
- }
-
- for (unsigned i = 0; i < ins->numLanes(); i++) {
- MOZ_ASSERT(ins->lane(i)->type() == MIRType::Int32);
- // Note that there can be up to 16 lane arguments, so we can't assume
- // that they all get an allocated register.
- lir->setOperand(i + ins->numVectors(), use(ins->lane(i)));
- }
-
- assignSnapshot(lir, Bailout_BoundsCheck);
- define(lir, ins);
-}
-
-void
LIRGeneratorX86Shared::visitCopySign(MCopySign* ins)
{
MDefinition* lhs = ins->lhs();
diff --git a/js/src/jit/x86-shared/Lowering-x86-shared.h b/js/src/jit/x86-shared/Lowering-x86-shared.h
index fc76203fe2..515de50503 100644
--- a/js/src/jit/x86-shared/Lowering-x86-shared.h
+++ b/js/src/jit/x86-shared/Lowering-x86-shared.h
@@ -38,10 +38,6 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
template<size_t Temps>
void lowerForFPU(LInstructionHelper<1, 2, Temps>* ins, MDefinition* mir, MDefinition* lhs,
MDefinition* rhs);
- void lowerForCompIx4(LSimdBinaryCompIx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs);
- void lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
- MDefinition* lhs, MDefinition* rhs);
void lowerForBitAndAndBranch(LBitAndAndBranch* baab, MInstruction* mir,
MDefinition* lhs, MDefinition* rhs);
void visitAsmJSNeg(MAsmJSNeg* ins);
@@ -55,16 +51,6 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
void lowerUrshD(MUrsh* mir);
void lowerTruncateDToInt32(MTruncateToInt32* ins);
void lowerTruncateFToInt32(MTruncateToInt32* ins);
- void visitSimdInsertElement(MSimdInsertElement* ins);
- void visitSimdExtractElement(MSimdExtractElement* ins);
- void visitSimdBinaryArith(MSimdBinaryArith* ins);
- void visitSimdBinarySaturating(MSimdBinarySaturating* ins);
- void visitSimdSelect(MSimdSelect* ins);
- void visitSimdSplat(MSimdSplat* ins);
- void visitSimdSwizzle(MSimdSwizzle* ins);
- void visitSimdShuffle(MSimdShuffle* ins);
- void visitSimdGeneralShuffle(MSimdGeneralShuffle* ins);
- void visitSimdValueX4(MSimdValueX4* ins);
void lowerCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement* ins,
bool useI386ByteRegisters);
void lowerAtomicExchangeTypedArrayElement(MAtomicExchangeTypedArrayElement* ins,
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
index f308e41fd8..902731f2b4 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared-inl.h
@@ -1106,29 +1106,6 @@ MacroAssembler::branchTestMagicImpl(Condition cond, const T& t, L label)
}
// ========================================================================
-// Canonicalization primitives.
-void
-MacroAssembler::canonicalizeFloat32x4(FloatRegister reg, FloatRegister scratch)
-{
- ScratchSimd128Scope scratch2(*this);
-
- MOZ_ASSERT(scratch.asSimd128() != scratch2.asSimd128());
- MOZ_ASSERT(reg.asSimd128() != scratch2.asSimd128());
- MOZ_ASSERT(reg.asSimd128() != scratch.asSimd128());
-
- FloatRegister mask = scratch;
- vcmpordps(Operand(reg), reg, mask);
-
- FloatRegister ifFalse = scratch2;
- float nanf = float(JS::GenericNaN());
- loadConstantSimd128Float(SimdConstant::SplatX4(nanf), ifFalse);
-
- bitwiseAndFloat32x4(reg, Operand(mask), reg);
- bitwiseAndNotFloat32x4(mask, Operand(ifFalse), mask);
- bitwiseOrFloat32x4(reg, Operand(mask), reg);
-}
-
-// ========================================================================
// Memory access primitives.
void
MacroAssembler::storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
diff --git a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
index 25b3b846da..0afd4ae6bd 100644
--- a/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
+++ b/js/src/jit/x86-shared/MacroAssembler-x86-shared.h
@@ -1015,37 +1015,6 @@ class MacroAssemblerX86Shared : public Assembler
template <class T> inline void loadAlignedVector(const Address& src, FloatRegister dest);
template <class T> inline void storeAlignedVector(FloatRegister src, const Address& dest);
- void loadInt32x1(const Address& src, FloatRegister dest) {
- vmovd(Operand(src), dest);
- }
- void loadInt32x1(const BaseIndex& src, FloatRegister dest) {
- vmovd(Operand(src), dest);
- }
- void loadInt32x2(const Address& src, FloatRegister dest) {
- vmovq(Operand(src), dest);
- }
- void loadInt32x2(const BaseIndex& src, FloatRegister dest) {
- vmovq(Operand(src), dest);
- }
- void loadInt32x3(const BaseIndex& src, FloatRegister dest) {
- BaseIndex srcZ(src);
- srcZ.offset += 2 * sizeof(int32_t);
-
- ScratchSimd128Scope scratch(asMasm());
- vmovq(Operand(src), dest);
- vmovd(Operand(srcZ), scratch);
- vmovlhps(scratch, dest, dest);
- }
- void loadInt32x3(const Address& src, FloatRegister dest) {
- Address srcZ(src);
- srcZ.offset += 2 * sizeof(int32_t);
-
- ScratchSimd128Scope scratch(asMasm());
- vmovq(Operand(src), dest);
- vmovd(Operand(srcZ), scratch);
- vmovlhps(scratch, dest, dest);
- }
-
void loadAlignedSimd128Int(const Address& src, FloatRegister dest) {
vmovdqa(Operand(src), dest);
}
@@ -1080,35 +1049,6 @@ class MacroAssemblerX86Shared : public Assembler
vmovdqu(src, dest);
}
- void storeInt32x1(FloatRegister src, const Address& dest) {
- vmovd(src, Operand(dest));
- }
- void storeInt32x1(FloatRegister src, const BaseIndex& dest) {
- vmovd(src, Operand(dest));
- }
- void storeInt32x2(FloatRegister src, const Address& dest) {
- vmovq(src, Operand(dest));
- }
- void storeInt32x2(FloatRegister src, const BaseIndex& dest) {
- vmovq(src, Operand(dest));
- }
- void storeInt32x3(FloatRegister src, const Address& dest) {
- Address destZ(dest);
- destZ.offset += 2 * sizeof(int32_t);
- vmovq(src, Operand(dest));
- ScratchSimd128Scope scratch(asMasm());
- vmovhlps(src, scratch, scratch);
- vmovd(scratch, Operand(destZ));
- }
- void storeInt32x3(FloatRegister src, const BaseIndex& dest) {
- BaseIndex destZ(dest);
- destZ.offset += 2 * sizeof(int32_t);
- vmovq(src, Operand(dest));
- ScratchSimd128Scope scratch(asMasm());
- vmovhlps(src, scratch, scratch);
- vmovd(scratch, Operand(destZ));
- }
-
void storeUnalignedSimd128Int(FloatRegister src, const Address& dest) {
vmovdqu(src, Operand(dest));
}
@@ -1191,23 +1131,6 @@ class MacroAssemblerX86Shared : public Assembler
vpsrld(count, dest, dest);
}
- void loadFloat32x3(const Address& src, FloatRegister dest) {
- Address srcZ(src);
- srcZ.offset += 2 * sizeof(float);
- vmovsd(src, dest);
- ScratchSimd128Scope scratch(asMasm());
- vmovss(srcZ, scratch);
- vmovlhps(scratch, dest, dest);
- }
- void loadFloat32x3(const BaseIndex& src, FloatRegister dest) {
- BaseIndex srcZ(src);
- srcZ.offset += 2 * sizeof(float);
- vmovsd(src, dest);
- ScratchSimd128Scope scratch(asMasm());
- vmovss(srcZ, scratch);
- vmovlhps(scratch, dest, dest);
- }
-
void loadAlignedSimd128Float(const Address& src, FloatRegister dest) {
vmovaps(Operand(src), dest);
}
diff --git a/js/src/jit/x86/Assembler-x86.h b/js/src/jit/x86/Assembler-x86.h
index 39460b5676..2e9c6b88f2 100644
--- a/js/src/jit/x86/Assembler-x86.h
+++ b/js/src/jit/x86/Assembler-x86.h
@@ -143,7 +143,9 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 && JitStackValueAlignment >
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
-static constexpr bool SupportsSimd = true;
+// XXX: As of issue #2307 this is set to false; code paths guarded by it
+// will no longer compile. TODO: remove this constant entirely.
+static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
index 7e90076a26..f255ed362b 100644
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -419,7 +419,6 @@ CodeGeneratorX86::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
AnyRegister out = ToAnyRegister(ins->output());
Scalar::Type accessType = mir->accessType();
- MOZ_ASSERT(!Scalar::isSimdType(accessType));
OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
if (mir->needsBoundsCheck()) {
@@ -498,7 +497,6 @@ CodeGeneratorX86::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
const LAllocation* value = ins->value();
Scalar::Type accessType = mir->accessType();
- MOZ_ASSERT(!Scalar::isSimdType(accessType));
canonicalizeIfDeterministic(accessType, value);
Operand dstAddr = ptr->isBogus()
diff --git a/js/src/jit/x86/LOpcodes-x86.h b/js/src/jit/x86/LOpcodes-x86.h
index d5881f6a9a..7b561818d1 100644
--- a/js/src/jit/x86/LOpcodes-x86.h
+++ b/js/src/jit/x86/LOpcodes-x86.h
@@ -11,8 +11,6 @@
#define LIR_CPU_OPCODE_LIST(_) \
_(BoxFloatingPoint) \
_(DivOrModConstantI) \
- _(SimdValueInt32x4) \
- _(SimdValueFloat32x4) \
_(UDivOrMod) \
_(UDivOrModConstant) \
_(UDivOrModI64) \
diff --git a/js/src/jit/x86/Lowering-x86.cpp b/js/src/jit/x86/Lowering-x86.cpp
index 27859c0772..39aa040fad 100644
--- a/js/src/jit/x86/Lowering-x86.cpp
+++ b/js/src/jit/x86/Lowering-x86.cpp
@@ -312,10 +312,6 @@ LIRGeneratorX86::visitWasmStore(MWasmStore* ins)
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
case Scalar::Float32: case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
// For now, don't allow constant values. The immediate operand affects
// instruction layout which affects patching.
valueAlloc = useRegisterAtStart(ins->value());
@@ -373,10 +369,6 @@ LIRGeneratorX86::visitAsmJSStoreHeap(MAsmJSStoreHeap* ins)
case Scalar::Int16: case Scalar::Uint16:
case Scalar::Int32: case Scalar::Uint32:
case Scalar::Float32: case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
// For now, don't allow constant values. The immediate operand affects
// instruction layout which affects patching.
lir = new (alloc()) LAsmJSStoreHeap(baseAlloc, useRegisterAtStart(ins->value()));
diff --git a/js/src/jit/x86/MacroAssembler-x86.cpp b/js/src/jit/x86/MacroAssembler-x86.cpp
index 9962b9c594..fbefd9e77a 100644
--- a/js/src/jit/x86/MacroAssembler-x86.cpp
+++ b/js/src/jit/x86/MacroAssembler-x86.cpp
@@ -587,34 +587,6 @@ MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
case Scalar::Float64:
vmovsdWithPatch(srcAddr, out.fpu());
break;
- case Scalar::Float32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movss zeroes out the high lanes.
- case 1: vmovssWithPatch(srcAddr, out.fpu()); break;
- // See comment above, which also applies to movsd.
- case 2: vmovsdWithPatch(srcAddr, out.fpu()); break;
- case 4: vmovupsWithPatch(srcAddr, out.fpu()); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movd zeroes out the high lanes.
- case 1: vmovdWithPatch(srcAddr, out.fpu()); break;
- // See comment above, which also applies to movq.
- case 2: vmovqWithPatch(srcAddr, out.fpu()); break;
- case 4: vmovdquWithPatch(srcAddr, out.fpu()); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
- vmovdquWithPatch(srcAddr, out.fpu());
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
- vmovdquWithPatch(srcAddr, out.fpu());
- break;
case Scalar::Int64:
case Scalar::BigInt64:
case Scalar::BigUint64:
@@ -632,7 +604,6 @@ void
MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr, Register64 out)
{
MOZ_ASSERT(!access.isAtomic());
- MOZ_ASSERT(!access.isSimd());
size_t loadOffset = size();
switch (access.type()) {
@@ -724,10 +695,6 @@ MacroAssembler::wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAdd
break;
case Scalar::Float32:
case Scalar::Float64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
MOZ_CRASH("non-int64 loads should use load()");
case Scalar::BigInt64:
case Scalar::BigUint64:
@@ -763,34 +730,6 @@ MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister valu
case Scalar::Float64:
vmovsdWithPatch(value.fpu(), dstAddr);
break;
- case Scalar::Float32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movss zeroes out the high lanes.
- case 1: vmovssWithPatch(value.fpu(), dstAddr); break;
- // See comment above, which also applies to movsd.
- case 2: vmovsdWithPatch(value.fpu(), dstAddr); break;
- case 4: vmovupsWithPatch(value.fpu(), dstAddr); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int32x4:
- switch (access.numSimdElems()) {
- // In memory-to-register mode, movd zeroes out the high lanes.
- case 1: vmovdWithPatch(value.fpu(), dstAddr); break;
- // See comment above, which also applies to movsd.
- case 2: vmovqWithPatch(value.fpu(), dstAddr); break;
- case 4: vmovdquWithPatch(value.fpu(), dstAddr); break;
- default: MOZ_CRASH("unexpected size for partial load");
- }
- break;
- case Scalar::Int8x16:
- MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
- vmovdquWithPatch(value.fpu(), dstAddr);
- break;
- case Scalar::Int16x8:
- MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
- vmovdquWithPatch(value.fpu(), dstAddr);
- break;
case Scalar::Int64:
MOZ_CRASH("Should be handled in storeI64.");
case Scalar::MaxTypedArrayViewType:
@@ -808,7 +747,6 @@ void
MacroAssembler::wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value, Operand dstAddr)
{
MOZ_ASSERT(!access.isAtomic());
- MOZ_ASSERT(!access.isSimd());
size_t storeOffset = size();
if (dstAddr.kind() == Operand::MEM_ADDRESS32) {
diff --git a/js/src/js.msg b/js/src/js.msg
index 340cfd6ad2..e651b10137 100644
--- a/js/src/js.msg
+++ b/js/src/js.msg
@@ -331,7 +331,6 @@ MSG_DEF(JSMSG_STMT_AFTER_RETURN, 0, JSEXN_WARN, "unreachable code after re
MSG_DEF(JSMSG_STRICT_CODE_WITH, 0, JSEXN_SYNTAXERR, "strict mode code may not contain 'with' statements")
MSG_DEF(JSMSG_STRICT_NON_SIMPLE_PARAMS, 1, JSEXN_SYNTAXERR, "\"use strict\" not allowed in function with {0} parameter")
MSG_DEF(JSMSG_TEMPLSTR_UNTERM_EXPR, 0, JSEXN_SYNTAXERR, "missing } in template string")
-MSG_DEF(JSMSG_SIMD_NOT_A_VECTOR, 2, JSEXN_TYPEERR, "expecting a SIMD {0} object as argument {1}")
MSG_DEF(JSMSG_TOO_MANY_CASES, 0, JSEXN_INTERNALERR, "too many switch cases")
MSG_DEF(JSMSG_TOO_MANY_CATCH_VARS, 0, JSEXN_SYNTAXERR, "too many catch variables")
MSG_DEF(JSMSG_TOO_MANY_CON_ARGS, 0, JSEXN_SYNTAXERR, "too many constructor arguments")
@@ -533,15 +532,13 @@ MSG_DEF(JSMSG_INVALID_NAMED_CAPTURE_REF, 0, JSEXN_SYNTAXERR, "invalid named ca
MSG_DEF(JSMSG_DEFAULT_LOCALE_ERROR, 0, JSEXN_ERR, "internal error getting the default locale")
MSG_DEF(JSMSG_NO_SUCH_SELF_HOSTED_PROP,1, JSEXN_ERR, "No such property on self-hosted object: {0}")
-// Typed object / SIMD
+// Typed object
MSG_DEF(JSMSG_INVALID_PROTOTYPE, 0, JSEXN_TYPEERR, "prototype field is not an object")
MSG_DEF(JSMSG_TYPEDOBJECT_BAD_ARGS, 0, JSEXN_TYPEERR, "invalid arguments")
MSG_DEF(JSMSG_TYPEDOBJECT_BINARYARRAY_BAD_INDEX, 0, JSEXN_RANGEERR, "invalid or out-of-range index")
MSG_DEF(JSMSG_TYPEDOBJECT_HANDLE_UNATTACHED, 0, JSEXN_TYPEERR, "handle unattached")
MSG_DEF(JSMSG_TYPEDOBJECT_STRUCTTYPE_BAD_ARGS, 0, JSEXN_RANGEERR, "invalid field descriptor")
MSG_DEF(JSMSG_TYPEDOBJECT_TOO_BIG, 0, JSEXN_ERR, "Type is too large to allocate")
-MSG_DEF(JSMSG_SIMD_FAILED_CONVERSION, 0, JSEXN_RANGEERR, "SIMD conversion loses precision")
-MSG_DEF(JSMSG_SIMD_TO_NUMBER, 0, JSEXN_TYPEERR, "can't convert SIMD value to number")
// Array
MSG_DEF(JSMSG_TOO_LONG_ARRAY, 0, JSEXN_TYPEERR, "Too long array")
diff --git a/js/src/jsapi.cpp b/js/src/jsapi.cpp
index 0e29f02176..0fd82f7c2d 100644
--- a/js/src/jsapi.cpp
+++ b/js/src/jsapi.cpp
@@ -47,9 +47,6 @@
#include "builtin/Promise.h"
#include "builtin/RegExp.h"
#include "builtin/SymbolObject.h"
-#ifdef ENABLE_SIMD
-# include "builtin/SIMD.h"
-#endif
#ifdef ENABLE_BINARYDATA
# include "builtin/TypedObject.h"
#endif
diff --git a/js/src/jscntxt.cpp b/js/src/jscntxt.cpp
index 66a548d6e8..a77f3d1809 100644
--- a/js/src/jscntxt.cpp
+++ b/js/src/jscntxt.cpp
@@ -35,6 +35,12 @@
#include "gc/Marking.h"
#include "jit/Ion.h"
#include "js/CharacterEncoding.h"
+#ifdef JS_SIMULATOR_ARM64
+# include "jit/arm64/vixl/Simulator-vixl.h"
+#endif
+#ifdef JS_SIMULATOR_ARM
+# include "jit/arm/Simulator-arm.h"
+#endif
#include "vm/HelperThreads.h"
#include "vm/Shape.h"
diff --git a/js/src/jsfriendapi.h b/js/src/jsfriendapi.h
index 6a873e15da..b4c4e15801 100644
--- a/js/src/jsfriendapi.h
+++ b/js/src/jsfriendapi.h
@@ -1501,10 +1501,6 @@ enum Type {
MaxTypedArrayViewType,
Int64,
- Float32x4,
- Int8x16,
- Int16x8,
- Int32x4
};
static inline size_t
@@ -1527,11 +1523,6 @@ byteSize(Type atype)
case BigInt64:
case BigUint64:
return 8;
- case Int8x16:
- case Int16x8:
- case Int32x4:
- case Float32x4:
- return 16;
default:
MOZ_CRASH("invalid scalar type");
}
@@ -1544,9 +1535,6 @@ isSignedIntType(Type atype) {
case Int16:
case Int32:
case Int64:
- case Int8x16:
- case Int16x8:
- case Int32x4:
case BigInt64:
return true;
case Uint8:
@@ -1555,7 +1543,6 @@ isSignedIntType(Type atype) {
case Uint32:
case Float32:
case Float64:
- case Float32x4:
case BigUint64:
return false;
default:
@@ -1578,10 +1565,6 @@ static inline bool isBigIntType(Type atype) {
case Uint32:
case Float32:
case Float64:
- case Int8x16:
- case Int16x8:
- case Int32x4:
- case Float32x4:
return false;
case MaxTypedArrayViewType:
break;
@@ -1589,61 +1572,6 @@ static inline bool isBigIntType(Type atype) {
MOZ_CRASH("invalid scalar type");
}
-static inline bool
-isSimdType(Type atype) {
- switch (atype) {
- case Int8:
- case Uint8:
- case Uint8Clamped:
- case Int16:
- case Uint16:
- case Int32:
- case Uint32:
- case Int64:
- case Float32:
- case Float64:
- case BigInt64:
- case BigUint64:
- return false;
- case Int8x16:
- case Int16x8:
- case Int32x4:
- case Float32x4:
- return true;
- case MaxTypedArrayViewType:
- break;
- }
- MOZ_CRASH("invalid scalar type");
-}
-
-static inline size_t
-scalarByteSize(Type atype) {
- switch (atype) {
- case Int8x16:
- return 1;
- case Int16x8:
- return 2;
- case Int32x4:
- case Float32x4:
- return 4;
- case Int8:
- case Uint8:
- case Uint8Clamped:
- case Int16:
- case Uint16:
- case Int32:
- case Uint32:
- case Int64:
- case Float32:
- case Float64:
- case BigInt64:
- case BigUint64:
- case MaxTypedArrayViewType:
- break;
- }
- MOZ_CRASH("invalid simd type");
-}
-
} /* namespace Scalar */
} /* namespace js */
diff --git a/js/src/jsprototypes.h b/js/src/jsprototypes.h
index 34f835bc1d..52d0d8ea4a 100644
--- a/js/src/jsprototypes.h
+++ b/js/src/jsprototypes.h
@@ -43,12 +43,6 @@
#define IF_BDATA(real,imaginary) imaginary
#endif
-#ifdef ENABLE_SIMD
-# define IF_SIMD(real,imaginary) real
-#else
-# define IF_SIMD(real,imaginary) imaginary
-#endif
-
#ifdef ENABLE_SHARED_ARRAY_BUFFER
#define IF_SAB(real,imaginary) real
#else
@@ -104,7 +98,6 @@ IF_SAB(real,imaginary)(SharedArrayBuffer, InitViaClassSpec, OCLASP(SharedArr
IF_INTL(real,imaginary) (Intl, InitIntlClass, CLASP(Intl)) \
IF_BDATA(real,imaginary)(TypedObject, InitTypedObjectModuleObject, OCLASP(TypedObjectModule)) \
real(Reflect, InitReflect, nullptr) \
-IF_SIMD(real,imaginary)(SIMD, InitSimdClass, OCLASP(Simd)) \
real(WeakSet, InitWeakSetClass, OCLASP(WeakSet)) \
real(TypedArray, InitViaClassSpec, &js::TypedArrayObject::sharedTypedArrayPrototypeClass) \
IF_SAB(real,imaginary)(Atomics, InitAtomicsClass, OCLASP(Atomics)) \
diff --git a/js/src/moz.build b/js/src/moz.build
index 59feedf22d..5429888a2c 100644
--- a/js/src/moz.build
+++ b/js/src/moz.build
@@ -132,7 +132,6 @@ main_deunified_sources = [
'builtin/Promise.cpp',
'builtin/Reflect.cpp',
'builtin/ReflectParse.cpp',
- 'builtin/SIMD.cpp',
'builtin/SymbolObject.cpp',
'builtin/TestingFunctions.cpp',
'builtin/TypedObject.cpp',
@@ -206,7 +205,6 @@ main_deunified_sources = [
'jit/CodeGenerator.cpp',
'jit/CompileWrappers.cpp',
'jit/Disassembler.cpp',
- 'jit/EagerSimdUnbox.cpp',
'jit/EdgeCaseAnalysis.cpp',
'jit/EffectiveAddressAnalysis.cpp',
'jit/ExecutableAllocator.cpp',
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
index 3bed40af47..db1d7c798f 100644
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -777,8 +777,8 @@ ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buf
return true;
// Non-prepared-for-asm.js wasm buffers can be detached at any time.
- // This error can only be triggered for SIMD.js (which isn't shipping)
- // on !WASM_HUGE_MEMORY so this error is only visible in testing.
+ // This error can only be triggered for Atomics on !WASM_HUGE_MEMORY,
+ // so this error is only visible in testing.
if (buffer->isWasm() || buffer->isPreparedForAsmJS())
return false;
diff --git a/js/src/vm/GlobalObject.h b/js/src/vm/GlobalObject.h
index c4d9cf7287..355d055fb6 100644
--- a/js/src/vm/GlobalObject.h
+++ b/js/src/vm/GlobalObject.h
@@ -25,9 +25,6 @@ class Debugger;
class TypedObjectModuleObject;
class LexicalEnvironmentObject;
-class SimdTypeDescr;
-enum class SimdType;
-
/*
* Global object slots are reserved as follows:
*
@@ -482,17 +479,6 @@ class GlobalObject : public NativeObject
initTypedObjectModule);
}
- static JSObject*
- getOrCreateSimdGlobalObject(JSContext* cx, Handle<GlobalObject*> global) {
- return getOrCreateObject(cx, global, APPLICATION_SLOTS + JSProto_SIMD, initSimdObject);
- }
-
- // Get the type descriptor for one of the SIMD types.
- // simdType is one of the JS_SIMDTYPEREPR_* constants.
- // Implemented in builtin/SIMD.cpp.
- static SimdTypeDescr*
- getOrCreateSimdTypeDescr(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType);
-
TypedObjectModuleObject& getTypedObjectModule() const;
JSObject* getLegacyIteratorPrototype() {
@@ -869,10 +855,6 @@ class GlobalObject : public NativeObject
// Implemented in builtin/TypedObject.cpp
static bool initTypedObjectModule(JSContext* cx, Handle<GlobalObject*> global);
- // Implemented in builtin/SIMD.cpp
- static bool initSimdObject(JSContext* cx, Handle<GlobalObject*> global);
- static bool initSimdType(JSContext* cx, Handle<GlobalObject*> global, SimdType simdType);
-
static bool initStandardClasses(JSContext* cx, Handle<GlobalObject*> global);
static bool initSelfHostingBuiltins(JSContext* cx, Handle<GlobalObject*> global,
const JSFunctionSpec* builtins);
diff --git a/js/src/vm/SelfHosting.cpp b/js/src/vm/SelfHosting.cpp
index 2a60a1885c..e9c72d1bf1 100644
--- a/js/src/vm/SelfHosting.cpp
+++ b/js/src/vm/SelfHosting.cpp
@@ -36,7 +36,6 @@
#include "builtin/Promise.h"
#include "builtin/Reflect.h"
#include "builtin/SelfHostingDefines.h"
-#include "builtin/SIMD.h"
#include "builtin/TypedObject.h"
#include "builtin/WeakSetObject.h"
#include "gc/Marking.h"
@@ -2299,19 +2298,6 @@ static const JSFunctionSpec intrinsic_functions[] = {
JS_FN("std_WeakMap_set", WeakMap_set, 2,0),
JS_FN("std_WeakMap_delete", WeakMap_delete, 1,0),
- JS_FN("std_SIMD_Int8x16_extractLane", simd_int8x16_extractLane, 2,0),
- JS_FN("std_SIMD_Int16x8_extractLane", simd_int16x8_extractLane, 2,0),
- JS_INLINABLE_FN("std_SIMD_Int32x4_extractLane", simd_int32x4_extractLane, 2,0, SimdInt32x4_extractLane),
- JS_FN("std_SIMD_Uint8x16_extractLane", simd_uint8x16_extractLane, 2,0),
- JS_FN("std_SIMD_Uint16x8_extractLane", simd_uint16x8_extractLane, 2,0),
- JS_FN("std_SIMD_Uint32x4_extractLane", simd_uint32x4_extractLane, 2,0),
- JS_INLINABLE_FN("std_SIMD_Float32x4_extractLane", simd_float32x4_extractLane,2,0, SimdFloat32x4_extractLane),
- JS_FN("std_SIMD_Float64x2_extractLane", simd_float64x2_extractLane, 2,0),
- JS_FN("std_SIMD_Bool8x16_extractLane", simd_bool8x16_extractLane, 2,0),
- JS_FN("std_SIMD_Bool16x8_extractLane", simd_bool16x8_extractLane, 2,0),
- JS_FN("std_SIMD_Bool32x4_extractLane", simd_bool32x4_extractLane, 2,0),
- JS_FN("std_SIMD_Bool64x2_extractLane", simd_bool64x2_extractLane, 2,0),
-
// Helper funtions after this point.
JS_INLINABLE_FN("ToObject", intrinsic_ToObject, 1,0, IntrinsicToObject),
JS_INLINABLE_FN("IsObject", intrinsic_IsObject, 1,0, IntrinsicIsObject),
@@ -2513,7 +2499,6 @@ static const JSFunctionSpec intrinsic_functions[] = {
JS_FN("TypedObjectTypeDescr", js::TypedObjectTypeDescr, 1, 0),
JS_FN("ClampToUint8", js::ClampToUint8, 1, 0),
JS_FN("GetTypedObjectModule", js::GetTypedObjectModule, 0, 0),
- JS_FN("GetSimdTypeDescr", js::GetSimdTypeDescr, 1, 0),
JS_INLINABLE_FN("ObjectIsTypeDescr" , js::ObjectIsTypeDescr, 1, 0,
IntrinsicObjectIsTypeDescr),
diff --git a/js/src/vm/TypedArrayCommon.h b/js/src/vm/TypedArrayCommon.h
index 8e66587a1e..59ffd78b24 100644
--- a/js/src/vm/TypedArrayCommon.h
+++ b/js/src/vm/TypedArrayCommon.h
@@ -849,10 +849,6 @@ class TypedArrayMethods
return ElementSpecific<Uint8ClampedArrayType, SharedOps>::setFromTypedArray(cx, target, source, offset);
return ElementSpecific<Uint8ClampedArrayType, UnsharedOps>::setFromTypedArray(cx, target, source, offset);
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@@ -913,11 +909,6 @@ class TypedArrayMethods
if (isShared)
return ElementSpecific<Uint8ClampedArrayType, SharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
return ElementSpecific<Uint8ClampedArrayType, UnsharedOps>::setFromNonTypedArray(cx, target, source, len, offset);
- case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@@ -976,10 +967,6 @@ class TypedArrayMethods
return ElementSpecific<Uint8ClampedArrayType, SharedOps>::initFromIterablePackedArray(cx, target, source);
return ElementSpecific<Uint8ClampedArrayType, UnsharedOps>::initFromIterablePackedArray(cx, target, source);
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
diff --git a/js/src/vm/TypedArrayObject.cpp b/js/src/vm/TypedArrayObject.cpp
index 28e4090eb8..fc3d6c5df2 100644
--- a/js/src/vm/TypedArrayObject.cpp
+++ b/js/src/vm/TypedArrayObject.cpp
@@ -2741,10 +2741,6 @@ bool TypedArrayObject::getElement<CanGC>(ExclusiveContext* cx, uint32_t index, M
JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT)
#undef GET_ELEMENT
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@@ -2770,10 +2766,6 @@ bool TypedArrayObject::getElementPure(uint32_t index, Value* vp)
JS_FOR_EACH_TYPED_ARRAY(GET_ELEMENT_PURE)
#undef GET_ELEMENT
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
case Scalar::MaxTypedArrayViewType:
break;
}
@@ -2803,10 +2795,6 @@ bool TypedArrayObject::getElements(JSContext* cx, Handle<TypedArrayObject*> tarr
MOZ_CRASH("Unknown TypedArray type");
case Scalar::MaxTypedArrayViewType:
case Scalar::Int64:
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
break;
}
diff --git a/js/src/vm/TypedArrayObject.h b/js/src/vm/TypedArrayObject.h
index 06aaea0617..8b4b0b5092 100644
--- a/js/src/vm/TypedArrayObject.h
+++ b/js/src/vm/TypedArrayObject.h
@@ -413,11 +413,6 @@ TypedArrayShift(Scalar::Type viewType)
case Scalar::Int64:
case Scalar::Float64:
return 3;
- case Scalar::Float32x4:
- case Scalar::Int8x16:
- case Scalar::Int16x8:
- case Scalar::Int32x4:
- return 4;
default:;
}
MOZ_CRASH("Unexpected array type");
diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
index 3eee8c4c18..7923c57c0c 100644
--- a/js/src/wasm/AsmJS.cpp
+++ b/js/src/wasm/AsmJS.cpp
@@ -1,7 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2014 Mozilla Foundation
- * Copyright 2022 Moonchild Productions
+ * Copyright 2022, 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -30,7 +30,6 @@
#include "jswrapper.h"
-#include "builtin/SIMD.h"
#include "frontend/Parser.h"
#include "gc/Policy.h"
#include "js/MemoryMetrics.h"
@@ -121,7 +120,7 @@ class AsmJSGlobal
{
public:
enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
- AtomicsBuiltinFunction, Constant, SimdCtor, SimdOp };
+ AtomicsBuiltinFunction, Constant };
enum VarInitKind { InitConstant, InitImport };
enum ConstantKind { GlobalConstant, MathConstant };
@@ -141,11 +140,6 @@ class AsmJSGlobal
Scalar::Type viewType_;
AsmJSMathBuiltinFunction mathBuiltinFunc_;
AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
- SimdType simdCtorType_;
- struct {
- SimdType type_;
- SimdOperation which_;
- } simdOp;
struct {
ConstantKind kind_;
double value_;
@@ -204,18 +198,6 @@ class AsmJSGlobal
MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
return pod.u.atomicsBuiltinFunc_;
}
- SimdType simdCtorType() const {
- MOZ_ASSERT(pod.which_ == SimdCtor);
- return pod.u.simdCtorType_;
- }
- SimdOperation simdOperation() const {
- MOZ_ASSERT(pod.which_ == SimdOp);
- return pod.u.simdOp.which_;
- }
- SimdType simdOperationType() const {
- MOZ_ASSERT(pod.which_ == SimdOp);
- return pod.u.simdOp.type_;
- }
ConstantKind constantKind() const {
MOZ_ASSERT(pod.which_ == Constant);
return pod.u.constant.kind_;
@@ -292,7 +274,6 @@ struct AsmJSMetadataCacheablePod
uint32_t numFFIs = 0;
uint32_t srcLength = 0;
uint32_t srcLengthWithRightBrace = 0;
- bool usesSimd = false;
AsmJSMetadataCacheablePod() = default;
};
@@ -809,9 +790,6 @@ ParseVarOrConstStatement(AsmJSParser& parser, ParseNode** var)
// Lastly, a literal may be a float literal which is any double or integer
// literal coerced with Math.fround.
//
-// This class distinguishes between signed and unsigned integer SIMD types like
-// Int32x4 and Uint32x4, and so does Type below. The wasm ValType and ExprType
-// enums, and the wasm::Val class do not.
class NumLit
{
public:
@@ -821,16 +799,6 @@ class NumLit
BigUnsigned,
Double,
Float,
- Int8x16,
- Int16x8,
- Int32x4,
- Uint8x16,
- Uint16x8,
- Uint32x4,
- Float32x4,
- Bool8x16,
- Bool16x8,
- Bool32x4,
OutOfRangeInt = -1
};
@@ -838,7 +806,6 @@ class NumLit
Which which_;
union {
JS::UninitializedValue scalar_;
- SimdConstant simd_;
} u;
public:
@@ -849,11 +816,6 @@ class NumLit
MOZ_ASSERT(!isSimd());
}
- NumLit(Which w, SimdConstant c) : which_(w) {
- u.simd_ = c;
- MOZ_ASSERT(isSimd());
- }
-
Which which() const {
return which_;
}
@@ -882,19 +844,6 @@ class NumLit
return u.scalar_.asValueRef();
}
- bool isSimd() const
- {
- return which_ == Int8x16 || which_ == Uint8x16 || which_ == Int16x8 ||
- which_ == Uint16x8 || which_ == Int32x4 || which_ == Uint32x4 ||
- which_ == Float32x4 || which_ == Bool8x16 || which_ == Bool16x8 ||
- which_ == Bool32x4;
- }
-
- const SimdConstant& simdValue() const {
- MOZ_ASSERT(isSimd());
- return u.simd_;
- }
-
bool valid() const {
return which_ != OutOfRangeInt;
}
@@ -910,20 +859,6 @@ class NumLit
return toDouble().bits() == 0;
case NumLit::Float:
return toFloat().bits() == 0;
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- case NumLit::Bool8x16:
- return simdValue() == SimdConstant::SplatX16(0);
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- case NumLit::Bool16x8:
- return simdValue() == SimdConstant::SplatX8(0);
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- case NumLit::Bool32x4:
- return simdValue() == SimdConstant::SplatX4(0);
- case NumLit::Float32x4:
- return simdValue() == SimdConstant::SplatX4(0.f);
case NumLit::OutOfRangeInt:
MOZ_CRASH("can't be here because of valid() check above");
}
@@ -940,23 +875,6 @@ class NumLit
return Val(toFloat());
case NumLit::Double:
return Val(toDouble());
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- return Val(simdValue().asInt8x16());
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- return Val(simdValue().asInt16x8());
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- return Val(simdValue().asInt32x4());
- case NumLit::Float32x4:
- return Val(simdValue().asFloat32x4());
- case NumLit::Bool8x16:
- return Val(simdValue().asInt8x16(), ValType::B8x16);
- case NumLit::Bool16x8:
- return Val(simdValue().asInt16x8(), ValType::B16x8);
- case NumLit::Bool32x4:
- return Val(simdValue().asInt32x4(), ValType::B32x4);
case NumLit::OutOfRangeInt:;
}
MOZ_CRASH("bad literal");
@@ -966,17 +884,10 @@ class NumLit
// Represents the type of a general asm.js expression.
//
// A canonical subset of types representing the coercion targets: Int, Float,
-// Double, and the SIMD types. This is almost equivalent to wasm::ValType,
-// except the integer SIMD types have signed/unsigned variants.
+// Double.
//
// Void is also part of the canonical subset which then maps to wasm::ExprType.
-//
-// Note that while the canonical subset distinguishes signed and unsigned SIMD
-// types, it only uses |Int| to represent signed and unsigned 32-bit integers.
-// This is because the scalar coersions x|0 and x>>>0 work with any kind of
-// integer input, while the SIMD check functions throw a TypeError if the passed
-// type doesn't match.
-//
+
class Type
{
public:
@@ -986,16 +897,6 @@ class Type
Unsigned = NumLit::BigUnsigned,
DoubleLit = NumLit::Double,
Float = NumLit::Float,
- Int8x16 = NumLit::Int8x16,
- Int16x8 = NumLit::Int16x8,
- Int32x4 = NumLit::Int32x4,
- Uint8x16 = NumLit::Uint8x16,
- Uint16x8 = NumLit::Uint16x8,
- Uint32x4 = NumLit::Uint32x4,
- Float32x4 = NumLit::Float32x4,
- Bool8x16 = NumLit::Bool8x16,
- Bool16x8 = NumLit::Bool16x8,
- Bool32x4 = NumLit::Bool32x4,
Double,
MaybeDouble,
MaybeFloat,
@@ -1011,22 +912,6 @@ class Type
public:
Type() = default;
MOZ_IMPLICIT Type(Which w) : which_(w) {}
- MOZ_IMPLICIT Type(SimdType type) {
- switch (type) {
- case SimdType::Int8x16: which_ = Int8x16; return;
- case SimdType::Int16x8: which_ = Int16x8; return;
- case SimdType::Int32x4: which_ = Int32x4; return;
- case SimdType::Uint8x16: which_ = Uint8x16; return;
- case SimdType::Uint16x8: which_ = Uint16x8; return;
- case SimdType::Uint32x4: which_ = Uint32x4; return;
- case SimdType::Float32x4: which_ = Float32x4; return;
- case SimdType::Bool8x16: which_ = Bool8x16; return;
- case SimdType::Bool16x8: which_ = Bool16x8; return;
- case SimdType::Bool32x4: which_ = Bool32x4; return;
- default: break;
- }
- MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad SimdType");
- }
// Map an already canonicalized Type to the return type of a function call.
static Type ret(Type t) {
@@ -1038,7 +923,7 @@ class Type
static Type lit(const NumLit& lit) {
MOZ_ASSERT(lit.valid());
Which which = Type::Which(lit.which());
- MOZ_ASSERT(which >= Fixnum && which <= Bool32x4);
+ MOZ_ASSERT(which >= Fixnum && which <= Float);
Type t;
t.which_ = which;
return t;
@@ -1064,18 +949,6 @@ class Type
case Void:
return Void;
- case Int8x16:
- case Int16x8:
- case Int32x4:
- case Uint8x16:
- case Uint16x8:
- case Uint32x4:
- case Float32x4:
- case Bool8x16:
- case Bool16x8:
- case Bool32x4:
- return t;
-
case MaybeDouble:
case MaybeFloat:
case Floatish:
@@ -1099,16 +972,6 @@ class Type
case DoubleLit: return isDoubleLit();
case Double: return isDouble();
case Float: return isFloat();
- case Int8x16: return isInt8x16();
- case Int16x8: return isInt16x8();
- case Int32x4: return isInt32x4();
- case Uint8x16: return isUint8x16();
- case Uint16x8: return isUint16x8();
- case Uint32x4: return isUint32x4();
- case Float32x4: return isFloat32x4();
- case Bool8x16: return isBool8x16();
- case Bool16x8: return isBool16x8();
- case Bool32x4: return isBool32x4();
case MaybeDouble: return isMaybeDouble();
case MaybeFloat: return isMaybeFloat();
case Floatish: return isFloatish();
@@ -1172,64 +1035,14 @@ class Type
return isDouble() || isSigned();
}
- bool isInt8x16() const {
- return which_ == Int8x16;
- }
-
- bool isInt16x8() const {
- return which_ == Int16x8;
- }
-
- bool isInt32x4() const {
- return which_ == Int32x4;
- }
-
- bool isUint8x16() const {
- return which_ == Uint8x16;
- }
-
- bool isUint16x8() const {
- return which_ == Uint16x8;
- }
-
- bool isUint32x4() const {
- return which_ == Uint32x4;
- }
-
- bool isFloat32x4() const {
- return which_ == Float32x4;
- }
-
- bool isBool8x16() const {
- return which_ == Bool8x16;
- }
-
- bool isBool16x8() const {
- return which_ == Bool16x8;
- }
-
- bool isBool32x4() const {
- return which_ == Bool32x4;
- }
-
- bool isSimd() const {
- return isInt8x16() || isInt16x8() || isInt32x4() || isUint8x16() || isUint16x8() ||
- isUint32x4() || isFloat32x4() || isBool8x16() || isBool16x8() || isBool32x4();
- }
-
- bool isUnsignedSimd() const {
- return isUint8x16() || isUint16x8() || isUint32x4();
- }
-
// Check if this is one of the valid types for a function argument.
bool isArgType() const {
- return isInt() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd());
+ return isInt() || isFloat() || isDouble();
}
// Check if this is one of the valid types for a function return value.
bool isReturnType() const {
- return isSigned() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd()) ||
- isVoid();
+ return isSigned() || isFloat() || isDouble() || isVoid();
}
// Check if this is one of the valid types for a global variable.
@@ -1247,7 +1060,7 @@ class Type
case Void:
return true;
default:
- return isSimd();
+ return false;
}
}
@@ -1263,16 +1076,6 @@ class Type
case Float: return ExprType::F32;
case Double: return ExprType::F64;
case Void: return ExprType::Void;
- case Uint8x16:
- case Int8x16: return ExprType::I8x16;
- case Uint16x8:
- case Int16x8: return ExprType::I16x8;
- case Uint32x4:
- case Int32x4: return ExprType::I32x4;
- case Float32x4: return ExprType::F32x4;
- case Bool8x16: return ExprType::B8x16;
- case Bool16x8: return ExprType::B16x8;
- case Bool32x4: return ExprType::B32x4;
default: MOZ_CRASH("Need canonical type");
}
}
@@ -1307,17 +1110,6 @@ class Type
case Void:
return ExprType::Void;
-
- case Uint8x16:
- case Int8x16: return ExprType::I8x16;
- case Uint16x8:
- case Int16x8: return ExprType::I16x8;
- case Uint32x4:
- case Int32x4: return ExprType::I32x4;
- case Float32x4: return ExprType::F32x4;
- case Bool8x16: return ExprType::B8x16;
- case Bool16x8: return ExprType::B16x8;
- case Bool32x4: return ExprType::B32x4;
}
MOZ_CRASH("Invalid Type");
}
@@ -1335,16 +1127,6 @@ class Type
case Signed: return "signed";
case Unsigned: return "unsigned";
case Intish: return "intish";
- case Int8x16: return "int8x16";
- case Int16x8: return "int16x8";
- case Int32x4: return "int32x4";
- case Uint8x16: return "uint8x16";
- case Uint16x8: return "uint16x8";
- case Uint32x4: return "uint32x4";
- case Float32x4: return "float32x4";
- case Bool8x16: return "bool8x16";
- case Bool16x8: return "bool16x8";
- case Bool32x4: return "bool32x4";
case Void: return "void";
}
MOZ_CRASH("Invalid Type");
@@ -1442,9 +1224,7 @@ class MOZ_STACK_CLASS ModuleValidator
ArrayView,
ArrayViewCtor,
MathBuiltinFunction,
- AtomicsBuiltinFunction,
- SimdCtor,
- SimdOp
+ AtomicsBuiltinFunction
};
private:
@@ -1463,19 +1243,15 @@ class MOZ_STACK_CLASS ModuleValidator
} viewInfo;
AsmJSMathBuiltinFunction mathBuiltinFunc_;
AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
- SimdType simdCtorType_;
- struct {
- SimdType type_;
- SimdOperation which_;
- } simdOp;
} u;
friend class ModuleValidator;
friend class js::LifoAlloc;
explicit Global(Which which) : which_(which) {}
-
+
public:
+
Which which() const {
return which_;
}
@@ -1527,24 +1303,6 @@ class MOZ_STACK_CLASS ModuleValidator
MOZ_ASSERT(which_ == AtomicsBuiltinFunction);
return u.atomicsBuiltinFunc_;
}
- bool isSimdCtor() const {
- return which_ == SimdCtor;
- }
- SimdType simdCtorType() const {
- MOZ_ASSERT(which_ == SimdCtor);
- return u.simdCtorType_;
- }
- bool isSimdOperation() const {
- return which_ == SimdOp;
- }
- SimdOperation simdOperation() const {
- MOZ_ASSERT(which_ == SimdOp);
- return u.simdOp.which_;
- }
- SimdType simdOperationType() const {
- MOZ_ASSERT(which_ == SimdOp);
- return u.simdOp.type_;
- }
};
struct MathBuiltin
@@ -1611,7 +1369,6 @@ class MOZ_STACK_CLASS ModuleValidator
typedef HashMap<PropertyName*, Global*> GlobalMap;
typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
- typedef HashMap<PropertyName*, SimdOperation> SimdOperationNameMap;
typedef Vector<ArrayView> ArrayViewVector;
ExclusiveContext* cx_;
@@ -1623,7 +1380,6 @@ class MOZ_STACK_CLASS ModuleValidator
PropertyName* bufferArgumentName_;
MathNameMap standardLibraryMathNames_;
AtomicsNameMap standardLibraryAtomicsNames_;
- SimdOperationNameMap standardLibrarySimdOpNames_;
RootedFunction dummyFunction_;
// Validation-internal state:
@@ -1635,7 +1391,6 @@ class MOZ_STACK_CLASS ModuleValidator
ImportMap importMap_;
ArrayViewVector arrayViews_;
bool atomicsPresent_;
- bool simdPresent_;
// State used to build the AsmJSModule in finish():
ModuleGenerator mg_;
@@ -1667,12 +1422,6 @@ class MOZ_STACK_CLASS ModuleValidator
return false;
return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
}
- bool addStandardLibrarySimdOpName(const char* name, SimdOperation op) {
- JSAtom* atom = Atomize(cx_, name, strlen(name));
- if (!atom)
- return false;
- return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
- }
bool newSig(Sig&& sig, uint32_t* sigIndex) {
*sigIndex = 0;
if (mg_.numSigs() >= MaxSigs)
@@ -1705,7 +1454,6 @@ class MOZ_STACK_CLASS ModuleValidator
bufferArgumentName_(nullptr),
standardLibraryMathNames_(cx),
standardLibraryAtomicsNames_(cx),
- standardLibrarySimdOpNames_(cx),
dummyFunction_(cx),
validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
functions_(cx),
@@ -1715,7 +1463,6 @@ class MOZ_STACK_CLASS ModuleValidator
importMap_(cx),
arrayViews_(cx),
atomicsPresent_(false),
- simdPresent_(false),
mg_(ImportVector()),
errorString_(nullptr),
errorOffset_(UINT32_MAX),
@@ -1795,14 +1542,6 @@ class MOZ_STACK_CLASS ModuleValidator
return false;
}
-#define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, SimdOperation::Fn_##op)
- if (!standardLibrarySimdOpNames_.init()
- FORALL_SIMD_ASMJS_OP(ADDSTDLIBSIMDOPNAME))
- {
- return false;
- }
-#undef ADDSTDLIBSIMDOPNAME
-
// This flows into FunctionBox, so must be tenured.
dummyFunction_ = NewScriptedFunction(cx_, 0, JSFunction::INTERPRETED, nullptr,
/* proto = */ nullptr, gc::AllocKind::FUNCTION,
@@ -1850,7 +1589,6 @@ class MOZ_STACK_CLASS ModuleValidator
AsmJSParser& parser() const { return parser_; }
TokenStream& tokenStream() const { return parser_.tokenStream; }
RootedFunction& dummyFunction() { return dummyFunction_; }
- bool supportsSimd() const { return cx_->jitSupportsSimd(); }
bool atomicsPresent() const { return atomicsPresent_; }
uint32_t minMemoryLength() const { return mg_.minMemoryLength(); }
@@ -2037,44 +1775,6 @@ class MOZ_STACK_CLASS ModuleValidator
g.pod.u.atomicsBuiltinFunc_ = func;
return asmJSMetadata_->asmJSGlobals.append(Move(g));
}
- bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
- simdPresent_ = true;
-
- UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
- if (!fieldChars)
- return false;
-
- Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
- if (!global)
- return false;
- global->u.simdCtorType_ = type;
- if (!globalMap_.putNew(var, global))
- return false;
-
- AsmJSGlobal g(AsmJSGlobal::SimdCtor, Move(fieldChars));
- g.pod.u.simdCtorType_ = type;
- return asmJSMetadata_->asmJSGlobals.append(Move(g));
- }
- bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* field) {
- simdPresent_ = true;
-
- UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
- if (!fieldChars)
- return false;
-
- Global* global = validationLifo_.new_<Global>(Global::SimdOp);
- if (!global)
- return false;
- global->u.simdOp.type_ = type;
- global->u.simdOp.which_ = op;
- if (!globalMap_.putNew(var, global))
- return false;
-
- AsmJSGlobal g(AsmJSGlobal::SimdOp, Move(fieldChars));
- g.pod.u.simdOp.type_ = type;
- g.pod.u.simdOp.which_ = op;
- return asmJSMetadata_->asmJSGlobals.append(Move(g));
- }
bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
if (!fieldChars)
@@ -2320,13 +2020,6 @@ class MOZ_STACK_CLASS ModuleValidator
}
return false;
}
- bool lookupStandardSimdOpName(PropertyName* name, SimdOperation* op) const {
- if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
- *op = p->value();
- return true;
- }
- return false;
- }
bool startFunctionBodies() {
return mg_.startFuncDefs();
@@ -2338,8 +2031,6 @@ class MOZ_STACK_CLASS ModuleValidator
if (!arrayViews_.empty())
mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared);
- asmJSMetadata_->usesSimd = simdPresent_;
-
MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
for (const Func* func : functions_) {
CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
@@ -2409,11 +2100,6 @@ IsCoercionCall(ModuleValidator& m, ParseNode* pn, Type* coerceTo, ParseNode** co
return true;
}
- if (global->isSimdOperation() && global->simdOperation() == SimdOperation::Fn_check) {
- *coerceTo = global->simdOperationType();
- return true;
- }
-
return false;
}
@@ -2431,23 +2117,6 @@ IsFloatLiteral(ModuleValidator& m, ParseNode* pn)
}
static bool
-IsSimdTuple(ModuleValidator& m, ParseNode* pn, SimdType* type)
-{
- const ModuleValidator::Global* global;
- if (!IsCallToGlobal(m, pn, &global))
- return false;
-
- if (!global->isSimdCtor())
- return false;
-
- if (CallArgListLength(pn) != GetSimdLanes(global->simdCtorType()))
- return false;
-
- *type = global->simdCtorType();
- return true;
-}
-
-static bool
IsNumericLiteral(ModuleValidator& m, ParseNode* pn, bool* isSimd = nullptr);
static NumLit
@@ -2457,57 +2126,10 @@ static inline bool
IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32);
static bool
-IsSimdLiteral(ModuleValidator& m, ParseNode* pn)
-{
- SimdType type;
- if (!IsSimdTuple(m, pn, &type))
- return false;
-
- ParseNode* arg = CallArgList(pn);
- unsigned length = GetSimdLanes(type);
- for (unsigned i = 0; i < length; i++) {
- if (!IsNumericLiteral(m, arg))
- return false;
-
- uint32_t _;
- switch (type) {
- case SimdType::Int8x16:
- case SimdType::Int16x8:
- case SimdType::Int32x4:
- case SimdType::Uint8x16:
- case SimdType::Uint16x8:
- case SimdType::Uint32x4:
- case SimdType::Bool8x16:
- case SimdType::Bool16x8:
- case SimdType::Bool32x4:
- if (!IsLiteralInt(m, arg, &_))
- return false;
- break;
- case SimdType::Float32x4:
- if (!IsNumericNonFloatLiteral(arg))
- return false;
- break;
- default:
- MOZ_CRASH("unhandled simd type");
- }
-
- arg = NextNode(arg);
- }
-
- MOZ_ASSERT(arg == nullptr);
- return true;
-}
-
-static bool
IsNumericLiteral(ModuleValidator& m, ParseNode* pn, bool* isSimd)
{
if (IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn))
return true;
- if (IsSimdLiteral(m, pn)) {
- if (isSimd)
- *isSimd = true;
- return true;
- }
return false;
}
@@ -2531,104 +2153,6 @@ ExtractNumericNonFloatValue(ParseNode* pn, ParseNode** out = nullptr)
}
static NumLit
-ExtractSimdValue(ModuleValidator& m, ParseNode* pn)
-{
- MOZ_ASSERT(IsSimdLiteral(m, pn));
-
- SimdType type = SimdType::Count;
- JS_ALWAYS_TRUE(IsSimdTuple(m, pn, &type));
- MOZ_ASSERT(CallArgListLength(pn) == GetSimdLanes(type));
-
- ParseNode* arg = CallArgList(pn);
- switch (type) {
- case SimdType::Int8x16:
- case SimdType::Uint8x16: {
- MOZ_ASSERT(GetSimdLanes(type) == 16);
- int8_t val[16];
- for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = int8_t(u32);
- }
- MOZ_ASSERT(arg == nullptr);
- NumLit::Which w = type == SimdType::Uint8x16 ? NumLit::Uint8x16 : NumLit::Int8x16;
- return NumLit(w, SimdConstant::CreateX16(val));
- }
- case SimdType::Int16x8:
- case SimdType::Uint16x8: {
- MOZ_ASSERT(GetSimdLanes(type) == 8);
- int16_t val[8];
- for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = int16_t(u32);
- }
- MOZ_ASSERT(arg == nullptr);
- NumLit::Which w = type == SimdType::Uint16x8 ? NumLit::Uint16x8 : NumLit::Int16x8;
- return NumLit(w, SimdConstant::CreateX8(val));
- }
- case SimdType::Int32x4:
- case SimdType::Uint32x4: {
- MOZ_ASSERT(GetSimdLanes(type) == 4);
- int32_t val[4];
- for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = int32_t(u32);
- }
- MOZ_ASSERT(arg == nullptr);
- NumLit::Which w = type == SimdType::Uint32x4 ? NumLit::Uint32x4 : NumLit::Int32x4;
- return NumLit(w, SimdConstant::CreateX4(val));
- }
- case SimdType::Float32x4: {
- MOZ_ASSERT(GetSimdLanes(type) == 4);
- float val[4];
- for (size_t i = 0; i < 4; i++, arg = NextNode(arg))
- val[i] = float(ExtractNumericNonFloatValue(arg));
- MOZ_ASSERT(arg == nullptr);
- return NumLit(NumLit::Float32x4, SimdConstant::CreateX4(val));
- }
- case SimdType::Bool8x16: {
- MOZ_ASSERT(GetSimdLanes(type) == 16);
- int8_t val[16];
- for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = u32 ? -1 : 0;
- }
- MOZ_ASSERT(arg == nullptr);
- return NumLit(NumLit::Bool8x16, SimdConstant::CreateX16(val));
- }
- case SimdType::Bool16x8: {
- MOZ_ASSERT(GetSimdLanes(type) == 8);
- int16_t val[8];
- for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = u32 ? -1 : 0;
- }
- MOZ_ASSERT(arg == nullptr);
- return NumLit(NumLit::Bool16x8, SimdConstant::CreateX8(val));
- }
- case SimdType::Bool32x4: {
- MOZ_ASSERT(GetSimdLanes(type) == 4);
- int32_t val[4];
- for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
- uint32_t u32;
- JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
- val[i] = u32 ? -1 : 0;
- }
- MOZ_ASSERT(arg == nullptr);
- return NumLit(NumLit::Bool32x4, SimdConstant::CreateX4(val));
- }
- default:
- break;
- }
-
- MOZ_CRASH("Unexpected SIMD type.");
-}
-
-static NumLit
ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn)
{
MOZ_ASSERT(IsNumericLiteral(m, pn));
@@ -2636,13 +2160,10 @@ ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn)
if (pn->isKind(PNK_CALL)) {
// Float literals are explicitly coerced and thus the coerced literal may be
// any valid (non-float) numeric literal.
- if (CallArgListLength(pn) == 1) {
- pn = CallArgList(pn);
- double d = ExtractNumericNonFloatValue(pn);
- return NumLit(NumLit::Float, DoubleValue(d));
- }
-
- return ExtractSimdValue(m, pn);
+ MOZ_ASSERT((CallArgListLength(pn) == 1));
+ pn = CallArgList(pn);
+ double d = ExtractNumericNonFloatValue(pn);
+ return NumLit(NumLit::Float, DoubleValue(d));
}
double d = ExtractNumericNonFloatValue(pn, &pn);
@@ -2688,16 +2209,6 @@ IsLiteralInt(const NumLit& lit, uint32_t* u32)
case NumLit::Double:
case NumLit::Float:
case NumLit::OutOfRangeInt:
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- case NumLit::Float32x4:
- case NumLit::Bool8x16:
- case NumLit::Bool16x8:
- case NumLit::Bool32x4:
return false;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal type");
@@ -2714,138 +2225,6 @@ IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32)
namespace {
-#define CASE(TYPE, OP) case SimdOperation::Fn_##OP: return Op::TYPE##OP;
-#define I8x16CASE(OP) CASE(I8x16, OP)
-#define I16x8CASE(OP) CASE(I16x8, OP)
-#define I32x4CASE(OP) CASE(I32x4, OP)
-#define F32x4CASE(OP) CASE(F32x4, OP)
-#define B8x16CASE(OP) CASE(B8x16, OP)
-#define B16x8CASE(OP) CASE(B16x8, OP)
-#define B32x4CASE(OP) CASE(B32x4, OP)
-#define ENUMERATE(TYPE, FOR_ALL, DO) \
- switch(op) { \
- case SimdOperation::Constructor: return Op::TYPE##Constructor; \
- FOR_ALL(DO) \
- default: break; \
- }
-
-static inline Op
-SimdToOp(SimdType type, SimdOperation op)
-{
- switch (type) {
- case SimdType::Uint8x16:
- // Handle the special unsigned opcodes, then fall through to Int8x16.
- switch (op) {
- case SimdOperation::Fn_addSaturate: return Op::I8x16addSaturateU;
- case SimdOperation::Fn_subSaturate: return Op::I8x16subSaturateU;
- case SimdOperation::Fn_extractLane: return Op::I8x16extractLaneU;
- case SimdOperation::Fn_shiftRightByScalar: return Op::I8x16shiftRightByScalarU;
- case SimdOperation::Fn_lessThan: return Op::I8x16lessThanU;
- case SimdOperation::Fn_lessThanOrEqual: return Op::I8x16lessThanOrEqualU;
- case SimdOperation::Fn_greaterThan: return Op::I8x16greaterThanU;
- case SimdOperation::Fn_greaterThanOrEqual: return Op::I8x16greaterThanOrEqualU;
- case SimdOperation::Fn_fromInt8x16Bits: return Op::Limit;
- default: break;
- }
- MOZ_FALLTHROUGH;
- case SimdType::Int8x16:
- // Bitcasts Uint8x16 <--> Int8x16 become noops.
- switch (op) {
- case SimdOperation::Fn_fromUint8x16Bits: return Op::Limit;
- case SimdOperation::Fn_fromUint16x8Bits: return Op::I8x16fromInt16x8Bits;
- case SimdOperation::Fn_fromUint32x4Bits: return Op::I8x16fromInt32x4Bits;
- default: break;
- }
- ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
- break;
-
- case SimdType::Uint16x8:
- // Handle the special unsigned opcodes, then fall through to Int16x8.
- switch(op) {
- case SimdOperation::Fn_addSaturate: return Op::I16x8addSaturateU;
- case SimdOperation::Fn_subSaturate: return Op::I16x8subSaturateU;
- case SimdOperation::Fn_extractLane: return Op::I16x8extractLaneU;
- case SimdOperation::Fn_shiftRightByScalar: return Op::I16x8shiftRightByScalarU;
- case SimdOperation::Fn_lessThan: return Op::I16x8lessThanU;
- case SimdOperation::Fn_lessThanOrEqual: return Op::I16x8lessThanOrEqualU;
- case SimdOperation::Fn_greaterThan: return Op::I16x8greaterThanU;
- case SimdOperation::Fn_greaterThanOrEqual: return Op::I16x8greaterThanOrEqualU;
- case SimdOperation::Fn_fromInt16x8Bits: return Op::Limit;
- default: break;
- }
- MOZ_FALLTHROUGH;
- case SimdType::Int16x8:
- // Bitcasts Uint16x8 <--> Int16x8 become noops.
- switch (op) {
- case SimdOperation::Fn_fromUint8x16Bits: return Op::I16x8fromInt8x16Bits;
- case SimdOperation::Fn_fromUint16x8Bits: return Op::Limit;
- case SimdOperation::Fn_fromUint32x4Bits: return Op::I16x8fromInt32x4Bits;
- default: break;
- }
- ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
- break;
-
- case SimdType::Uint32x4:
- // Handle the special unsigned opcodes, then fall through to Int32x4.
- switch(op) {
- case SimdOperation::Fn_shiftRightByScalar: return Op::I32x4shiftRightByScalarU;
- case SimdOperation::Fn_lessThan: return Op::I32x4lessThanU;
- case SimdOperation::Fn_lessThanOrEqual: return Op::I32x4lessThanOrEqualU;
- case SimdOperation::Fn_greaterThan: return Op::I32x4greaterThanU;
- case SimdOperation::Fn_greaterThanOrEqual: return Op::I32x4greaterThanOrEqualU;
- case SimdOperation::Fn_fromFloat32x4: return Op::I32x4fromFloat32x4U;
- case SimdOperation::Fn_fromInt32x4Bits: return Op::Limit;
- default: break;
- }
- MOZ_FALLTHROUGH;
- case SimdType::Int32x4:
- // Bitcasts Uint32x4 <--> Int32x4 become noops.
- switch (op) {
- case SimdOperation::Fn_fromUint8x16Bits: return Op::I32x4fromInt8x16Bits;
- case SimdOperation::Fn_fromUint16x8Bits: return Op::I32x4fromInt16x8Bits;
- case SimdOperation::Fn_fromUint32x4Bits: return Op::Limit;
- default: break;
- }
- ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
- break;
-
- case SimdType::Float32x4:
- switch (op) {
- case SimdOperation::Fn_fromUint8x16Bits: return Op::F32x4fromInt8x16Bits;
- case SimdOperation::Fn_fromUint16x8Bits: return Op::F32x4fromInt16x8Bits;
- case SimdOperation::Fn_fromUint32x4Bits: return Op::F32x4fromInt32x4Bits;
- default: break;
- }
- ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
- break;
-
- case SimdType::Bool8x16:
- ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
- break;
-
- case SimdType::Bool16x8:
- ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
- break;
-
- case SimdType::Bool32x4:
- ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
- break;
-
- default: break;
- }
- MOZ_CRASH("unexpected SIMD (type, operator) combination");
-}
-
-#undef CASE
-#undef I8x16CASE
-#undef I16x8CASE
-#undef I32x4CASE
-#undef F32x4CASE
-#undef B8x16CASE
-#undef B16x8CASE
-#undef B32x4CASE
-#undef ENUMERATE
-
typedef Vector<PropertyName*, 4, SystemAllocPolicy> NameVector;
// Encapsulates the building of an asm bytecode function from an asm.js function
@@ -2918,12 +2297,6 @@ class MOZ_STACK_CLASS FunctionValidator
MOZ_ASSERT(continuableStack_.empty());
MOZ_ASSERT(breakLabels_.empty());
MOZ_ASSERT(continueLabels_.empty());
- for (auto iter = locals_.all(); !iter.empty(); iter.popFront()) {
- if (iter.front().value().type.isSimd()) {
- setUsesSimd();
- break;
- }
- }
return m_.mg().finishFuncDef(funcIndex, &fg_);
}
@@ -3148,33 +2521,6 @@ class MOZ_STACK_CLASS FunctionValidator
case NumLit::Double:
return encoder().writeOp(Op::F64Const) &&
encoder().writeFixedF64(lit.toDouble());
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- return encoder().writeOp(Op::I8x16Const) &&
- encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- return encoder().writeOp(Op::I16x8Const) &&
- encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- return encoder().writeOp(Op::I32x4Const) &&
- encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
- case NumLit::Float32x4:
- return encoder().writeOp(Op::F32x4Const) &&
- encoder().writeFixedF32x4(lit.simdValue().asFloat32x4());
- case NumLit::Bool8x16:
- // Boolean vectors use the Int8x16 memory representation.
- return encoder().writeOp(Op::B8x16Const) &&
- encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
- case NumLit::Bool16x8:
- // Boolean vectors use the Int16x8 memory representation.
- return encoder().writeOp(Op::B16x8Const) &&
- encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
- case NumLit::Bool32x4:
- // Boolean vectors use the Int32x4 memory representation.
- return encoder().writeOp(Op::B32x4Const) &&
- encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
case NumLit::OutOfRangeInt:
break;
}
@@ -3187,12 +2533,6 @@ class MOZ_STACK_CLASS FunctionValidator
MOZ_MUST_USE bool prepareCall(ParseNode* pn) {
return fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
}
- MOZ_MUST_USE bool writeSimdOp(SimdType simdType, SimdOperation simdOp) {
- Op op = SimdToOp(simdType, simdOp);
- if (op == Op::Limit)
- return true;
- return encoder().writeOp(op);
- }
};
} /* anonymous namespace */
@@ -3356,7 +2696,7 @@ CheckTypeAnnotation(ModuleValidator& m, ParseNode* coercionNode, Type* coerceTo,
default:;
}
- return m.fail(coercionNode, "must be of the form +x, x|0, fround(x), or a SIMD check(x)");
+ return m.fail(coercionNode, "must be of the form +x, x|0 or fround(x)");
}
static bool
@@ -3474,97 +2814,6 @@ CheckNewArrayView(ModuleValidator& m, PropertyName* varName, ParseNode* newExpr)
}
static bool
-IsSimdValidOperationType(SimdType type, SimdOperation op)
-{
-#define CASE(op) case SimdOperation::Fn_##op:
- switch(type) {
- case SimdType::Int8x16:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_INT8X16_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Int16x8:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_INT16X8_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Int32x4:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_INT32X4_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Uint8x16:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromInt8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_INT8X16_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Uint16x8:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromInt16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_INT16X8_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Uint32x4:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromInt32x4Bits:
- FORALL_INT32X4_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Float32x4:
- switch (op) {
- case SimdOperation::Constructor:
- case SimdOperation::Fn_fromUint8x16Bits:
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- FORALL_FLOAT32X4_ASMJS_OP(CASE) return true;
- default: return false;
- }
- break;
- case SimdType::Bool8x16:
- case SimdType::Bool16x8:
- case SimdType::Bool32x4:
- switch (op) {
- case SimdOperation::Constructor:
- FORALL_BOOL_SIMD_OP(CASE) return true;
- default: return false;
- }
- break;
- default:
- // Unimplemented SIMD type.
- return false;
- }
-#undef CASE
-}
-
-static bool
CheckGlobalMathImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
PropertyName* field)
{
@@ -3597,42 +2846,6 @@ CheckGlobalAtomicsImport(ModuleValidator& m, ParseNode* initNode, PropertyName*
}
static bool
-CheckGlobalSimdImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
- PropertyName* field)
-{
- if (!m.supportsSimd())
- return m.fail(initNode, "SIMD is not supported on this platform");
-
- // SIMD constructor, with the form glob.SIMD.[[type]]
- SimdType simdType;
- if (!IsSimdTypeName(m.cx()->names(), field, &simdType))
- return m.failName(initNode, "'%s' is not a standard SIMD type", field);
-
- // IsSimdTypeName will return true for any SIMD type supported by the VM.
- //
- // Since we may not support all of those SIMD types in asm.js, use the
- // asm.js-specific IsSimdValidOperationType() to check if this specific
- // constructor is supported in asm.js.
- if (!IsSimdValidOperationType(simdType, SimdOperation::Constructor))
- return m.failName(initNode, "'%s' is not a supported SIMD type", field);
-
- return m.addSimdCtor(varName, simdType, field);
-}
-
-static bool
-CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
- ParseNode* initNode, PropertyName* varName, PropertyName* opName)
-{
- SimdType simdType = global->simdCtorType();
- SimdOperation simdOp;
- if (!m.lookupStandardSimdOpName(opName, &simdOp))
- return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
- if (!IsSimdValidOperationType(simdType, simdOp))
- return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
- return m.addSimdOperation(varName, simdType, simdOp, opName);
-}
-
-static bool
CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode)
{
ParseNode* base = DotBase(initNode);
@@ -3640,7 +2853,7 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN
if (base->isKind(PNK_DOT)) {
ParseNode* global = DotBase(base);
- PropertyName* mathOrAtomicsOrSimd = DotMember(base);
+ PropertyName* mathOrAtomics = DotMember(base);
PropertyName* globalName = m.globalArgumentName();
if (!globalName)
@@ -3654,13 +2867,11 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN
return m.failName(base, "expecting %s.*", globalName);
}
- if (mathOrAtomicsOrSimd == m.cx()->names().Math)
+ if (mathOrAtomics == m.cx()->names().Math)
return CheckGlobalMathImport(m, initNode, varName, field);
- if (mathOrAtomicsOrSimd == m.cx()->names().Atomics)
+ if (mathOrAtomics == m.cx()->names().Atomics)
return CheckGlobalAtomicsImport(m, initNode, varName, field);
- if (mathOrAtomicsOrSimd == m.cx()->names().SIMD)
- return CheckGlobalSimdImport(m, initNode, varName, field);
- return m.failName(base, "expecting %s.{Math|SIMD}", globalName);
+ return m.failName(base, "expecting %s.{Math|Atomics}", globalName);
}
if (!base->isKind(PNK_NAME))
@@ -3678,18 +2889,10 @@ CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initN
return m.failName(initNode, "'%s' is not a standard constant or typed array name", field);
}
+ if (base->name() != m.importArgumentName())
+ return m.fail(base, "expected global or import name");
- if (base->name() == m.importArgumentName())
- return m.addFFI(varName, field);
-
- const ModuleValidator::Global* global = m.lookupGlobal(base->name());
- if (!global)
- return m.failName(initNode, "%s not found in module global scope", base->name());
-
- if (!global->isSimdCtor())
- return m.failName(base, "expecting SIMD constructor name, got %s", field);
-
- return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
+ return m.addFFI(varName, field);
}
static bool
@@ -3988,8 +3191,6 @@ CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type)
case ModuleValidator::Global::FuncPtrTable:
case ModuleValidator::Global::ArrayView:
case ModuleValidator::Global::ArrayViewCtor:
- case ModuleValidator::Global::SimdCtor:
- case ModuleValidator::Global::SimdOp:
break;
}
return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
@@ -4009,12 +3210,10 @@ IsLiteralOrConstInt(FunctionValidator& f, ParseNode* pn, uint32_t* u32)
}
static const int32_t NoMask = -1;
-static const bool YesSimd = true;
-static const bool NoSimd = false;
static bool
CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
- bool isSimd, Scalar::Type* viewType)
+ Scalar::Type* viewType)
{
if (!viewName->isKind(PNK_NAME))
return f.fail(viewName, "base of array access must be a typed array view name");
@@ -4028,7 +3227,7 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
uint32_t index;
if (IsLiteralOrConstInt(f, indexExpr, &index)) {
uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
- uint64_t width = isSimd ? Simd128DataSize : TypedArrayElemSize(*viewType);
+ uint64_t width = TypedArrayElemSize(*viewType);
if (!f.m().tryConstantAccess(byteOffset, width))
return f.fail(indexExpr, "constant index out of range");
@@ -4060,8 +3259,7 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
if (!pointerType.isIntish())
return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
} else {
- // For SIMD access, and legacy scalar access compatibility, accept
- // Int8/Uint8 accesses with no shift.
+ // For legacy scalar access compatibility, accept Int8/Uint8 accesses with no shift.
if (TypedArrayShift(*viewType) != 0)
return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access");
@@ -4073,17 +3271,12 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
if (!CheckExpr(f, pointerNode, &pointerType))
return false;
- if (isSimd) {
- if (!pointerType.isIntish())
- return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
- } else {
- if (!pointerType.isInt())
- return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
- }
+ if (!pointerType.isInt())
+ return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
}
- // Don't generate the mask op if there is no need for it which could happen for
- // a shift of zero or a SIMD access.
+ // Don't generate the mask op if there is no need for it which could happen
+ // for a shift of zero.
if (mask != NoMask) {
return f.writeInt32Lit(mask) &&
f.encoder().writeOp(Op::I32And);
@@ -4093,13 +3286,6 @@ CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr
}
static bool
-CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
- bool isSimd, Scalar::Type* viewType)
-{
- return CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType);
-}
-
-static bool
WriteArrayAccessFlags(FunctionValidator& f, Scalar::Type viewType)
{
// asm.js only has naturally-aligned accesses.
@@ -4120,7 +3306,7 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
{
Scalar::Type viewType;
- if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType))
+ if (!CheckArrayAccess(f, ElemBase(elem), ElemIndex(elem), &viewType))
return false;
switch (viewType) {
@@ -4163,7 +3349,7 @@ static bool
CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
{
Scalar::Type viewType;
- if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), NoSimd, &viewType))
+ if (!CheckArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), &viewType))
return false;
Type rhsType;
@@ -4448,7 +3634,7 @@ static bool
CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
Scalar::Type* viewType)
{
- if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, NoSimd, viewType))
+ if (!CheckArrayAccess(f, viewName, indexExpr, viewType))
return false;
// The global will be sane, CheckArrayAccess checks it.
@@ -4753,9 +3939,7 @@ static bool
CheckIsArgType(FunctionValidator& f, ParseNode* argNode, Type type)
{
if (!type.isArgType())
- return f.failf(argNode,
- "%s is not a subtype of int, float, double, or an allowed SIMD type",
- type.toChars());
+ return f.failf(argNode, "%s is not a subtype of int, float or double", type.toChars());
return true;
}
@@ -4887,8 +4071,6 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type
if (ret.isFloat())
return f.fail(callNode, "FFI calls can't return float");
- if (ret.isSimd())
- return f.fail(callNode, "FFI calls can't return SIMD values");
ValTypeVector args;
if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
@@ -4944,9 +4126,6 @@ CheckCoercionArg(FunctionValidator& f, ParseNode* arg, Type expected, Type* type
if (expected.isFloat()) {
if (!CheckFloatCoercionArg(f, arg, argType))
return false;
- } else if (expected.isSimd()) {
- if (!(argType <= expected))
- return f.fail(arg, "argument to SIMD coercion isn't from the correct SIMD type");
} else {
MOZ_CRASH("not call coercions");
}
@@ -5044,639 +4223,6 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin
return true;
}
-namespace {
-// Include CheckSimdCallArgs in unnamed namespace to avoid MSVC name lookup bug.
-
-template<class CheckArgOp>
-static bool
-CheckSimdCallArgs(FunctionValidator& f, ParseNode* call, unsigned expectedArity,
- const CheckArgOp& checkArg)
-{
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != expectedArity)
- return f.failf(call, "expected %u arguments to SIMD call, got %u", expectedArity, numArgs);
-
- ParseNode* arg = CallArgList(call);
- for (size_t i = 0; i < numArgs; i++, arg = NextNode(arg)) {
- MOZ_ASSERT(!!arg);
- Type argType;
- if (!CheckExpr(f, arg, &argType))
- return false;
- if (!checkArg(f, arg, i, argType))
- return false;
- }
-
- return true;
-}
-
-
-class CheckArgIsSubtypeOf
-{
- Type formalType_;
-
- public:
- explicit CheckArgIsSubtypeOf(SimdType t) : formalType_(t) {}
-
- bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
- {
- if (!(actualType <= formalType_)) {
- return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
- formalType_.toChars());
- }
- return true;
- }
-};
-
-static inline Type
-SimdToCoercedScalarType(SimdType t)
-{
- switch (t) {
- case SimdType::Int8x16:
- case SimdType::Int16x8:
- case SimdType::Int32x4:
- case SimdType::Uint8x16:
- case SimdType::Uint16x8:
- case SimdType::Uint32x4:
- case SimdType::Bool8x16:
- case SimdType::Bool16x8:
- case SimdType::Bool32x4:
- return Type::Intish;
- case SimdType::Float32x4:
- return Type::Floatish;
- default:
- break;
- }
- MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected SIMD type");
-}
-
-class CheckSimdScalarArgs
-{
- SimdType simdType_;
- Type formalType_;
-
- public:
- explicit CheckSimdScalarArgs(SimdType simdType)
- : simdType_(simdType), formalType_(SimdToCoercedScalarType(simdType))
- {}
-
- bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
- {
- if (!(actualType <= formalType_)) {
- // As a special case, accept doublelit arguments to float32x4 ops by
- // re-emitting them as float32 constants.
- if (simdType_ != SimdType::Float32x4 || !actualType.isDoubleLit()) {
- return f.failf(arg, "%s is not a subtype of %s%s",
- actualType.toChars(), formalType_.toChars(),
- simdType_ == SimdType::Float32x4 ? " or doublelit" : "");
- }
-
- // We emitted a double literal and actually want a float32.
- return f.encoder().writeOp(Op::F32DemoteF64);
- }
-
- return true;
- }
-};
-
-class CheckSimdSelectArgs
-{
- Type formalType_;
- Type maskType_;
-
- public:
- explicit CheckSimdSelectArgs(SimdType t) : formalType_(t), maskType_(GetBooleanSimdType(t)) {}
-
- bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
- {
- // The first argument is the boolean selector, the next two are the
- // values to choose from.
- Type wantedType = argIndex == 0 ? maskType_ : formalType_;
-
- if (!(actualType <= wantedType)) {
- return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
- wantedType.toChars());
- }
- return true;
- }
-};
-
-class CheckSimdVectorScalarArgs
-{
- SimdType formalSimdType_;
-
- public:
- explicit CheckSimdVectorScalarArgs(SimdType t) : formalSimdType_(t) {}
-
- bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
- {
- MOZ_ASSERT(argIndex < 2);
- if (argIndex == 0) {
- // First argument is the vector
- if (!(actualType <= Type(formalSimdType_))) {
- return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
- Type(formalSimdType_).toChars());
- }
-
- return true;
- }
-
- // Second argument is the scalar
- return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType);
- }
-};
-
-} // namespace
-
-static bool
-CheckSimdUnary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
- return false;
- if (!f.writeSimdOp(opType, op))
- return false;
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdBinaryShift(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type *type)
-{
- if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType)))
- return false;
- if (!f.writeSimdOp(opType, op))
- return false;
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdBinaryComp(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type *type)
-{
- if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
- return false;
- if (!f.writeSimdOp(opType, op))
- return false;
- *type = GetBooleanSimdType(opType);
- return true;
-}
-
-static bool
-CheckSimdBinary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
- return false;
- if (!f.writeSimdOp(opType, op))
- return false;
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- switch (opType) {
- case SimdType::Int8x16:
- case SimdType::Int16x8:
- case SimdType::Int32x4: *type = Type::Signed; break;
- case SimdType::Uint8x16:
- case SimdType::Uint16x8:
- case SimdType::Uint32x4: *type = Type::Unsigned; break;
- case SimdType::Float32x4: *type = Type::Float; break;
- case SimdType::Bool8x16:
- case SimdType::Bool16x8:
- case SimdType::Bool32x4: *type = Type::Int; break;
- default: MOZ_CRASH("unhandled simd type");
- }
-
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 2)
- return f.failf(call, "expected 2 arguments to SIMD extract, got %u", numArgs);
-
- ParseNode* arg = CallArgList(call);
-
- // First argument is the vector
- Type vecType;
- if (!CheckExpr(f, arg, &vecType))
- return false;
- if (!(vecType <= Type(opType))) {
- return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
- Type(opType).toChars());
- }
-
- arg = NextNode(arg);
-
- // Second argument is the lane < vector length
- uint32_t lane;
- if (!IsLiteralOrConstInt(f, arg, &lane))
- return f.failf(arg, "lane selector should be a constant integer literal");
- if (lane >= GetSimdLanes(opType))
- return f.failf(arg, "lane selector should be in bounds");
-
- if (!f.writeSimdOp(opType, SimdOperation::Fn_extractLane))
- return false;
- if (!f.encoder().writeVarU32(lane))
- return false;
- return true;
-}
-
-static bool
-CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 3)
- return f.failf(call, "expected 2 arguments to SIMD replace, got %u", numArgs);
-
- ParseNode* arg = CallArgList(call);
-
- // First argument is the vector
- Type vecType;
- if (!CheckExpr(f, arg, &vecType))
- return false;
- if (!(vecType <= Type(opType))) {
- return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
- Type(opType).toChars());
- }
-
- arg = NextNode(arg);
-
- // Second argument is the lane < vector length
- uint32_t lane;
- if (!IsLiteralOrConstInt(f, arg, &lane))
- return f.failf(arg, "lane selector should be a constant integer literal");
- if (lane >= GetSimdLanes(opType))
- return f.failf(arg, "lane selector should be in bounds");
-
- arg = NextNode(arg);
-
- // Third argument is the scalar
- Type scalarType;
- if (!CheckExpr(f, arg, &scalarType))
- return false;
- if (!(scalarType <= SimdToCoercedScalarType(opType))) {
- if (opType == SimdType::Float32x4 && scalarType.isDoubleLit()) {
- if (!f.encoder().writeOp(Op::F32DemoteF64))
- return false;
- } else {
- return f.failf(arg, "%s is not the correct type to replace an element of %s",
- scalarType.toChars(), vecType.toChars());
- }
- }
-
- if (!f.writeSimdOp(opType, SimdOperation::Fn_replaceLane))
- return false;
- if (!f.encoder().writeVarU32(lane))
- return false;
- *type = opType;
- return true;
-}
-
-typedef bool Bitcast;
-
-namespace {
-// Include CheckSimdCast in unnamed namespace to avoid MSVC name lookup bug (due to the use of Type).
-
-static bool
-CheckSimdCast(FunctionValidator& f, ParseNode* call, SimdType fromType, SimdType toType,
- SimdOperation op, Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(fromType)))
- return false;
- if (!f.writeSimdOp(toType, op))
- return false;
- *type = toType;
- return true;
-}
-
-} // namespace
-
-static bool
-CheckSimdShuffleSelectors(FunctionValidator& f, ParseNode* lane,
- mozilla::Array<uint8_t, 16>& lanes, unsigned numLanes, unsigned maxLane)
-{
- for (unsigned i = 0; i < numLanes; i++, lane = NextNode(lane)) {
- uint32_t u32;
- if (!IsLiteralInt(f.m(), lane, &u32))
- return f.failf(lane, "lane selector should be a constant integer literal");
- if (u32 >= maxLane)
- return f.failf(lane, "lane selector should be less than %u", maxLane);
- lanes[i] = uint8_t(u32);
- }
- return true;
-}
-
-static bool
-CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- const unsigned numLanes = GetSimdLanes(opType);
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 1 + numLanes)
- return f.failf(call, "expected %u arguments to SIMD swizzle, got %u", 1 + numLanes,
- numArgs);
-
- Type retType = opType;
- ParseNode* vec = CallArgList(call);
- Type vecType;
- if (!CheckExpr(f, vec, &vecType))
- return false;
- if (!(vecType <= retType))
- return f.failf(vec, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
-
- if (!f.writeSimdOp(opType, SimdOperation::Fn_swizzle))
- return false;
-
- mozilla::Array<uint8_t, 16> lanes;
- if (!CheckSimdShuffleSelectors(f, NextNode(vec), lanes, numLanes, numLanes))
- return false;
-
- for (unsigned i = 0; i < numLanes; i++) {
- if (!f.encoder().writeFixedU8(lanes[i]))
- return false;
- }
-
- *type = retType;
- return true;
-}
-
-static bool
-CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- const unsigned numLanes = GetSimdLanes(opType);
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 2 + numLanes)
- return f.failf(call, "expected %u arguments to SIMD shuffle, got %u", 2 + numLanes,
- numArgs);
-
- Type retType = opType;
- ParseNode* arg = CallArgList(call);
- for (unsigned i = 0; i < 2; i++, arg = NextNode(arg)) {
- Type type;
- if (!CheckExpr(f, arg, &type))
- return false;
- if (!(type <= retType))
- return f.failf(arg, "%s is not a subtype of %s", type.toChars(), retType.toChars());
- }
-
- if (!f.writeSimdOp(opType, SimdOperation::Fn_shuffle))
- return false;
-
- mozilla::Array<uint8_t, 16> lanes;
- if (!CheckSimdShuffleSelectors(f, arg, lanes, numLanes, 2 * numLanes))
- return false;
-
- for (unsigned i = 0; i < numLanes; i++) {
- if (!f.encoder().writeFixedU8(uint8_t(lanes[i])))
- return false;
- }
-
- *type = retType;
- return true;
-}
-
-static bool
-CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, Scalar::Type* viewType)
-{
- ParseNode* view = CallArgList(call);
- if (!view->isKind(PNK_NAME))
- return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
-
- ParseNode* indexExpr = NextNode(view);
-
- if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, viewType))
- return false;
-
- if (*viewType != Scalar::Uint8)
- return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
-
- return true;
-}
-
-static bool
-CheckSimdLoad(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type* type)
-{
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 2)
- return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
-
- Scalar::Type viewType;
- if (!CheckSimdLoadStoreArgs(f, call, &viewType))
- return false;
-
- if (!f.writeSimdOp(opType, op))
- return false;
-
- if (!WriteArrayAccessFlags(f, viewType))
- return false;
-
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
- Type* type)
-{
- unsigned numArgs = CallArgListLength(call);
- if (numArgs != 3)
- return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs);
-
- Scalar::Type viewType;
- if (!CheckSimdLoadStoreArgs(f, call, &viewType))
- return false;
-
- Type retType = opType;
- ParseNode* vecExpr = NextNode(NextNode(CallArgList(call)));
- Type vecType;
- if (!CheckExpr(f, vecExpr, &vecType))
- return false;
-
- if (!f.writeSimdOp(opType, op))
- return false;
-
- if (!WriteArrayAccessFlags(f, viewType))
- return false;
-
- if (!(vecType <= retType))
- return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
-
- *type = vecType;
- return true;
-}
-
-static bool
-CheckSimdSelect(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType)))
- return false;
- if (!f.writeSimdOp(opType, SimdOperation::Fn_select))
- return false;
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
- return false;
- if (!f.writeSimdOp(opType, SimdOperation::Fn_allTrue))
- return false;
- *type = Type::Int;
- return true;
-}
-
-static bool
-CheckSimdAnyTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
- return false;
- if (!f.writeSimdOp(opType, SimdOperation::Fn_anyTrue))
- return false;
- *type = Type::Int;
- return true;
-}
-
-static bool
-CheckSimdCheck(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- Type coerceTo;
- ParseNode* argNode;
- if (!IsCoercionCall(f.m(), call, &coerceTo, &argNode))
- return f.failf(call, "expected 1 argument in call to check");
- return CheckCoercionArg(f, argNode, coerceTo, type);
-}
-
-static bool
-CheckSimdSplat(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
-{
- if (!CheckSimdCallArgs(f, call, 1, CheckSimdScalarArgs(opType)))
- return false;
- if (!f.writeSimdOp(opType, SimdOperation::Fn_splat))
- return false;
- *type = opType;
- return true;
-}
-
-static bool
-CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
- Type* type)
-{
- f.setUsesSimd();
-
- MOZ_ASSERT(global->isSimdOperation());
-
- SimdType opType = global->simdOperationType();
-
- switch (SimdOperation op = global->simdOperation()) {
- case SimdOperation::Fn_check:
- return CheckSimdCheck(f, call, opType, type);
-
-#define _CASE(OP) case SimdOperation::Fn_##OP:
- FOREACH_SHIFT_SIMD_OP(_CASE)
- return CheckSimdBinaryShift(f, call, opType, op, type);
-
- FOREACH_COMP_SIMD_OP(_CASE)
- return CheckSimdBinaryComp(f, call, opType, op, type);
-
- FOREACH_NUMERIC_SIMD_BINOP(_CASE)
- FOREACH_FLOAT_SIMD_BINOP(_CASE)
- FOREACH_BITWISE_SIMD_BINOP(_CASE)
- FOREACH_SMINT_SIMD_BINOP(_CASE)
- return CheckSimdBinary(f, call, opType, op, type);
-#undef _CASE
-
- case SimdOperation::Fn_extractLane:
- return CheckSimdExtractLane(f, call, opType, type);
- case SimdOperation::Fn_replaceLane:
- return CheckSimdReplaceLane(f, call, opType, type);
-
- case SimdOperation::Fn_fromInt8x16Bits:
- return CheckSimdCast(f, call, SimdType::Int8x16, opType, op, type);
- case SimdOperation::Fn_fromUint8x16Bits:
- return CheckSimdCast(f, call, SimdType::Uint8x16, opType, op, type);
- case SimdOperation::Fn_fromInt16x8Bits:
- return CheckSimdCast(f, call, SimdType::Int16x8, opType, op, type);
- case SimdOperation::Fn_fromUint16x8Bits:
- return CheckSimdCast(f, call, SimdType::Uint16x8, opType, op, type);
- case SimdOperation::Fn_fromInt32x4:
- case SimdOperation::Fn_fromInt32x4Bits:
- return CheckSimdCast(f, call, SimdType::Int32x4, opType, op, type);
- case SimdOperation::Fn_fromUint32x4:
- case SimdOperation::Fn_fromUint32x4Bits:
- return CheckSimdCast(f, call, SimdType::Uint32x4, opType, op, type);
- case SimdOperation::Fn_fromFloat32x4:
- case SimdOperation::Fn_fromFloat32x4Bits:
- return CheckSimdCast(f, call, SimdType::Float32x4, opType, op, type);
-
- case SimdOperation::Fn_abs:
- case SimdOperation::Fn_neg:
- case SimdOperation::Fn_not:
- case SimdOperation::Fn_sqrt:
- case SimdOperation::Fn_reciprocalApproximation:
- case SimdOperation::Fn_reciprocalSqrtApproximation:
- return CheckSimdUnary(f, call, opType, op, type);
-
- case SimdOperation::Fn_swizzle:
- return CheckSimdSwizzle(f, call, opType, type);
- case SimdOperation::Fn_shuffle:
- return CheckSimdShuffle(f, call, opType, type);
-
- case SimdOperation::Fn_load:
- case SimdOperation::Fn_load1:
- case SimdOperation::Fn_load2:
- return CheckSimdLoad(f, call, opType, op, type);
- case SimdOperation::Fn_store:
- case SimdOperation::Fn_store1:
- case SimdOperation::Fn_store2:
- return CheckSimdStore(f, call, opType, op, type);
-
- case SimdOperation::Fn_select:
- return CheckSimdSelect(f, call, opType, type);
-
- case SimdOperation::Fn_splat:
- return CheckSimdSplat(f, call, opType, type);
-
- case SimdOperation::Fn_allTrue:
- return CheckSimdAllTrue(f, call, opType, type);
- case SimdOperation::Fn_anyTrue:
- return CheckSimdAnyTrue(f, call, opType, type);
-
- case SimdOperation::Fn_load3:
- case SimdOperation::Fn_store3:
- return f.fail(call, "asm.js does not support 3-element SIMD loads or stores");
-
- case SimdOperation::Constructor:
- MOZ_CRASH("constructors are handled in CheckSimdCtorCall");
- case SimdOperation::Fn_fromFloat64x2Bits:
- MOZ_CRASH("NYI");
- }
- MOZ_CRASH("unexpected simd operation in CheckSimdOperationCall");
-}
-
-static bool
-CheckSimdCtorCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
- Type* type)
-{
- f.setUsesSimd();
-
- MOZ_ASSERT(call->isKind(PNK_CALL));
-
- SimdType simdType = global->simdCtorType();
- unsigned length = GetSimdLanes(simdType);
- if (!CheckSimdCallArgs(f, call, length, CheckSimdScalarArgs(simdType)))
- return false;
-
- if (!f.writeSimdOp(simdType, SimdOperation::Constructor))
- return false;
-
- *type = simdType;
- return true;
-}
-
static bool
CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type)
{
@@ -5688,16 +4234,12 @@ CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type)
return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
if (global->isAtomicsFunction())
return CheckAtomicsBuiltinCall(f, expr, global->atomicsBuiltinFunction(), type);
- if (global->isSimdCtor())
- return CheckSimdCtorCall(f, expr, global, type);
- if (global->isSimdOperation())
- return CheckSimdOperationCall(f, expr, global, type);
}
return f.fail(expr, "all function calls must either be calls to standard lib math functions, "
- "standard atomic functions, standard SIMD constructors or operations, "
- "ignored (via f(); or comma-expression), coerced to signed (via f()|0), "
- "coerced to float (via fround(f())) or coerced to double (via +f())");
+ "standard atomic functions ignored (via f(); or comma-expression), coerced"
+ " to signed (via f()|0), coerced to float (via fround(f())) or coerced to "
+ "double (via +f())");
}
static bool
@@ -5740,10 +4282,7 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
}
break;
default:
- MOZ_ASSERT(expected.isSimd(), "Incomplete switch");
- if (actual != expected)
- return f.failf(expr, "got type %s, expected %s", actual.toChars(), expected.toChars());
- break;
+ MOZ_CRASH("unexpected uncoerced result type");
}
*type = Type::ret(expected);
@@ -5761,26 +4300,6 @@ CheckCoercedMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMath
}
static bool
-CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
- Type ret, Type* type)
-{
- MOZ_ASSERT(ret.isCanonical());
-
- Type actual;
- if (global->isSimdCtor()) {
- if (!CheckSimdCtorCall(f, call, global, &actual))
- return false;
- MOZ_ASSERT(actual.isSimd());
- } else {
- MOZ_ASSERT(global->isSimdOperation());
- if (!CheckSimdOperationCall(f, call, global, &actual))
- return false;
- }
-
- return CoerceResult(f, call, ret, actual, type);
-}
-
-static bool
CheckCoercedAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode,
AsmJSAtomicsBuiltinFunction func, Type ret, Type* type)
{
@@ -5834,9 +4353,6 @@ CheckCoercedCall(FunctionValidator& f, ParseNode* call, Type ret, Type* type)
case ModuleValidator::Global::ArrayView:
case ModuleValidator::Global::ArrayViewCtor:
return f.failName(callee, "'%s' is not callable function", callee->name());
- case ModuleValidator::Global::SimdCtor:
- case ModuleValidator::Global::SimdOp:
- return CheckCoercedSimdCall(f, call, global, ret, type);
case ModuleValidator::Global::Function:
break;
}
@@ -6021,11 +4537,9 @@ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type)
*type = Type::Double;
} else if (thenType.isFloat() && elseType.isFloat()) {
*type = Type::Float;
- } else if (thenType.isSimd() && elseType == thenType) {
- *type = thenType;
} else {
return f.failf(ternary, "then/else branches of conditional must both produce int, float, "
- "double or SIMD types, current types are %s and %s",
+ "or double; current types are %s and %s",
thenType.toChars(), elseType.toChars());
}
@@ -6052,16 +4566,6 @@ IsValidIntMultiplyConstant(ModuleValidator& m, ParseNode* expr)
case NumLit::Double:
case NumLit::Float:
case NumLit::OutOfRangeInt:
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- case NumLit::Float32x4:
- case NumLit::Bool8x16:
- case NumLit::Bool16x8:
- case NumLit::Bool32x4:
return false;
}
@@ -6714,16 +5218,6 @@ CheckCaseExpr(FunctionValidator& f, ParseNode* caseExpr, int32_t* value)
return f.fail(caseExpr, "switch case expression out of integer range");
case NumLit::Double:
case NumLit::Float:
- case NumLit::Int8x16:
- case NumLit::Uint8x16:
- case NumLit::Int16x8:
- case NumLit::Uint16x8:
- case NumLit::Int32x4:
- case NumLit::Uint32x4:
- case NumLit::Float32x4:
- case NumLit::Bool8x16:
- case NumLit::Bool16x8:
- case NumLit::Bool32x4:
return f.fail(caseExpr, "switch case expression must be an integer literal");
}
@@ -7463,17 +5957,13 @@ HasObjectValueOfMethodPure(JSObject* obj, JSContext* cx)
static bool
HasPureCoercion(JSContext* cx, HandleValue v)
{
- // Unsigned SIMD types are not allowed in function signatures.
- if (IsVectorObject<Int32x4>(v) || IsVectorObject<Float32x4>(v) || IsVectorObject<Bool32x4>(v))
- return true;
-
- // Ideally, we'd reject all non-SIMD non-primitives, but Emscripten has a
- // bug that generates code that passes functions for some imports. To avoid
- // breaking all the code that contains this bug, we make an exception for
- // functions that don't have user-defined valueOf or toString, for their
- // coercions are not observable and coercion via ToNumber/ToInt32
- // definitely produces NaN/0. We should remove this special case later once
- // most apps have been built with newer Emscripten.
+ // Ideally, we'd reject all non-primitives, but Emscripten has a bug that
+ // generates code that passes functions for some imports. To avoid breaking
+ // all the code that contains this bug, we make an exception for functions
+ // that don't have user-defined valueOf or toString, for their coercions
+ // are not observable and coercion via ToNumber/ToInt32 definitely produces
+ // NaN/0. We should remove this special case later once most apps have been
+ // built with newer Emscripten.
if (v.toObject().is<JSFunction>() &&
HasNoToPrimitiveMethodPure(&v.toObject(), cx) &&
HasObjectValueOfMethodPure(&v.toObject(), cx) &&
@@ -7525,58 +6015,6 @@ ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue imp
*val = Val(RawF64(d));
return true;
}
- case ValType::I8x16: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Int8x16>(cx, v, &simdConstant))
- return false;
- *val = Val(simdConstant.asInt8x16());
- return true;
- }
- case ValType::I16x8: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Int16x8>(cx, v, &simdConstant))
- return false;
- *val = Val(simdConstant.asInt16x8());
- return true;
- }
- case ValType::I32x4: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
- return false;
- *val = Val(simdConstant.asInt32x4());
- return true;
- }
- case ValType::F32x4: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
- return false;
- *val = Val(simdConstant.asFloat32x4());
- return true;
- }
- case ValType::B8x16: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Bool8x16>(cx, v, &simdConstant))
- return false;
- // Bool8x16 uses the same data layout as Int8x16.
- *val = Val(simdConstant.asInt8x16());
- return true;
- }
- case ValType::B16x8: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Bool16x8>(cx, v, &simdConstant))
- return false;
- // Bool16x8 uses the same data layout as Int16x8.
- *val = Val(simdConstant.asInt16x8());
- return true;
- }
- case ValType::B32x4: {
- SimdConstant simdConstant;
- if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
- return false;
- // Bool32x4 uses the same data layout as Int32x4.
- *val = Val(simdConstant.asInt32x4());
- return true;
- }
}
}
}
@@ -7656,167 +6094,6 @@ ValidateMathBuiltinFunction(JSContext* cx, const AsmJSGlobal& global, HandleValu
}
static bool
-ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal,
- MutableHandleValue out)
-{
- RootedValue v(cx);
- if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
- return false;
-
- SimdType type;
- if (global.which() == AsmJSGlobal::SimdCtor)
- type = global.simdCtorType();
- else
- type = global.simdOperationType();
-
- RootedPropertyName simdTypeName(cx, SimdTypeToName(cx->names(), type));
- if (!GetDataProperty(cx, v, simdTypeName, &v))
- return false;
-
- if (!v.isObject())
- return LinkFail(cx, "bad SIMD type");
-
- RootedObject simdDesc(cx, &v.toObject());
- if (!simdDesc->is<SimdTypeDescr>())
- return LinkFail(cx, "bad SIMD type");
-
- if (type != simdDesc->as<SimdTypeDescr>().type())
- return LinkFail(cx, "bad SIMD type");
-
- out.set(v);
- return true;
-}
-
-static bool
-ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
-{
- RootedValue _(cx);
- return ValidateSimdType(cx, global, globalVal, &_);
-}
-
-static bool
-ValidateSimdOperation(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
-{
- RootedValue v(cx);
- JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
-
- if (!GetDataProperty(cx, v, global.field(), &v))
- return false;
-
- Native native = nullptr;
- switch (global.simdOperationType()) {
-#define SET_NATIVE_INT8X16(op) case SimdOperation::Fn_##op: native = simd_int8x16_##op; break;
-#define SET_NATIVE_INT16X8(op) case SimdOperation::Fn_##op: native = simd_int16x8_##op; break;
-#define SET_NATIVE_INT32X4(op) case SimdOperation::Fn_##op: native = simd_int32x4_##op; break;
-#define SET_NATIVE_UINT8X16(op) case SimdOperation::Fn_##op: native = simd_uint8x16_##op; break;
-#define SET_NATIVE_UINT16X8(op) case SimdOperation::Fn_##op: native = simd_uint16x8_##op; break;
-#define SET_NATIVE_UINT32X4(op) case SimdOperation::Fn_##op: native = simd_uint32x4_##op; break;
-#define SET_NATIVE_FLOAT32X4(op) case SimdOperation::Fn_##op: native = simd_float32x4_##op; break;
-#define SET_NATIVE_BOOL8X16(op) case SimdOperation::Fn_##op: native = simd_bool8x16_##op; break;
-#define SET_NATIVE_BOOL16X8(op) case SimdOperation::Fn_##op: native = simd_bool16x8_##op; break;
-#define SET_NATIVE_BOOL32X4(op) case SimdOperation::Fn_##op: native = simd_bool32x4_##op; break;
-#define FALLTHROUGH(op) case SimdOperation::Fn_##op:
- case SimdType::Int8x16:
- switch (global.simdOperation()) {
- FORALL_INT8X16_ASMJS_OP(SET_NATIVE_INT8X16)
- SET_NATIVE_INT8X16(fromUint8x16Bits)
- SET_NATIVE_INT8X16(fromUint16x8Bits)
- SET_NATIVE_INT8X16(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Int16x8:
- switch (global.simdOperation()) {
- FORALL_INT16X8_ASMJS_OP(SET_NATIVE_INT16X8)
- SET_NATIVE_INT16X8(fromUint8x16Bits)
- SET_NATIVE_INT16X8(fromUint16x8Bits)
- SET_NATIVE_INT16X8(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Int32x4:
- switch (global.simdOperation()) {
- FORALL_INT32X4_ASMJS_OP(SET_NATIVE_INT32X4)
- SET_NATIVE_INT32X4(fromUint8x16Bits)
- SET_NATIVE_INT32X4(fromUint16x8Bits)
- SET_NATIVE_INT32X4(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Uint8x16:
- switch (global.simdOperation()) {
- FORALL_INT8X16_ASMJS_OP(SET_NATIVE_UINT8X16)
- SET_NATIVE_UINT8X16(fromInt8x16Bits)
- SET_NATIVE_UINT8X16(fromUint16x8Bits)
- SET_NATIVE_UINT8X16(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Uint16x8:
- switch (global.simdOperation()) {
- FORALL_INT16X8_ASMJS_OP(SET_NATIVE_UINT16X8)
- SET_NATIVE_UINT16X8(fromUint8x16Bits)
- SET_NATIVE_UINT16X8(fromInt16x8Bits)
- SET_NATIVE_UINT16X8(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Uint32x4:
- switch (global.simdOperation()) {
- FORALL_INT32X4_ASMJS_OP(SET_NATIVE_UINT32X4)
- SET_NATIVE_UINT32X4(fromUint8x16Bits)
- SET_NATIVE_UINT32X4(fromUint16x8Bits)
- SET_NATIVE_UINT32X4(fromInt32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Float32x4:
- switch (global.simdOperation()) {
- FORALL_FLOAT32X4_ASMJS_OP(SET_NATIVE_FLOAT32X4)
- SET_NATIVE_FLOAT32X4(fromUint8x16Bits)
- SET_NATIVE_FLOAT32X4(fromUint16x8Bits)
- SET_NATIVE_FLOAT32X4(fromUint32x4Bits)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Bool8x16:
- switch (global.simdOperation()) {
- FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL8X16)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Bool16x8:
- switch (global.simdOperation()) {
- FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL16X8)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- case SimdType::Bool32x4:
- switch (global.simdOperation()) {
- FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL32X4)
- default: MOZ_CRASH("shouldn't have been validated in the first place");
- }
- break;
- default: MOZ_CRASH("unhandled simd type");
-#undef FALLTHROUGH
-#undef SET_NATIVE_INT8X16
-#undef SET_NATIVE_INT16X8
-#undef SET_NATIVE_INT32X4
-#undef SET_NATIVE_UINT8X16
-#undef SET_NATIVE_UINT16X8
-#undef SET_NATIVE_UINT32X4
-#undef SET_NATIVE_FLOAT32X4
-#undef SET_NATIVE_BOOL8X16
-#undef SET_NATIVE_BOOL16X8
-#undef SET_NATIVE_BOOL32X4
-#undef SET_NATIVE
- }
- if (!native || !IsNativeFunction(v, native))
- return LinkFail(cx, "bad SIMD.type.* operation");
- return true;
-}
-
-static bool
ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
{
RootedValue v(cx);
@@ -7918,12 +6195,11 @@ CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata, HandleValue bufferVal,
// On 64-bit, bounds checks are statically removed so the huge guard
// region is always necessary. On 32-bit, allocating a guard page
// requires reallocating the incoming ArrayBuffer which could trigger
- // OOM. Thus, only ask for a guard page when SIMD is used since SIMD
- // allows unaligned memory access (see MaxMemoryAccessSize comment);
+ // OOM. Thus, don't ask for a guard page in this case.
#ifdef WASM_HUGE_MEMORY
bool needGuard = true;
#else
- bool needGuard = metadata.usesSimd;
+ bool needGuard = false;
#endif
Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
@@ -7976,14 +6252,6 @@ GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal,
if (!ValidateConstant(cx, global, globalVal))
return false;
break;
- case AsmJSGlobal::SimdCtor:
- if (!ValidateSimdType(cx, global, globalVal))
- return false;
- break;
- case AsmJSGlobal::SimdOp:
- if (!ValidateSimdOperation(cx, global, globalVal))
- return false;
- break;
}
}
diff --git a/js/src/wasm/AsmJS.h b/js/src/wasm/AsmJS.h
index 296617c79b..2bad4b3e4e 100644
--- a/js/src/wasm/AsmJS.h
+++ b/js/src/wasm/AsmJS.h
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2014 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index 7a905ecbea..729ce73f35 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -7094,70 +7094,6 @@ BaseCompiler::emitBody()
case uint16_t(Op::F64Ge):
CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_GE, MCompare::Compare_Double));
- // SIMD
-#define CASE(TYPE, OP, SIGN) \
- case uint16_t(Op::TYPE##OP): \
- MOZ_CRASH("Unimplemented SIMD");
-#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
-#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
-#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
-#define F32x4CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable)
-#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
-#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
-#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
-#define ENUMERATE(TYPE, FORALL, DO) \
- case uint16_t(Op::TYPE##Constructor): \
- FORALL(DO)
-
- ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
- ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
- ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
- ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
- ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
- ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
- ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
-
-#undef CASE
-#undef I8x16CASE
-#undef I16x8CASE
-#undef I32x4CASE
-#undef F32x4CASE
-#undef B8x16CASE
-#undef B16x8CASE
-#undef B32x4CASE
-#undef ENUMERATE
-
- case uint16_t(Op::I8x16Const):
- case uint16_t(Op::I16x8Const):
- case uint16_t(Op::I32x4Const):
- case uint16_t(Op::F32x4Const):
- case uint16_t(Op::B8x16Const):
- case uint16_t(Op::B16x8Const):
- case uint16_t(Op::B32x4Const):
- case uint16_t(Op::I32x4shiftRightByScalarU):
- case uint16_t(Op::I8x16addSaturateU):
- case uint16_t(Op::I8x16subSaturateU):
- case uint16_t(Op::I8x16shiftRightByScalarU):
- case uint16_t(Op::I8x16lessThanU):
- case uint16_t(Op::I8x16lessThanOrEqualU):
- case uint16_t(Op::I8x16greaterThanU):
- case uint16_t(Op::I8x16greaterThanOrEqualU):
- case uint16_t(Op::I8x16extractLaneU):
- case uint16_t(Op::I16x8addSaturateU):
- case uint16_t(Op::I16x8subSaturateU):
- case uint16_t(Op::I16x8shiftRightByScalarU):
- case uint16_t(Op::I16x8lessThanU):
- case uint16_t(Op::I16x8lessThanOrEqualU):
- case uint16_t(Op::I16x8greaterThanU):
- case uint16_t(Op::I16x8greaterThanOrEqualU):
- case uint16_t(Op::I16x8extractLaneU):
- case uint16_t(Op::I32x4lessThanU):
- case uint16_t(Op::I32x4lessThanOrEqualU):
- case uint16_t(Op::I32x4greaterThanU):
- case uint16_t(Op::I32x4greaterThanOrEqualU):
- case uint16_t(Op::I32x4fromFloat32x4U):
- MOZ_CRASH("Unimplemented SIMD");
-
// Atomics
case uint16_t(Op::I32AtomicsLoad):
case uint16_t(Op::I32AtomicsStore):
diff --git a/js/src/wasm/WasmBinaryConstants.h b/js/src/wasm/WasmBinaryConstants.h
index 4a5ec36b3d..f662814c9e 100644
--- a/js/src/wasm/WasmBinaryConstants.h
+++ b/js/src/wasm/WasmBinaryConstants.h
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -18,8 +19,6 @@
#ifndef wasm_binary_h
#define wasm_binary_h
-#include "builtin/SIMD.h"
-
namespace js {
namespace wasm {
@@ -54,15 +53,6 @@ enum class TypeCode
F32 = 0x7d, // SLEB128(-0x03)
F64 = 0x7c, // SLEB128(-0x04)
- // Only emitted internally for asm.js, likely to get collapsed into I128
- I8x16 = 0x7b,
- I16x8 = 0x7a,
- I32x4 = 0x79,
- F32x4 = 0x78,
- B8x16 = 0x77,
- B16x8 = 0x76,
- B32x4 = 0x75,
-
// A function pointer with any signature
AnyFunc = 0x70, // SLEB128(-0x10)
@@ -81,18 +71,6 @@ enum class ValType
I64 = uint8_t(TypeCode::I64),
F32 = uint8_t(TypeCode::F32),
F64 = uint8_t(TypeCode::F64),
-
- // ------------------------------------------------------------------------
- // The rest of these types are currently only emitted internally when
- // compiling asm.js and are rejected by wasm validation.
-
- I8x16 = uint8_t(TypeCode::I8x16),
- I16x8 = uint8_t(TypeCode::I16x8),
- I32x4 = uint8_t(TypeCode::I32x4),
- F32x4 = uint8_t(TypeCode::F32x4),
- B8x16 = uint8_t(TypeCode::B8x16),
- B16x8 = uint8_t(TypeCode::B16x8),
- B32x4 = uint8_t(TypeCode::B32x4)
};
typedef Vector<ValType, 8, SystemAllocPolicy> ValTypeVector;
@@ -354,82 +332,6 @@ enum class Op
I32AtomicsStore,
I32AtomicsBinOp,
- // SIMD
-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
-#define _(OP) SIMD_OPCODE(I8x16, OP)
- FORALL_INT8X16_ASMJS_OP(_)
- I8x16Constructor,
- I8x16Const,
-#undef _
- // Unsigned I8x16 operations. These are the SIMD.Uint8x16 operations that
- // behave differently from their SIMD.Int8x16 counterparts.
- I8x16extractLaneU,
- I8x16addSaturateU,
- I8x16subSaturateU,
- I8x16shiftRightByScalarU,
- I8x16lessThanU,
- I8x16lessThanOrEqualU,
- I8x16greaterThanU,
- I8x16greaterThanOrEqualU,
-
-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
-#define _(OP) SIMD_OPCODE(I16x8, OP)
- FORALL_INT16X8_ASMJS_OP(_)
- I16x8Constructor,
- I16x8Const,
-#undef _
- // Unsigned I16x8 operations. These are the SIMD.Uint16x8 operations that
- // behave differently from their SIMD.Int16x8 counterparts.
- I16x8extractLaneU,
- I16x8addSaturateU,
- I16x8subSaturateU,
- I16x8shiftRightByScalarU,
- I16x8lessThanU,
- I16x8lessThanOrEqualU,
- I16x8greaterThanU,
- I16x8greaterThanOrEqualU,
-
-#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
-#define _(OP) SIMD_OPCODE(I32x4, OP)
- FORALL_INT32X4_ASMJS_OP(_)
- I32x4Constructor,
- I32x4Const,
-#undef _
- // Unsigned I32x4 operations. These are the SIMD.Uint32x4 operations that
- // behave differently from their SIMD.Int32x4 counterparts.
- I32x4shiftRightByScalarU,
- I32x4lessThanU,
- I32x4lessThanOrEqualU,
- I32x4greaterThanU,
- I32x4greaterThanOrEqualU,
- I32x4fromFloat32x4U,
-#define _(OP) SIMD_OPCODE(F32x4, OP)
- FORALL_FLOAT32X4_ASMJS_OP(_)
- F32x4Constructor,
- F32x4Const,
-#undef _
-
-#define _(OP) SIMD_OPCODE(B8x16, OP)
- FORALL_BOOL_SIMD_OP(_)
- B8x16Constructor,
- B8x16Const,
-#undef _
-#undef OPCODE
-
-#define _(OP) SIMD_OPCODE(B16x8, OP)
- FORALL_BOOL_SIMD_OP(_)
- B16x8Constructor,
- B16x8Const,
-#undef _
-#undef OPCODE
-
-#define _(OP) SIMD_OPCODE(B32x4, OP)
- FORALL_BOOL_SIMD_OP(_)
- B32x4Constructor,
- B32x4Const,
-#undef _
-#undef OPCODE
-
Limit
};
diff --git a/js/src/wasm/WasmBinaryFormat.cpp b/js/src/wasm/WasmBinaryFormat.cpp
index 1e3914d515..92b634c583 100644
--- a/js/src/wasm/WasmBinaryFormat.cpp
+++ b/js/src/wasm/WasmBinaryFormat.cpp
@@ -57,17 +57,6 @@ DecodeValType(Decoder& d, ModuleKind kind, ValType* type)
case uint8_t(ValType::I64):
*type = ValType(unchecked);
return true;
- case uint8_t(ValType::I8x16):
- case uint8_t(ValType::I16x8):
- case uint8_t(ValType::I32x4):
- case uint8_t(ValType::F32x4):
- case uint8_t(ValType::B8x16):
- case uint8_t(ValType::B16x8):
- case uint8_t(ValType::B32x4):
- if (kind != ModuleKind::AsmJS)
- return d.fail("bad type");
- *type = ValType(unchecked);
- return true;
default:
break;
}
diff --git a/js/src/wasm/WasmBinaryFormat.h b/js/src/wasm/WasmBinaryFormat.h
index 4db90e999d..0279597cb8 100644
--- a/js/src/wasm/WasmBinaryFormat.h
+++ b/js/src/wasm/WasmBinaryFormat.h
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2016 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -126,18 +127,6 @@ class Encoder
MOZ_MUST_USE bool writeFixedF64(RawF64 d) {
return write<uint64_t>(d.bits());
}
- MOZ_MUST_USE bool writeFixedI8x16(const I8x16& i8x16) {
- return write<I8x16>(i8x16);
- }
- MOZ_MUST_USE bool writeFixedI16x8(const I16x8& i16x8) {
- return write<I16x8>(i16x8);
- }
- MOZ_MUST_USE bool writeFixedI32x4(const I32x4& i32x4) {
- return write<I32x4>(i32x4);
- }
- MOZ_MUST_USE bool writeFixedF32x4(const F32x4& f32x4) {
- return write<F32x4>(f32x4);
- }
// Variable-length encodings that all use LEB128.
@@ -375,18 +364,6 @@ class Decoder
*d = RawF64::fromBits(u);
return true;
}
- MOZ_MUST_USE bool readFixedI8x16(I8x16* i8x16) {
- return read<I8x16>(i8x16);
- }
- MOZ_MUST_USE bool readFixedI16x8(I16x8* i16x8) {
- return read<I16x8>(i16x8);
- }
- MOZ_MUST_USE bool readFixedI32x4(I32x4* i32x4) {
- return read<I32x4>(i32x4);
- }
- MOZ_MUST_USE bool readFixedF32x4(F32x4* f32x4) {
- return read<F32x4>(f32x4);
- }
// Variable-length encodings that all use LEB128.
@@ -599,26 +576,6 @@ class Decoder
? Op(u8)
: Op(uncheckedReadFixedU8() + UINT8_MAX);
}
- void uncheckedReadFixedI8x16(I8x16* i8x16) {
- struct T { I8x16 v; };
- T t = uncheckedRead<T>();
- memcpy(i8x16, &t, sizeof(t));
- }
- void uncheckedReadFixedI16x8(I16x8* i16x8) {
- struct T { I16x8 v; };
- T t = uncheckedRead<T>();
- memcpy(i16x8, &t, sizeof(t));
- }
- void uncheckedReadFixedI32x4(I32x4* i32x4) {
- struct T { I32x4 v; };
- T t = uncheckedRead<T>();
- memcpy(i32x4, &t, sizeof(t));
- }
- void uncheckedReadFixedF32x4(F32x4* f32x4) {
- struct T { F32x4 v; };
- T t = uncheckedRead<T>();
- memcpy(f32x4, &t, sizeof(t));
- }
};
// Reusable macro encoding/decoding functions reused by both the two
diff --git a/js/src/wasm/WasmBinaryIterator.cpp b/js/src/wasm/WasmBinaryIterator.cpp
index 14371e8f22..cedd97eb97 100644
--- a/js/src/wasm/WasmBinaryIterator.cpp
+++ b/js/src/wasm/WasmBinaryIterator.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -42,20 +43,6 @@ wasm::Classify(Op op)
return OpKind::F32;
case Op::F64Const:
return OpKind::F64;
- case Op::I8x16Const:
- return OpKind::I8x16;
- case Op::I16x8Const:
- return OpKind::I16x8;
- case Op::I32x4Const:
- return OpKind::I32x4;
- case Op::B8x16Const:
- return OpKind::B8x16;
- case Op::B16x8Const:
- return OpKind::B16x8;
- case Op::B32x4Const:
- return OpKind::B32x4;
- case Op::F32x4Const:
- return OpKind::F32x4;
case Op::Br:
return OpKind::Br;
case Op::BrIf:
@@ -95,20 +82,6 @@ wasm::Classify(Op op)
case Op::F64Exp:
case Op::F64Log:
case Op::I32Neg:
- case Op::I8x16neg:
- case Op::I8x16not:
- case Op::I16x8neg:
- case Op::I16x8not:
- case Op::I32x4neg:
- case Op::I32x4not:
- case Op::F32x4neg:
- case Op::F32x4sqrt:
- case Op::F32x4abs:
- case Op::F32x4reciprocalApproximation:
- case Op::F32x4reciprocalSqrtApproximation:
- case Op::B8x16not:
- case Op::B16x8not:
- case Op::B32x4not:
return OpKind::Unary;
case Op::I32Add:
case Op::I32Sub:
@@ -159,49 +132,6 @@ wasm::Classify(Op op)
case Op::F64Mod:
case Op::F64Pow:
case Op::F64Atan2:
- case Op::I8x16add:
- case Op::I8x16sub:
- case Op::I8x16mul:
- case Op::I8x16addSaturate:
- case Op::I8x16subSaturate:
- case Op::I8x16addSaturateU:
- case Op::I8x16subSaturateU:
- case Op::I8x16and:
- case Op::I8x16or:
- case Op::I8x16xor:
- case Op::I16x8add:
- case Op::I16x8sub:
- case Op::I16x8mul:
- case Op::I16x8addSaturate:
- case Op::I16x8subSaturate:
- case Op::I16x8addSaturateU:
- case Op::I16x8subSaturateU:
- case Op::I16x8and:
- case Op::I16x8or:
- case Op::I16x8xor:
- case Op::I32x4add:
- case Op::I32x4sub:
- case Op::I32x4mul:
- case Op::I32x4and:
- case Op::I32x4or:
- case Op::I32x4xor:
- case Op::F32x4add:
- case Op::F32x4sub:
- case Op::F32x4mul:
- case Op::F32x4div:
- case Op::F32x4min:
- case Op::F32x4max:
- case Op::F32x4minNum:
- case Op::F32x4maxNum:
- case Op::B8x16and:
- case Op::B8x16or:
- case Op::B8x16xor:
- case Op::B16x8and:
- case Op::B16x8or:
- case Op::B16x8xor:
- case Op::B32x4and:
- case Op::B32x4or:
- case Op::B32x4xor:
return OpKind::Binary;
case Op::I32Eq:
case Op::I32Ne:
@@ -263,22 +193,6 @@ wasm::Classify(Op op)
case Op::F64ConvertUI64:
case Op::F64ReinterpretI64:
case Op::F64PromoteF32:
- case Op::I32x4fromFloat32x4:
- case Op::I32x4fromFloat32x4U:
- case Op::F32x4fromInt32x4:
- case Op::F32x4fromUint32x4:
- case Op::I32x4fromFloat32x4Bits:
- case Op::I32x4fromInt8x16Bits:
- case Op::I32x4fromInt16x8Bits:
- case Op::I16x8fromInt8x16Bits:
- case Op::I16x8fromInt32x4Bits:
- case Op::I16x8fromFloat32x4Bits:
- case Op::I8x16fromInt16x8Bits:
- case Op::I8x16fromInt32x4Bits:
- case Op::I8x16fromFloat32x4Bits:
- case Op::F32x4fromInt8x16Bits:
- case Op::F32x4fromInt16x8Bits:
- case Op::F32x4fromInt32x4Bits:
return OpKind::Conversion;
case Op::I32Load8S:
case Op::I32Load8U:
@@ -294,16 +208,6 @@ wasm::Classify(Op op)
case Op::I64Load:
case Op::F32Load:
case Op::F64Load:
- case Op::I8x16load:
- case Op::I16x8load:
- case Op::I32x4load:
- case Op::I32x4load1:
- case Op::I32x4load2:
- case Op::I32x4load3:
- case Op::F32x4load:
- case Op::F32x4load1:
- case Op::F32x4load2:
- case Op::F32x4load3:
return OpKind::Load;
case Op::I32Store8:
case Op::I32Store16:
@@ -326,16 +230,6 @@ wasm::Classify(Op op)
case Op::F64TeeStore:
case Op::F32TeeStoreF64:
case Op::F64TeeStoreF32:
- case Op::I8x16store:
- case Op::I16x8store:
- case Op::I32x4store:
- case Op::I32x4store1:
- case Op::I32x4store2:
- case Op::I32x4store3:
- case Op::F32x4store:
- case Op::F32x4store1:
- case Op::F32x4store2:
- case Op::F32x4store3:
return OpKind::TeeStore;
case Op::Select:
return OpKind::Select;
@@ -377,116 +271,6 @@ wasm::Classify(Op op)
return OpKind::AtomicCompareExchange;
case Op::I32AtomicsExchange:
return OpKind::AtomicExchange;
- case Op::I8x16extractLane:
- case Op::I8x16extractLaneU:
- case Op::I16x8extractLane:
- case Op::I16x8extractLaneU:
- case Op::I32x4extractLane:
- case Op::F32x4extractLane:
- case Op::B8x16extractLane:
- case Op::B16x8extractLane:
- case Op::B32x4extractLane:
- return OpKind::ExtractLane;
- case Op::I8x16replaceLane:
- case Op::I16x8replaceLane:
- case Op::I32x4replaceLane:
- case Op::F32x4replaceLane:
- case Op::B8x16replaceLane:
- case Op::B16x8replaceLane:
- case Op::B32x4replaceLane:
- return OpKind::ReplaceLane;
- case Op::I8x16swizzle:
- case Op::I16x8swizzle:
- case Op::I32x4swizzle:
- case Op::F32x4swizzle:
- return OpKind::Swizzle;
- case Op::I8x16shuffle:
- case Op::I16x8shuffle:
- case Op::I32x4shuffle:
- case Op::F32x4shuffle:
- return OpKind::Shuffle;
- case Op::I16x8check:
- case Op::I16x8splat:
- case Op::I32x4check:
- case Op::I32x4splat:
- case Op::I8x16check:
- case Op::I8x16splat:
- case Op::F32x4check:
- case Op::F32x4splat:
- case Op::B16x8check:
- case Op::B16x8splat:
- case Op::B32x4check:
- case Op::B32x4splat:
- case Op::B8x16check:
- case Op::B8x16splat:
- return OpKind::Splat;
- case Op::I8x16select:
- case Op::I16x8select:
- case Op::I32x4select:
- case Op::F32x4select:
- return OpKind::SimdSelect;
- case Op::I8x16Constructor:
- case Op::I16x8Constructor:
- case Op::I32x4Constructor:
- case Op::F32x4Constructor:
- case Op::B8x16Constructor:
- case Op::B16x8Constructor:
- case Op::B32x4Constructor:
- return OpKind::SimdCtor;
- case Op::B8x16allTrue:
- case Op::B8x16anyTrue:
- case Op::B16x8allTrue:
- case Op::B16x8anyTrue:
- case Op::B32x4allTrue:
- case Op::B32x4anyTrue:
- return OpKind::SimdBooleanReduction;
- case Op::I8x16shiftLeftByScalar:
- case Op::I8x16shiftRightByScalar:
- case Op::I8x16shiftRightByScalarU:
- case Op::I16x8shiftLeftByScalar:
- case Op::I16x8shiftRightByScalar:
- case Op::I16x8shiftRightByScalarU:
- case Op::I32x4shiftLeftByScalar:
- case Op::I32x4shiftRightByScalar:
- case Op::I32x4shiftRightByScalarU:
- return OpKind::SimdShiftByScalar;
- case Op::I8x16equal:
- case Op::I8x16notEqual:
- case Op::I8x16greaterThan:
- case Op::I8x16greaterThanOrEqual:
- case Op::I8x16lessThan:
- case Op::I8x16lessThanOrEqual:
- case Op::I8x16greaterThanU:
- case Op::I8x16greaterThanOrEqualU:
- case Op::I8x16lessThanU:
- case Op::I8x16lessThanOrEqualU:
- case Op::I16x8equal:
- case Op::I16x8notEqual:
- case Op::I16x8greaterThan:
- case Op::I16x8greaterThanOrEqual:
- case Op::I16x8lessThan:
- case Op::I16x8lessThanOrEqual:
- case Op::I16x8greaterThanU:
- case Op::I16x8greaterThanOrEqualU:
- case Op::I16x8lessThanU:
- case Op::I16x8lessThanOrEqualU:
- case Op::I32x4equal:
- case Op::I32x4notEqual:
- case Op::I32x4greaterThan:
- case Op::I32x4greaterThanOrEqual:
- case Op::I32x4lessThan:
- case Op::I32x4lessThanOrEqual:
- case Op::I32x4greaterThanU:
- case Op::I32x4greaterThanOrEqualU:
- case Op::I32x4lessThanU:
- case Op::I32x4lessThanOrEqualU:
- case Op::F32x4equal:
- case Op::F32x4notEqual:
- case Op::F32x4greaterThan:
- case Op::F32x4greaterThanOrEqual:
- case Op::F32x4lessThan:
- case Op::F32x4lessThanOrEqual:
- return OpKind::SimdComparison;
case Op::CurrentMemory:
return OpKind::CurrentMemory;
case Op::GrowMemory:
diff --git a/js/src/wasm/WasmBinaryIterator.h b/js/src/wasm/WasmBinaryIterator.h
index 522182ed92..ccad80f9dd 100644
--- a/js/src/wasm/WasmBinaryIterator.h
+++ b/js/src/wasm/WasmBinaryIterator.h
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2016 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -48,13 +49,6 @@ enum class OpKind {
I64,
F32,
F64,
- I8x16,
- I16x8,
- I32x4,
- F32x4,
- B8x16,
- B16x8,
- B32x4,
Br,
BrIf,
BrTable,
@@ -93,11 +87,6 @@ enum class OpKind {
Swizzle,
Shuffle,
Splat,
- SimdSelect,
- SimdCtor,
- SimdBooleanReduction,
- SimdShiftByScalar,
- SimdComparison,
};
// Return the OpKind for a given Op. This is used for sanity-checking that
@@ -319,30 +308,6 @@ class MOZ_STACK_CLASS OpIter : private Policy
*out = d_.uncheckedReadFixedF64();
return true;
}
- MOZ_MUST_USE bool readFixedI8x16(I8x16* out) {
- if (Validate)
- return d_.readFixedI8x16(out);
- d_.uncheckedReadFixedI8x16(out);
- return true;
- }
- MOZ_MUST_USE bool readFixedI16x8(I16x8* out) {
- if (Validate)
- return d_.readFixedI16x8(out);
- d_.uncheckedReadFixedI16x8(out);
- return true;
- }
- MOZ_MUST_USE bool readFixedI32x4(I32x4* out) {
- if (Validate)
- return d_.readFixedI32x4(out);
- d_.uncheckedReadFixedI32x4(out);
- return true;
- }
- MOZ_MUST_USE bool readFixedF32x4(F32x4* out) {
- if (Validate)
- return d_.readFixedF32x4(out);
- d_.uncheckedReadFixedF32x4(out);
- return true;
- }
MOZ_MUST_USE bool readAtomicViewType(Scalar::Type* viewType) {
uint8_t x;
@@ -576,13 +541,6 @@ class MOZ_STACK_CLASS OpIter : private Policy
MOZ_MUST_USE bool readI64Const(int64_t* i64);
MOZ_MUST_USE bool readF32Const(RawF32* f32);
MOZ_MUST_USE bool readF64Const(RawF64* f64);
- MOZ_MUST_USE bool readI8x16Const(I8x16* i8x16);
- MOZ_MUST_USE bool readI16x8Const(I16x8* i16x8);
- MOZ_MUST_USE bool readI32x4Const(I32x4* i32x4);
- MOZ_MUST_USE bool readF32x4Const(F32x4* f32x4);
- MOZ_MUST_USE bool readB8x16Const(I8x16* i8x16);
- MOZ_MUST_USE bool readB16x8Const(I16x8* i16x8);
- MOZ_MUST_USE bool readB32x4Const(I32x4* i32x4);
MOZ_MUST_USE bool readCall(uint32_t* calleeIndex);
MOZ_MUST_USE bool readCallIndirect(uint32_t* sigIndex, Value* callee);
MOZ_MUST_USE bool readOldCallIndirect(uint32_t* sigIndex);
@@ -606,27 +564,6 @@ class MOZ_STACK_CLASS OpIter : private Policy
MOZ_MUST_USE bool readAtomicExchange(LinearMemoryAddress<Value>* addr,
Scalar::Type* viewType,
Value* newValue);
- MOZ_MUST_USE bool readSimdComparison(ValType simdType, Value* lhs,
- Value* rhs);
- MOZ_MUST_USE bool readSimdShiftByScalar(ValType simdType, Value* lhs,
- Value* rhs);
- MOZ_MUST_USE bool readSimdBooleanReduction(ValType simdType, Value* input);
- MOZ_MUST_USE bool readExtractLane(ValType simdType, uint8_t* lane,
- Value* vector);
- MOZ_MUST_USE bool readReplaceLane(ValType simdType, uint8_t* lane,
- Value* vector, Value* scalar);
- MOZ_MUST_USE bool readSplat(ValType simdType, Value* scalar);
- MOZ_MUST_USE bool readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector);
- MOZ_MUST_USE bool readShuffle(ValType simdType, uint8_t (* lanes)[16],
- Value* lhs, Value* rhs);
- MOZ_MUST_USE bool readSimdSelect(ValType simdType, Value* trueValue,
- Value* falseValue,
- Value* condition);
- MOZ_MUST_USE bool readSimdCtor();
- MOZ_MUST_USE bool readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t argIndex,
- Value* arg);
- MOZ_MUST_USE bool readSimdCtorArgsEnd(uint32_t numElements);
- MOZ_MUST_USE bool readSimdCtorReturn(ValType simdType);
// At a location where readOp is allowed, peek at the next opcode
// without consuming it or updating any internal state.
@@ -827,13 +764,6 @@ OpIter<Policy>::readBlockType(ExprType* type)
case uint8_t(ExprType::I64):
case uint8_t(ExprType::F32):
case uint8_t(ExprType::F64):
- case uint8_t(ExprType::I8x16):
- case uint8_t(ExprType::I16x8):
- case uint8_t(ExprType::I32x4):
- case uint8_t(ExprType::F32x4):
- case uint8_t(ExprType::B8x16):
- case uint8_t(ExprType::B16x8):
- case uint8_t(ExprType::B32x4):
break;
default:
return fail("invalid inline block type");
@@ -1649,118 +1579,6 @@ OpIter<Policy>::readF64Const(RawF64* f64)
template <typename Policy>
inline bool
-OpIter<Policy>::readI8x16Const(I8x16* i8x16)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::I8x16);
-
- I8x16 unused;
- if (!readFixedI8x16(Output ? i8x16 : &unused))
- return false;
-
- if (!push(ValType::I8x16))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readI16x8Const(I16x8* i16x8)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::I16x8);
-
- I16x8 unused;
- if (!readFixedI16x8(Output ? i16x8 : &unused))
- return false;
-
- if (!push(ValType::I16x8))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readI32x4Const(I32x4* i32x4)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::I32x4);
-
- I32x4 unused;
- if (!readFixedI32x4(Output ? i32x4 : &unused))
- return false;
-
- if (!push(ValType::I32x4))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readF32x4Const(F32x4* f32x4)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::F32x4);
-
- F32x4 unused;
- if (!readFixedF32x4(Output ? f32x4 : &unused))
- return false;
-
- if (!push(ValType::F32x4))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readB8x16Const(I8x16* i8x16)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::B8x16);
-
- I8x16 unused;
- if (!readFixedI8x16(Output ? i8x16 : &unused))
- return false;
-
- if (!push(ValType::B8x16))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readB16x8Const(I16x8* i16x8)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::B16x8);
-
- I16x8 unused;
- if (!readFixedI16x8(Output ? i16x8 : &unused))
- return false;
-
- if (!push(ValType::B16x8))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readB32x4Const(I32x4* i32x4)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::B32x4);
-
- I32x4 unused;
- if (!readFixedI32x4(Output ? i32x4 : &unused))
- return false;
-
- if (!push(ValType::B32x4))
- return false;
-
- return true;
-}
-
-template <typename Policy>
-inline bool
OpIter<Policy>::readCall(uint32_t* calleeIndex)
{
MOZ_ASSERT(Classify(op_) == OpKind::Call);
@@ -1997,239 +1815,6 @@ OpIter<Policy>::readAtomicExchange(LinearMemoryAddress<Value>* addr, Scalar::Typ
return true;
}
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdComparison(ValType simdType, Value* lhs, Value* rhs)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdComparison);
-
- if (!popWithType(simdType, rhs))
- return false;
-
- if (!popWithType(simdType, lhs))
- return false;
-
- infalliblePush(SimdBoolType(simdType));
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdShiftByScalar(ValType simdType, Value* lhs, Value* rhs)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdShiftByScalar);
-
- if (!popWithType(ValType::I32, rhs))
- return false;
-
- if (!popWithType(simdType, lhs))
- return false;
-
- infalliblePush(simdType);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdBooleanReduction(ValType simdType, Value* input)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdBooleanReduction);
-
- if (!popWithType(simdType, input))
- return false;
-
- infalliblePush(ValType::I32);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readExtractLane(ValType simdType, uint8_t* lane, Value* vector)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::ExtractLane);
-
- uint32_t laneBits;
- if (!readVarU32(&laneBits))
- return false;
-
- if (Validate && laneBits >= NumSimdElements(simdType))
- return fail("simd lane out of bounds for simd type");
-
- if (!popWithType(simdType, vector))
- return false;
-
- infalliblePush(SimdElementType(simdType));
-
- if (Output)
- *lane = uint8_t(laneBits);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readReplaceLane(ValType simdType, uint8_t* lane, Value* vector, Value* scalar)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::ReplaceLane);
-
- uint32_t laneBits;
- if (!readVarU32(&laneBits))
- return false;
-
- if (Validate && laneBits >= NumSimdElements(simdType))
- return fail("simd lane out of bounds for simd type");
-
- if (!popWithType(SimdElementType(simdType), scalar))
- return false;
-
- if (!popWithType(simdType, vector))
- return false;
-
- infalliblePush(simdType);
-
- if (Output)
- *lane = uint8_t(laneBits);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSplat(ValType simdType, Value* scalar)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::Splat);
-
- if (!popWithType(SimdElementType(simdType), scalar))
- return false;
-
- infalliblePush(simdType);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::Swizzle);
-
- uint32_t numSimdLanes = NumSimdElements(simdType);
- MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
- for (uint32_t i = 0; i < numSimdLanes; ++i) {
- uint8_t validateLane;
- if (!readFixedU8(Output ? &(*lanes)[i] : &validateLane))
- return fail("unable to read swizzle lane");
- if (Validate && (Output ? (*lanes)[i] : validateLane) >= numSimdLanes)
- return fail("swizzle index out of bounds");
- }
-
- if (!popWithType(simdType, vector))
- return false;
-
- infalliblePush(simdType);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readShuffle(ValType simdType, uint8_t (* lanes)[16], Value* lhs, Value* rhs)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::Shuffle);
-
- uint32_t numSimdLanes = NumSimdElements(simdType);
- MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
- for (uint32_t i = 0; i < numSimdLanes; ++i) {
- uint8_t validateLane;
- if (!readFixedU8(Output ? &(*lanes)[i] : &validateLane))
- return fail("unable to read shuffle lane");
- if (Validate && (Output ? (*lanes)[i] : validateLane) >= numSimdLanes * 2)
- return fail("shuffle index out of bounds");
- }
-
- if (!popWithType(simdType, rhs))
- return false;
-
- if (!popWithType(simdType, lhs))
- return false;
-
- infalliblePush(simdType);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdSelect(ValType simdType, Value* trueValue, Value* falseValue,
- Value* condition)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdSelect);
-
- if (!popWithType(simdType, falseValue))
- return false;
- if (!popWithType(simdType, trueValue))
- return false;
- if (!popWithType(SimdBoolType(simdType), condition))
- return false;
-
- infalliblePush(simdType);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdCtor()
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t index,
- Value* arg)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
- MOZ_ASSERT(numElements > 0);
-
- TypeAndValue<Value> tv;
-
- if (!peek(numElements - index, &tv))
- return false;
- if (!checkType(tv.type(), elementType))
- return false;
-
- *arg = tv.value();
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdCtorArgsEnd(uint32_t numElements)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
- MOZ_ASSERT(numElements <= valueStack_.length());
-
- valueStack_.shrinkBy(numElements);
-
- return true;
-}
-
-template <typename Policy>
-inline bool
-OpIter<Policy>::readSimdCtorReturn(ValType simdType)
-{
- MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
-
- infalliblePush(simdType);
-
- return true;
-}
} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
index 7bf02fbe70..daff135077 100644
--- a/js/src/wasm/WasmGenerator.cpp
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -665,15 +666,6 @@ ModuleGenerator::allocateGlobal(GlobalDesc* global)
case ValType::F64:
width = 8;
break;
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- width = 16;
- break;
}
uint32_t offset;
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
index b98d4344f9..40197c6ed1 100644
--- a/js/src/wasm/WasmInstance.cpp
+++ b/js/src/wasm/WasmInstance.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2016 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -158,14 +159,10 @@ Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, con
hasI64Arg = true;
break;
}
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- MOZ_CRASH("unhandled type in callImport");
+ default: {
+ MOZ_ASSERT("unhandled type in callImport");
+ return false;
+ }
}
}
@@ -230,13 +227,6 @@ Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, con
case ValType::I64: MOZ_CRASH("can't happen because of above guard");
case ValType::F32: type = TypeSet::DoubleType(); break;
case ValType::F64: type = TypeSet::DoubleType(); break;
- case ValType::I8x16: MOZ_CRASH("NYI");
- case ValType::I16x8: MOZ_CRASH("NYI");
- case ValType::I32x4: MOZ_CRASH("NYI");
- case ValType::F32x4: MOZ_CRASH("NYI");
- case ValType::B8x16: MOZ_CRASH("NYI");
- case ValType::B16x8: MOZ_CRASH("NYI");
- case ValType::B32x4: MOZ_CRASH("NYI");
}
if (!TypeScript::ArgTypes(script, i)->hasType(type))
return true;
@@ -541,12 +531,12 @@ Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
// The calling convention for an external call into wasm is to pass an
// array of 16-byte values where each value contains either a coerced int32
- // (in the low word), a double value (in the low dword) or a SIMD vector
- // value, with the coercions specified by the wasm signature. The external
- // entry point unpacks this array into the system-ABI-specified registers
- // and stack memory and then calls into the internal entry point. The return
- // value is stored in the first element of the array (which, therefore, must
- // have length >= 1).
+ // (in the low word), or a double value (in the low dword) value, with the
+ // coercions specified by the wasm signature. The external entry point
+ // unpacks this array into the system-ABI-specified registers and stack
+ // memory and then calls into the internal entry point. The return value is
+ // stored in the first element of the array (which, therefore, must have
+ // length >= 1).
Vector<ExportArg, 8> exportArgs(cx);
if (!exportArgs.resize(Max<size_t>(1, func.sig().args().length())))
return false;
@@ -585,58 +575,6 @@ Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
if (!ToNumber(cx, v, (double*)&exportArgs[i]))
return false;
break;
- case ValType::I8x16: {
- SimdConstant simd;
- if (!ToSimdConstant<Int8x16>(cx, v, &simd))
- return false;
- memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
- break;
- }
- case ValType::I16x8: {
- SimdConstant simd;
- if (!ToSimdConstant<Int16x8>(cx, v, &simd))
- return false;
- memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
- break;
- }
- case ValType::I32x4: {
- SimdConstant simd;
- if (!ToSimdConstant<Int32x4>(cx, v, &simd))
- return false;
- memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
- break;
- }
- case ValType::F32x4: {
- SimdConstant simd;
- if (!ToSimdConstant<Float32x4>(cx, v, &simd))
- return false;
- memcpy(&exportArgs[i], simd.asFloat32x4(), Simd128DataSize);
- break;
- }
- case ValType::B8x16: {
- SimdConstant simd;
- if (!ToSimdConstant<Bool8x16>(cx, v, &simd))
- return false;
- // Bool8x16 uses the same representation as Int8x16.
- memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
- break;
- }
- case ValType::B16x8: {
- SimdConstant simd;
- if (!ToSimdConstant<Bool16x8>(cx, v, &simd))
- return false;
- // Bool16x8 uses the same representation as Int16x8.
- memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
- break;
- }
- case ValType::B32x4: {
- SimdConstant simd;
- if (!ToSimdConstant<Bool32x4>(cx, v, &simd))
- return false;
- // Bool32x4 uses the same representation as Int32x4.
- memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
- break;
- }
}
}
@@ -703,41 +641,6 @@ Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
}
args.rval().set(NumberValue(*(double*)retAddr));
break;
- case ExprType::I8x16:
- retObj = CreateSimd<Int8x16>(cx, (int8_t*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::I16x8:
- retObj = CreateSimd<Int16x8>(cx, (int16_t*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::I32x4:
- retObj = CreateSimd<Int32x4>(cx, (int32_t*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::F32x4:
- retObj = CreateSimd<Float32x4>(cx, (float*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::B8x16:
- retObj = CreateSimd<Bool8x16>(cx, (int8_t*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::B16x8:
- retObj = CreateSimd<Bool16x8>(cx, (int16_t*)retAddr);
- if (!retObj)
- return false;
- break;
- case ExprType::B32x4:
- retObj = CreateSimd<Bool32x4>(cx, (int32_t*)retAddr);
- if (!retObj)
- return false;
- break;
case ExprType::Limit:
MOZ_CRASH("Limit");
}
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
index 2b16d3f053..c9839ed640 100644
--- a/js/src/wasm/WasmIonCompile.cpp
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -250,30 +251,6 @@ class FunctionCompiler
case ValType::F64:
ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
break;
- case ValType::I8x16:
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Int8x16);
- break;
- case ValType::I16x8:
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Int16x8);
- break;
- case ValType::I32x4:
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Int32x4);
- break;
- case ValType::F32x4:
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0.f), MIRType::Float32x4);
- break;
- case ValType::B8x16:
- // Bool8x16 uses the same data layout as Int8x16.
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Bool8x16);
- break;
- case ValType::B16x8:
- // Bool16x8 uses the same data layout as Int16x8.
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Bool16x8);
- break;
- case ValType::B32x4:
- // Bool32x4 uses the same data layout as Int32x4.
- ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Bool32x4);
- break;
}
curBlock_->add(ins);
@@ -323,16 +300,6 @@ class FunctionCompiler
/***************************** Code generation (after local scope setup) */
- MDefinition* constant(const SimdConstant& v, MIRType type)
- {
- if (inDeadCode())
- return nullptr;
- MInstruction* constant;
- constant = MSimdConstant::New(alloc(), v, type);
- curBlock_->add(constant);
- return constant;
- }
-
MDefinition* constant(const Value& v, MIRType type)
{
if (inDeadCode())
@@ -425,172 +392,6 @@ class FunctionCompiler
return ins;
}
- MDefinition* unarySimd(MDefinition* input, MSimdUnaryArith::Operation op, MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(input->type()) && input->type() == type);
- MInstruction* ins = MSimdUnaryArith::New(alloc(), input, op);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryArith::Operation op,
- MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
- MOZ_ASSERT(lhs->type() == type);
- return MSimdBinaryArith::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
- }
-
- MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryBitwise::Operation op,
- MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
- MOZ_ASSERT(lhs->type() == type);
- auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* binarySimdComp(MDefinition* lhs, MDefinition* rhs, MSimdBinaryComp::Operation op,
- SimdSign sign)
- {
- if (inDeadCode())
- return nullptr;
-
- return MSimdBinaryComp::AddLegalized(alloc(), curBlock_, lhs, rhs, op, sign);
- }
-
- MDefinition* binarySimdSaturating(MDefinition* lhs, MDefinition* rhs,
- MSimdBinarySaturating::Operation op, SimdSign sign)
- {
- if (inDeadCode())
- return nullptr;
-
- auto* ins = MSimdBinarySaturating::New(alloc(), lhs, rhs, op, sign);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* binarySimdShift(MDefinition* lhs, MDefinition* rhs, MSimdShift::Operation op)
- {
- if (inDeadCode())
- return nullptr;
-
- return MSimdShift::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
- }
-
- MDefinition* swizzleSimd(MDefinition* vector, const uint8_t lanes[], MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(vector->type() == type);
- MSimdSwizzle* ins = MSimdSwizzle::New(alloc(), vector, lanes);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* shuffleSimd(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[],
- MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(lhs->type() == type);
- MInstruction* ins = MSimdShuffle::New(alloc(), lhs, rhs, lanes);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* insertElementSimd(MDefinition* vec, MDefinition* val, unsigned lane, MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(vec->type()) && vec->type() == type);
- MOZ_ASSERT(SimdTypeToLaneArgumentType(vec->type()) == val->type());
- MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), vec, val, lane);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* selectSimd(MDefinition* mask, MDefinition* lhs, MDefinition* rhs, MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(mask->type()));
- MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
- MOZ_ASSERT(lhs->type() == type);
- MSimdSelect* ins = MSimdSelect::New(alloc(), mask, lhs, rhs);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* simdAllTrue(MDefinition* boolVector)
- {
- if (inDeadCode())
- return nullptr;
-
- MSimdAllTrue* ins = MSimdAllTrue::New(alloc(), boolVector, MIRType::Int32);
- curBlock_->add(ins);
- return ins;
- }
-
- MDefinition* simdAnyTrue(MDefinition* boolVector)
- {
- if (inDeadCode())
- return nullptr;
-
- MSimdAnyTrue* ins = MSimdAnyTrue::New(alloc(), boolVector, MIRType::Int32);
- curBlock_->add(ins);
- return ins;
- }
-
- // fromXXXBits()
- MDefinition* bitcastSimd(MDefinition* vec, MIRType from, MIRType to)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(vec->type() == from);
- MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
- auto* ins = MSimdReinterpretCast::New(alloc(), vec, to);
- curBlock_->add(ins);
- return ins;
- }
-
- // Int <--> Float conversions.
- MDefinition* convertSimd(MDefinition* vec, MIRType from, MIRType to, SimdSign sign)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
- return MSimdConvert::AddLegalized(alloc(), curBlock_, vec, to, sign, trapOffset());
- }
-
- MDefinition* splatSimd(MDefinition* v, MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(type));
- MOZ_ASSERT(SimdTypeToLaneArgumentType(type) == v->type());
- MSimdSplat* ins = MSimdSplat::New(alloc(), v, type);
- curBlock_->add(ins);
- return ins;
- }
-
MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type, bool isMax) {
if (inDeadCode())
return nullptr;
@@ -824,31 +625,6 @@ class FunctionCompiler
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
}
- MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(base->type()));
- MOZ_ASSERT(!IsSimdType(type));
- auto* ins = MSimdExtractElement::New(alloc(), base, type, lane, sign);
- curBlock_->add(ins);
- return ins;
- }
-
- template<typename T>
- MDefinition* constructSimd(MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w,
- MIRType type)
- {
- if (inDeadCode())
- return nullptr;
-
- MOZ_ASSERT(IsSimdType(type));
- T* ins = T::New(alloc(), type, x, y, z, w);
- curBlock_->add(ins);
- return ins;
- }
-
/***************************************************************** Calls */
// The IonMonkey backend maintains a single stack offset (from the stack
@@ -2061,18 +1837,6 @@ EmitGetGlobal(FunctionCompiler& f)
case ValType::F64:
result = f.constant(value.f64());
break;
- case ValType::I8x16:
- result = f.constant(SimdConstant::CreateX16(value.i8x16()), mirType);
- break;
- case ValType::I16x8:
- result = f.constant(SimdConstant::CreateX8(value.i16x8()), mirType);
- break;
- case ValType::I32x4:
- result = f.constant(SimdConstant::CreateX4(value.i32x4()), mirType);
- break;
- case ValType::F32x4:
- result = f.constant(SimdConstant::CreateX4(value.f32x4()), mirType);
- break;
default:
MOZ_CRASH("unexpected type in EmitGetGlobal");
}
@@ -2563,491 +2327,6 @@ EmitAtomicsExchange(FunctionCompiler& f)
}
static bool
-EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp)
-{
- MSimdUnaryArith::Operation op;
- switch (simdOp) {
- case SimdOperation::Fn_abs:
- op = MSimdUnaryArith::abs;
- break;
- case SimdOperation::Fn_neg:
- op = MSimdUnaryArith::neg;
- break;
- case SimdOperation::Fn_not:
- op = MSimdUnaryArith::not_;
- break;
- case SimdOperation::Fn_sqrt:
- op = MSimdUnaryArith::sqrt;
- break;
- case SimdOperation::Fn_reciprocalApproximation:
- op = MSimdUnaryArith::reciprocalApproximation;
- break;
- case SimdOperation::Fn_reciprocalSqrtApproximation:
- op = MSimdUnaryArith::reciprocalSqrtApproximation;
- break;
- default:
- MOZ_CRASH("not a simd unary arithmetic operation");
- }
-
- MDefinition* input;
- if (!f.iter().readUnary(type, &input))
- return false;
-
- f.iter().setResult(f.unarySimd(input, op, ToMIRType(type)));
- return true;
-}
-
-template<class OpKind>
-inline bool
-EmitSimdBinary(FunctionCompiler& f, ValType type, OpKind op)
-{
- MDefinition* lhs;
- MDefinition* rhs;
- if (!f.iter().readBinary(type, &lhs, &rhs))
- return false;
-
- f.iter().setResult(f.binarySimd(lhs, rhs, op, ToMIRType(type)));
- return true;
-}
-
-static bool
-EmitSimdBinaryComp(FunctionCompiler& f, ValType operandType, MSimdBinaryComp::Operation op,
- SimdSign sign)
-{
- MDefinition* lhs;
- MDefinition* rhs;
- if (!f.iter().readSimdComparison(operandType, &lhs, &rhs))
- return false;
-
- f.iter().setResult(f.binarySimdComp(lhs, rhs, op, sign));
- return true;
-}
-
-static bool
-EmitSimdBinarySaturating(FunctionCompiler& f, ValType type, MSimdBinarySaturating::Operation op,
- SimdSign sign)
-{
- MDefinition* lhs;
- MDefinition* rhs;
- if (!f.iter().readBinary(type, &lhs, &rhs))
- return false;
-
- f.iter().setResult(f.binarySimdSaturating(lhs, rhs, op, sign));
- return true;
-}
-
-static bool
-EmitSimdShift(FunctionCompiler& f, ValType operandType, MSimdShift::Operation op)
-{
- MDefinition* lhs;
- MDefinition* rhs;
- if (!f.iter().readSimdShiftByScalar(operandType, &lhs, &rhs))
- return false;
-
- f.iter().setResult(f.binarySimdShift(lhs, rhs, op));
- return true;
-}
-
-static ValType
-SimdToLaneType(ValType type)
-{
- switch (type) {
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4: return ValType::I32;
- case ValType::F32x4: return ValType::F32;
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4: return ValType::I32; // Boolean lanes are Int32 in asm.
- case ValType::I32:
- case ValType::I64:
- case ValType::F32:
- case ValType::F64:
- break;
- }
- MOZ_CRASH("bad simd type");
-}
-
-static bool
-EmitExtractLane(FunctionCompiler& f, ValType operandType, SimdSign sign)
-{
- uint8_t lane;
- MDefinition* vector;
- if (!f.iter().readExtractLane(operandType, &lane, &vector))
- return false;
-
- f.iter().setResult(f.extractSimdElement(lane, vector,
- ToMIRType(SimdToLaneType(operandType)), sign));
- return true;
-}
-
-// Emit an I32 expression and then convert it to a boolean SIMD lane value, i.e. -1 or 0.
-static MDefinition*
-EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition* i32)
-{
- // Compute !i32 - 1 to force the value range into {0, -1}.
- MDefinition* noti32 = f.unary<MNot>(i32);
- return f.binary<MSub>(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32);
-}
-
-static bool
-EmitSimdReplaceLane(FunctionCompiler& f, ValType simdType)
-{
- if (IsSimdBoolType(simdType))
- f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
-
- uint8_t lane;
- MDefinition* vector;
- MDefinition* scalar;
- if (!f.iter().readReplaceLane(simdType, &lane, &vector, &scalar))
- return false;
-
- f.iter().setResult(f.insertElementSimd(vector, scalar, lane, ToMIRType(simdType)));
- return true;
-}
-
-inline bool
-EmitSimdBitcast(FunctionCompiler& f, ValType fromType, ValType toType)
-{
- MDefinition* input;
- if (!f.iter().readConversion(fromType, toType, &input))
- return false;
-
- f.iter().setResult(f.bitcastSimd(input, ToMIRType(fromType), ToMIRType(toType)));
- return true;
-}
-
-inline bool
-EmitSimdConvert(FunctionCompiler& f, ValType fromType, ValType toType, SimdSign sign)
-{
- MDefinition* input;
- if (!f.iter().readConversion(fromType, toType, &input))
- return false;
-
- f.iter().setResult(f.convertSimd(input, ToMIRType(fromType), ToMIRType(toType), sign));
- return true;
-}
-
-static bool
-EmitSimdSwizzle(FunctionCompiler& f, ValType simdType)
-{
- uint8_t lanes[16];
- MDefinition* vector;
- if (!f.iter().readSwizzle(simdType, &lanes, &vector))
- return false;
-
- f.iter().setResult(f.swizzleSimd(vector, lanes, ToMIRType(simdType)));
- return true;
-}
-
-static bool
-EmitSimdShuffle(FunctionCompiler& f, ValType simdType)
-{
- uint8_t lanes[16];
- MDefinition* lhs;
- MDefinition* rhs;
- if (!f.iter().readShuffle(simdType, &lanes, &lhs, &rhs))
- return false;
-
- f.iter().setResult(f.shuffleSimd(lhs, rhs, lanes, ToMIRType(simdType)));
- return true;
-}
-
-static inline Scalar::Type
-SimdExprTypeToViewType(ValType type, unsigned* defaultNumElems)
-{
- switch (type) {
- case ValType::I8x16: *defaultNumElems = 16; return Scalar::Int8x16;
- case ValType::I16x8: *defaultNumElems = 8; return Scalar::Int16x8;
- case ValType::I32x4: *defaultNumElems = 4; return Scalar::Int32x4;
- case ValType::F32x4: *defaultNumElems = 4; return Scalar::Float32x4;
- default: break;
- }
- MOZ_CRASH("type not handled in SimdExprTypeToViewType");
-}
-
-static bool
-EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems)
-{
- unsigned defaultNumElems;
- Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
-
- if (!numElems)
- numElems = defaultNumElems;
-
- LinearMemoryAddress<MDefinition*> addr;
- if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
- return false;
-
- MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), numElems);
-
- f.iter().setResult(f.load(addr.base, access, resultType));
- return true;
-}
-
-static bool
-EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
-{
- unsigned defaultNumElems;
- Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
-
- if (!numElems)
- numElems = defaultNumElems;
-
- LinearMemoryAddress<MDefinition*> addr;
- MDefinition* value;
- if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
- return false;
-
- MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), numElems);
-
- f.store(addr.base, access, value);
- return true;
-}
-
-static bool
-EmitSimdSelect(FunctionCompiler& f, ValType simdType)
-{
- MDefinition* trueValue;
- MDefinition* falseValue;
- MDefinition* condition;
- if (!f.iter().readSimdSelect(simdType, &trueValue, &falseValue, &condition))
- return false;
-
- f.iter().setResult(f.selectSimd(condition, trueValue, falseValue,
- ToMIRType(simdType)));
- return true;
-}
-
-static bool
-EmitSimdAllTrue(FunctionCompiler& f, ValType operandType)
-{
- MDefinition* input;
- if (!f.iter().readSimdBooleanReduction(operandType, &input))
- return false;
-
- f.iter().setResult(f.simdAllTrue(input));
- return true;
-}
-
-static bool
-EmitSimdAnyTrue(FunctionCompiler& f, ValType operandType)
-{
- MDefinition* input;
- if (!f.iter().readSimdBooleanReduction(operandType, &input))
- return false;
-
- f.iter().setResult(f.simdAnyTrue(input));
- return true;
-}
-
-static bool
-EmitSimdSplat(FunctionCompiler& f, ValType simdType)
-{
- if (IsSimdBoolType(simdType))
- f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
-
- MDefinition* input;
- if (!f.iter().readSplat(simdType, &input))
- return false;
-
- f.iter().setResult(f.splatSimd(input, ToMIRType(simdType)));
- return true;
-}
-
-// Build a SIMD vector by inserting lanes one at a time into an initial constant.
-static bool
-EmitSimdChainedCtor(FunctionCompiler& f, ValType valType, MIRType type, const SimdConstant& init)
-{
- const unsigned length = SimdTypeToLength(type);
- MDefinition* val = f.constant(init, type);
- for (unsigned i = 0; i < length; i++) {
- MDefinition* scalar = 0;
- if (!f.iter().readSimdCtorArg(ValType::I32, length, i, &scalar))
- return false;
- val = f.insertElementSimd(val, scalar, i, type);
- }
- if (!f.iter().readSimdCtorArgsEnd(length) || !f.iter().readSimdCtorReturn(valType))
- return false;
- f.iter().setResult(val);
- return true;
-}
-
-// Build a boolean SIMD vector by inserting lanes one at a time into an initial constant.
-static bool
-EmitSimdBooleanChainedCtor(FunctionCompiler& f, ValType valType, MIRType type,
- const SimdConstant& init)
-{
- const unsigned length = SimdTypeToLength(type);
- MDefinition* val = f.constant(init, type);
- for (unsigned i = 0; i < length; i++) {
- MDefinition* scalar = 0;
- if (!f.iter().readSimdCtorArg(ValType::I32, length, i, &scalar))
- return false;
- val = f.insertElementSimd(val, EmitSimdBooleanLaneExpr(f, scalar), i, type);
- }
- if (!f.iter().readSimdCtorArgsEnd(length) || !f.iter().readSimdCtorReturn(valType))
- return false;
- f.iter().setResult(val);
- return true;
-}
-
-static bool
-EmitSimdCtor(FunctionCompiler& f, ValType type)
-{
- if (!f.iter().readSimdCtor())
- return false;
-
- switch (type) {
- case ValType::I8x16:
- return EmitSimdChainedCtor(f, type, MIRType::Int8x16, SimdConstant::SplatX16(0));
- case ValType::I16x8:
- return EmitSimdChainedCtor(f, type, MIRType::Int16x8, SimdConstant::SplatX8(0));
- case ValType::I32x4: {
- MDefinition* args[4];
- for (unsigned i = 0; i < 4; i++) {
- if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &args[i]))
- return false;
- }
- if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
- return false;
- f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
- MIRType::Int32x4));
- return true;
- }
- case ValType::F32x4: {
- MDefinition* args[4];
- for (unsigned i = 0; i < 4; i++) {
- if (!f.iter().readSimdCtorArg(ValType::F32, 4, i, &args[i]))
- return false;
- }
- if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
- return false;
- f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
- MIRType::Float32x4));
- return true;
- }
- case ValType::B8x16:
- return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool8x16, SimdConstant::SplatX16(0));
- case ValType::B16x8:
- return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool16x8, SimdConstant::SplatX8(0));
- case ValType::B32x4: {
- MDefinition* args[4];
- for (unsigned i = 0; i < 4; i++) {
- MDefinition* i32;
- if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &i32))
- return false;
- args[i] = EmitSimdBooleanLaneExpr(f, i32);
- }
- if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
- return false;
- f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
- MIRType::Bool32x4));
- return true;
- }
- case ValType::I32:
- case ValType::I64:
- case ValType::F32:
- case ValType::F64:
- break;
- }
- MOZ_CRASH("unexpected SIMD type");
-}
-
-static bool
-EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
-{
- switch (op) {
- case SimdOperation::Constructor:
- return EmitSimdCtor(f, type);
- case SimdOperation::Fn_extractLane:
- return EmitExtractLane(f, type, sign);
- case SimdOperation::Fn_replaceLane:
- return EmitSimdReplaceLane(f, type);
- case SimdOperation::Fn_check:
- MOZ_CRASH("only used in asm.js' type system");
- case SimdOperation::Fn_splat:
- return EmitSimdSplat(f, type);
- case SimdOperation::Fn_select:
- return EmitSimdSelect(f, type);
- case SimdOperation::Fn_swizzle:
- return EmitSimdSwizzle(f, type);
- case SimdOperation::Fn_shuffle:
- return EmitSimdShuffle(f, type);
- case SimdOperation::Fn_load:
- return EmitSimdLoad(f, type, 0);
- case SimdOperation::Fn_load1:
- return EmitSimdLoad(f, type, 1);
- case SimdOperation::Fn_load2:
- return EmitSimdLoad(f, type, 2);
- case SimdOperation::Fn_store:
- return EmitSimdStore(f, type, 0);
- case SimdOperation::Fn_store1:
- return EmitSimdStore(f, type, 1);
- case SimdOperation::Fn_store2:
- return EmitSimdStore(f, type, 2);
- case SimdOperation::Fn_allTrue:
- return EmitSimdAllTrue(f, type);
- case SimdOperation::Fn_anyTrue:
- return EmitSimdAnyTrue(f, type);
- case SimdOperation::Fn_abs:
- case SimdOperation::Fn_neg:
- case SimdOperation::Fn_not:
- case SimdOperation::Fn_sqrt:
- case SimdOperation::Fn_reciprocalApproximation:
- case SimdOperation::Fn_reciprocalSqrtApproximation:
- return EmitSimdUnary(f, type, op);
- case SimdOperation::Fn_shiftLeftByScalar:
- return EmitSimdShift(f, type, MSimdShift::lsh);
- case SimdOperation::Fn_shiftRightByScalar:
- return EmitSimdShift(f, type, MSimdShift::rshForSign(sign));
-#define _CASE(OP) \
- case SimdOperation::Fn_##OP: \
- return EmitSimdBinaryComp(f, type, MSimdBinaryComp::OP, sign);
- FOREACH_COMP_SIMD_OP(_CASE)
-#undef _CASE
- case SimdOperation::Fn_and:
- return EmitSimdBinary(f, type, MSimdBinaryBitwise::and_);
- case SimdOperation::Fn_or:
- return EmitSimdBinary(f, type, MSimdBinaryBitwise::or_);
- case SimdOperation::Fn_xor:
- return EmitSimdBinary(f, type, MSimdBinaryBitwise::xor_);
-#define _CASE(OP) \
- case SimdOperation::Fn_##OP: \
- return EmitSimdBinary(f, type, MSimdBinaryArith::Op_##OP);
- FOREACH_NUMERIC_SIMD_BINOP(_CASE)
- FOREACH_FLOAT_SIMD_BINOP(_CASE)
-#undef _CASE
- case SimdOperation::Fn_addSaturate:
- return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::add, sign);
- case SimdOperation::Fn_subSaturate:
- return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::sub, sign);
- case SimdOperation::Fn_fromFloat32x4:
- return EmitSimdConvert(f, ValType::F32x4, type, sign);
- case SimdOperation::Fn_fromInt32x4:
- return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Signed);
- case SimdOperation::Fn_fromUint32x4:
- return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Unsigned);
- case SimdOperation::Fn_fromInt8x16Bits:
- case SimdOperation::Fn_fromUint8x16Bits:
- return EmitSimdBitcast(f, ValType::I8x16, type);
- case SimdOperation::Fn_fromUint16x8Bits:
- case SimdOperation::Fn_fromInt16x8Bits:
- return EmitSimdBitcast(f, ValType::I16x8, type);
- case SimdOperation::Fn_fromInt32x4Bits:
- case SimdOperation::Fn_fromUint32x4Bits:
- return EmitSimdBitcast(f, ValType::I32x4, type);
- case SimdOperation::Fn_fromFloat32x4Bits:
- return EmitSimdBitcast(f, ValType::F32x4, type);
- case SimdOperation::Fn_load3:
- case SimdOperation::Fn_store3:
- case SimdOperation::Fn_fromFloat64x2Bits:
- MOZ_CRASH("NYI");
- }
- MOZ_CRASH("unexpected opcode");
-}
-
-static bool
EmitGrowMemory(FunctionCompiler& f)
{
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
@@ -3546,145 +2825,6 @@ EmitExpr(FunctionCompiler& f)
case Op::F64Ge:
return EmitComparison(f, ValType::F64, JSOP_GE, MCompare::Compare_Double);
- // SIMD
-#define CASE(TYPE, OP, SIGN) \
- case Op::TYPE##OP: \
- return EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN);
-#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
-#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
-#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
-#define F32x4CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable)
-#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
-#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
-#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
-#define ENUMERATE(TYPE, FORALL, DO) \
- case Op::TYPE##Constructor: \
- return EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable); \
- FORALL(DO)
-
- ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
- ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
- ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
- ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
- ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
- ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
- ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
-
-#undef CASE
-#undef I8x16CASE
-#undef I16x8CASE
-#undef I32x4CASE
-#undef F32x4CASE
-#undef B8x16CASE
-#undef B16x8CASE
-#undef B32x4CASE
-#undef ENUMERATE
-
- case Op::I8x16Const: {
- I8x16 i8x16;
- if (!f.iter().readI8x16Const(&i8x16))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Int8x16));
- return true;
- }
- case Op::I16x8Const: {
- I16x8 i16x8;
- if (!f.iter().readI16x8Const(&i16x8))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Int16x8));
- return true;
- }
- case Op::I32x4Const: {
- I32x4 i32x4;
- if (!f.iter().readI32x4Const(&i32x4))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Int32x4));
- return true;
- }
- case Op::F32x4Const: {
- F32x4 f32x4;
- if (!f.iter().readF32x4Const(&f32x4))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX4(f32x4), MIRType::Float32x4));
- return true;
- }
- case Op::B8x16Const: {
- I8x16 i8x16;
- if (!f.iter().readB8x16Const(&i8x16))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Bool8x16));
- return true;
- }
- case Op::B16x8Const: {
- I16x8 i16x8;
- if (!f.iter().readB16x8Const(&i16x8))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Bool16x8));
- return true;
- }
- case Op::B32x4Const: {
- I32x4 i32x4;
- if (!f.iter().readB32x4Const(&i32x4))
- return false;
-
- f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Bool32x4));
- return true;
- }
-
- // SIMD unsigned integer operations.
- case Op::I8x16addSaturateU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_addSaturate, SimdSign::Unsigned);
- case Op::I8x16subSaturateU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_subSaturate, SimdSign::Unsigned);
- case Op::I8x16shiftRightByScalarU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
- case Op::I8x16lessThanU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
- case Op::I8x16lessThanOrEqualU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
- case Op::I8x16greaterThanU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
- case Op::I8x16greaterThanOrEqualU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
- case Op::I8x16extractLaneU:
- return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_extractLane, SimdSign::Unsigned);
-
- case Op::I16x8addSaturateU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_addSaturate, SimdSign::Unsigned);
- case Op::I16x8subSaturateU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_subSaturate, SimdSign::Unsigned);
- case Op::I16x8shiftRightByScalarU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
- case Op::I16x8lessThanU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
- case Op::I16x8lessThanOrEqualU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
- case Op::I16x8greaterThanU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
- case Op::I16x8greaterThanOrEqualU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
- case Op::I16x8extractLaneU:
- return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_extractLane, SimdSign::Unsigned);
-
- case Op::I32x4shiftRightByScalarU:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
- case Op::I32x4lessThanU:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
- case Op::I32x4lessThanOrEqualU:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
- case Op::I32x4greaterThanU:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
- case Op::I32x4greaterThanOrEqualU:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
- case Op::I32x4fromFloat32x4U:
- return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned);
-
// Atomics
case Op::I32AtomicsLoad:
return EmitAtomicsLoad(f);
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
index 0479bda59f..d5e1c2acd3 100644
--- a/js/src/wasm/WasmJS.cpp
+++ b/js/src/wasm/WasmJS.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2016 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
index 36011abe90..f0d334ba39 100644
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2014 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
index d4e188a23d..67d744e585 100644
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -186,17 +187,6 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
"ExportArg must be big enough to store SIMD values");
switch (type) {
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- masm.loadUnalignedSimd128Int(src, iter->fpu());
- break;
- case MIRType::Float32x4:
- masm.loadUnalignedSimd128Float(src, iter->fpu());
- break;
case MIRType::Double:
masm.loadDouble(src, iter->fpu());
break;
@@ -239,21 +229,6 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
masm.storeFloat32(ScratchFloat32Reg,
Address(masm.getStackPointer(), iter->offsetFromArgBase()));
break;
- case MIRType::Int8x16:
- case MIRType::Int16x8:
- case MIRType::Int32x4:
- case MIRType::Bool8x16:
- case MIRType::Bool16x8:
- case MIRType::Bool32x4:
- masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
- masm.storeAlignedSimd128Int(
- ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
- break;
- case MIRType::Float32x4:
- masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
- masm.storeAlignedSimd128Float(
- ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
- break;
default:
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
}
@@ -293,19 +268,6 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
masm.canonicalizeDouble(ReturnDoubleReg);
masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
break;
- case ExprType::I8x16:
- case ExprType::I16x8:
- case ExprType::I32x4:
- case ExprType::B8x16:
- case ExprType::B16x8:
- case ExprType::B32x4:
- // We don't have control on argv alignment, do an unaligned access.
- masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
- break;
- case ExprType::F32x4:
- // We don't have control on argv alignment, do an unaligned access.
- masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
- break;
case ExprType::Limit:
MOZ_CRASH("Limit");
}
@@ -593,14 +555,6 @@ wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint3
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.loadDouble(argv, ReturnDoubleReg);
break;
- case ExprType::I8x16:
- case ExprType::I16x8:
- case ExprType::I32x4:
- case ExprType::F32x4:
- case ExprType::B8x16:
- case ExprType::B16x8:
- case ExprType::B32x4:
- MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
case ExprType::Limit:
MOZ_CRASH("Limit");
}
@@ -781,14 +735,6 @@ wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* t
case ExprType::F64:
masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
break;
- case ExprType::I8x16:
- case ExprType::I16x8:
- case ExprType::I32x4:
- case ExprType::F32x4:
- case ExprType::B8x16:
- case ExprType::B16x8:
- case ExprType::B32x4:
- MOZ_CRASH("SIMD types shouldn't be returned from an import");
case ExprType::Limit:
MOZ_CRASH("Limit");
}
@@ -903,13 +849,12 @@ wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
}
// Generate a stub which is only used by the signal handlers to handle out of
-// bounds access by experimental SIMD.js and Atomics and unaligned accesses on
-// ARM. This stub is executed by direct PC transfer from the faulting memory
-// access and thus the stack depth is unknown. Since WasmActivation::fp is not
-// set before calling the error reporter, the current wasm activation will be
-// lost. This stub should be removed when SIMD.js and Atomics are moved to wasm
-// and given proper traps and when we use a non-faulting strategy for unaligned
-// ARM access.
+// bounds access by Atomics and unaligned accesses on ARM. This stub is
+// executed by direct PC transfer from the faulting memory access and thus the
+// stack depth is unknown. Since JitActivation::packedExitFP() is not set
+// before calling the error reporter, the current wasm activation will be lost.
+// This stub should be removed when Atomics are moved to wasm and given proper
+// traps and when we use a non-faulting strategy for unaligned ARM access.
static Offsets
GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
{
diff --git a/js/src/wasm/WasmTextUtils.cpp b/js/src/wasm/WasmTextUtils.cpp
index 78367f3984..197a211f19 100644
--- a/js/src/wasm/WasmTextUtils.cpp
+++ b/js/src/wasm/WasmTextUtils.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2016 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
index be8af3ada1..993c4903be 100644
--- a/js/src/wasm/WasmTypes.cpp
+++ b/js/src/wasm/WasmTypes.cpp
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -52,15 +53,6 @@ Val::writePayload(uint8_t* dst) const
case ValType::F64:
memcpy(dst, &u.i64_, sizeof(u.i64_));
return;
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- memcpy(dst, &u, jit::Simd128DataSize);
- return;
}
}
@@ -125,9 +117,6 @@ WasmReportTrap(int32_t trapIndex)
case Trap::IndirectCallBadSig:
errorNumber = JSMSG_WASM_IND_CALL_BAD_SIG;
break;
- case Trap::ImpreciseSimdConversion:
- errorNumber = JSMSG_SIMD_FAILED_CONVERSION;
- break;
case Trap::OutOfBounds:
errorNumber = JSMSG_WASM_OUT_OF_BOUNDS;
break;
@@ -463,14 +452,6 @@ IsImmediateType(ValType vt)
case ValType::F32:
case ValType::F64:
return true;
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- return false;
}
MOZ_CRASH("bad ValType");
}
@@ -488,14 +469,6 @@ EncodeImmediateType(ValType vt)
return 2;
case ValType::F64:
return 3;
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- break;
}
MOZ_CRASH("bad ValType");
}
diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
index 4e2caabfe9..57c737e454 100644
--- a/js/src/wasm/WasmTypes.h
+++ b/js/src/wasm/WasmTypes.h
@@ -1,6 +1,7 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* Copyright 2015 Mozilla Foundation
+ * Copyright 2023 Moonchild Productions
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -83,11 +84,6 @@ using mozilla::Unused;
typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
-typedef int8_t I8x16[16];
-typedef int16_t I16x8[8];
-typedef int32_t I32x4[4];
-typedef float F32x4[4];
-
class Code;
class CodeRange;
class Memory;
@@ -151,89 +147,6 @@ struct ShareableBase : RefCounted<T>
// ValType utilities
-static inline bool
-IsSimdType(ValType vt)
-{
- switch (vt) {
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- return true;
- default:
- return false;
- }
-}
-
-static inline uint32_t
-NumSimdElements(ValType vt)
-{
- MOZ_ASSERT(IsSimdType(vt));
- switch (vt) {
- case ValType::I8x16:
- case ValType::B8x16:
- return 16;
- case ValType::I16x8:
- case ValType::B16x8:
- return 8;
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B32x4:
- return 4;
- default:
- MOZ_CRASH("Unhandled SIMD type");
- }
-}
-
-static inline ValType
-SimdElementType(ValType vt)
-{
- MOZ_ASSERT(IsSimdType(vt));
- switch (vt) {
- case ValType::I8x16:
- case ValType::I16x8:
- case ValType::I32x4:
- return ValType::I32;
- case ValType::F32x4:
- return ValType::F32;
- case ValType::B8x16:
- case ValType::B16x8:
- case ValType::B32x4:
- return ValType::I32;
- default:
- MOZ_CRASH("Unhandled SIMD type");
- }
-}
-
-static inline ValType
-SimdBoolType(ValType vt)
-{
- MOZ_ASSERT(IsSimdType(vt));
- switch (vt) {
- case ValType::I8x16:
- case ValType::B8x16:
- return ValType::B8x16;
- case ValType::I16x8:
- case ValType::B16x8:
- return ValType::B16x8;
- case ValType::I32x4:
- case ValType::F32x4:
- case ValType::B32x4:
- return ValType::B32x4;
- default:
- MOZ_CRASH("Unhandled SIMD type");
- }
-}
-
-static inline bool
-IsSimdBoolType(ValType vt)
-{
- return vt == ValType::B8x16 || vt == ValType::B16x8 || vt == ValType::B32x4;
-}
-
static inline jit::MIRType
ToMIRType(ValType vt)
{
@@ -242,13 +155,6 @@ ToMIRType(ValType vt)
case ValType::I64: return jit::MIRType::Int64;
case ValType::F32: return jit::MIRType::Float32;
case ValType::F64: return jit::MIRType::Double;
- case ValType::I8x16: return jit::MIRType::Int8x16;
- case ValType::I16x8: return jit::MIRType::Int16x8;
- case ValType::I32x4: return jit::MIRType::Int32x4;
- case ValType::F32x4: return jit::MIRType::Float32x4;
- case ValType::B8x16: return jit::MIRType::Bool8x16;
- case ValType::B16x8: return jit::MIRType::Bool16x8;
- case ValType::B32x4: return jit::MIRType::Bool32x4;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
}
@@ -267,14 +173,6 @@ enum class ExprType
F32 = uint8_t(TypeCode::F32),
F64 = uint8_t(TypeCode::F64),
- I8x16 = uint8_t(TypeCode::I8x16),
- I16x8 = uint8_t(TypeCode::I16x8),
- I32x4 = uint8_t(TypeCode::I32x4),
- F32x4 = uint8_t(TypeCode::F32x4),
- B8x16 = uint8_t(TypeCode::B8x16),
- B16x8 = uint8_t(TypeCode::B16x8),
- B32x4 = uint8_t(TypeCode::B32x4),
-
Limit = uint8_t(TypeCode::Limit)
};
@@ -297,12 +195,6 @@ ToExprType(ValType vt)
return ExprType(vt);
}
-static inline bool
-IsSimdType(ExprType et)
-{
- return IsVoid(et) ? false : IsSimdType(ValType(et));
-}
-
static inline jit::MIRType
ToMIRType(ExprType et)
{
@@ -318,13 +210,6 @@ ToCString(ExprType type)
case ExprType::I64: return "i64";
case ExprType::F32: return "f32";
case ExprType::F64: return "f64";
- case ExprType::I8x16: return "i8x16";
- case ExprType::I16x8: return "i16x8";
- case ExprType::I32x4: return "i32x4";
- case ExprType::F32x4: return "f32x4";
- case ExprType::B8x16: return "b8x16";
- case ExprType::B16x8: return "b16x8";
- case ExprType::B32x4: return "b32x4";
case ExprType::Limit:;
}
MOZ_CRASH("bad expression type");
@@ -386,10 +271,6 @@ class Val
uint64_t i64_;
RawF32 f32_;
RawF64 f64_;
- I8x16 i8x16_;
- I16x8 i16x8_;
- I32x4 i32x4_;
- F32x4 f32x4_;
U() {}
} u;
@@ -404,47 +285,13 @@ class Val
MOZ_IMPLICIT Val(float) = delete;
MOZ_IMPLICIT Val(double) = delete;
- explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
- MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
- memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
- }
- explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
- MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
- memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
- }
- explicit Val(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
- MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
- memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
- }
- explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) {
- memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
- }
-
ValType type() const { return type_; }
- bool isSimd() const { return IsSimdType(type()); }
uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
RawF32 f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
RawF64 f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
- const I8x16& i8x16() const {
- MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
- return u.i8x16_;
- }
- const I16x8& i16x8() const {
- MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
- return u.i16x8_;
- }
- const I32x4& i32x4() const {
- MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
- return u.i32x4_;
- }
- const F32x4& f32x4() const {
- MOZ_ASSERT(type_ == ValType::F32x4);
- return u.f32x4_;
- }
-
void writePayload(uint8_t* dst) const;
};
@@ -834,17 +681,13 @@ enum class Trap
InvalidConversionToInteger,
// Integer division by zero.
IntegerDivideByZero,
- // Out of bounds on wasm memory accesses and asm.js SIMD/atomic accesses.
+ // Out of bounds on wasm memory accesses and asm.js atomic accesses.
OutOfBounds,
// call_indirect to null.
IndirectCallToNull,
// call_indirect signature mismatch.
IndirectCallBadSig,
- // (asm.js only) SIMD float to int conversion failed because the input
- // wasn't in bounds.
- ImpreciseSimdConversion,
-
// The internal stack space was exhausted. For compatibility, this throws
// the same over-recursed error as JS.
StackOverflow,
@@ -1388,7 +1231,7 @@ ComputeMappedSize(uint32_t maxSize);
// Metadata for bounds check instructions that are patched at runtime with the
// appropriate bounds check limit. On WASM_HUGE_MEMORY platforms for wasm (and
-// SIMD/Atomic) bounds checks, no BoundsCheck is created: the signal handler
+// Atomic) bounds checks, no BoundsCheck is created: the signal handler
// catches everything. On !WASM_HUGE_MEMORY, a BoundsCheck is created for each
// memory access (except when statically eliminated by optimizations) so that
// the length can be patched in as an immediate. This requires that the bounds
@@ -1413,7 +1256,7 @@ class BoundsCheck
WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
// Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
-// (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
+// (non-Atomic) asm.js loads and stores create a MemoryAccess so that the
// signal handler can implement the semantically-correct wraparound logic; the
// rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
// the base address of memory is baked into each memory access instruction so