path: root/js/src/wasm
author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d  /js/src/wasm
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
download   uxp-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/src/wasm')
-rw-r--r--  js/src/wasm/AsmJS.cpp  8986
-rw-r--r--  js/src/wasm/AsmJS.h  89
-rw-r--r--  js/src/wasm/WasmAST.h  1038
-rw-r--r--  js/src/wasm/WasmBaselineCompile.cpp  7480
-rw-r--r--  js/src/wasm/WasmBaselineCompile.h  48
-rw-r--r--  js/src/wasm/WasmBinaryConstants.h  449
-rw-r--r--  js/src/wasm/WasmBinaryFormat.cpp  655
-rw-r--r--  js/src/wasm/WasmBinaryFormat.h  689
-rw-r--r--  js/src/wasm/WasmBinaryIterator.cpp  498
-rw-r--r--  js/src/wasm/WasmBinaryIterator.h  2246
-rw-r--r--  js/src/wasm/WasmBinaryToAST.cpp  2067
-rw-r--r--  js/src/wasm/WasmBinaryToAST.h  37
-rw-r--r--  js/src/wasm/WasmBinaryToExperimentalText.cpp  1922
-rw-r--r--  js/src/wasm/WasmBinaryToExperimentalText.h  59
-rw-r--r--  js/src/wasm/WasmBinaryToText.cpp  1744
-rw-r--r--  js/src/wasm/WasmBinaryToText.h  45
-rw-r--r--  js/src/wasm/WasmCode.cpp  835
-rw-r--r--  js/src/wasm/WasmCode.h  554
-rw-r--r--  js/src/wasm/WasmCompartment.cpp  180
-rw-r--r--  js/src/wasm/WasmCompartment.h  107
-rw-r--r--  js/src/wasm/WasmCompile.cpp  967
-rw-r--r--  js/src/wasm/WasmCompile.h  68
-rw-r--r--  js/src/wasm/WasmFrameIterator.cpp  891
-rw-r--r--  js/src/wasm/WasmFrameIterator.h  142
-rw-r--r--  js/src/wasm/WasmGeneratedSourceMap.h  151
-rw-r--r--  js/src/wasm/WasmGenerator.cpp  1174
-rw-r--r--  js/src/wasm/WasmGenerator.h  252
-rw-r--r--  js/src/wasm/WasmInstance.cpp  849
-rw-r--r--  js/src/wasm/WasmInstance.h  145
-rw-r--r--  js/src/wasm/WasmIonCompile.cpp  3811
-rw-r--r--  js/src/wasm/WasmIonCompile.h  159
-rw-r--r--  js/src/wasm/WasmJS.cpp  2048
-rw-r--r--  js/src/wasm/WasmJS.h  267
-rw-r--r--  js/src/wasm/WasmModule.cpp  1069
-rw-r--r--  js/src/wasm/WasmModule.h  242
-rw-r--r--  js/src/wasm/WasmSerialize.h  174
-rw-r--r--  js/src/wasm/WasmSignalHandlers.cpp  1499
-rw-r--r--  js/src/wasm/WasmSignalHandlers.h  81
-rw-r--r--  js/src/wasm/WasmStubs.cpp  1151
-rw-r--r--  js/src/wasm/WasmStubs.h  64
-rw-r--r--  js/src/wasm/WasmTable.cpp  211
-rw-r--r--  js/src/wasm/WasmTable.h  89
-rw-r--r--  js/src/wasm/WasmTextToBinary.cpp  4843
-rw-r--r--  js/src/wasm/WasmTextToBinary.h  37
-rw-r--r--  js/src/wasm/WasmTextUtils.cpp  77
-rw-r--r--  js/src/wasm/WasmTextUtils.h  45
-rw-r--r--  js/src/wasm/WasmTypes.cpp  727
-rw-r--r--  js/src/wasm/WasmTypes.h  1510
48 files changed, 52471 insertions, 0 deletions
diff --git a/js/src/wasm/AsmJS.cpp b/js/src/wasm/AsmJS.cpp
new file mode 100644
index 0000000000..6964c5d625
--- /dev/null
+++ b/js/src/wasm/AsmJS.cpp
@@ -0,0 +1,8986 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/AsmJS.h"
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Compression.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "jsmath.h"
+#include "jsprf.h"
+#include "jsstr.h"
+#include "jsutil.h"
+
+#include "jswrapper.h"
+
+#include "builtin/SIMD.h"
+#include "frontend/Parser.h"
+#include "gc/Policy.h"
+#include "js/MemoryMetrics.h"
+#include "vm/StringBuffer.h"
+#include "vm/Time.h"
+#include "vm/TypedArrayObject.h"
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+
+#include "jsobjinlines.h"
+
+#include "frontend/ParseNode-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+
+using namespace js;
+using namespace js::frontend;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CeilingLog2;
+using mozilla::Compression::LZ4;
+using mozilla::HashGeneric;
+using mozilla::IsNaN;
+using mozilla::IsNegativeZero;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::Move;
+using mozilla::PodCopy;
+using mozilla::PodEqual;
+using mozilla::PodZero;
+using mozilla::PositiveInfinity;
+using JS::AsmJSOption;
+using JS::GenericNaN;
+
+/*****************************************************************************/
+
+// The valid asm.js heap lengths are precisely the valid wasm heap lengths for
+// ARM that are greater than or equal to MinHeapLength.
+static const size_t MinHeapLength = PageSize;
+
+static uint32_t
+RoundUpToNextValidAsmJSHeapLength(uint32_t length)
+{
+ if (length <= MinHeapLength)
+ return MinHeapLength;
+
+ return wasm::RoundUpToNextValidARMImmediate(length);
+}
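+
+// Illustrative note (added comment, not in the original source): from the
+// definitions above, RoundUpToNextValidAsmJSHeapLength(0) yields MinHeapLength;
+// ModuleValidator::init() below uses exactly that call to seed the module's
+// minimum memory length.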
+
+
+/*****************************************************************************/
+// asm.js module object
+
+// The asm.js spec recognizes this set of builtin Math functions.
+enum AsmJSMathBuiltinFunction
+{
+ AsmJSMathBuiltin_sin, AsmJSMathBuiltin_cos, AsmJSMathBuiltin_tan,
+ AsmJSMathBuiltin_asin, AsmJSMathBuiltin_acos, AsmJSMathBuiltin_atan,
+ AsmJSMathBuiltin_ceil, AsmJSMathBuiltin_floor, AsmJSMathBuiltin_exp,
+ AsmJSMathBuiltin_log, AsmJSMathBuiltin_pow, AsmJSMathBuiltin_sqrt,
+ AsmJSMathBuiltin_abs, AsmJSMathBuiltin_atan2, AsmJSMathBuiltin_imul,
+ AsmJSMathBuiltin_fround, AsmJSMathBuiltin_min, AsmJSMathBuiltin_max,
+ AsmJSMathBuiltin_clz32
+};
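+
+// Illustrative mapping (added comment, not in the original source): an asm.js
+// module that imports `var sin = stdlib.Math.sin;` leads the validator to
+// record AsmJSMathBuiltin_sin for that global (see
+// ModuleValidator::addMathBuiltinFunction below).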
+
+// The asm.js spec will recognize this set of builtin Atomics functions.
+enum AsmJSAtomicsBuiltinFunction
+{
+ AsmJSAtomicsBuiltin_compareExchange,
+ AsmJSAtomicsBuiltin_exchange,
+ AsmJSAtomicsBuiltin_load,
+ AsmJSAtomicsBuiltin_store,
+ AsmJSAtomicsBuiltin_add,
+ AsmJSAtomicsBuiltin_sub,
+ AsmJSAtomicsBuiltin_and,
+ AsmJSAtomicsBuiltin_or,
+ AsmJSAtomicsBuiltin_xor,
+ AsmJSAtomicsBuiltin_isLockFree
+};
+
+
+// An AsmJSGlobal represents a JS global variable in the asm.js module function.
+class AsmJSGlobal
+{
+ public:
+ enum Which { Variable, FFI, ArrayView, ArrayViewCtor, MathBuiltinFunction,
+ AtomicsBuiltinFunction, Constant, SimdCtor, SimdOp };
+ enum VarInitKind { InitConstant, InitImport };
+ enum ConstantKind { GlobalConstant, MathConstant };
+
+ private:
+ struct CacheablePod {
+ Which which_;
+ union V {
+ struct {
+ VarInitKind initKind_;
+ union U {
+ ValType importType_;
+ Val val_;
+ U() {}
+ } u;
+ } var;
+ uint32_t ffiIndex_;
+ Scalar::Type viewType_;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+ AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
+ SimdType simdCtorType_;
+ struct {
+ SimdType type_;
+ SimdOperation which_;
+ } simdOp;
+ struct {
+ ConstantKind kind_;
+ double value_;
+ } constant;
+ V() {}
+ } u;
+ } pod;
+ CacheableChars field_;
+
+ friend class ModuleValidator;
+
+ public:
+ AsmJSGlobal() = default;
+ AsmJSGlobal(Which which, UniqueChars field) {
+ mozilla::PodZero(&pod); // zero padding for Valgrind
+ pod.which_ = which;
+ field_ = Move(field);
+ }
+ const char* field() const {
+ return field_.get();
+ }
+ Which which() const {
+ return pod.which_;
+ }
+ VarInitKind varInitKind() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ return pod.u.var.initKind_;
+ }
+ Val varInitVal() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitConstant);
+ return pod.u.var.u.val_;
+ }
+ ValType varInitImportType() const {
+ MOZ_ASSERT(pod.which_ == Variable);
+ MOZ_ASSERT(pod.u.var.initKind_ == InitImport);
+ return pod.u.var.u.importType_;
+ }
+ uint32_t ffiIndex() const {
+ MOZ_ASSERT(pod.which_ == FFI);
+ return pod.u.ffiIndex_;
+ }
+ // When a view is created from an imported constructor:
+ // var I32 = stdlib.Int32Array;
+ // var i32 = new I32(buffer);
+ // the second import has nothing to validate and thus has a null field.
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(pod.which_ == ArrayView || pod.which_ == ArrayViewCtor);
+ return pod.u.viewType_;
+ }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(pod.which_ == MathBuiltinFunction);
+ return pod.u.mathBuiltinFunc_;
+ }
+ AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+ MOZ_ASSERT(pod.which_ == AtomicsBuiltinFunction);
+ return pod.u.atomicsBuiltinFunc_;
+ }
+ SimdType simdCtorType() const {
+ MOZ_ASSERT(pod.which_ == SimdCtor);
+ return pod.u.simdCtorType_;
+ }
+ SimdOperation simdOperation() const {
+ MOZ_ASSERT(pod.which_ == SimdOp);
+ return pod.u.simdOp.which_;
+ }
+ SimdType simdOperationType() const {
+ MOZ_ASSERT(pod.which_ == SimdOp);
+ return pod.u.simdOp.type_;
+ }
+ ConstantKind constantKind() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.kind_;
+ }
+ double constantValue() const {
+ MOZ_ASSERT(pod.which_ == Constant);
+ return pod.u.constant.value_;
+ }
+
+ WASM_DECLARE_SERIALIZABLE(AsmJSGlobal);
+};
+
+typedef Vector<AsmJSGlobal, 0, SystemAllocPolicy> AsmJSGlobalVector;
+
+// An AsmJSImport is slightly different from an asm.js FFI function: a single
+// asm.js FFI function can be called with many different signatures. When
+// compiled to wasm, each unique pairing of FFI function and signature
+// generates a wasm import.
+class AsmJSImport
+{
+ uint32_t ffiIndex_;
+ public:
+ AsmJSImport() = default;
+ explicit AsmJSImport(uint32_t ffiIndex) : ffiIndex_(ffiIndex) {}
+ uint32_t ffiIndex() const { return ffiIndex_; }
+};
+
+typedef Vector<AsmJSImport, 0, SystemAllocPolicy> AsmJSImportVector;
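+
+// Illustrative example (added comment, not in the original source): if a
+// single FFI function `ffi` is called both as `ffi(x)|0` and as `+ffi(y)`,
+// the two distinct signatures produce two wasm imports, each AsmJSImport
+// carrying the same ffiIndex.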
+
+// An AsmJSExport logically extends Export with the extra information needed
+// for an asm.js exported function, viz., the offsets into the module's source
+// chars in case the function is toString()ed.
+class AsmJSExport
+{
+ uint32_t funcIndex_;
+
+ // All fields are treated as cacheable POD:
+ uint32_t startOffsetInModule_; // Store module-start-relative offsets
+ uint32_t endOffsetInModule_; // so preserved by serialization.
+
+ public:
+ AsmJSExport() { PodZero(this); }
+ AsmJSExport(uint32_t funcIndex, uint32_t startOffsetInModule, uint32_t endOffsetInModule)
+ : funcIndex_(funcIndex),
+ startOffsetInModule_(startOffsetInModule),
+ endOffsetInModule_(endOffsetInModule)
+ {}
+ uint32_t funcIndex() const {
+ return funcIndex_;
+ }
+ uint32_t startOffsetInModule() const {
+ return startOffsetInModule_;
+ }
+ uint32_t endOffsetInModule() const {
+ return endOffsetInModule_;
+ }
+};
+
+typedef Vector<AsmJSExport, 0, SystemAllocPolicy> AsmJSExportVector;
+
+enum class CacheResult
+{
+ Hit,
+ Miss
+};
+
+// Holds the immutable guts of an AsmJSModule.
+//
+// AsmJSMetadata is built incrementally by ModuleValidator and then shared
+// immutably between AsmJSModules.
+
+struct AsmJSMetadataCacheablePod
+{
+ uint32_t numFFIs;
+ uint32_t srcLength;
+ uint32_t srcLengthWithRightBrace;
+ bool usesSimd;
+
+ AsmJSMetadataCacheablePod() { PodZero(this); }
+};
+
+struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod
+{
+ AsmJSGlobalVector asmJSGlobals;
+ AsmJSImportVector asmJSImports;
+ AsmJSExportVector asmJSExports;
+ CacheableCharsVector asmJSFuncNames;
+ CacheableChars globalArgumentName;
+ CacheableChars importArgumentName;
+ CacheableChars bufferArgumentName;
+
+ CacheResult cacheResult;
+
+ // These values are not serialized since they are relative to the
+ // containing script which can be different between serialization and
+ // deserialization contexts. Thus, they must be set explicitly using the
+ // ambient Parser/ScriptSource after deserialization.
+ //
+ // srcStart refers to the offset in the ScriptSource to the beginning of
+ // the asm.js module function. If the function has been created with the
+ // Function constructor, this will be the first character in the function
+ // source. Otherwise, it will be the opening parenthesis of the arguments
+ // list.
+ uint32_t srcStart;
+ uint32_t srcBodyStart;
+ bool strict;
+ ScriptSourceHolder scriptSource;
+
+ uint32_t srcEndBeforeCurly() const {
+ return srcStart + srcLength;
+ }
+ uint32_t srcEndAfterCurly() const {
+ return srcStart + srcLengthWithRightBrace;
+ }
+
+ AsmJSMetadata()
+ : Metadata(ModuleKind::AsmJS),
+ cacheResult(CacheResult::Miss),
+ srcStart(0),
+ srcBodyStart(0),
+ strict(false)
+ {}
+ ~AsmJSMetadata() override {}
+
+ const AsmJSExport& lookupAsmJSExport(uint32_t funcIndex) const {
+ // The AsmJSExportVector isn't stored in sorted order so do a linear
+ // search. This is for the super-cold and already-expensive toString()
+ // path and the number of exports is generally small.
+ for (const AsmJSExport& exp : asmJSExports) {
+ if (exp.funcIndex() == funcIndex)
+ return exp;
+ }
+ MOZ_CRASH("missing asm.js func export");
+ }
+
+ bool mutedErrors() const override {
+ return scriptSource.get()->mutedErrors();
+ }
+ const char16_t* displayURL() const override {
+ return scriptSource.get()->hasDisplayURL() ? scriptSource.get()->displayURL() : nullptr;
+ }
+ ScriptSource* maybeScriptSource() const override {
+ return scriptSource.get();
+ }
+ bool getFuncName(JSContext* cx, const Bytes*, uint32_t funcIndex,
+ TwoByteName* name) const override
+ {
+ // asm.js doesn't allow exporting imports or putting imports in tables
+ MOZ_ASSERT(funcIndex >= AsmJSFirstDefFuncIndex);
+
+ const char* p = asmJSFuncNames[funcIndex - AsmJSFirstDefFuncIndex].get();
+ UTF8Chars utf8(p, strlen(p));
+
+ size_t twoByteLength;
+ UniqueTwoByteChars chars(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &twoByteLength).get());
+ if (!chars)
+ return false;
+
+ if (!name->growByUninitialized(twoByteLength))
+ return false;
+
+ PodCopy(name->begin(), chars.get(), twoByteLength);
+ return true;
+ }
+
+ AsmJSMetadataCacheablePod& pod() { return *this; }
+ const AsmJSMetadataCacheablePod& pod() const { return *this; }
+
+ WASM_DECLARE_SERIALIZABLE_OVERRIDE(AsmJSMetadata)
+};
+
+typedef RefPtr<AsmJSMetadata> MutableAsmJSMetadata;
+
+/*****************************************************************************/
+// ParseNode utilities
+
+static inline ParseNode*
+NextNode(ParseNode* pn)
+{
+ return pn->pn_next;
+}
+
+static inline ParseNode*
+UnaryKid(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_UNARY));
+ return pn->pn_kid;
+}
+
+static inline ParseNode*
+BinaryRight(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_BINARY));
+ return pn->pn_right;
+}
+
+static inline ParseNode*
+BinaryLeft(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_BINARY));
+ return pn->pn_left;
+}
+
+static inline ParseNode*
+ReturnExpr(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_RETURN));
+ return UnaryKid(pn);
+}
+
+static inline ParseNode*
+TernaryKid1(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_TERNARY));
+ return pn->pn_kid1;
+}
+
+static inline ParseNode*
+TernaryKid2(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_TERNARY));
+ return pn->pn_kid2;
+}
+
+static inline ParseNode*
+TernaryKid3(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_TERNARY));
+ return pn->pn_kid3;
+}
+
+static inline ParseNode*
+ListHead(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_LIST));
+ return pn->pn_head;
+}
+
+static inline unsigned
+ListLength(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isArity(PN_LIST));
+ return pn->pn_count;
+}
+
+static inline ParseNode*
+CallCallee(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_CALL));
+ return ListHead(pn);
+}
+
+static inline unsigned
+CallArgListLength(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_CALL));
+ MOZ_ASSERT(ListLength(pn) >= 1);
+ return ListLength(pn) - 1;
+}
+
+static inline ParseNode*
+CallArgList(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_CALL));
+ return NextNode(ListHead(pn));
+}
+
+static inline ParseNode*
+VarListHead(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_VAR) || pn->isKind(PNK_CONST));
+ return ListHead(pn);
+}
+
+static inline bool
+IsDefaultCase(ParseNode* pn)
+{
+ return pn->as<CaseClause>().isDefault();
+}
+
+static inline ParseNode*
+CaseExpr(ParseNode* pn)
+{
+ return pn->as<CaseClause>().caseExpression();
+}
+
+static inline ParseNode*
+CaseBody(ParseNode* pn)
+{
+ return pn->as<CaseClause>().statementList();
+}
+
+static inline ParseNode*
+BinaryOpLeft(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->isArity(PN_LIST));
+ MOZ_ASSERT(pn->pn_count == 2);
+ return ListHead(pn);
+}
+
+static inline ParseNode*
+BinaryOpRight(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isBinaryOperation());
+ MOZ_ASSERT(pn->isArity(PN_LIST));
+ MOZ_ASSERT(pn->pn_count == 2);
+ return NextNode(ListHead(pn));
+}
+
+static inline ParseNode*
+BitwiseLeft(ParseNode* pn)
+{
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode*
+BitwiseRight(ParseNode* pn)
+{
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode*
+MultiplyLeft(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_STAR));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode*
+MultiplyRight(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_STAR));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode*
+AddSubLeft(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_ADD) || pn->isKind(PNK_SUB));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode*
+AddSubRight(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_ADD) || pn->isKind(PNK_SUB));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode*
+DivOrModLeft(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_DIV) || pn->isKind(PNK_MOD));
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode*
+DivOrModRight(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_DIV) || pn->isKind(PNK_MOD));
+ return BinaryOpRight(pn);
+}
+
+static inline ParseNode*
+ComparisonLeft(ParseNode* pn)
+{
+ return BinaryOpLeft(pn);
+}
+
+static inline ParseNode*
+ComparisonRight(ParseNode* pn)
+{
+ return BinaryOpRight(pn);
+}
+
+static inline bool
+IsExpressionStatement(ParseNode* pn)
+{
+ return pn->isKind(PNK_SEMI);
+}
+
+static inline ParseNode*
+ExpressionStatementExpr(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_SEMI));
+ return UnaryKid(pn);
+}
+
+static inline PropertyName*
+LoopControlMaybeLabel(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_BREAK) || pn->isKind(PNK_CONTINUE));
+ MOZ_ASSERT(pn->isArity(PN_NULLARY));
+ return pn->as<LoopControlStatement>().label();
+}
+
+static inline PropertyName*
+LabeledStatementLabel(ParseNode* pn)
+{
+ return pn->as<LabeledStatement>().label();
+}
+
+static inline ParseNode*
+LabeledStatementStatement(ParseNode* pn)
+{
+ return pn->as<LabeledStatement>().statement();
+}
+
+static double
+NumberNodeValue(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_NUMBER));
+ return pn->pn_dval;
+}
+
+static bool
+NumberNodeHasFrac(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_NUMBER));
+ return pn->pn_u.number.decimalPoint == HasDecimal;
+}
+
+static ParseNode*
+DotBase(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_DOT));
+ MOZ_ASSERT(pn->isArity(PN_NAME));
+ return pn->expr();
+}
+
+static PropertyName*
+DotMember(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_DOT));
+ MOZ_ASSERT(pn->isArity(PN_NAME));
+ return pn->pn_atom->asPropertyName();
+}
+
+static ParseNode*
+ElemBase(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_ELEM));
+ return BinaryLeft(pn);
+}
+
+static ParseNode*
+ElemIndex(ParseNode* pn)
+{
+ MOZ_ASSERT(pn->isKind(PNK_ELEM));
+ return BinaryRight(pn);
+}
+
+static inline JSFunction*
+FunctionObject(ParseNode* fn)
+{
+ MOZ_ASSERT(fn->isKind(PNK_FUNCTION));
+ MOZ_ASSERT(fn->isArity(PN_CODE));
+ return fn->pn_funbox->function();
+}
+
+static inline PropertyName*
+FunctionName(ParseNode* fn)
+{
+ if (JSAtom* name = FunctionObject(fn)->name())
+ return name->asPropertyName();
+ return nullptr;
+}
+
+static inline ParseNode*
+FunctionStatementList(ParseNode* fn)
+{
+ MOZ_ASSERT(fn->pn_body->isKind(PNK_PARAMSBODY));
+ ParseNode* last = fn->pn_body->last();
+ MOZ_ASSERT(last->isKind(PNK_LEXICALSCOPE));
+ MOZ_ASSERT(last->isEmptyScope());
+ ParseNode* body = last->scopeBody();
+ MOZ_ASSERT(body->isKind(PNK_STATEMENTLIST));
+ return body;
+}
+
+static inline bool
+IsNormalObjectField(ExclusiveContext* cx, ParseNode* pn)
+{
+ return pn->isKind(PNK_COLON) &&
+ pn->getOp() == JSOP_INITPROP &&
+ BinaryLeft(pn)->isKind(PNK_OBJECT_PROPERTY_NAME);
+}
+
+static inline PropertyName*
+ObjectNormalFieldName(ExclusiveContext* cx, ParseNode* pn)
+{
+ MOZ_ASSERT(IsNormalObjectField(cx, pn));
+ MOZ_ASSERT(BinaryLeft(pn)->isKind(PNK_OBJECT_PROPERTY_NAME));
+ return BinaryLeft(pn)->pn_atom->asPropertyName();
+}
+
+static inline ParseNode*
+ObjectNormalFieldInitializer(ExclusiveContext* cx, ParseNode* pn)
+{
+ MOZ_ASSERT(IsNormalObjectField(cx, pn));
+ return BinaryRight(pn);
+}
+
+static inline ParseNode*
+MaybeInitializer(ParseNode* pn)
+{
+ return pn->expr();
+}
+
+static inline bool
+IsUseOfName(ParseNode* pn, PropertyName* name)
+{
+ return pn->isKind(PNK_NAME) && pn->name() == name;
+}
+
+static inline bool
+IsIgnoredDirectiveName(ExclusiveContext* cx, JSAtom* atom)
+{
+ return atom != cx->names().useStrict;
+}
+
+static inline bool
+IsIgnoredDirective(ExclusiveContext* cx, ParseNode* pn)
+{
+ return pn->isKind(PNK_SEMI) &&
+ UnaryKid(pn) &&
+ UnaryKid(pn)->isKind(PNK_STRING) &&
+ IsIgnoredDirectiveName(cx, UnaryKid(pn)->pn_atom);
+}
+
+static inline bool
+IsEmptyStatement(ParseNode* pn)
+{
+ return pn->isKind(PNK_SEMI) && !UnaryKid(pn);
+}
+
+static inline ParseNode*
+SkipEmptyStatements(ParseNode* pn)
+{
+ while (pn && IsEmptyStatement(pn))
+ pn = pn->pn_next;
+ return pn;
+}
+
+static inline ParseNode*
+NextNonEmptyStatement(ParseNode* pn)
+{
+ return SkipEmptyStatements(pn->pn_next);
+}
+
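+// Note (added comment): the two token helpers below skip TOK_SEMI (empty
+// statements) so that callers always see the next significant token of the
+// module body.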
+static bool
+GetToken(AsmJSParser& parser, TokenKind* tkp)
+{
+ TokenStream& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.getToken(&tk, TokenStream::Operand))
+ return false;
+ if (tk != TOK_SEMI)
+ break;
+ }
+ *tkp = tk;
+ return true;
+}
+
+static bool
+PeekToken(AsmJSParser& parser, TokenKind* tkp)
+{
+ TokenStream& ts = parser.tokenStream;
+ TokenKind tk;
+ while (true) {
+ if (!ts.peekToken(&tk, TokenStream::Operand))
+ return false;
+ if (tk != TOK_SEMI)
+ break;
+ ts.consumeKnownToken(TOK_SEMI, TokenStream::Operand);
+ }
+ *tkp = tk;
+ return true;
+}
+
+static bool
+ParseVarOrConstStatement(AsmJSParser& parser, ParseNode** var)
+{
+ TokenKind tk;
+ if (!PeekToken(parser, &tk))
+ return false;
+ if (tk != TOK_VAR && tk != TOK_CONST) {
+ *var = nullptr;
+ return true;
+ }
+
+ *var = parser.statementListItem(YieldIsName);
+ if (!*var)
+ return false;
+
+ MOZ_ASSERT((*var)->isKind(PNK_VAR) || (*var)->isKind(PNK_CONST));
+ return true;
+}
+
+/*****************************************************************************/
+
+// Represents the type and value of an asm.js numeric literal.
+//
+// A literal is a double iff the literal contains a decimal point (even if the
+// fractional part is 0). Otherwise, integers may be classified:
+// fixnum: [0, 2^31)
+// negative int: [-2^31, 0)
+// big unsigned: [2^31, 2^32)
+// out of range: otherwise
+// Lastly, a literal may be a float literal which is any double or integer
+// literal coerced with Math.fround.
+//
+// This class distinguishes between signed and unsigned integer SIMD types like
+// Int32x4 and Uint32x4, and so does Type below. The wasm ValType and ExprType
+// enums, and the wasm::Val class do not.
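+//
+// Illustrative classifications (added comment, not in the original): 0 and 5
+// are Fixnum, -1 is NegativeInt, 0x80000000 (2147483648) is BigUnsigned,
+// 0x100000000 is OutOfRangeInt, 5.0 is Double (it has a decimal point), and
+// fround(5.0) is Float.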
+class NumLit
+{
+ public:
+ enum Which {
+ Fixnum,
+ NegativeInt,
+ BigUnsigned,
+ Double,
+ Float,
+ Int8x16,
+ Int16x8,
+ Int32x4,
+ Uint8x16,
+ Uint16x8,
+ Uint32x4,
+ Float32x4,
+ Bool8x16,
+ Bool16x8,
+ Bool32x4,
+ OutOfRangeInt = -1
+ };
+
+ private:
+ Which which_;
+ union {
+ Value scalar_;
+ SimdConstant simd_;
+ } u;
+
+ public:
+ NumLit() = default;
+
+ NumLit(Which w, const Value& v) : which_(w) {
+ u.scalar_ = v;
+ MOZ_ASSERT(!isSimd());
+ }
+
+ NumLit(Which w, SimdConstant c) : which_(w) {
+ u.simd_ = c;
+ MOZ_ASSERT(isSimd());
+ }
+
+ Which which() const {
+ return which_;
+ }
+
+ int32_t toInt32() const {
+ MOZ_ASSERT(which_ == Fixnum || which_ == NegativeInt || which_ == BigUnsigned);
+ return u.scalar_.toInt32();
+ }
+
+ uint32_t toUint32() const {
+ return (uint32_t)toInt32();
+ }
+
+ RawF64 toDouble() const {
+ MOZ_ASSERT(which_ == Double);
+ return RawF64(u.scalar_.toDouble());
+ }
+
+ RawF32 toFloat() const {
+ MOZ_ASSERT(which_ == Float);
+ return RawF32(float(u.scalar_.toDouble()));
+ }
+
+ Value scalarValue() const {
+ MOZ_ASSERT(which_ != OutOfRangeInt);
+ return u.scalar_;
+ }
+
+ bool isSimd() const
+ {
+ return which_ == Int8x16 || which_ == Uint8x16 || which_ == Int16x8 ||
+ which_ == Uint16x8 || which_ == Int32x4 || which_ == Uint32x4 ||
+ which_ == Float32x4 || which_ == Bool8x16 || which_ == Bool16x8 ||
+ which_ == Bool32x4;
+ }
+
+ const SimdConstant& simdValue() const {
+ MOZ_ASSERT(isSimd());
+ return u.simd_;
+ }
+
+ bool valid() const {
+ return which_ != OutOfRangeInt;
+ }
+
+ bool isZeroBits() const {
+ MOZ_ASSERT(valid());
+ switch (which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return toInt32() == 0;
+ case NumLit::Double:
+ return toDouble().bits() == 0;
+ case NumLit::Float:
+ return toFloat().bits() == 0;
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ case NumLit::Bool8x16:
+ return simdValue() == SimdConstant::SplatX16(0);
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ case NumLit::Bool16x8:
+ return simdValue() == SimdConstant::SplatX8(0);
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ case NumLit::Bool32x4:
+ return simdValue() == SimdConstant::SplatX4(0);
+ case NumLit::Float32x4:
+ return simdValue() == SimdConstant::SplatX4(0.f);
+ case NumLit::OutOfRangeInt:
+ MOZ_CRASH("can't be here because of valid() check above");
+ }
+ return false;
+ }
+
+ Val value() const {
+ switch (which_) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return Val(toUint32());
+ case NumLit::Float:
+ return Val(toFloat());
+ case NumLit::Double:
+ return Val(toDouble());
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ return Val(simdValue().asInt8x16());
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ return Val(simdValue().asInt16x8());
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ return Val(simdValue().asInt32x4());
+ case NumLit::Float32x4:
+ return Val(simdValue().asFloat32x4());
+ case NumLit::Bool8x16:
+ return Val(simdValue().asInt8x16(), ValType::B8x16);
+ case NumLit::Bool16x8:
+ return Val(simdValue().asInt16x8(), ValType::B16x8);
+ case NumLit::Bool32x4:
+ return Val(simdValue().asInt32x4(), ValType::B32x4);
+ case NumLit::OutOfRangeInt:;
+ }
+ MOZ_CRASH("bad literal");
+ }
+};
+
+// Represents the type of a general asm.js expression.
+//
+// A canonical subset of types representing the coercion targets: Int, Float,
+// Double, and the SIMD types. This is almost equivalent to wasm::ValType,
+// except the integer SIMD types have signed/unsigned variants.
+//
+// Void is also part of the canonical subset which then maps to wasm::ExprType.
+//
+// Note that while the canonical subset distinguishes signed and unsigned SIMD
+// types, it only uses |Int| to represent signed and unsigned 32-bit integers.
+// This is because the scalar coercions x|0 and x>>>0 work with any kind of
+// integer input, while the SIMD check functions throw a TypeError if the passed
+// type doesn't match.
+//
+class Type
+{
+ public:
+ enum Which {
+ Fixnum = NumLit::Fixnum,
+ Signed = NumLit::NegativeInt,
+ Unsigned = NumLit::BigUnsigned,
+ DoubleLit = NumLit::Double,
+ Float = NumLit::Float,
+ Int8x16 = NumLit::Int8x16,
+ Int16x8 = NumLit::Int16x8,
+ Int32x4 = NumLit::Int32x4,
+ Uint8x16 = NumLit::Uint8x16,
+ Uint16x8 = NumLit::Uint16x8,
+ Uint32x4 = NumLit::Uint32x4,
+ Float32x4 = NumLit::Float32x4,
+ Bool8x16 = NumLit::Bool8x16,
+ Bool16x8 = NumLit::Bool16x8,
+ Bool32x4 = NumLit::Bool32x4,
+ Double,
+ MaybeDouble,
+ MaybeFloat,
+ Floatish,
+ Int,
+ Intish,
+ Void
+ };
+
+ private:
+ Which which_;
+
+ public:
+ Type() = default;
+ MOZ_IMPLICIT Type(Which w) : which_(w) {}
+ MOZ_IMPLICIT Type(SimdType type) {
+ switch (type) {
+ case SimdType::Int8x16: which_ = Int8x16; return;
+ case SimdType::Int16x8: which_ = Int16x8; return;
+ case SimdType::Int32x4: which_ = Int32x4; return;
+ case SimdType::Uint8x16: which_ = Uint8x16; return;
+ case SimdType::Uint16x8: which_ = Uint16x8; return;
+ case SimdType::Uint32x4: which_ = Uint32x4; return;
+ case SimdType::Float32x4: which_ = Float32x4; return;
+ case SimdType::Bool8x16: which_ = Bool8x16; return;
+ case SimdType::Bool16x8: which_ = Bool16x8; return;
+ case SimdType::Bool32x4: which_ = Bool32x4; return;
+ default: break;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad SimdType");
+ }
+
+ // Map an already canonicalized Type to the return type of a function call.
+ static Type ret(Type t) {
+ MOZ_ASSERT(t.isCanonical());
+ // The 32-bit external type is Signed, not Int.
+ return t.isInt() ? Signed: t;
+ }
+
+ static Type lit(const NumLit& lit) {
+ MOZ_ASSERT(lit.valid());
+ Which which = Type::Which(lit.which());
+ MOZ_ASSERT(which >= Fixnum && which <= Bool32x4);
+ Type t;
+ t.which_ = which;
+ return t;
+ }
+
+ // Map |t| to one of the canonical vartype representations of a
+ // wasm::ExprType.
+ static Type canonicalize(Type t) {
+ switch(t.which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ return Int;
+
+ case Float:
+ return Float;
+
+ case DoubleLit:
+ case Double:
+ return Double;
+
+ case Void:
+ return Void;
+
+ case Int8x16:
+ case Int16x8:
+ case Int32x4:
+ case Uint8x16:
+ case Uint16x8:
+ case Uint32x4:
+ case Float32x4:
+ case Bool8x16:
+ case Bool16x8:
+ case Bool32x4:
+ return t;
+
+ case MaybeDouble:
+ case MaybeFloat:
+ case Floatish:
+ case Intish:
+            // These types need some kind of coercion; they can't be mapped
+ // to an ExprType.
+ break;
+ }
+ MOZ_CRASH("Invalid vartype");
+ }
+
+ Which which() const { return which_; }
+
+ bool operator==(Type rhs) const { return which_ == rhs.which_; }
+ bool operator!=(Type rhs) const { return which_ != rhs.which_; }
+
+ bool operator<=(Type rhs) const {
+ switch (rhs.which_) {
+ case Signed: return isSigned();
+ case Unsigned: return isUnsigned();
+ case DoubleLit: return isDoubleLit();
+ case Double: return isDouble();
+ case Float: return isFloat();
+ case Int8x16: return isInt8x16();
+ case Int16x8: return isInt16x8();
+ case Int32x4: return isInt32x4();
+ case Uint8x16: return isUint8x16();
+ case Uint16x8: return isUint16x8();
+ case Uint32x4: return isUint32x4();
+ case Float32x4: return isFloat32x4();
+ case Bool8x16: return isBool8x16();
+ case Bool16x8: return isBool16x8();
+ case Bool32x4: return isBool32x4();
+ case MaybeDouble: return isMaybeDouble();
+ case MaybeFloat: return isMaybeFloat();
+ case Floatish: return isFloatish();
+ case Int: return isInt();
+ case Intish: return isIntish();
+ case Fixnum: return isFixnum();
+ case Void: return isVoid();
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected rhs type");
+ }
+
+ bool isFixnum() const {
+ return which_ == Fixnum;
+ }
+
+ bool isSigned() const {
+ return which_ == Signed || which_ == Fixnum;
+ }
+
+ bool isUnsigned() const {
+ return which_ == Unsigned || which_ == Fixnum;
+ }
+
+ bool isInt() const {
+ return isSigned() || isUnsigned() || which_ == Int;
+ }
+
+ bool isIntish() const {
+ return isInt() || which_ == Intish;
+ }
+
+ bool isDoubleLit() const {
+ return which_ == DoubleLit;
+ }
+
+ bool isDouble() const {
+ return isDoubleLit() || which_ == Double;
+ }
+
+ bool isMaybeDouble() const {
+ return isDouble() || which_ == MaybeDouble;
+ }
+
+ bool isFloat() const {
+ return which_ == Float;
+ }
+
+ bool isMaybeFloat() const {
+ return isFloat() || which_ == MaybeFloat;
+ }
+
+ bool isFloatish() const {
+ return isMaybeFloat() || which_ == Floatish;
+ }
+
+ bool isVoid() const {
+ return which_ == Void;
+ }
+
+ bool isExtern() const {
+ return isDouble() || isSigned();
+ }
+
+ bool isInt8x16() const {
+ return which_ == Int8x16;
+ }
+
+ bool isInt16x8() const {
+ return which_ == Int16x8;
+ }
+
+ bool isInt32x4() const {
+ return which_ == Int32x4;
+ }
+
+ bool isUint8x16() const {
+ return which_ == Uint8x16;
+ }
+
+ bool isUint16x8() const {
+ return which_ == Uint16x8;
+ }
+
+ bool isUint32x4() const {
+ return which_ == Uint32x4;
+ }
+
+ bool isFloat32x4() const {
+ return which_ == Float32x4;
+ }
+
+ bool isBool8x16() const {
+ return which_ == Bool8x16;
+ }
+
+ bool isBool16x8() const {
+ return which_ == Bool16x8;
+ }
+
+ bool isBool32x4() const {
+ return which_ == Bool32x4;
+ }
+
+ bool isSimd() const {
+ return isInt8x16() || isInt16x8() || isInt32x4() || isUint8x16() || isUint16x8() ||
+ isUint32x4() || isFloat32x4() || isBool8x16() || isBool16x8() || isBool32x4();
+ }
+
+ bool isUnsignedSimd() const {
+ return isUint8x16() || isUint16x8() || isUint32x4();
+ }
+
+ // Check if this is one of the valid types for a function argument.
+ bool isArgType() const {
+ return isInt() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd());
+ }
+
+ // Check if this is one of the valid types for a function return value.
+ bool isReturnType() const {
+ return isSigned() || isFloat() || isDouble() || (isSimd() && !isUnsignedSimd()) ||
+ isVoid();
+ }
+
+ // Check if this is one of the valid types for a global variable.
+ bool isGlobalVarType() const {
+ return isArgType();
+ }
+
+ // Check if this is one of the canonical vartype representations of a
+ // wasm::ExprType. See Type::canonicalize().
+ bool isCanonical() const {
+ switch (which()) {
+ case Int:
+ case Float:
+ case Double:
+ case Void:
+ return true;
+ default:
+ return isSimd();
+ }
+ }
+
+ // Check if this is a canonical representation of a wasm::ValType.
+ bool isCanonicalValType() const {
+ return !isVoid() && isCanonical();
+ }
+
+ // Convert this canonical type to a wasm::ExprType.
+ ExprType canonicalToExprType() const {
+ switch (which()) {
+ case Int: return ExprType::I32;
+ case Float: return ExprType::F32;
+ case Double: return ExprType::F64;
+ case Void: return ExprType::Void;
+ case Uint8x16:
+ case Int8x16: return ExprType::I8x16;
+ case Uint16x8:
+ case Int16x8: return ExprType::I16x8;
+ case Uint32x4:
+ case Int32x4: return ExprType::I32x4;
+ case Float32x4: return ExprType::F32x4;
+ case Bool8x16: return ExprType::B8x16;
+ case Bool16x8: return ExprType::B16x8;
+ case Bool32x4: return ExprType::B32x4;
+ default: MOZ_CRASH("Need canonical type");
+ }
+ }
+
+ // Convert this canonical type to a wasm::ValType.
+ ValType canonicalToValType() const {
+ return NonVoidToValType(canonicalToExprType());
+ }
+
+ // Convert this type to a wasm::ExprType for use in a wasm
+ // block signature. This works for all types, including non-canonical
+ // ones. Consequently, the type isn't valid for subsequent asm.js
+ // validation; it's only valid for use in producing wasm.
+ ExprType toWasmBlockSignatureType() const {
+ switch (which()) {
+ case Fixnum:
+ case Signed:
+ case Unsigned:
+ case Int:
+ case Intish:
+ return ExprType::I32;
+
+ case Float:
+ case MaybeFloat:
+ case Floatish:
+ return ExprType::F32;
+
+ case DoubleLit:
+ case Double:
+ case MaybeDouble:
+ return ExprType::F64;
+
+ case Void:
+ return ExprType::Void;
+
+ case Uint8x16:
+ case Int8x16: return ExprType::I8x16;
+ case Uint16x8:
+ case Int16x8: return ExprType::I16x8;
+ case Uint32x4:
+ case Int32x4: return ExprType::I32x4;
+ case Float32x4: return ExprType::F32x4;
+ case Bool8x16: return ExprType::B8x16;
+ case Bool16x8: return ExprType::B16x8;
+ case Bool32x4: return ExprType::B32x4;
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+
+ const char* toChars() const {
+ switch (which_) {
+ case Double: return "double";
+ case DoubleLit: return "doublelit";
+ case MaybeDouble: return "double?";
+ case Float: return "float";
+ case Floatish: return "floatish";
+ case MaybeFloat: return "float?";
+ case Fixnum: return "fixnum";
+ case Int: return "int";
+ case Signed: return "signed";
+ case Unsigned: return "unsigned";
+ case Intish: return "intish";
+ case Int8x16: return "int8x16";
+ case Int16x8: return "int16x8";
+ case Int32x4: return "int32x4";
+ case Uint8x16: return "uint8x16";
+ case Uint16x8: return "uint16x8";
+ case Uint32x4: return "uint32x4";
+ case Float32x4: return "float32x4";
+ case Bool8x16: return "bool8x16";
+ case Bool16x8: return "bool16x8";
+ case Bool32x4: return "bool32x4";
+ case Void: return "void";
+ }
+ MOZ_CRASH("Invalid Type");
+ }
+};
+
+static const unsigned VALIDATION_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+
+// The ModuleValidator encapsulates the entire validation of an asm.js module.
+// Its lifetime spans the validation of the top-level components of an asm.js
+// module (all the globals), the emission of bytecode for all the functions in
+// the module, and the validation of the module's function pointer tables. It
+// also finishes the compilation of all the module's stubs.
+//
+// Rooting note: ModuleValidator is a stack class that contains unrooted
+// PropertyName (JSAtom) pointers. This is safe because it cannot be
+// constructed without a TokenStream reference. TokenStream is itself a stack
+// class that cannot be constructed without an AutoKeepAtoms being live on the
+// stack, which prevents collection of atoms.
+//
+// ModuleValidator is marked as rooted in the rooting analysis. Don't add
+// non-JSAtom pointers, or this will break!
+class MOZ_STACK_CLASS ModuleValidator
+{
+ public:
+ class Func
+ {
+ PropertyName* name_;
+ uint32_t firstUse_;
+ uint32_t index_;
+ uint32_t srcBegin_;
+ uint32_t srcEnd_;
+ bool defined_;
+
+ public:
+ Func(PropertyName* name, uint32_t firstUse, uint32_t index)
+ : name_(name), firstUse_(firstUse), index_(index),
+ srcBegin_(0), srcEnd_(0), defined_(false)
+ {}
+
+ PropertyName* name() const { return name_; }
+ uint32_t firstUse() const { return firstUse_; }
+ bool defined() const { return defined_; }
+ uint32_t index() const { return index_; }
+
+ void define(ParseNode* fn) {
+ MOZ_ASSERT(!defined_);
+ defined_ = true;
+ srcBegin_ = fn->pn_pos.begin;
+ srcEnd_ = fn->pn_pos.end;
+ }
+
+ uint32_t srcBegin() const { MOZ_ASSERT(defined_); return srcBegin_; }
+ uint32_t srcEnd() const { MOZ_ASSERT(defined_); return srcEnd_; }
+ };
+
+ typedef Vector<const Func*> ConstFuncVector;
+ typedef Vector<Func*> FuncVector;
+
+ class FuncPtrTable
+ {
+ uint32_t sigIndex_;
+ PropertyName* name_;
+ uint32_t firstUse_;
+ uint32_t mask_;
+ bool defined_;
+
+ FuncPtrTable(FuncPtrTable&& rhs) = delete;
+
+ public:
+ FuncPtrTable(uint32_t sigIndex, PropertyName* name, uint32_t firstUse, uint32_t mask)
+ : sigIndex_(sigIndex), name_(name), firstUse_(firstUse), mask_(mask), defined_(false)
+ {}
+
+ uint32_t sigIndex() const { return sigIndex_; }
+ PropertyName* name() const { return name_; }
+ uint32_t firstUse() const { return firstUse_; }
+ unsigned mask() const { return mask_; }
+ bool defined() const { return defined_; }
+ void define() { MOZ_ASSERT(!defined_); defined_ = true; }
+ };
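+
+    // Illustrative asm.js source (added comment, not in the original): for a
+    // table declared as `var tbl = [f, g, h, k];` and called as
+    // `tbl[i & 3](x)|0`, mask() would be 3 and sigIndex() identifies the
+    // signature shared by all of the table's entries.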
+
+ typedef Vector<FuncPtrTable*> FuncPtrTableVector;
+
+ class Global
+ {
+ public:
+ enum Which {
+ Variable,
+ ConstantLiteral,
+ ConstantImport,
+ Function,
+ FuncPtrTable,
+ FFI,
+ ArrayView,
+ ArrayViewCtor,
+ MathBuiltinFunction,
+ AtomicsBuiltinFunction,
+ SimdCtor,
+ SimdOp
+ };
+
+ private:
+ Which which_;
+ union {
+ struct {
+ Type::Which type_;
+ unsigned index_;
+ NumLit literalValue_;
+ } varOrConst;
+ uint32_t funcIndex_;
+ uint32_t funcPtrTableIndex_;
+ uint32_t ffiIndex_;
+ struct {
+ Scalar::Type viewType_;
+ } viewInfo;
+ AsmJSMathBuiltinFunction mathBuiltinFunc_;
+ AsmJSAtomicsBuiltinFunction atomicsBuiltinFunc_;
+ SimdType simdCtorType_;
+ struct {
+ SimdType type_;
+ SimdOperation which_;
+ } simdOp;
+ } u;
+
+ friend class ModuleValidator;
+ friend class js::LifoAlloc;
+
+ explicit Global(Which which) : which_(which) {}
+
+ public:
+ Which which() const {
+ return which_;
+ }
+ Type varOrConstType() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantLiteral || which_ == ConstantImport);
+ return u.varOrConst.type_;
+ }
+ unsigned varOrConstIndex() const {
+ MOZ_ASSERT(which_ == Variable || which_ == ConstantImport);
+ return u.varOrConst.index_;
+ }
+ bool isConst() const {
+ return which_ == ConstantLiteral || which_ == ConstantImport;
+ }
+ NumLit constLiteralValue() const {
+ MOZ_ASSERT(which_ == ConstantLiteral);
+ return u.varOrConst.literalValue_;
+ }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(which_ == Function);
+ return u.funcIndex_;
+ }
+ uint32_t funcPtrTableIndex() const {
+ MOZ_ASSERT(which_ == FuncPtrTable);
+ return u.funcPtrTableIndex_;
+ }
+ unsigned ffiIndex() const {
+ MOZ_ASSERT(which_ == FFI);
+ return u.ffiIndex_;
+ }
+ bool isAnyArrayView() const {
+ return which_ == ArrayView || which_ == ArrayViewCtor;
+ }
+ Scalar::Type viewType() const {
+ MOZ_ASSERT(isAnyArrayView());
+ return u.viewInfo.viewType_;
+ }
+ bool isMathFunction() const {
+ return which_ == MathBuiltinFunction;
+ }
+ AsmJSMathBuiltinFunction mathBuiltinFunction() const {
+ MOZ_ASSERT(which_ == MathBuiltinFunction);
+ return u.mathBuiltinFunc_;
+ }
+ bool isAtomicsFunction() const {
+ return which_ == AtomicsBuiltinFunction;
+ }
+ AsmJSAtomicsBuiltinFunction atomicsBuiltinFunction() const {
+ MOZ_ASSERT(which_ == AtomicsBuiltinFunction);
+ return u.atomicsBuiltinFunc_;
+ }
+ bool isSimdCtor() const {
+ return which_ == SimdCtor;
+ }
+ SimdType simdCtorType() const {
+ MOZ_ASSERT(which_ == SimdCtor);
+ return u.simdCtorType_;
+ }
+ bool isSimdOperation() const {
+ return which_ == SimdOp;
+ }
+ SimdOperation simdOperation() const {
+ MOZ_ASSERT(which_ == SimdOp);
+ return u.simdOp.which_;
+ }
+ SimdType simdOperationType() const {
+ MOZ_ASSERT(which_ == SimdOp);
+ return u.simdOp.type_;
+ }
+ };
+
+ struct MathBuiltin
+ {
+ enum Kind { Function, Constant };
+ Kind kind;
+
+ union {
+ double cst;
+ AsmJSMathBuiltinFunction func;
+ } u;
+
+ MathBuiltin() : kind(Kind(-1)) {}
+ explicit MathBuiltin(double cst) : kind(Constant) {
+ u.cst = cst;
+ }
+ explicit MathBuiltin(AsmJSMathBuiltinFunction func) : kind(Function) {
+ u.func = func;
+ }
+ };
+
+ struct ArrayView
+ {
+ ArrayView(PropertyName* name, Scalar::Type type)
+ : name(name), type(type)
+ {}
+
+ PropertyName* name;
+ Scalar::Type type;
+ };
+
+ private:
+ class NamedSig
+ {
+ PropertyName* name_;
+ const SigWithId* sig_;
+
+ public:
+ NamedSig(PropertyName* name, const SigWithId& sig)
+ : name_(name), sig_(&sig)
+ {}
+ PropertyName* name() const {
+ return name_;
+ }
+ const Sig& sig() const {
+ return *sig_;
+ }
+
+ // Implement HashPolicy:
+ struct Lookup {
+ PropertyName* name;
+ const Sig& sig;
+ Lookup(PropertyName* name, const Sig& sig) : name(name), sig(sig) {}
+ };
+ static HashNumber hash(Lookup l) {
+ return HashGeneric(l.name, l.sig.hash());
+ }
+ static bool match(NamedSig lhs, Lookup rhs) {
+ return lhs.name_ == rhs.name && *lhs.sig_ == rhs.sig;
+ }
+ };
+ typedef HashMap<NamedSig, uint32_t, NamedSig> ImportMap;
+ typedef HashMap<const SigWithId*, uint32_t, SigHashPolicy> SigMap;
+ typedef HashMap<PropertyName*, Global*> GlobalMap;
+ typedef HashMap<PropertyName*, MathBuiltin> MathNameMap;
+ typedef HashMap<PropertyName*, AsmJSAtomicsBuiltinFunction> AtomicsNameMap;
+ typedef HashMap<PropertyName*, SimdOperation> SimdOperationNameMap;
+ typedef Vector<ArrayView> ArrayViewVector;
+
+ ExclusiveContext* cx_;
+ AsmJSParser& parser_;
+ ParseNode* moduleFunctionNode_;
+ PropertyName* moduleFunctionName_;
+ PropertyName* globalArgumentName_;
+ PropertyName* importArgumentName_;
+ PropertyName* bufferArgumentName_;
+ MathNameMap standardLibraryMathNames_;
+ AtomicsNameMap standardLibraryAtomicsNames_;
+ SimdOperationNameMap standardLibrarySimdOpNames_;
+ RootedFunction dummyFunction_;
+
+ // Validation-internal state:
+ LifoAlloc validationLifo_;
+ FuncVector functions_;
+ FuncPtrTableVector funcPtrTables_;
+ GlobalMap globalMap_;
+ SigMap sigMap_;
+ ImportMap importMap_;
+ ArrayViewVector arrayViews_;
+ bool atomicsPresent_;
+ bool simdPresent_;
+
+ // State used to build the AsmJSModule in finish():
+ ModuleGenerator mg_;
+ MutableAsmJSMetadata asmJSMetadata_;
+
+ // Error reporting:
+ UniqueChars errorString_;
+ uint32_t errorOffset_;
+ bool errorOverRecursed_;
+
+ // Helpers:
+ bool addStandardLibraryMathName(const char* name, AsmJSMathBuiltinFunction func) {
+ JSAtom* atom = Atomize(cx_, name, strlen(name));
+ if (!atom)
+ return false;
+ MathBuiltin builtin(func);
+ return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
+ }
+ bool addStandardLibraryMathName(const char* name, double cst) {
+ JSAtom* atom = Atomize(cx_, name, strlen(name));
+ if (!atom)
+ return false;
+ MathBuiltin builtin(cst);
+ return standardLibraryMathNames_.putNew(atom->asPropertyName(), builtin);
+ }
+ bool addStandardLibraryAtomicsName(const char* name, AsmJSAtomicsBuiltinFunction func) {
+ JSAtom* atom = Atomize(cx_, name, strlen(name));
+ if (!atom)
+ return false;
+ return standardLibraryAtomicsNames_.putNew(atom->asPropertyName(), func);
+ }
+ bool addStandardLibrarySimdOpName(const char* name, SimdOperation op) {
+ JSAtom* atom = Atomize(cx_, name, strlen(name));
+ if (!atom)
+ return false;
+ return standardLibrarySimdOpNames_.putNew(atom->asPropertyName(), op);
+ }
+ bool newSig(Sig&& sig, uint32_t* sigIndex) {
+ *sigIndex = 0;
+ if (mg_.numSigs() >= MaxSigs)
+ return failCurrentOffset("too many signatures");
+
+ *sigIndex = mg_.numSigs();
+ mg_.initSig(*sigIndex, Move(sig));
+ return true;
+ }
+ bool declareSig(Sig&& sig, uint32_t* sigIndex) {
+ SigMap::AddPtr p = sigMap_.lookupForAdd(sig);
+ if (p) {
+ *sigIndex = p->value();
+ MOZ_ASSERT(mg_.sig(*sigIndex) == sig);
+ return true;
+ }
+
+ return newSig(Move(sig), sigIndex) &&
+ sigMap_.add(p, &mg_.sig(*sigIndex), *sigIndex);
+ }
+
+ public:
+ ModuleValidator(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* moduleFunctionNode)
+ : cx_(cx),
+ parser_(parser),
+ moduleFunctionNode_(moduleFunctionNode),
+ moduleFunctionName_(FunctionName(moduleFunctionNode)),
+ globalArgumentName_(nullptr),
+ importArgumentName_(nullptr),
+ bufferArgumentName_(nullptr),
+ standardLibraryMathNames_(cx),
+ standardLibraryAtomicsNames_(cx),
+ standardLibrarySimdOpNames_(cx),
+ dummyFunction_(cx),
+ validationLifo_(VALIDATION_LIFO_DEFAULT_CHUNK_SIZE),
+ functions_(cx),
+ funcPtrTables_(cx),
+ globalMap_(cx),
+ sigMap_(cx),
+ importMap_(cx),
+ arrayViews_(cx),
+ atomicsPresent_(false),
+ simdPresent_(false),
+ mg_(ImportVector()),
+ errorString_(nullptr),
+ errorOffset_(UINT32_MAX),
+ errorOverRecursed_(false)
+ {}
+
+ ~ModuleValidator() {
+ if (errorString_) {
+ MOZ_ASSERT(errorOffset_ != UINT32_MAX);
+ tokenStream().reportAsmJSError(errorOffset_,
+ JSMSG_USE_ASM_TYPE_FAIL,
+ errorString_.get());
+ }
+ if (errorOverRecursed_)
+ ReportOverRecursed(cx_);
+ }
+
+ bool init() {
+ asmJSMetadata_ = cx_->new_<AsmJSMetadata>();
+ if (!asmJSMetadata_)
+ return false;
+
+ asmJSMetadata_->srcStart = moduleFunctionNode_->pn_body->pn_pos.begin;
+ asmJSMetadata_->srcBodyStart = parser_.tokenStream.currentToken().pos.end;
+ asmJSMetadata_->strict = parser_.pc->sc()->strict() &&
+ !parser_.pc->sc()->hasExplicitUseStrict();
+ asmJSMetadata_->scriptSource.reset(parser_.ss);
+
+ if (!globalMap_.init() || !sigMap_.init() || !importMap_.init())
+ return false;
+
+ if (!standardLibraryMathNames_.init() ||
+ !addStandardLibraryMathName("sin", AsmJSMathBuiltin_sin) ||
+ !addStandardLibraryMathName("cos", AsmJSMathBuiltin_cos) ||
+ !addStandardLibraryMathName("tan", AsmJSMathBuiltin_tan) ||
+ !addStandardLibraryMathName("asin", AsmJSMathBuiltin_asin) ||
+ !addStandardLibraryMathName("acos", AsmJSMathBuiltin_acos) ||
+ !addStandardLibraryMathName("atan", AsmJSMathBuiltin_atan) ||
+ !addStandardLibraryMathName("ceil", AsmJSMathBuiltin_ceil) ||
+ !addStandardLibraryMathName("floor", AsmJSMathBuiltin_floor) ||
+ !addStandardLibraryMathName("exp", AsmJSMathBuiltin_exp) ||
+ !addStandardLibraryMathName("log", AsmJSMathBuiltin_log) ||
+ !addStandardLibraryMathName("pow", AsmJSMathBuiltin_pow) ||
+ !addStandardLibraryMathName("sqrt", AsmJSMathBuiltin_sqrt) ||
+ !addStandardLibraryMathName("abs", AsmJSMathBuiltin_abs) ||
+ !addStandardLibraryMathName("atan2", AsmJSMathBuiltin_atan2) ||
+ !addStandardLibraryMathName("imul", AsmJSMathBuiltin_imul) ||
+ !addStandardLibraryMathName("clz32", AsmJSMathBuiltin_clz32) ||
+ !addStandardLibraryMathName("fround", AsmJSMathBuiltin_fround) ||
+ !addStandardLibraryMathName("min", AsmJSMathBuiltin_min) ||
+ !addStandardLibraryMathName("max", AsmJSMathBuiltin_max) ||
+ !addStandardLibraryMathName("E", M_E) ||
+ !addStandardLibraryMathName("LN10", M_LN10) ||
+ !addStandardLibraryMathName("LN2", M_LN2) ||
+ !addStandardLibraryMathName("LOG2E", M_LOG2E) ||
+ !addStandardLibraryMathName("LOG10E", M_LOG10E) ||
+ !addStandardLibraryMathName("PI", M_PI) ||
+ !addStandardLibraryMathName("SQRT1_2", M_SQRT1_2) ||
+ !addStandardLibraryMathName("SQRT2", M_SQRT2))
+ {
+ return false;
+ }
+
+ if (!standardLibraryAtomicsNames_.init() ||
+ !addStandardLibraryAtomicsName("compareExchange", AsmJSAtomicsBuiltin_compareExchange) ||
+ !addStandardLibraryAtomicsName("exchange", AsmJSAtomicsBuiltin_exchange) ||
+ !addStandardLibraryAtomicsName("load", AsmJSAtomicsBuiltin_load) ||
+ !addStandardLibraryAtomicsName("store", AsmJSAtomicsBuiltin_store) ||
+ !addStandardLibraryAtomicsName("add", AsmJSAtomicsBuiltin_add) ||
+ !addStandardLibraryAtomicsName("sub", AsmJSAtomicsBuiltin_sub) ||
+ !addStandardLibraryAtomicsName("and", AsmJSAtomicsBuiltin_and) ||
+ !addStandardLibraryAtomicsName("or", AsmJSAtomicsBuiltin_or) ||
+ !addStandardLibraryAtomicsName("xor", AsmJSAtomicsBuiltin_xor) ||
+ !addStandardLibraryAtomicsName("isLockFree", AsmJSAtomicsBuiltin_isLockFree))
+ {
+ return false;
+ }
+
+#define ADDSTDLIBSIMDOPNAME(op) || !addStandardLibrarySimdOpName(#op, SimdOperation::Fn_##op)
+ if (!standardLibrarySimdOpNames_.init()
+ FORALL_SIMD_ASMJS_OP(ADDSTDLIBSIMDOPNAME))
+ {
+ return false;
+ }
+#undef ADDSTDLIBSIMDOPNAME
+
+ // This flows into FunctionBox, so must be tenured.
+ dummyFunction_ = NewScriptedFunction(cx_, 0, JSFunction::INTERPRETED, nullptr,
+ /* proto = */ nullptr, gc::AllocKind::FUNCTION,
+ TenuredObject);
+ if (!dummyFunction_)
+ return false;
+
+ ScriptedCaller scriptedCaller;
+ if (parser_.ss->filename()) {
+ scriptedCaller.line = scriptedCaller.column = 0; // unused
+ scriptedCaller.filename = DuplicateString(parser_.ss->filename());
+ if (!scriptedCaller.filename)
+ return false;
+ }
+
+ CompileArgs args;
+ if (!args.initFromContext(cx_, Move(scriptedCaller)))
+ return false;
+
+ auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
+ if (!genData ||
+ !genData->sigs.resize(MaxSigs) ||
+ !genData->funcSigs.resize(MaxFuncs) ||
+ !genData->funcImportGlobalDataOffsets.resize(AsmJSMaxImports) ||
+ !genData->tables.resize(MaxTables) ||
+ !genData->asmJSSigToTableIndex.resize(MaxSigs))
+ {
+ return false;
+ }
+
+ genData->minMemoryLength = RoundUpToNextValidAsmJSHeapLength(0);
+
+ if (!mg_.init(Move(genData), args, asmJSMetadata_.get()))
+ return false;
+
+ return true;
+ }
+
+ ExclusiveContext* cx() const { return cx_; }
+ PropertyName* moduleFunctionName() const { return moduleFunctionName_; }
+ PropertyName* globalArgumentName() const { return globalArgumentName_; }
+ PropertyName* importArgumentName() const { return importArgumentName_; }
+ PropertyName* bufferArgumentName() const { return bufferArgumentName_; }
+ ModuleGenerator& mg() { return mg_; }
+ AsmJSParser& parser() const { return parser_; }
+ TokenStream& tokenStream() const { return parser_.tokenStream; }
+ RootedFunction& dummyFunction() { return dummyFunction_; }
+ bool supportsSimd() const { return cx_->jitSupportsSimd(); }
+ bool atomicsPresent() const { return atomicsPresent_; }
+ uint32_t minMemoryLength() const { return mg_.minMemoryLength(); }
+
+ void initModuleFunctionName(PropertyName* name) {
+ MOZ_ASSERT(!moduleFunctionName_);
+ moduleFunctionName_ = name;
+ }
+ MOZ_MUST_USE bool initGlobalArgumentName(PropertyName* n) {
+ MOZ_ASSERT(n->isTenured());
+ globalArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->globalArgumentName = StringToNewUTF8CharsZ(cx_, *n);
+ if (!asmJSMetadata_->globalArgumentName)
+ return false;
+ }
+ return true;
+ }
+ MOZ_MUST_USE bool initImportArgumentName(PropertyName* n) {
+ MOZ_ASSERT(n->isTenured());
+ importArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->importArgumentName = StringToNewUTF8CharsZ(cx_, *n);
+ if (!asmJSMetadata_->importArgumentName)
+ return false;
+ }
+ return true;
+ }
+ MOZ_MUST_USE bool initBufferArgumentName(PropertyName* n) {
+ MOZ_ASSERT(n->isTenured());
+ bufferArgumentName_ = n;
+ if (n) {
+ asmJSMetadata_->bufferArgumentName = StringToNewUTF8CharsZ(cx_, *n);
+ if (!asmJSMetadata_->bufferArgumentName)
+ return false;
+ }
+ return true;
+ }
+ bool addGlobalVarInit(PropertyName* var, const NumLit& lit, Type type, bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+ MOZ_ASSERT(type == Type::canonicalize(Type::lit(lit)));
+
+ uint32_t index;
+ if (!mg_.addGlobal(type.canonicalToValType(), isConst, &index))
+ return false;
+
+ Global::Which which = isConst ? Global::ConstantLiteral : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global)
+ return false;
+ global->u.varOrConst.index_ = index;
+ global->u.varOrConst.type_ = (isConst ? Type::lit(lit) : type).which();
+ if (isConst)
+ global->u.varOrConst.literalValue_ = lit;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, nullptr);
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitConstant;
+ g.pod.u.var.u.val_ = lit.value();
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addGlobalVarImport(PropertyName* var, PropertyName* field, Type type, bool isConst) {
+ MOZ_ASSERT(type.isGlobalVarType());
+
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ uint32_t index;
+ ValType valType = type.canonicalToValType();
+ if (!mg_.addGlobal(valType, isConst, &index))
+ return false;
+
+ Global::Which which = isConst ? Global::ConstantImport : Global::Variable;
+ Global* global = validationLifo_.new_<Global>(which);
+ if (!global)
+ return false;
+ global->u.varOrConst.index_ = index;
+ global->u.varOrConst.type_ = type.which();
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::Variable, Move(fieldChars));
+ g.pod.u.var.initKind_ = AsmJSGlobal::InitImport;
+ g.pod.u.var.u.importType_ = valType;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
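+
+    // Illustrative asm.js globals for the two methods above (added comment,
+    // not in the original source):
+    //   var c = 42;           // addGlobalVarInit, InitConstant
+    //   var d = +foreign.d;   // addGlobalVarImport, InitImport, ValType::F64
+    //   var i = foreign.i|0;  // addGlobalVarImport, InitImport, ValType::I32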
+ bool addArrayView(PropertyName* var, Scalar::Type vt, PropertyName* maybeField) {
+ UniqueChars fieldChars;
+ if (maybeField) {
+ fieldChars = StringToNewUTF8CharsZ(cx_, *maybeField);
+ if (!fieldChars)
+ return false;
+ }
+
+ if (!arrayViews_.append(ArrayView(var, vt)))
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayView);
+ if (!global)
+ return false;
+ global->u.viewInfo.viewType_ = vt;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayView, Move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addMathBuiltinFunction(PropertyName* var, AsmJSMathBuiltinFunction func,
+ PropertyName* field)
+ {
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::MathBuiltinFunction);
+ if (!global)
+ return false;
+ global->u.mathBuiltinFunc_ = func;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::MathBuiltinFunction, Move(fieldChars));
+ g.pod.u.mathBuiltinFunc_ = func;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ private:
+ bool addGlobalDoubleConstant(PropertyName* var, double constant) {
+ Global* global = validationLifo_.new_<Global>(Global::ConstantLiteral);
+ if (!global)
+ return false;
+ global->u.varOrConst.type_ = Type::Double;
+ global->u.varOrConst.literalValue_ = NumLit(NumLit::Double, DoubleValue(constant));
+ return globalMap_.putNew(var, global);
+ }
+ public:
+ bool addMathBuiltinConstant(PropertyName* var, double constant, PropertyName* field) {
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ if (!addGlobalDoubleConstant(var, constant))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, Move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::MathConstant;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addGlobalConstant(PropertyName* var, double constant, PropertyName* field) {
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ if (!addGlobalDoubleConstant(var, constant))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::Constant, Move(fieldChars));
+ g.pod.u.constant.value_ = constant;
+ g.pod.u.constant.kind_ = AsmJSGlobal::GlobalConstant;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addAtomicsBuiltinFunction(PropertyName* var, AsmJSAtomicsBuiltinFunction func,
+ PropertyName* field)
+ {
+ if (!JitOptions.asmJSAtomicsEnable)
+ return failCurrentOffset("asm.js Atomics only enabled in wasm test mode");
+
+ atomicsPresent_ = true;
+
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::AtomicsBuiltinFunction);
+ if (!global)
+ return false;
+ global->u.atomicsBuiltinFunc_ = func;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::AtomicsBuiltinFunction, Move(fieldChars));
+ g.pod.u.atomicsBuiltinFunc_ = func;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addSimdCtor(PropertyName* var, SimdType type, PropertyName* field) {
+ simdPresent_ = true;
+
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::SimdCtor);
+ if (!global)
+ return false;
+ global->u.simdCtorType_ = type;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::SimdCtor, Move(fieldChars));
+ g.pod.u.simdCtorType_ = type;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addSimdOperation(PropertyName* var, SimdType type, SimdOperation op, PropertyName* field) {
+ simdPresent_ = true;
+
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::SimdOp);
+ if (!global)
+ return false;
+ global->u.simdOp.type_ = type;
+ global->u.simdOp.which_ = op;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::SimdOp, Move(fieldChars));
+ g.pod.u.simdOp.type_ = type;
+ g.pod.u.simdOp.which_ = op;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addArrayViewCtor(PropertyName* var, Scalar::Type vt, PropertyName* field) {
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ Global* global = validationLifo_.new_<Global>(Global::ArrayViewCtor);
+ if (!global)
+ return false;
+ global->u.viewInfo.viewType_ = vt;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::ArrayViewCtor, Move(fieldChars));
+ g.pod.u.viewType_ = vt;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addFFI(PropertyName* var, PropertyName* field) {
+ UniqueChars fieldChars = StringToNewUTF8CharsZ(cx_, *field);
+ if (!fieldChars)
+ return false;
+
+ if (asmJSMetadata_->numFFIs == UINT32_MAX)
+ return false;
+ uint32_t ffiIndex = asmJSMetadata_->numFFIs++;
+
+ Global* global = validationLifo_.new_<Global>(Global::FFI);
+ if (!global)
+ return false;
+ global->u.ffiIndex_ = ffiIndex;
+ if (!globalMap_.putNew(var, global))
+ return false;
+
+ AsmJSGlobal g(AsmJSGlobal::FFI, Move(fieldChars));
+ g.pod.u.ffiIndex_ = ffiIndex;
+ return asmJSMetadata_->asmJSGlobals.append(Move(g));
+ }
+ bool addExportField(ParseNode* pn, const Func& func, PropertyName* maybeField) {
+ // Record the field name of this export.
+ CacheableChars fieldChars;
+ if (maybeField)
+ fieldChars = StringToNewUTF8CharsZ(cx_, *maybeField);
+ else
+ fieldChars = DuplicateString("");
+ if (!fieldChars)
+ return false;
+
+        // Declare which function is exported, which gives us an index into
+        // the module's FuncExportVector.
+ if (!mg_.addFuncExport(Move(fieldChars), func.index()))
+ return false;
+
+        // The exported function might have already been exported, in which
+        // case the index will refer into the range of AsmJSExports.
+ return asmJSMetadata_->asmJSExports.emplaceBack(func.index(),
+ func.srcBegin() - asmJSMetadata_->srcStart,
+ func.srcEnd() - asmJSMetadata_->srcStart);
+ }
+ bool addFunction(PropertyName* name, uint32_t firstUse, Sig&& sig, Func** func) {
+ uint32_t sigIndex;
+ if (!declareSig(Move(sig), &sigIndex))
+ return false;
+ uint32_t funcIndex = AsmJSFirstDefFuncIndex + numFunctions();
+ if (funcIndex >= MaxFuncs)
+ return failCurrentOffset("too many functions");
+ mg_.initFuncSig(funcIndex, sigIndex);
+ Global* global = validationLifo_.new_<Global>(Global::Function);
+ if (!global)
+ return false;
+ global->u.funcIndex_ = funcIndex;
+ if (!globalMap_.putNew(name, global))
+ return false;
+ *func = validationLifo_.new_<Func>(name, firstUse, funcIndex);
+ return *func && functions_.append(*func);
+ }
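+    // A function-pointer table has length mask+1; asm.js call sites index it
+    // as table[expr & mask].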
+ bool declareFuncPtrTable(Sig&& sig, PropertyName* name, uint32_t firstUse, uint32_t mask,
+ uint32_t* index)
+ {
+ if (mask > MaxTableElems)
+ return failCurrentOffset("function pointer table too big");
+ uint32_t sigIndex;
+ if (!newSig(Move(sig), &sigIndex))
+ return false;
+ if (!mg_.initSigTableLength(sigIndex, mask + 1))
+ return false;
+ Global* global = validationLifo_.new_<Global>(Global::FuncPtrTable);
+ if (!global)
+ return false;
+ global->u.funcPtrTableIndex_ = *index = funcPtrTables_.length();
+ if (!globalMap_.putNew(name, global))
+ return false;
+ FuncPtrTable* t = validationLifo_.new_<FuncPtrTable>(sigIndex, name, firstUse, mask);
+ return t && funcPtrTables_.append(t);
+ }
+ bool defineFuncPtrTable(uint32_t funcPtrTableIndex, Uint32Vector&& elems) {
+ FuncPtrTable& table = *funcPtrTables_[funcPtrTableIndex];
+ if (table.defined())
+ return false;
+ table.define();
+ return mg_.initSigTableElems(table.sigIndex(), Move(elems));
+ }
+ bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* funcIndex) {
+ ImportMap::AddPtr p = importMap_.lookupForAdd(NamedSig::Lookup(name, sig));
+ if (p) {
+ *funcIndex = p->value();
+ return true;
+ }
+ *funcIndex = asmJSMetadata_->asmJSImports.length();
+ if (*funcIndex > AsmJSMaxImports)
+ return failCurrentOffset("too many imports");
+ if (!asmJSMetadata_->asmJSImports.emplaceBack(ffiIndex))
+ return false;
+ uint32_t sigIndex;
+ if (!declareSig(Move(sig), &sigIndex))
+ return false;
+ if (!mg_.initImport(*funcIndex, sigIndex))
+ return false;
+ return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *funcIndex);
+ }
+
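+    // Fold a constant heap access into the module's memory requirements: if
+    // the accessed range [start, start+width) fits in the asm.js addressable
+    // range, bump the minimum memory length (rounded up to a valid asm.js
+    // heap length) so the access is always in bounds.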
+ bool tryConstantAccess(uint64_t start, uint64_t width) {
+ MOZ_ASSERT(UINT64_MAX - start > width);
+ uint64_t len = start + width;
+ if (len > uint64_t(INT32_MAX) + 1)
+ return false;
+ len = RoundUpToNextValidAsmJSHeapLength(len);
+ if (len > mg_.minMemoryLength())
+ mg_.bumpMinMemoryLength(len);
+ return true;
+ }
+
+ // Error handling.
+ bool hasAlreadyFailed() const {
+ return !!errorString_;
+ }
+
+ bool failOffset(uint32_t offset, const char* str) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(str);
+ errorOffset_ = offset;
+ errorString_ = DuplicateString(str);
+ return false;
+ }
+
+ bool failCurrentOffset(const char* str) {
+ return failOffset(tokenStream().currentToken().pos.begin, str);
+ }
+
+ bool fail(ParseNode* pn, const char* str) {
+ return failOffset(pn->pn_pos.begin, str);
+ }
+
+ bool failfVAOffset(uint32_t offset, const char* fmt, va_list ap) {
+ MOZ_ASSERT(!hasAlreadyFailed());
+ MOZ_ASSERT(errorOffset_ == UINT32_MAX);
+ MOZ_ASSERT(fmt);
+ errorOffset_ = offset;
+ errorString_.reset(JS_vsmprintf(fmt, ap));
+ return false;
+ }
+
+ bool failfOffset(uint32_t offset, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(offset, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failNameOffset(uint32_t offset, const char* fmt, PropertyName* name) {
+ // This function is invoked without the caller properly rooting its locals.
+ gc::AutoSuppressGC suppress(cx_);
+ JSAutoByteString bytes;
+ if (AtomToPrintableString(cx_, name, &bytes))
+ failfOffset(offset, fmt, bytes.ptr());
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, PropertyName* name) {
+ return failNameOffset(pn->pn_pos.begin, fmt, name);
+ }
+
+ bool failOverRecursed() {
+ errorOverRecursed_ = true;
+ return false;
+ }
+
+ unsigned numArrayViews() const {
+ return arrayViews_.length();
+ }
+ const ArrayView& arrayView(unsigned i) const {
+ return arrayViews_[i];
+ }
+ unsigned numFunctions() const {
+ return functions_.length();
+ }
+ Func& function(unsigned i) const {
+ return *functions_[i];
+ }
+ unsigned numFuncPtrTables() const {
+ return funcPtrTables_.length();
+ }
+ FuncPtrTable& funcPtrTable(unsigned i) const {
+ return *funcPtrTables_[i];
+ }
+
+ const Global* lookupGlobal(PropertyName* name) const {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name))
+ return p->value();
+ return nullptr;
+ }
+
+ Func* lookupFunction(PropertyName* name) {
+ if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
+ Global* value = p->value();
+ if (value->which() == Global::Function) {
+ MOZ_ASSERT(value->funcIndex() >= AsmJSFirstDefFuncIndex);
+ return functions_[value->funcIndex() - AsmJSFirstDefFuncIndex];
+ }
+ }
+ return nullptr;
+ }
+
+ bool lookupStandardLibraryMathName(PropertyName* name, MathBuiltin* mathBuiltin) const {
+ if (MathNameMap::Ptr p = standardLibraryMathNames_.lookup(name)) {
+ *mathBuiltin = p->value();
+ return true;
+ }
+ return false;
+ }
+ bool lookupStandardLibraryAtomicsName(PropertyName* name, AsmJSAtomicsBuiltinFunction* atomicsBuiltin) const {
+ if (AtomicsNameMap::Ptr p = standardLibraryAtomicsNames_.lookup(name)) {
+ *atomicsBuiltin = p->value();
+ return true;
+ }
+ return false;
+ }
+ bool lookupStandardSimdOpName(PropertyName* name, SimdOperation* op) const {
+ if (SimdOperationNameMap::Ptr p = standardLibrarySimdOpNames_.lookup(name)) {
+ *op = p->value();
+ return true;
+ }
+ return false;
+ }
+
+ bool startFunctionBodies() {
+ return mg_.startFuncDefs();
+ }
+ bool finishFunctionBodies() {
+ return mg_.finishFuncDefs();
+ }
+ SharedModule finish() {
+ if (!arrayViews_.empty())
+ mg_.initMemoryUsage(atomicsPresent_ ? MemoryUsage::Shared : MemoryUsage::Unshared);
+
+ asmJSMetadata_->usesSimd = simdPresent_;
+
+ MOZ_ASSERT(asmJSMetadata_->asmJSFuncNames.empty());
+ for (const Func* func : functions_) {
+ CacheableChars funcName = StringToNewUTF8CharsZ(cx_, *func->name());
+ if (!funcName || !asmJSMetadata_->asmJSFuncNames.emplaceBack(Move(funcName)))
+ return nullptr;
+ }
+
+ uint32_t endBeforeCurly = tokenStream().currentToken().pos.end;
+ asmJSMetadata_->srcLength = endBeforeCurly - asmJSMetadata_->srcStart;
+
+ TokenPos pos;
+ JS_ALWAYS_TRUE(tokenStream().peekTokenPos(&pos, TokenStream::Operand));
+ uint32_t endAfterCurly = pos.end;
+ asmJSMetadata_->srcLengthWithRightBrace = endAfterCurly - asmJSMetadata_->srcStart;
+
+ // asm.js does not have any wasm bytecode to save; view-source is
+ // provided through the ScriptSource.
+ SharedBytes bytes = js_new<ShareableBytes>();
+ if (!bytes)
+ return nullptr;
+
+ return mg_.finish(*bytes);
+ }
+};
+
+/*****************************************************************************/
+// Numeric literal utilities
+
+static bool
+IsNumericNonFloatLiteral(ParseNode* pn)
+{
+ // Note: '-' is never rolled into the number; numbers are always positive
+ // and negations must be applied manually.
+ return pn->isKind(PNK_NUMBER) ||
+ (pn->isKind(PNK_NEG) && UnaryKid(pn)->isKind(PNK_NUMBER));
+}
+
+static bool
+IsCallToGlobal(ModuleValidator& m, ParseNode* pn, const ModuleValidator::Global** global)
+{
+ if (!pn->isKind(PNK_CALL))
+ return false;
+
+ ParseNode* callee = CallCallee(pn);
+ if (!callee->isKind(PNK_NAME))
+ return false;
+
+ *global = m.lookupGlobal(callee->name());
+ return !!*global;
+}
+
+static bool
+IsCoercionCall(ModuleValidator& m, ParseNode* pn, Type* coerceTo, ParseNode** coercedExpr)
+{
+ const ModuleValidator::Global* global;
+ if (!IsCallToGlobal(m, pn, &global))
+ return false;
+
+ if (CallArgListLength(pn) != 1)
+ return false;
+
+ if (coercedExpr)
+ *coercedExpr = CallArgList(pn);
+
+ if (global->isMathFunction() && global->mathBuiltinFunction() == AsmJSMathBuiltin_fround) {
+ *coerceTo = Type::Float;
+ return true;
+ }
+
+ if (global->isSimdOperation() && global->simdOperation() == SimdOperation::Fn_check) {
+ *coerceTo = global->simdOperationType();
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+IsFloatLiteral(ModuleValidator& m, ParseNode* pn)
+{
+ ParseNode* coercedExpr;
+ Type coerceTo;
+ if (!IsCoercionCall(m, pn, &coerceTo, &coercedExpr))
+ return false;
+ // Don't fold into || to avoid clang/memcheck bug (bug 1077031).
+ if (!coerceTo.isFloat())
+ return false;
+ return IsNumericNonFloatLiteral(coercedExpr);
+}
+
+static bool
+IsSimdTuple(ModuleValidator& m, ParseNode* pn, SimdType* type)
+{
+ const ModuleValidator::Global* global;
+ if (!IsCallToGlobal(m, pn, &global))
+ return false;
+
+ if (!global->isSimdCtor())
+ return false;
+
+ if (CallArgListLength(pn) != GetSimdLanes(global->simdCtorType()))
+ return false;
+
+ *type = global->simdCtorType();
+ return true;
+}
+
+static bool
+IsNumericLiteral(ModuleValidator& m, ParseNode* pn, bool* isSimd = nullptr);
+
+static NumLit
+ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn);
+
+static inline bool
+IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32);
+
+static bool
+IsSimdLiteral(ModuleValidator& m, ParseNode* pn)
+{
+ SimdType type;
+ if (!IsSimdTuple(m, pn, &type))
+ return false;
+
+ ParseNode* arg = CallArgList(pn);
+ unsigned length = GetSimdLanes(type);
+ for (unsigned i = 0; i < length; i++) {
+ if (!IsNumericLiteral(m, arg))
+ return false;
+
+ uint32_t _;
+ switch (type) {
+ case SimdType::Int8x16:
+ case SimdType::Int16x8:
+ case SimdType::Int32x4:
+ case SimdType::Uint8x16:
+ case SimdType::Uint16x8:
+ case SimdType::Uint32x4:
+ case SimdType::Bool8x16:
+ case SimdType::Bool16x8:
+ case SimdType::Bool32x4:
+ if (!IsLiteralInt(m, arg, &_))
+ return false;
+ break;
+ case SimdType::Float32x4:
+ if (!IsNumericNonFloatLiteral(arg))
+ return false;
+ break;
+ default:
+ MOZ_CRASH("unhandled simd type");
+ }
+
+ arg = NextNode(arg);
+ }
+
+ MOZ_ASSERT(arg == nullptr);
+ return true;
+}
+
+static bool
+IsNumericLiteral(ModuleValidator& m, ParseNode* pn, bool* isSimd)
+{
+ if (IsNumericNonFloatLiteral(pn) || IsFloatLiteral(m, pn))
+ return true;
+ if (IsSimdLiteral(m, pn)) {
+ if (isSimd)
+ *isSimd = true;
+ return true;
+ }
+ return false;
+}
+
+// The JS grammar treats -42 as -(42) (i.e., with separate grammar productions
+// for the unary - and the literal 42). However, the asm.js spec recognizes
+// -42 (modulo parens, so -(42) and -((42))) as a single literal, so fold the
+// two potential parse nodes into a single double value.
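+// For example, -3 parses as PNK_NEG wrapping PNK_NUMBER(3); this helper
+// returns the folded double -3 and, when requested, reports the inner number
+// node through *out.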
+static double
+ExtractNumericNonFloatValue(ParseNode* pn, ParseNode** out = nullptr)
+{
+ MOZ_ASSERT(IsNumericNonFloatLiteral(pn));
+
+ if (pn->isKind(PNK_NEG)) {
+ pn = UnaryKid(pn);
+ if (out)
+ *out = pn;
+ return -NumberNodeValue(pn);
+ }
+
+ return NumberNodeValue(pn);
+}
+
+static NumLit
+ExtractSimdValue(ModuleValidator& m, ParseNode* pn)
+{
+ MOZ_ASSERT(IsSimdLiteral(m, pn));
+
+ SimdType type = SimdType::Count;
+ JS_ALWAYS_TRUE(IsSimdTuple(m, pn, &type));
+ MOZ_ASSERT(CallArgListLength(pn) == GetSimdLanes(type));
+
+ ParseNode* arg = CallArgList(pn);
+ switch (type) {
+ case SimdType::Int8x16:
+ case SimdType::Uint8x16: {
+ MOZ_ASSERT(GetSimdLanes(type) == 16);
+ int8_t val[16];
+ for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = int8_t(u32);
+ }
+ MOZ_ASSERT(arg == nullptr);
+ NumLit::Which w = type == SimdType::Uint8x16 ? NumLit::Uint8x16 : NumLit::Int8x16;
+ return NumLit(w, SimdConstant::CreateX16(val));
+ }
+ case SimdType::Int16x8:
+ case SimdType::Uint16x8: {
+ MOZ_ASSERT(GetSimdLanes(type) == 8);
+ int16_t val[8];
+ for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = int16_t(u32);
+ }
+ MOZ_ASSERT(arg == nullptr);
+ NumLit::Which w = type == SimdType::Uint16x8 ? NumLit::Uint16x8 : NumLit::Int16x8;
+ return NumLit(w, SimdConstant::CreateX8(val));
+ }
+ case SimdType::Int32x4:
+ case SimdType::Uint32x4: {
+ MOZ_ASSERT(GetSimdLanes(type) == 4);
+ int32_t val[4];
+ for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = int32_t(u32);
+ }
+ MOZ_ASSERT(arg == nullptr);
+ NumLit::Which w = type == SimdType::Uint32x4 ? NumLit::Uint32x4 : NumLit::Int32x4;
+ return NumLit(w, SimdConstant::CreateX4(val));
+ }
+ case SimdType::Float32x4: {
+ MOZ_ASSERT(GetSimdLanes(type) == 4);
+ float val[4];
+ for (size_t i = 0; i < 4; i++, arg = NextNode(arg))
+ val[i] = float(ExtractNumericNonFloatValue(arg));
+ MOZ_ASSERT(arg == nullptr);
+ return NumLit(NumLit::Float32x4, SimdConstant::CreateX4(val));
+ }
+ case SimdType::Bool8x16: {
+ MOZ_ASSERT(GetSimdLanes(type) == 16);
+ int8_t val[16];
+ for (size_t i = 0; i < 16; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = u32 ? -1 : 0;
+ }
+ MOZ_ASSERT(arg == nullptr);
+ return NumLit(NumLit::Bool8x16, SimdConstant::CreateX16(val));
+ }
+ case SimdType::Bool16x8: {
+ MOZ_ASSERT(GetSimdLanes(type) == 8);
+ int16_t val[8];
+ for (size_t i = 0; i < 8; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = u32 ? -1 : 0;
+ }
+ MOZ_ASSERT(arg == nullptr);
+ return NumLit(NumLit::Bool16x8, SimdConstant::CreateX8(val));
+ }
+ case SimdType::Bool32x4: {
+ MOZ_ASSERT(GetSimdLanes(type) == 4);
+ int32_t val[4];
+ for (size_t i = 0; i < 4; i++, arg = NextNode(arg)) {
+ uint32_t u32;
+ JS_ALWAYS_TRUE(IsLiteralInt(m, arg, &u32));
+ val[i] = u32 ? -1 : 0;
+ }
+ MOZ_ASSERT(arg == nullptr);
+ return NumLit(NumLit::Bool32x4, SimdConstant::CreateX4(val));
+ }
+ default:
+ break;
+ }
+
+ MOZ_CRASH("Unexpected SIMD type.");
+}
+
+static NumLit
+ExtractNumericLiteral(ModuleValidator& m, ParseNode* pn)
+{
+ MOZ_ASSERT(IsNumericLiteral(m, pn));
+
+ if (pn->isKind(PNK_CALL)) {
+ // Float literals are explicitly coerced and thus the coerced literal may be
+ // any valid (non-float) numeric literal.
+ if (CallArgListLength(pn) == 1) {
+ pn = CallArgList(pn);
+ double d = ExtractNumericNonFloatValue(pn);
+ return NumLit(NumLit::Float, DoubleValue(d));
+ }
+
+ return ExtractSimdValue(m, pn);
+ }
+
+ double d = ExtractNumericNonFloatValue(pn, &pn);
+
+ // The asm.js spec syntactically distinguishes any literal containing a
+ // decimal point or the literal -0 as having double type.
+ if (NumberNodeHasFrac(pn) || IsNegativeZero(d))
+ return NumLit(NumLit::Double, DoubleValue(d));
+
+ // The syntactic checks above rule out these double values.
+ MOZ_ASSERT(!IsNegativeZero(d));
+ MOZ_ASSERT(!IsNaN(d));
+
+ // Although doubles can only *precisely* represent 53-bit integers, they
+ // can *imprecisely* represent integers much bigger than an int64_t.
+ // Furthermore, d may be inf or -inf. In both cases, casting to an int64_t
+ // is undefined, so test against the integer bounds using doubles.
+ if (d < double(INT32_MIN) || d > double(UINT32_MAX))
+ return NumLit(NumLit::OutOfRangeInt, UndefinedValue());
+
+    // With the above syntactic and range limitations, d is definitely an
+    // integer in the range [INT32_MIN, UINT32_MAX].
+ int64_t i64 = int64_t(d);
+ if (i64 >= 0) {
+ if (i64 <= INT32_MAX)
+ return NumLit(NumLit::Fixnum, Int32Value(i64));
+ MOZ_ASSERT(i64 <= UINT32_MAX);
+ return NumLit(NumLit::BigUnsigned, Int32Value(uint32_t(i64)));
+ }
+ MOZ_ASSERT(i64 >= INT32_MIN);
+ return NumLit(NumLit::NegativeInt, Int32Value(i64));
+}
+
+static inline bool
+IsLiteralInt(const NumLit& lit, uint32_t* u32)
+{
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::BigUnsigned:
+ case NumLit::NegativeInt:
+ *u32 = lit.toUint32();
+ return true;
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ case NumLit::Float32x4:
+ case NumLit::Bool8x16:
+ case NumLit::Bool16x8:
+ case NumLit::Bool32x4:
+ return false;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal type");
+}
+
+static inline bool
+IsLiteralInt(ModuleValidator& m, ParseNode* pn, uint32_t* u32)
+{
+ return IsNumericLiteral(m, pn) &&
+ IsLiteralInt(ExtractNumericLiteral(m, pn), u32);
+}
+
+/*****************************************************************************/
+
+namespace {
+
+#define CASE(TYPE, OP) case SimdOperation::Fn_##OP: return Op::TYPE##OP;
+#define I8x16CASE(OP) CASE(I8x16, OP)
+#define I16x8CASE(OP) CASE(I16x8, OP)
+#define I32x4CASE(OP) CASE(I32x4, OP)
+#define F32x4CASE(OP) CASE(F32x4, OP)
+#define B8x16CASE(OP) CASE(B8x16, OP)
+#define B16x8CASE(OP) CASE(B16x8, OP)
+#define B32x4CASE(OP) CASE(B32x4, OP)
+#define ENUMERATE(TYPE, FOR_ALL, DO) \
+ switch(op) { \
+ case SimdOperation::Constructor: return Op::TYPE##Constructor; \
+ FOR_ALL(DO) \
+ default: break; \
+ }
+
+static inline Op
+SimdToOp(SimdType type, SimdOperation op)
+{
+ switch (type) {
+ case SimdType::Uint8x16:
+ // Handle the special unsigned opcodes, then fall through to Int8x16.
+ switch (op) {
+ case SimdOperation::Fn_addSaturate: return Op::I8x16addSaturateU;
+ case SimdOperation::Fn_subSaturate: return Op::I8x16subSaturateU;
+ case SimdOperation::Fn_extractLane: return Op::I8x16extractLaneU;
+ case SimdOperation::Fn_shiftRightByScalar: return Op::I8x16shiftRightByScalarU;
+ case SimdOperation::Fn_lessThan: return Op::I8x16lessThanU;
+ case SimdOperation::Fn_lessThanOrEqual: return Op::I8x16lessThanOrEqualU;
+ case SimdOperation::Fn_greaterThan: return Op::I8x16greaterThanU;
+ case SimdOperation::Fn_greaterThanOrEqual: return Op::I8x16greaterThanOrEqualU;
+ case SimdOperation::Fn_fromInt8x16Bits: return Op::Limit;
+ default: break;
+ }
+ MOZ_FALLTHROUGH;
+ case SimdType::Int8x16:
+ // Bitcasts Uint8x16 <--> Int8x16 become noops.
+ switch (op) {
+ case SimdOperation::Fn_fromUint8x16Bits: return Op::Limit;
+ case SimdOperation::Fn_fromUint16x8Bits: return Op::I8x16fromInt16x8Bits;
+ case SimdOperation::Fn_fromUint32x4Bits: return Op::I8x16fromInt32x4Bits;
+ default: break;
+ }
+ ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
+ break;
+
+ case SimdType::Uint16x8:
+ // Handle the special unsigned opcodes, then fall through to Int16x8.
+ switch(op) {
+ case SimdOperation::Fn_addSaturate: return Op::I16x8addSaturateU;
+ case SimdOperation::Fn_subSaturate: return Op::I16x8subSaturateU;
+ case SimdOperation::Fn_extractLane: return Op::I16x8extractLaneU;
+ case SimdOperation::Fn_shiftRightByScalar: return Op::I16x8shiftRightByScalarU;
+ case SimdOperation::Fn_lessThan: return Op::I16x8lessThanU;
+ case SimdOperation::Fn_lessThanOrEqual: return Op::I16x8lessThanOrEqualU;
+ case SimdOperation::Fn_greaterThan: return Op::I16x8greaterThanU;
+ case SimdOperation::Fn_greaterThanOrEqual: return Op::I16x8greaterThanOrEqualU;
+ case SimdOperation::Fn_fromInt16x8Bits: return Op::Limit;
+ default: break;
+ }
+ MOZ_FALLTHROUGH;
+ case SimdType::Int16x8:
+ // Bitcasts Uint16x8 <--> Int16x8 become noops.
+ switch (op) {
+ case SimdOperation::Fn_fromUint8x16Bits: return Op::I16x8fromInt8x16Bits;
+ case SimdOperation::Fn_fromUint16x8Bits: return Op::Limit;
+ case SimdOperation::Fn_fromUint32x4Bits: return Op::I16x8fromInt32x4Bits;
+ default: break;
+ }
+ ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
+ break;
+
+ case SimdType::Uint32x4:
+ // Handle the special unsigned opcodes, then fall through to Int32x4.
+ switch(op) {
+ case SimdOperation::Fn_shiftRightByScalar: return Op::I32x4shiftRightByScalarU;
+ case SimdOperation::Fn_lessThan: return Op::I32x4lessThanU;
+ case SimdOperation::Fn_lessThanOrEqual: return Op::I32x4lessThanOrEqualU;
+ case SimdOperation::Fn_greaterThan: return Op::I32x4greaterThanU;
+ case SimdOperation::Fn_greaterThanOrEqual: return Op::I32x4greaterThanOrEqualU;
+ case SimdOperation::Fn_fromFloat32x4: return Op::I32x4fromFloat32x4U;
+ case SimdOperation::Fn_fromInt32x4Bits: return Op::Limit;
+ default: break;
+ }
+ MOZ_FALLTHROUGH;
+ case SimdType::Int32x4:
+ // Bitcasts Uint32x4 <--> Int32x4 become noops.
+ switch (op) {
+ case SimdOperation::Fn_fromUint8x16Bits: return Op::I32x4fromInt8x16Bits;
+ case SimdOperation::Fn_fromUint16x8Bits: return Op::I32x4fromInt16x8Bits;
+ case SimdOperation::Fn_fromUint32x4Bits: return Op::Limit;
+ default: break;
+ }
+ ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
+ break;
+
+ case SimdType::Float32x4:
+ switch (op) {
+ case SimdOperation::Fn_fromUint8x16Bits: return Op::F32x4fromInt8x16Bits;
+ case SimdOperation::Fn_fromUint16x8Bits: return Op::F32x4fromInt16x8Bits;
+ case SimdOperation::Fn_fromUint32x4Bits: return Op::F32x4fromInt32x4Bits;
+ default: break;
+ }
+ ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
+ break;
+
+ case SimdType::Bool8x16:
+ ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
+ break;
+
+ case SimdType::Bool16x8:
+ ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
+ break;
+
+ case SimdType::Bool32x4:
+ ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
+ break;
+
+ default: break;
+ }
+ MOZ_CRASH("unexpected SIMD (type, operator) combination");
+}
+
+#undef CASE
+#undef I8x16CASE
+#undef I16x8CASE
+#undef I32x4CASE
+#undef F32x4CASE
+#undef B8x16CASE
+#undef B16x8CASE
+#undef B32x4CASE
+#undef ENUMERATE
+
+typedef Vector<PropertyName*, 4, SystemAllocPolicy> NameVector;
+
+// Encapsulates the building of an asm bytecode function from asm.js function
+// source code, packing the asm.js code into the asm bytecode form that can be
+// decoded and compiled with a FunctionCompiler.
+class MOZ_STACK_CLASS FunctionValidator
+{
+ public:
+ struct Local
+ {
+ Type type;
+ unsigned slot;
+ Local(Type t, unsigned slot) : type(t), slot(slot) {
+ MOZ_ASSERT(type.isCanonicalValType());
+ }
+ };
+
+ private:
+ typedef HashMap<PropertyName*, Local> LocalMap;
+ typedef HashMap<PropertyName*, uint32_t> LabelMap;
+
+ ModuleValidator& m_;
+ ParseNode* fn_;
+
+ FunctionGenerator fg_;
+ Maybe<Encoder> encoder_;
+
+ LocalMap locals_;
+
+ // Labels
+ LabelMap breakLabels_;
+ LabelMap continueLabels_;
+ Uint32Vector breakableStack_;
+ Uint32Vector continuableStack_;
+ uint32_t blockDepth_;
+
+ bool hasAlreadyReturned_;
+ ExprType ret_;
+
+ public:
+ FunctionValidator(ModuleValidator& m, ParseNode* fn)
+ : m_(m),
+ fn_(fn),
+ locals_(m.cx()),
+ breakLabels_(m.cx()),
+ continueLabels_(m.cx()),
+ blockDepth_(0),
+ hasAlreadyReturned_(false),
+ ret_(ExprType::Limit)
+ {}
+
+ ModuleValidator& m() const { return m_; }
+ ExclusiveContext* cx() const { return m_.cx(); }
+ ParseNode* fn() const { return fn_; }
+
+ bool init(PropertyName* name, unsigned line) {
+ if (!locals_.init() || !breakLabels_.init() || !continueLabels_.init())
+ return false;
+
+ if (!m_.mg().startFuncDef(line, &fg_))
+ return false;
+
+ encoder_.emplace(fg_.bytes());
+ return true;
+ }
+
+ bool finish(uint32_t funcIndex) {
+ MOZ_ASSERT(!blockDepth_);
+ MOZ_ASSERT(breakableStack_.empty());
+ MOZ_ASSERT(continuableStack_.empty());
+ MOZ_ASSERT(breakLabels_.empty());
+ MOZ_ASSERT(continueLabels_.empty());
+ for (auto iter = locals_.all(); !iter.empty(); iter.popFront()) {
+ if (iter.front().value().type.isSimd()) {
+ setUsesSimd();
+ break;
+ }
+ }
+
+ return m_.mg().finishFuncDef(funcIndex, &fg_);
+ }
+
+ bool fail(ParseNode* pn, const char* str) {
+ return m_.fail(pn, str);
+ }
+
+ bool failf(ParseNode* pn, const char* fmt, ...) MOZ_FORMAT_PRINTF(3, 4) {
+ va_list ap;
+ va_start(ap, fmt);
+ m_.failfVAOffset(pn->pn_pos.begin, fmt, ap);
+ va_end(ap);
+ return false;
+ }
+
+ bool failName(ParseNode* pn, const char* fmt, PropertyName* name) {
+ return m_.failName(pn, fmt, name);
+ }
+
+ /***************************************************** Attributes */
+
+ void setUsesSimd() {
+ fg_.setUsesSimd();
+ }
+
+ void setUsesAtomics() {
+ fg_.setUsesAtomics();
+ }
+
+ /***************************************************** Local scope setup */
+
+ bool addLocal(ParseNode* pn, PropertyName* name, Type type) {
+ LocalMap::AddPtr p = locals_.lookupForAdd(name);
+ if (p)
+ return failName(pn, "duplicate local name '%s' not allowed", name);
+ return locals_.add(p, name, Local(type, locals_.count()));
+ }
+
+ /****************************** For consistency of returns in a function */
+
+ bool hasAlreadyReturned() const {
+ return hasAlreadyReturned_;
+ }
+
+ ExprType returnedType() const {
+ return ret_;
+ }
+
+ void setReturnedType(ExprType ret) {
+ ret_ = ret;
+ hasAlreadyReturned_ = true;
+ }
+
+ /**************************************************************** Labels */
+ private:
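+    // Branch depths are relative in wasm: a branch to the block at absolute
+    // depth 'absolute', emitted while nested blockDepth_ blocks deep, is
+    // encoded as (blockDepth_ - 1 - absolute) enclosing blocks to skip.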
+ bool writeBr(uint32_t absolute, Op op = Op::Br) {
+ MOZ_ASSERT(op == Op::Br || op == Op::BrIf);
+ MOZ_ASSERT(absolute < blockDepth_);
+ return encoder().writeOp(op) &&
+ encoder().writeVarU32(blockDepth_ - 1 - absolute);
+ }
+ void removeLabel(PropertyName* label, LabelMap* map) {
+ LabelMap::Ptr p = map->lookup(label);
+ MOZ_ASSERT(p);
+ map->remove(p);
+ }
+
+ public:
+ bool pushBreakableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(ExprType::Void)) &&
+ breakableStack_.append(blockDepth_++);
+ }
+ bool popBreakableBlock() {
+ JS_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushUnbreakableBlock(const NameVector* labels = nullptr) {
+ if (labels) {
+ for (PropertyName* label : *labels) {
+ if (!breakLabels_.putNew(label, blockDepth_))
+ return false;
+ }
+ }
+ blockDepth_++;
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(ExprType::Void));
+ }
+ bool popUnbreakableBlock(const NameVector* labels = nullptr) {
+ if (labels) {
+ for (PropertyName* label : *labels)
+ removeLabel(label, &breakLabels_);
+ }
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+
+ bool pushContinuableBlock() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(ExprType::Void)) &&
+ continuableStack_.append(blockDepth_++);
+ }
+ bool popContinuableBlock() {
+ JS_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End);
+ }
+
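+    // A loop is encoded as an outer Block (the break target) wrapping a Loop
+    // (the continue target), so two depths are pushed: the outer one onto
+    // breakableStack_ and the inner one onto continuableStack_.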
+ bool pushLoop() {
+ return encoder().writeOp(Op::Block) &&
+ encoder().writeFixedU8(uint8_t(ExprType::Void)) &&
+ encoder().writeOp(Op::Loop) &&
+ encoder().writeFixedU8(uint8_t(ExprType::Void)) &&
+ breakableStack_.append(blockDepth_++) &&
+ continuableStack_.append(blockDepth_++);
+ }
+ bool popLoop() {
+ JS_ALWAYS_TRUE(continuableStack_.popCopy() == --blockDepth_);
+ JS_ALWAYS_TRUE(breakableStack_.popCopy() == --blockDepth_);
+ return encoder().writeOp(Op::End) &&
+ encoder().writeOp(Op::End);
+ }
+
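+    // The result type of an If is emitted as a patchable byte and
+    // back-patched via setIfType/popIf once the types of the arms are known.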
+ bool pushIf(size_t* typeAt) {
+ ++blockDepth_;
+ return encoder().writeOp(Op::If) &&
+ encoder().writePatchableFixedU7(typeAt);
+ }
+ bool switchToElse() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ return encoder().writeOp(Op::Else);
+ }
+ void setIfType(size_t typeAt, ExprType type) {
+ encoder().patchFixedU7(typeAt, uint8_t(type));
+ }
+ bool popIf() {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ return encoder().writeOp(Op::End);
+ }
+ bool popIf(size_t typeAt, ExprType type) {
+ MOZ_ASSERT(blockDepth_ > 0);
+ --blockDepth_;
+ if (!encoder().writeOp(Op::End))
+ return false;
+
+ setIfType(typeAt, type);
+ return true;
+ }
+
+ bool writeBreakIf() {
+ return writeBr(breakableStack_.back(), Op::BrIf);
+ }
+ bool writeContinueIf() {
+ return writeBr(continuableStack_.back(), Op::BrIf);
+ }
+ bool writeUnlabeledBreakOrContinue(bool isBreak) {
+        return writeBr(isBreak ? breakableStack_.back() : continuableStack_.back());
+ }
+ bool writeContinue() {
+ return writeBr(continuableStack_.back());
+ }
+
+ bool addLabels(const NameVector& labels, uint32_t relativeBreakDepth,
+ uint32_t relativeContinueDepth)
+ {
+ for (PropertyName* label : labels) {
+ if (!breakLabels_.putNew(label, blockDepth_ + relativeBreakDepth))
+ return false;
+ if (!continueLabels_.putNew(label, blockDepth_ + relativeContinueDepth))
+ return false;
+ }
+ return true;
+ }
+ void removeLabels(const NameVector& labels) {
+ for (PropertyName* label : labels) {
+ removeLabel(label, &breakLabels_);
+ removeLabel(label, &continueLabels_);
+ }
+ }
+ bool writeLabeledBreakOrContinue(PropertyName* label, bool isBreak) {
+ LabelMap& map = isBreak ? breakLabels_ : continueLabels_;
+ if (LabelMap::Ptr p = map.lookup(label))
+ return writeBr(p->value());
+ MOZ_CRASH("nonexistent label");
+ }
+
+ /*************************************************** Read-only interface */
+
+ const Local* lookupLocal(PropertyName* name) const {
+ if (auto p = locals_.lookup(name))
+ return &p->value();
+ return nullptr;
+ }
+
+ const ModuleValidator::Global* lookupGlobal(PropertyName* name) const {
+ if (locals_.has(name))
+ return nullptr;
+ return m_.lookupGlobal(name);
+ }
+
+ size_t numLocals() const { return locals_.count(); }
+
+ /**************************************************** Encoding interface */
+
+ Encoder& encoder() { return *encoder_; }
+
+ MOZ_MUST_USE bool writeInt32Lit(int32_t i32) {
+ return encoder().writeOp(Op::I32Const) &&
+ encoder().writeVarS32(i32);
+ }
+ MOZ_MUST_USE bool writeConstExpr(const NumLit& lit) {
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ case NumLit::BigUnsigned:
+ return writeInt32Lit(lit.toInt32());
+ case NumLit::Float:
+ return encoder().writeOp(Op::F32Const) &&
+ encoder().writeFixedF32(lit.toFloat());
+ case NumLit::Double:
+ return encoder().writeOp(Op::F64Const) &&
+ encoder().writeFixedF64(lit.toDouble());
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ return encoder().writeOp(Op::I8x16Const) &&
+ encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ return encoder().writeOp(Op::I16x8Const) &&
+ encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ return encoder().writeOp(Op::I32x4Const) &&
+ encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
+ case NumLit::Float32x4:
+ return encoder().writeOp(Op::F32x4Const) &&
+ encoder().writeFixedF32x4(lit.simdValue().asFloat32x4());
+ case NumLit::Bool8x16:
+ // Boolean vectors use the Int8x16 memory representation.
+ return encoder().writeOp(Op::B8x16Const) &&
+ encoder().writeFixedI8x16(lit.simdValue().asInt8x16());
+ case NumLit::Bool16x8:
+ // Boolean vectors use the Int16x8 memory representation.
+ return encoder().writeOp(Op::B16x8Const) &&
+ encoder().writeFixedI16x8(lit.simdValue().asInt16x8());
+ case NumLit::Bool32x4:
+ // Boolean vectors use the Int32x4 memory representation.
+ return encoder().writeOp(Op::B32x4Const) &&
+ encoder().writeFixedI32x4(lit.simdValue().asInt32x4());
+ case NumLit::OutOfRangeInt:
+ break;
+ }
+ MOZ_CRASH("unexpected literal type");
+ }
+ MOZ_MUST_USE bool writeCall(ParseNode* pn, Op op) {
+ return encoder().writeOp(op) &&
+ fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
+ }
+ MOZ_MUST_USE bool prepareCall(ParseNode* pn) {
+ return fg_.addCallSiteLineNum(m().tokenStream().srcCoords.lineNum(pn->pn_pos.begin));
+ }
+ MOZ_MUST_USE bool writeSimdOp(SimdType simdType, SimdOperation simdOp) {
+ Op op = SimdToOp(simdType, simdOp);
+ if (op == Op::Limit)
+ return true;
+ return encoder().writeOp(op);
+ }
+};
+
+} /* anonymous namespace */
+
+/*****************************************************************************/
+// asm.js type-checking and code-generation algorithm
+
+static bool
+CheckIdentifier(ModuleValidator& m, ParseNode* usepn, PropertyName* name)
+{
+ if (name == m.cx()->names().arguments || name == m.cx()->names().eval)
+ return m.failName(usepn, "'%s' is not an allowed identifier", name);
+ return true;
+}
+
+static bool
+CheckModuleLevelName(ModuleValidator& m, ParseNode* usepn, PropertyName* name)
+{
+ if (!CheckIdentifier(m, usepn, name))
+ return false;
+
+ if (name == m.moduleFunctionName() ||
+ name == m.globalArgumentName() ||
+ name == m.importArgumentName() ||
+ name == m.bufferArgumentName() ||
+ m.lookupGlobal(name))
+ {
+ return m.failName(usepn, "duplicate name '%s' not allowed", name);
+ }
+
+ return true;
+}
+
+static bool
+CheckFunctionHead(ModuleValidator& m, ParseNode* fn)
+{
+ JSFunction* fun = FunctionObject(fn);
+ if (fun->hasRest())
+ return m.fail(fn, "rest args not allowed");
+ if (fun->isExprBody())
+ return m.fail(fn, "expression closures not allowed");
+ if (fn->pn_funbox->hasDestructuringArgs)
+ return m.fail(fn, "destructuring args not allowed");
+ return true;
+}
+
+static bool
+CheckArgument(ModuleValidator& m, ParseNode* arg, PropertyName** name)
+{
+ *name = nullptr;
+
+ if (!arg->isKind(PNK_NAME))
+ return m.fail(arg, "argument is not a plain name");
+
+ if (!CheckIdentifier(m, arg, arg->name()))
+ return false;
+
+ *name = arg->name();
+ return true;
+}
+
+static bool
+CheckModuleArgument(ModuleValidator& m, ParseNode* arg, PropertyName** name)
+{
+ if (!CheckArgument(m, arg, name))
+ return false;
+
+ if (!CheckModuleLevelName(m, arg, *name))
+ return false;
+
+ return true;
+}
+
+static bool
+CheckModuleArguments(ModuleValidator& m, ParseNode* fn)
+{
+ unsigned numFormals;
+ ParseNode* arg1 = FunctionFormalParametersList(fn, &numFormals);
+ ParseNode* arg2 = arg1 ? NextNode(arg1) : nullptr;
+ ParseNode* arg3 = arg2 ? NextNode(arg2) : nullptr;
+
+ if (numFormals > 3)
+        return m.fail(fn, "asm.js modules take at most 3 arguments");
+
+ PropertyName* arg1Name = nullptr;
+ if (arg1 && !CheckModuleArgument(m, arg1, &arg1Name))
+ return false;
+ if (!m.initGlobalArgumentName(arg1Name))
+ return false;
+
+ PropertyName* arg2Name = nullptr;
+ if (arg2 && !CheckModuleArgument(m, arg2, &arg2Name))
+ return false;
+ if (!m.initImportArgumentName(arg2Name))
+ return false;
+
+ PropertyName* arg3Name = nullptr;
+ if (arg3 && !CheckModuleArgument(m, arg3, &arg3Name))
+ return false;
+ if (!m.initBufferArgumentName(arg3Name))
+ return false;
+
+ return true;
+}
+
+static bool
+CheckPrecedingStatements(ModuleValidator& m, ParseNode* stmtList)
+{
+ MOZ_ASSERT(stmtList->isKind(PNK_STATEMENTLIST));
+
+ ParseNode* stmt = ListHead(stmtList);
+ for (unsigned i = 0, n = ListLength(stmtList); i < n; i++) {
+ if (!IsIgnoredDirective(m.cx(), stmt))
+ return m.fail(stmt, "invalid asm.js statement");
+ }
+
+ return true;
+}
+
+static bool
+CheckGlobalVariableInitConstant(ModuleValidator& m, PropertyName* varName, ParseNode* initNode,
+ bool isConst)
+{
+ NumLit lit = ExtractNumericLiteral(m, initNode);
+ if (!lit.valid())
+ return m.fail(initNode, "global initializer is out of representable integer range");
+
+ Type canonicalType = Type::canonicalize(Type::lit(lit));
+ if (!canonicalType.isGlobalVarType())
+ return m.fail(initNode, "global variable type not allowed");
+
+ return m.addGlobalVarInit(varName, lit, canonicalType, isConst);
+}
+
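+// A type annotation is one of the coercion forms handled below. Using
+// illustrative names, the accepted annotations look like:
+//   x|0          -> Int
+//   +x           -> Double
+//   fround(x)    -> Float     (fround imported from glob.Math.fround)
+//   i4check(x)   -> Int32x4   (i4check imported from glob.SIMD.Int32x4.check)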
+static bool
+CheckTypeAnnotation(ModuleValidator& m, ParseNode* coercionNode, Type* coerceTo,
+ ParseNode** coercedExpr = nullptr)
+{
+ switch (coercionNode->getKind()) {
+ case PNK_BITOR: {
+ ParseNode* rhs = BitwiseRight(coercionNode);
+ uint32_t i;
+ if (!IsLiteralInt(m, rhs, &i) || i != 0)
+ return m.fail(rhs, "must use |0 for argument/return coercion");
+ *coerceTo = Type::Int;
+ if (coercedExpr)
+ *coercedExpr = BitwiseLeft(coercionNode);
+ return true;
+ }
+ case PNK_POS: {
+ *coerceTo = Type::Double;
+ if (coercedExpr)
+ *coercedExpr = UnaryKid(coercionNode);
+ return true;
+ }
+ case PNK_CALL: {
+ if (IsCoercionCall(m, coercionNode, coerceTo, coercedExpr))
+ return true;
+ break;
+ }
+ default:;
+ }
+
+ return m.fail(coercionNode, "must be of the form +x, x|0, fround(x), or a SIMD check(x)");
+}
+
+static bool
+CheckGlobalVariableInitImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode,
+ bool isConst)
+{
+ Type coerceTo;
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(m, initNode, &coerceTo, &coercedExpr))
+ return false;
+
+ if (!coercedExpr->isKind(PNK_DOT))
+ return m.failName(coercedExpr, "invalid import expression for global '%s'", varName);
+
+ if (!coerceTo.isGlobalVarType())
+ return m.fail(initNode, "global variable type not allowed");
+
+ ParseNode* base = DotBase(coercedExpr);
+ PropertyName* field = DotMember(coercedExpr);
+
+ PropertyName* importName = m.importArgumentName();
+ if (!importName)
+ return m.fail(coercedExpr, "cannot import without an asm.js foreign parameter");
+ if (!IsUseOfName(base, importName))
+ return m.failName(coercedExpr, "base of import expression must be '%s'", importName);
+
+ return m.addGlobalVarImport(varName, field, coerceTo, isConst);
+}
+
+static bool
+IsArrayViewCtorName(ModuleValidator& m, PropertyName* name, Scalar::Type* type)
+{
+ JSAtomState& names = m.cx()->names();
+ if (name == names.Int8Array) {
+ *type = Scalar::Int8;
+ } else if (name == names.Uint8Array) {
+ *type = Scalar::Uint8;
+ } else if (name == names.Int16Array) {
+ *type = Scalar::Int16;
+ } else if (name == names.Uint16Array) {
+ *type = Scalar::Uint16;
+ } else if (name == names.Int32Array) {
+ *type = Scalar::Int32;
+ } else if (name == names.Uint32Array) {
+ *type = Scalar::Uint32;
+ } else if (name == names.Float32Array) {
+ *type = Scalar::Float32;
+ } else if (name == names.Float64Array) {
+ *type = Scalar::Float64;
+ } else {
+ return false;
+ }
+ return true;
+}
+
+static bool
+CheckNewArrayViewArgs(ModuleValidator& m, ParseNode* ctorExpr, PropertyName* bufferName)
+{
+ ParseNode* bufArg = NextNode(ctorExpr);
+ if (!bufArg || NextNode(bufArg) != nullptr)
+ return m.fail(ctorExpr, "array view constructor takes exactly one argument");
+
+ if (!IsUseOfName(bufArg, bufferName))
+ return m.failName(bufArg, "argument to array view constructor must be '%s'", bufferName);
+
+ return true;
+}
+
+static bool
+CheckNewArrayView(ModuleValidator& m, PropertyName* varName, ParseNode* newExpr)
+{
+ PropertyName* globalName = m.globalArgumentName();
+ if (!globalName)
+ return m.fail(newExpr, "cannot create array view without an asm.js global parameter");
+
+ PropertyName* bufferName = m.bufferArgumentName();
+ if (!bufferName)
+ return m.fail(newExpr, "cannot create array view without an asm.js heap parameter");
+
+ ParseNode* ctorExpr = ListHead(newExpr);
+
+ PropertyName* field;
+ Scalar::Type type;
+ if (ctorExpr->isKind(PNK_DOT)) {
+ ParseNode* base = DotBase(ctorExpr);
+
+ if (!IsUseOfName(base, globalName))
+            return m.failName(base, "expecting '%s.*Array'", globalName);
+
+ field = DotMember(ctorExpr);
+ if (!IsArrayViewCtorName(m, field, &type))
+ return m.fail(ctorExpr, "could not match typed array name");
+ } else {
+ if (!ctorExpr->isKind(PNK_NAME))
+ return m.fail(ctorExpr, "expecting name of imported array view constructor");
+
+ PropertyName* globalName = ctorExpr->name();
+ const ModuleValidator::Global* global = m.lookupGlobal(globalName);
+ if (!global)
+ return m.failName(ctorExpr, "%s not found in module global scope", globalName);
+
+ if (global->which() != ModuleValidator::Global::ArrayViewCtor)
+ return m.failName(ctorExpr, "%s must be an imported array view constructor", globalName);
+
+ field = nullptr;
+ type = global->viewType();
+ }
+
+ if (!CheckNewArrayViewArgs(m, ctorExpr, bufferName))
+ return false;
+
+ return m.addArrayView(varName, type, field);
+}
+
+static bool
+IsSimdValidOperationType(SimdType type, SimdOperation op)
+{
+#define CASE(op) case SimdOperation::Fn_##op:
+ switch(type) {
+ case SimdType::Int8x16:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_INT8X16_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Int16x8:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_INT16X8_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Int32x4:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_INT32X4_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Uint8x16:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromInt8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_INT8X16_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Uint16x8:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromInt16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_INT16X8_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Uint32x4:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromInt32x4Bits:
+ FORALL_INT32X4_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Float32x4:
+ switch (op) {
+ case SimdOperation::Constructor:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ FORALL_FLOAT32X4_ASMJS_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ case SimdType::Bool8x16:
+ case SimdType::Bool16x8:
+ case SimdType::Bool32x4:
+ switch (op) {
+ case SimdOperation::Constructor:
+ FORALL_BOOL_SIMD_OP(CASE) return true;
+ default: return false;
+ }
+ break;
+ default:
+ // Unimplemented SIMD type.
+ return false;
+ }
+#undef CASE
+}
+
+static bool
+CheckGlobalMathImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+ PropertyName* field)
+{
+ // Math builtin, with the form glob.Math.[[builtin]]
+ ModuleValidator::MathBuiltin mathBuiltin;
+ if (!m.lookupStandardLibraryMathName(field, &mathBuiltin))
+ return m.failName(initNode, "'%s' is not a standard Math builtin", field);
+
+ switch (mathBuiltin.kind) {
+ case ModuleValidator::MathBuiltin::Function:
+ return m.addMathBuiltinFunction(varName, mathBuiltin.u.func, field);
+ case ModuleValidator::MathBuiltin::Constant:
+ return m.addMathBuiltinConstant(varName, mathBuiltin.u.cst, field);
+ default:
+ break;
+ }
+ MOZ_CRASH("unexpected or uninitialized math builtin type");
+}
+
+static bool
+CheckGlobalAtomicsImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+ PropertyName* field)
+{
+ // Atomics builtin, with the form glob.Atomics.[[builtin]]
+ AsmJSAtomicsBuiltinFunction func;
+ if (!m.lookupStandardLibraryAtomicsName(field, &func))
+ return m.failName(initNode, "'%s' is not a standard Atomics builtin", field);
+
+ return m.addAtomicsBuiltinFunction(varName, func, field);
+}
+
+static bool
+CheckGlobalSimdImport(ModuleValidator& m, ParseNode* initNode, PropertyName* varName,
+ PropertyName* field)
+{
+ if (!m.supportsSimd())
+ return m.fail(initNode, "SIMD is not supported on this platform");
+
+ // SIMD constructor, with the form glob.SIMD.[[type]]
+ SimdType simdType;
+ if (!IsSimdTypeName(m.cx()->names(), field, &simdType))
+ return m.failName(initNode, "'%s' is not a standard SIMD type", field);
+
+ // IsSimdTypeName will return true for any SIMD type supported by the VM.
+ //
+ // Since we may not support all of those SIMD types in asm.js, use the
+ // asm.js-specific IsSimdValidOperationType() to check if this specific
+ // constructor is supported in asm.js.
+ if (!IsSimdValidOperationType(simdType, SimdOperation::Constructor))
+ return m.failName(initNode, "'%s' is not a supported SIMD type", field);
+
+ return m.addSimdCtor(varName, simdType, field);
+}
+
+static bool
+CheckGlobalSimdOperationImport(ModuleValidator& m, const ModuleValidator::Global* global,
+ ParseNode* initNode, PropertyName* varName, PropertyName* opName)
+{
+ SimdType simdType = global->simdCtorType();
+ SimdOperation simdOp;
+ if (!m.lookupStandardSimdOpName(opName, &simdOp))
+ return m.failName(initNode, "'%s' is not a standard SIMD operation", opName);
+ if (!IsSimdValidOperationType(simdType, simdOp))
+ return m.failName(initNode, "'%s' is not an operation supported by the SIMD type", opName);
+ return m.addSimdOperation(varName, simdType, simdOp, opName);
+}
+
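+// Validates dotted imports: glob.Math.<name>, glob.Atomics.<name>,
+// glob.SIMD.<type>, glob.NaN, glob.Infinity, glob.<TypedArray> constructors,
+// foreign.<name> FFI imports, and <simdCtor>.<operation>.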
+static bool
+CheckGlobalDotImport(ModuleValidator& m, PropertyName* varName, ParseNode* initNode)
+{
+ ParseNode* base = DotBase(initNode);
+ PropertyName* field = DotMember(initNode);
+
+ if (base->isKind(PNK_DOT)) {
+ ParseNode* global = DotBase(base);
+ PropertyName* mathOrAtomicsOrSimd = DotMember(base);
+
+ PropertyName* globalName = m.globalArgumentName();
+ if (!globalName)
+ return m.fail(base, "import statement requires the module have a stdlib parameter");
+
+ if (!IsUseOfName(global, globalName)) {
+ if (global->isKind(PNK_DOT)) {
+ return m.failName(base, "imports can have at most two dot accesses "
+ "(e.g. %s.Math.sin)", globalName);
+ }
+ return m.failName(base, "expecting %s.*", globalName);
+ }
+
+ if (mathOrAtomicsOrSimd == m.cx()->names().Math)
+ return CheckGlobalMathImport(m, initNode, varName, field);
+ if (mathOrAtomicsOrSimd == m.cx()->names().Atomics)
+ return CheckGlobalAtomicsImport(m, initNode, varName, field);
+ if (mathOrAtomicsOrSimd == m.cx()->names().SIMD)
+ return CheckGlobalSimdImport(m, initNode, varName, field);
+        return m.failName(base, "expecting %s.{Math|Atomics|SIMD}", globalName);
+ }
+
+ if (!base->isKind(PNK_NAME))
+ return m.fail(base, "expected name of variable or parameter");
+
+ if (base->name() == m.globalArgumentName()) {
+ if (field == m.cx()->names().NaN)
+ return m.addGlobalConstant(varName, GenericNaN(), field);
+ if (field == m.cx()->names().Infinity)
+ return m.addGlobalConstant(varName, PositiveInfinity<double>(), field);
+
+ Scalar::Type type;
+ if (IsArrayViewCtorName(m, field, &type))
+ return m.addArrayViewCtor(varName, type, field);
+
+ return m.failName(initNode, "'%s' is not a standard constant or typed array name", field);
+ }
+
+ if (base->name() == m.importArgumentName())
+ return m.addFFI(varName, field);
+
+ const ModuleValidator::Global* global = m.lookupGlobal(base->name());
+ if (!global)
+ return m.failName(initNode, "%s not found in module global scope", base->name());
+
+ if (!global->isSimdCtor())
+ return m.failName(base, "expecting SIMD constructor name, got %s", field);
+
+ return CheckGlobalSimdOperationImport(m, global, initNode, varName, field);
+}
+
+static bool
+CheckModuleGlobal(ModuleValidator& m, ParseNode* var, bool isConst)
+{
+ if (!var->isKind(PNK_NAME))
+ return m.fail(var, "import variable is not a plain name");
+
+ if (!CheckModuleLevelName(m, var, var->name()))
+ return false;
+
+ ParseNode* initNode = MaybeInitializer(var);
+ if (!initNode)
+ return m.fail(var, "module import needs initializer");
+
+ if (IsNumericLiteral(m, initNode))
+ return CheckGlobalVariableInitConstant(m, var->name(), initNode, isConst);
+
+ if (initNode->isKind(PNK_BITOR) || initNode->isKind(PNK_POS) || initNode->isKind(PNK_CALL))
+ return CheckGlobalVariableInitImport(m, var->name(), initNode, isConst);
+
+ if (initNode->isKind(PNK_NEW))
+ return CheckNewArrayView(m, var->name(), initNode);
+
+ if (initNode->isKind(PNK_DOT))
+ return CheckGlobalDotImport(m, var->name(), initNode);
+
+ return m.fail(initNode, "unsupported import expression");
+}
+
+static bool
+CheckModuleProcessingDirectives(ModuleValidator& m)
+{
+ TokenStream& ts = m.parser().tokenStream;
+ while (true) {
+ bool matched;
+ if (!ts.matchToken(&matched, TOK_STRING, TokenStream::Operand))
+ return false;
+ if (!matched)
+ return true;
+
+ if (!IsIgnoredDirectiveName(m.cx(), ts.currentToken().atom()))
+ return m.failCurrentOffset("unsupported processing directive");
+
+ TokenKind tt;
+ if (!ts.getToken(&tt))
+ return false;
+ if (tt != TOK_SEMI)
+ return m.failCurrentOffset("expected semicolon after string literal");
+ }
+}
+
+static bool
+CheckModuleGlobals(ModuleValidator& m)
+{
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt))
+ return false;
+ if (!varStmt)
+ break;
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckModuleGlobal(m, var, varStmt->isKind(PNK_CONST)))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+ArgFail(FunctionValidator& f, PropertyName* argName, ParseNode* stmt)
+{
+ return f.failName(stmt, "expecting argument type declaration for '%s' of the "
+ "form 'arg = arg|0' or 'arg = +arg' or 'arg = fround(arg)'", argName);
+}
+
+static bool
+CheckArgumentType(FunctionValidator& f, ParseNode* stmt, PropertyName* name, Type* type)
+{
+ if (!stmt || !IsExpressionStatement(stmt))
+ return ArgFail(f, name, stmt ? stmt : f.fn());
+
+ ParseNode* initNode = ExpressionStatementExpr(stmt);
+ if (!initNode || !initNode->isKind(PNK_ASSIGN))
+ return ArgFail(f, name, stmt);
+
+ ParseNode* argNode = BinaryLeft(initNode);
+ ParseNode* coercionNode = BinaryRight(initNode);
+
+ if (!IsUseOfName(argNode, name))
+ return ArgFail(f, name, stmt);
+
+ ParseNode* coercedExpr;
+ if (!CheckTypeAnnotation(f.m(), coercionNode, type, &coercedExpr))
+ return false;
+
+ if (!type->isArgType())
+ return f.failName(stmt, "invalid type for argument '%s'", name);
+
+ if (!IsUseOfName(coercedExpr, name))
+ return ArgFail(f, name, stmt);
+
+ return true;
+}
+
+static bool
+CheckProcessingDirectives(ModuleValidator& m, ParseNode** stmtIter)
+{
+ ParseNode* stmt = *stmtIter;
+
+ while (stmt && IsIgnoredDirective(m.cx(), stmt))
+ stmt = NextNode(stmt);
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool
+CheckArguments(FunctionValidator& f, ParseNode** stmtIter, ValTypeVector* argTypes)
+{
+ ParseNode* stmt = *stmtIter;
+
+ unsigned numFormals;
+ ParseNode* argpn = FunctionFormalParametersList(f.fn(), &numFormals);
+
+ for (unsigned i = 0; i < numFormals; i++, argpn = NextNode(argpn), stmt = NextNode(stmt)) {
+ PropertyName* name;
+ if (!CheckArgument(f.m(), argpn, &name))
+ return false;
+
+ Type type;
+ if (!CheckArgumentType(f, stmt, name, &type))
+ return false;
+
+ if (!argTypes->append(type.canonicalToValType()))
+ return false;
+
+ if (!f.addLocal(argpn, name, type))
+ return false;
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool
+IsLiteralOrConst(FunctionValidator& f, ParseNode* pn, NumLit* lit)
+{
+ if (pn->isKind(PNK_NAME)) {
+ const ModuleValidator::Global* global = f.lookupGlobal(pn->name());
+ if (!global || global->which() != ModuleValidator::Global::ConstantLiteral)
+ return false;
+
+ *lit = global->constLiteralValue();
+ return true;
+ }
+
+ bool isSimd = false;
+ if (!IsNumericLiteral(f.m(), pn, &isSimd))
+ return false;
+
+ if (isSimd)
+ f.setUsesSimd();
+
+ *lit = ExtractNumericLiteral(f.m(), pn);
+ return true;
+}
+
+static bool
+CheckFinalReturn(FunctionValidator& f, ParseNode* lastNonEmptyStmt)
+{
+ if (!f.encoder().writeOp(Op::End))
+ return false;
+
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(ExprType::Void);
+ return true;
+ }
+
+ if (!lastNonEmptyStmt->isKind(PNK_RETURN) && !IsVoid(f.returnedType()))
+ return f.fail(lastNonEmptyStmt, "void incompatible with previous return type");
+
+ return true;
+}
+
+static bool
+CheckVariable(FunctionValidator& f, ParseNode* var, ValTypeVector* types, Vector<NumLit>* inits)
+{
+ if (!var->isKind(PNK_NAME))
+ return f.fail(var, "local variable is not a plain name");
+
+ PropertyName* name = var->name();
+
+ if (!CheckIdentifier(f.m(), var, name))
+ return false;
+
+ ParseNode* initNode = MaybeInitializer(var);
+ if (!initNode)
+ return f.failName(var, "var '%s' needs explicit type declaration via an initial value", name);
+
+ NumLit lit;
+ if (!IsLiteralOrConst(f, initNode, &lit))
+ return f.failName(var, "var '%s' initializer must be literal or const literal", name);
+
+ if (!lit.valid())
+ return f.failName(var, "var '%s' initializer out of range", name);
+
+ Type type = Type::canonicalize(Type::lit(lit));
+
+ return f.addLocal(var, name, type) &&
+ types->append(type.canonicalToValType()) &&
+ inits->append(lit);
+}
+
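+// Local variable declarations become wasm local entries. Wasm locals are
+// implicitly zero-initialized, so only non-zero initializers need the
+// explicit SetLocal stores emitted below.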
+static bool
+CheckVariables(FunctionValidator& f, ParseNode** stmtIter)
+{
+ ParseNode* stmt = *stmtIter;
+
+ uint32_t firstVar = f.numLocals();
+
+ ValTypeVector types;
+ Vector<NumLit> inits(f.cx());
+
+ for (; stmt && stmt->isKind(PNK_VAR); stmt = NextNonEmptyStatement(stmt)) {
+ for (ParseNode* var = VarListHead(stmt); var; var = NextNode(var)) {
+ if (!CheckVariable(f, var, &types, &inits))
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(f.encoder().empty());
+
+ if (!EncodeLocalEntries(f.encoder(), types))
+ return false;
+
+ for (uint32_t i = 0; i < inits.length(); i++) {
+ NumLit lit = inits[i];
+ if (lit.isZeroBits())
+ continue;
+ if (!f.writeConstExpr(lit))
+ return false;
+ if (!f.encoder().writeOp(Op::SetLocal))
+ return false;
+ if (!f.encoder().writeVarU32(firstVar + i))
+ return false;
+ }
+
+ *stmtIter = stmt;
+ return true;
+}
+
+static bool
+CheckExpr(FunctionValidator& f, ParseNode* op, Type* type);
+
+static bool
+CheckNumericLiteral(FunctionValidator& f, ParseNode* num, Type* type)
+{
+ NumLit lit = ExtractNumericLiteral(f.m(), num);
+ if (!lit.valid())
+ return f.fail(num, "numeric literal out of representable integer range");
+ *type = Type::lit(lit);
+ return f.writeConstExpr(lit);
+}
+
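+// Check a bare name reference: a local is read with GetLocal, a module-level
+// variable or imported constant with GetGlobal, and a constant literal is
+// inlined directly.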
+static bool
+CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type)
+{
+ PropertyName* name = varRef->name();
+
+ if (const FunctionValidator::Local* local = f.lookupLocal(name)) {
+ if (!f.encoder().writeOp(Op::GetLocal))
+ return false;
+ if (!f.encoder().writeVarU32(local->slot))
+ return false;
+ *type = local->type;
+ return true;
+ }
+
+ if (const ModuleValidator::Global* global = f.lookupGlobal(name)) {
+ switch (global->which()) {
+ case ModuleValidator::Global::ConstantLiteral:
+ *type = global->varOrConstType();
+ return f.writeConstExpr(global->constLiteralValue());
+ case ModuleValidator::Global::ConstantImport:
+ case ModuleValidator::Global::Variable: {
+ *type = global->varOrConstType();
+ return f.encoder().writeOp(Op::GetGlobal) &&
+ f.encoder().writeVarU32(global->varOrConstIndex());
+ }
+ case ModuleValidator::Global::Function:
+ case ModuleValidator::Global::FFI:
+ case ModuleValidator::Global::MathBuiltinFunction:
+ case ModuleValidator::Global::AtomicsBuiltinFunction:
+ case ModuleValidator::Global::FuncPtrTable:
+ case ModuleValidator::Global::ArrayView:
+ case ModuleValidator::Global::ArrayViewCtor:
+ case ModuleValidator::Global::SimdCtor:
+ case ModuleValidator::Global::SimdOp:
+ break;
+ }
+ return f.failName(varRef, "'%s' may not be accessed by ordinary expressions", name);
+ }
+
+ return f.failName(varRef, "'%s' not found in local or asm.js module scope", name);
+}
+
+static inline bool
+IsLiteralOrConstInt(FunctionValidator& f, ParseNode* pn, uint32_t* u32)
+{
+ NumLit lit;
+ if (!IsLiteralOrConst(f, pn, &lit))
+ return false;
+
+ return IsLiteralInt(lit, u32);
+}
+
+static const int32_t NoMask = -1;
+static const bool YesSimd = true;
+static const bool NoSimd = false;
+
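+// Check a heap access index and emit the byte offset. A constant index is
+// folded into a literal; otherwise the index must have the form
+// 'expr >> log2(elemSize)' (or be unshifted for byte-sized and SIMD
+// accesses), and the low bits are cleared with an explicit mask when needed.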
+static bool
+CheckArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+ bool isSimd, Scalar::Type* viewType)
+{
+ if (!viewName->isKind(PNK_NAME))
+ return f.fail(viewName, "base of array access must be a typed array view name");
+
+ const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
+ if (!global || !global->isAnyArrayView())
+ return f.fail(viewName, "base of array access must be a typed array view name");
+
+ *viewType = global->viewType();
+
+ uint32_t index;
+ if (IsLiteralOrConstInt(f, indexExpr, &index)) {
+ uint64_t byteOffset = uint64_t(index) << TypedArrayShift(*viewType);
+ uint64_t width = isSimd ? Simd128DataSize : TypedArrayElemSize(*viewType);
+ if (!f.m().tryConstantAccess(byteOffset, width))
+ return f.fail(indexExpr, "constant index out of range");
+
+ return f.writeInt32Lit(byteOffset);
+ }
+
+ // Mask off the low bits to account for the clearing effect of a right shift
+ // followed by the left shift implicit in the array access. E.g., H32[i>>2]
+ // loses the low two bits.
+ int32_t mask = ~(TypedArrayElemSize(*viewType) - 1);
+
+ if (indexExpr->isKind(PNK_RSH)) {
+ ParseNode* shiftAmountNode = BitwiseRight(indexExpr);
+
+ uint32_t shift;
+ if (!IsLiteralInt(f.m(), shiftAmountNode, &shift))
+ return f.failf(shiftAmountNode, "shift amount must be constant");
+
+ unsigned requiredShift = TypedArrayShift(*viewType);
+ if (shift != requiredShift)
+ return f.failf(shiftAmountNode, "shift amount must be %u", requiredShift);
+
+ ParseNode* pointerNode = BitwiseLeft(indexExpr);
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType))
+ return false;
+
+ if (!pointerType.isIntish())
+ return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
+ } else {
+ // For SIMD access, and legacy scalar access compatibility, accept
+ // Int8/Uint8 accesses with no shift.
+ if (TypedArrayShift(*viewType) != 0)
+ return f.fail(indexExpr, "index expression isn't shifted; must be an Int8/Uint8 access");
+
+ MOZ_ASSERT(mask == NoMask);
+
+ ParseNode* pointerNode = indexExpr;
+
+ Type pointerType;
+ if (!CheckExpr(f, pointerNode, &pointerType))
+ return false;
+
+ if (isSimd) {
+ if (!pointerType.isIntish())
+ return f.failf(pointerNode, "%s is not a subtype of intish", pointerType.toChars());
+ } else {
+ if (!pointerType.isInt())
+ return f.failf(pointerNode, "%s is not a subtype of int", pointerType.toChars());
+ }
+ }
+
+    // Don't generate the mask op when there is no need for it, which can
+    // happen for a shift of zero or a SIMD access.
+ if (mask != NoMask) {
+ return f.writeInt32Lit(mask) &&
+ f.encoder().writeOp(Op::I32And);
+ }
+
+ return true;
+}
+
+static bool
+CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+ bool isSimd, Scalar::Type* viewType)
+{
+ return CheckArrayAccess(f, viewName, indexExpr, isSimd, viewType);
+}
+
+static bool
+WriteArrayAccessFlags(FunctionValidator& f, Scalar::Type viewType)
+{
+ // asm.js only has naturally-aligned accesses.
+ size_t align = TypedArrayElemSize(viewType);
+ MOZ_ASSERT(IsPowerOfTwo(align));
+ if (!f.encoder().writeFixedU8(CeilingLog2(align)))
+ return false;
+
+ // asm.js doesn't have constant offsets, so just encode a 0.
+ if (!f.encoder().writeVarU32(0))
+ return false;
+
+ return true;
+}
+
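+// Check a typed-array load such as H32[i>>2] and emit the corresponding wasm
+// load opcode; the result is intish, float? or double? depending on the
+// view's element type.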
+static bool
+CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type)
+{
+ Scalar::Type viewType;
+
+ if (!CheckAndPrepareArrayAccess(f, ElemBase(elem), ElemIndex(elem), NoSimd, &viewType))
+ return false;
+
+ switch (viewType) {
+ case Scalar::Int8: if (!f.encoder().writeOp(Op::I32Load8S)) return false; break;
+ case Scalar::Uint8: if (!f.encoder().writeOp(Op::I32Load8U)) return false; break;
+ case Scalar::Int16: if (!f.encoder().writeOp(Op::I32Load16S)) return false; break;
+ case Scalar::Uint16: if (!f.encoder().writeOp(Op::I32Load16U)) return false; break;
+ case Scalar::Uint32:
+ case Scalar::Int32: if (!f.encoder().writeOp(Op::I32Load)) return false; break;
+ case Scalar::Float32: if (!f.encoder().writeOp(Op::F32Load)) return false; break;
+ case Scalar::Float64: if (!f.encoder().writeOp(Op::F64Load)) return false; break;
+ default: MOZ_CRASH("unexpected scalar type");
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ *type = Type::Intish;
+ break;
+ case Scalar::Float32:
+ *type = Type::MaybeFloat;
+ break;
+ case Scalar::Float64:
+ *type = Type::MaybeDouble;
+ break;
+ default: MOZ_CRASH("Unexpected array type");
+ }
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ return true;
+}
+
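+// Check a typed-array store. The RHS is evaluated and checked against the
+// view's element type, and a tee-store opcode is emitted so the stored value
+// is also the value of the whole assignment expression.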
+static bool
+CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
+{
+ Scalar::Type viewType;
+ if (!CheckAndPrepareArrayAccess(f, ElemBase(lhs), ElemIndex(lhs), NoSimd, &viewType))
+ return false;
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ if (!rhsType.isIntish())
+ return f.failf(lhs, "%s is not a subtype of intish", rhsType.toChars());
+ break;
+ case Scalar::Float32:
+ if (!rhsType.isMaybeDouble() && !rhsType.isFloatish())
+ return f.failf(lhs, "%s is not a subtype of double? or floatish", rhsType.toChars());
+ break;
+ case Scalar::Float64:
+ if (!rhsType.isMaybeFloat() && !rhsType.isMaybeDouble())
+ return f.failf(lhs, "%s is not a subtype of float? or double?", rhsType.toChars());
+ break;
+ default:
+ MOZ_CRASH("Unexpected view type");
+ }
+
+ switch (viewType) {
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ if (!f.encoder().writeOp(Op::I32TeeStore8))
+ return false;
+ break;
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ if (!f.encoder().writeOp(Op::I32TeeStore16))
+ return false;
+ break;
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ if (!f.encoder().writeOp(Op::I32TeeStore))
+ return false;
+ break;
+ case Scalar::Float32:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(Op::F32TeeStore))
+ return false;
+ } else {
+ if (!f.encoder().writeOp(Op::F64TeeStoreF32))
+ return false;
+ }
+ break;
+ case Scalar::Float64:
+ if (rhsType.isFloatish()) {
+ if (!f.encoder().writeOp(Op::F32TeeStoreF64))
+ return false;
+ } else {
+ if (!f.encoder().writeOp(Op::F64TeeStore))
+ return false;
+ }
+ break;
+ default: MOZ_CRASH("unexpected scalar type");
+ }
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = rhsType;
+ return true;
+}
+
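+// Check an assignment to a plain name: a local is written with TeeLocal, a
+// mutable module-level variable with TeeGlobal. In both cases the RHS must
+// be a subtype of the target's declared type.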
+static bool
+CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type)
+{
+ RootedPropertyName name(f.cx(), lhs->name());
+
+ if (const FunctionValidator::Local* lhsVar = f.lookupLocal(name)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (!f.encoder().writeOp(Op::TeeLocal))
+ return false;
+ if (!f.encoder().writeVarU32(lhsVar->slot))
+ return false;
+
+ if (!(rhsType <= lhsVar->type)) {
+ return f.failf(lhs, "%s is not a subtype of %s",
+ rhsType.toChars(), lhsVar->type.toChars());
+ }
+ *type = rhsType;
+ return true;
+ }
+
+ if (const ModuleValidator::Global* global = f.lookupGlobal(name)) {
+ if (global->which() != ModuleValidator::Global::Variable)
+ return f.failName(lhs, "'%s' is not a mutable variable", name);
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ Type globType = global->varOrConstType();
+ if (!(rhsType <= globType))
+ return f.failf(lhs, "%s is not a subtype of %s", rhsType.toChars(), globType.toChars());
+ if (!f.encoder().writeOp(Op::TeeGlobal))
+ return false;
+ if (!f.encoder().writeVarU32(global->varOrConstIndex()))
+ return false;
+
+ *type = rhsType;
+ return true;
+ }
+
+ return f.failName(lhs, "'%s' not found in local or asm.js module scope", name);
+}
+
+static bool
+CheckAssign(FunctionValidator& f, ParseNode* assign, Type* type)
+{
+ MOZ_ASSERT(assign->isKind(PNK_ASSIGN));
+
+ ParseNode* lhs = BinaryLeft(assign);
+ ParseNode* rhs = BinaryRight(assign);
+
+ if (lhs->getKind() == PNK_ELEM)
+ return CheckStoreArray(f, lhs, rhs, type);
+
+ if (lhs->getKind() == PNK_NAME)
+ return CheckAssignName(f, lhs, rhs, type);
+
+ return f.fail(assign, "left-hand side of assignment must be a variable or array access");
+}
+
+static bool
+CheckMathIMul(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 2)
+ return f.fail(call, "Math.imul must be passed 2 arguments");
+
+ ParseNode* lhs = CallArgList(call);
+ ParseNode* rhs = NextNode(lhs);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (!lhsType.isIntish())
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ if (!rhsType.isIntish())
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+
+ *type = Type::Signed;
+ return f.encoder().writeOp(Op::I32Mul);
+}
+
+static bool
+CheckMathClz32(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 1)
+ return f.fail(call, "Math.clz32 must be passed 1 argument");
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType))
+ return false;
+
+ if (!argType.isIntish())
+ return f.failf(arg, "%s is not a subtype of intish", argType.toChars());
+
+ *type = Type::Fixnum;
+ return f.encoder().writeOp(Op::I32Clz);
+}
+
+static bool
+CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 1)
+ return f.fail(call, "Math.abs must be passed 1 argument");
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType))
+ return false;
+
+ if (argType.isSigned()) {
+ *type = Type::Unsigned;
+ return f.encoder().writeOp(Op::I32Abs);
+ }
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Abs);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Abs);
+ }
+
+ return f.failf(call, "%s is not a subtype of signed, float? or double?", argType.toChars());
+}
+
+static bool
+CheckMathSqrt(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 1)
+ return f.fail(call, "Math.sqrt must be passed 1 argument");
+
+ ParseNode* arg = CallArgList(call);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType))
+ return false;
+
+ if (argType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Sqrt);
+ }
+
+ if (argType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Sqrt);
+ }
+
+ return f.failf(call, "%s is neither a subtype of double? nor float?", argType.toChars());
+}
+
+static bool
+CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* type)
+{
+ if (CallArgListLength(callNode) < 2)
+ return f.fail(callNode, "Math.min/max must be passed at least 2 arguments");
+
+ ParseNode* firstArg = CallArgList(callNode);
+ Type firstType;
+ if (!CheckExpr(f, firstArg, &firstType))
+ return false;
+
+ Op op;
+ if (firstType.isMaybeDouble()) {
+ *type = Type::Double;
+ firstType = Type::MaybeDouble;
+ op = isMax ? Op::F64Max : Op::F64Min;
+ } else if (firstType.isMaybeFloat()) {
+ *type = Type::Float;
+ firstType = Type::MaybeFloat;
+ op = isMax ? Op::F32Max : Op::F32Min;
+ } else if (firstType.isSigned()) {
+ *type = Type::Signed;
+ firstType = Type::Signed;
+ op = isMax ? Op::I32Max : Op::I32Min;
+ } else {
+ return f.failf(firstArg, "%s is not a subtype of double?, float? or signed",
+ firstType.toChars());
+ }
+
+ unsigned numArgs = CallArgListLength(callNode);
+ ParseNode* nextArg = NextNode(firstArg);
+ for (unsigned i = 1; i < numArgs; i++, nextArg = NextNode(nextArg)) {
+ Type nextType;
+ if (!CheckExpr(f, nextArg, &nextType))
+ return false;
+ if (!(nextType <= firstType))
+ return f.failf(nextArg, "%s is not a subtype of %s", nextType.toChars(), firstType.toChars());
+
+ if (!f.encoder().writeOp(op))
+ return false;
+ }
+
+ return true;
+}
+
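+// Check the array/index pair of an Atomics.* call: the access is validated
+// like an ordinary heap access and the view must additionally be an integer
+// array view.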
+static bool
+CheckSharedArrayAtomicAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* indexExpr,
+ Scalar::Type* viewType)
+{
+ if (!CheckAndPrepareArrayAccess(f, viewName, indexExpr, NoSimd, viewType))
+ return false;
+
+    // The global is guaranteed to be present and well-formed, since
+    // CheckArrayAccess has already checked it.
+ const ModuleValidator::Global* global = f.lookupGlobal(viewName->name());
+ if (global->which() != ModuleValidator::Global::ArrayView)
+ return f.fail(viewName, "base of array access must be a typed array view");
+
+ MOZ_ASSERT(f.m().atomicsPresent());
+
+ switch (*viewType) {
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32:
+ return true;
+ default:
+ return f.failf(viewName, "not an integer array");
+ }
+
+ return true;
+}
+
+static bool
+WriteAtomicOperator(FunctionValidator& f, Op opcode, Scalar::Type viewType)
+{
+ return f.encoder().writeOp(opcode) &&
+ f.encoder().writeFixedU8(viewType);
+}
+
+static bool
+CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 2)
+ return f.fail(call, "Atomics.load must be passed 2 arguments");
+
+ ParseNode* arrayArg = CallArgList(call);
+ ParseNode* indexArg = NextNode(arrayArg);
+
+ Scalar::Type viewType;
+ if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
+ return false;
+
+ if (!WriteAtomicOperator(f, Op::I32AtomicsLoad, viewType))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 3)
+ return f.fail(call, "Atomics.store must be passed 3 arguments");
+
+ ParseNode* arrayArg = CallArgList(call);
+ ParseNode* indexArg = NextNode(arrayArg);
+ ParseNode* valueArg = NextNode(indexArg);
+
+ Type rhsType;
+ if (!CheckExpr(f, valueArg, &rhsType))
+ return false;
+
+ if (!rhsType.isIntish())
+        return f.failf(valueArg, "%s is not a subtype of intish", rhsType.toChars());
+
+ Scalar::Type viewType;
+ if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
+ return false;
+
+ if (!WriteAtomicOperator(f, Op::I32AtomicsStore, viewType))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = rhsType;
+ return true;
+}
+
+static bool
+CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op)
+{
+ if (CallArgListLength(call) != 3)
+ return f.fail(call, "Atomics binary operator must be passed 3 arguments");
+
+ ParseNode* arrayArg = CallArgList(call);
+ ParseNode* indexArg = NextNode(arrayArg);
+ ParseNode* valueArg = NextNode(indexArg);
+
+ Type valueArgType;
+ if (!CheckExpr(f, valueArg, &valueArgType))
+ return false;
+
+ if (!valueArgType.isIntish())
+ return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
+
+ Scalar::Type viewType;
+ if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
+ return false;
+
+ if (!WriteAtomicOperator(f, Op::I32AtomicsBinOp, viewType))
+ return false;
+ if (!f.encoder().writeFixedU8(uint8_t(op)))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckAtomicsIsLockFree(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 1)
+ return f.fail(call, "Atomics.isLockFree must be passed 1 argument");
+
+ ParseNode* sizeArg = CallArgList(call);
+
+ uint32_t size;
+ if (!IsLiteralInt(f.m(), sizeArg, &size))
+ return f.fail(sizeArg, "Atomics.isLockFree requires an integer literal argument");
+
+ *type = Type::Int;
+ return f.writeInt32Lit(AtomicOperations::isLockfree(size));
+}
+
+static bool
+CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 4)
+ return f.fail(call, "Atomics.compareExchange must be passed 4 arguments");
+
+ ParseNode* arrayArg = CallArgList(call);
+ ParseNode* indexArg = NextNode(arrayArg);
+ ParseNode* oldValueArg = NextNode(indexArg);
+ ParseNode* newValueArg = NextNode(oldValueArg);
+
+ Type oldValueArgType;
+ if (!CheckExpr(f, oldValueArg, &oldValueArgType))
+ return false;
+
+ Type newValueArgType;
+ if (!CheckExpr(f, newValueArg, &newValueArgType))
+ return false;
+
+ if (!oldValueArgType.isIntish())
+ return f.failf(oldValueArg, "%s is not a subtype of intish", oldValueArgType.toChars());
+
+ if (!newValueArgType.isIntish())
+ return f.failf(newValueArg, "%s is not a subtype of intish", newValueArgType.toChars());
+
+ Scalar::Type viewType;
+ if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
+ return false;
+
+ if (!WriteAtomicOperator(f, Op::I32AtomicsCompareExchange, viewType))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type)
+{
+ if (CallArgListLength(call) != 3)
+ return f.fail(call, "Atomics.exchange must be passed 3 arguments");
+
+ ParseNode* arrayArg = CallArgList(call);
+ ParseNode* indexArg = NextNode(arrayArg);
+ ParseNode* valueArg = NextNode(indexArg);
+
+ Type valueArgType;
+ if (!CheckExpr(f, valueArg, &valueArgType))
+ return false;
+
+ if (!valueArgType.isIntish())
+        return f.failf(valueArg, "%s is not a subtype of intish", valueArgType.toChars());
+
+ Scalar::Type viewType;
+ if (!CheckSharedArrayAtomicAccess(f, arrayArg, indexArg, &viewType))
+ return false;
+
+ if (!WriteAtomicOperator(f, Op::I32AtomicsExchange, viewType))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSAtomicsBuiltinFunction func,
+ Type* type)
+{
+ f.setUsesAtomics();
+
+ switch (func) {
+ case AsmJSAtomicsBuiltin_compareExchange:
+ return CheckAtomicsCompareExchange(f, callNode, type);
+ case AsmJSAtomicsBuiltin_exchange:
+ return CheckAtomicsExchange(f, callNode, type);
+ case AsmJSAtomicsBuiltin_load:
+ return CheckAtomicsLoad(f, callNode, type);
+ case AsmJSAtomicsBuiltin_store:
+ return CheckAtomicsStore(f, callNode, type);
+ case AsmJSAtomicsBuiltin_add:
+ return CheckAtomicsBinop(f, callNode, type, AtomicFetchAddOp);
+ case AsmJSAtomicsBuiltin_sub:
+ return CheckAtomicsBinop(f, callNode, type, AtomicFetchSubOp);
+ case AsmJSAtomicsBuiltin_and:
+ return CheckAtomicsBinop(f, callNode, type, AtomicFetchAndOp);
+ case AsmJSAtomicsBuiltin_or:
+ return CheckAtomicsBinop(f, callNode, type, AtomicFetchOrOp);
+ case AsmJSAtomicsBuiltin_xor:
+ return CheckAtomicsBinop(f, callNode, type, AtomicFetchXorOp);
+ case AsmJSAtomicsBuiltin_isLockFree:
+ return CheckAtomicsIsLockFree(f, callNode, type);
+ default:
+ MOZ_CRASH("unexpected atomicsBuiltin function");
+ }
+}
+
+typedef bool (*CheckArgType)(FunctionValidator& f, ParseNode* argNode, Type type);
+
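+// Check and emit every argument of a call, validating each with the given
+// CheckArgType policy and appending its canonical value type to *args.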
+template <CheckArgType checkArg>
+static bool
+CheckCallArgs(FunctionValidator& f, ParseNode* callNode, ValTypeVector* args)
+{
+ ParseNode* argNode = CallArgList(callNode);
+ for (unsigned i = 0; i < CallArgListLength(callNode); i++, argNode = NextNode(argNode)) {
+ Type type;
+ if (!CheckExpr(f, argNode, &type))
+ return false;
+
+ if (!checkArg(f, argNode, type))
+ return false;
+
+ if (!args->append(Type::canonicalize(type).canonicalToValType()))
+ return false;
+ }
+ return true;
+}
+
+static bool
+CheckSignatureAgainstExisting(ModuleValidator& m, ParseNode* usepn, const Sig& sig, const Sig& existing)
+{
+ if (sig.args().length() != existing.args().length()) {
+ return m.failf(usepn, "incompatible number of arguments (%" PRIuSIZE
+ " here vs. %" PRIuSIZE " before)",
+ sig.args().length(), existing.args().length());
+ }
+
+ for (unsigned i = 0; i < sig.args().length(); i++) {
+ if (sig.arg(i) != existing.arg(i)) {
+ return m.failf(usepn, "incompatible type for argument %u: (%s here vs. %s before)", i,
+ ToCString(sig.arg(i)), ToCString(existing.arg(i)));
+ }
+ }
+
+ if (sig.ret() != existing.ret()) {
+ return m.failf(usepn, "%s incompatible with previous return of type %s",
+ ToCString(sig.ret()), ToCString(existing.ret()));
+ }
+
+ MOZ_ASSERT(sig == existing);
+ return true;
+}
+
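+// Look up (or forward-declare) an internal function by name and verify that
+// this use's signature matches any signature previously recorded for it.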
+static bool
+CheckFunctionSignature(ModuleValidator& m, ParseNode* usepn, Sig&& sig, PropertyName* name,
+ ModuleValidator::Func** func)
+{
+ ModuleValidator::Func* existing = m.lookupFunction(name);
+ if (!existing) {
+ if (!CheckModuleLevelName(m, usepn, name))
+ return false;
+ return m.addFunction(name, usepn->pn_pos.begin, Move(sig), func);
+ }
+
+ if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcSig(existing->index())))
+ return false;
+
+ *func = existing;
+ return true;
+}
+
+static bool
+CheckIsArgType(FunctionValidator& f, ParseNode* argNode, Type type)
+{
+ if (!type.isArgType())
+ return f.failf(argNode,
+ "%s is not a subtype of int, float, double, or an allowed SIMD type",
+ type.toChars());
+
+ return true;
+}
+
+static bool
+CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calleeName,
+ Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args))
+ return false;
+
+ Sig sig(Move(args), ret.canonicalToExprType());
+
+ ModuleValidator::Func* callee;
+ if (!CheckFunctionSignature(f.m(), callNode, Move(sig), calleeName, &callee))
+ return false;
+
+ if (!f.writeCall(callNode, Op::Call))
+ return false;
+
+ if (!f.encoder().writeVarU32(callee->index()))
+ return false;
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool
+CheckFuncPtrTableAgainstExisting(ModuleValidator& m, ParseNode* usepn, PropertyName* name,
+ Sig&& sig, unsigned mask, uint32_t* funcPtrTableIndex)
+{
+ if (const ModuleValidator::Global* existing = m.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidator::Global::FuncPtrTable)
+ return m.failName(usepn, "'%s' is not a function-pointer table", name);
+
+ ModuleValidator::FuncPtrTable& table = m.funcPtrTable(existing->funcPtrTableIndex());
+ if (mask != table.mask())
+ return m.failf(usepn, "mask does not match previous value (%u)", table.mask());
+
+ if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().sig(table.sigIndex())))
+ return false;
+
+ *funcPtrTableIndex = existing->funcPtrTableIndex();
+ return true;
+ }
+
+ if (!CheckModuleLevelName(m, usepn, name))
+ return false;
+
+ if (!m.declareFuncPtrTable(Move(sig), name, usepn->pn_pos.begin, mask, funcPtrTableIndex))
+ return false;
+
+ return true;
+}
+
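+// Check a function-pointer call of the form table[expr & mask](...). The
+// mask must be a constant of the form 2^n-1 and must be consistent across
+// all uses of the table; the call is emitted as OldCallIndirect followed by
+// the table's signature index.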
+static bool
+CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ ParseNode* callee = CallCallee(callNode);
+ ParseNode* tableNode = ElemBase(callee);
+ ParseNode* indexExpr = ElemIndex(callee);
+
+ if (!tableNode->isKind(PNK_NAME))
+ return f.fail(tableNode, "expecting name of function-pointer array");
+
+ PropertyName* name = tableNode->name();
+ if (const ModuleValidator::Global* existing = f.lookupGlobal(name)) {
+ if (existing->which() != ModuleValidator::Global::FuncPtrTable)
+ return f.failName(tableNode, "'%s' is not the name of a function-pointer array", name);
+ }
+
+ if (!indexExpr->isKind(PNK_BITAND))
+ return f.fail(indexExpr, "function-pointer table index expression needs & mask");
+
+ ParseNode* indexNode = BitwiseLeft(indexExpr);
+ ParseNode* maskNode = BitwiseRight(indexExpr);
+
+ uint32_t mask;
+ if (!IsLiteralInt(f.m(), maskNode, &mask) || mask == UINT32_MAX || !IsPowerOfTwo(mask + 1))
+ return f.fail(maskNode, "function-pointer table index mask value must be a power of two minus 1");
+
+ Type indexType;
+ if (!CheckExpr(f, indexNode, &indexType))
+ return false;
+
+ if (!indexType.isIntish())
+ return f.failf(indexNode, "%s is not a subtype of intish", indexType.toChars());
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsArgType>(f, callNode, &args))
+ return false;
+
+ Sig sig(Move(args), ret.canonicalToExprType());
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(f.m(), tableNode, name, Move(sig), mask, &tableIndex))
+ return false;
+
+ if (!f.writeCall(callNode, Op::OldCallIndirect))
+ return false;
+
+    // Write the signature index expected for calls through this table.
+ if (!f.encoder().writeVarU32(f.m().funcPtrTable(tableIndex).sigIndex()))
+ return false;
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool
+CheckIsExternType(FunctionValidator& f, ParseNode* argNode, Type type)
+{
+ if (!type.isExtern())
+ return f.failf(argNode, "%s is not a subtype of extern", type.toChars());
+ return true;
+}
+
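+// Check a call through an FFI import: arguments must be extern types, the
+// return type may not be float or SIMD, and the call is emitted as a plain
+// Call to the declared import's function index.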
+static bool
+CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ PropertyName* calleeName = CallCallee(callNode)->name();
+
+ if (ret.isFloat())
+ return f.fail(callNode, "FFI calls can't return float");
+ if (ret.isSimd())
+ return f.fail(callNode, "FFI calls can't return SIMD values");
+
+ ValTypeVector args;
+ if (!CheckCallArgs<CheckIsExternType>(f, callNode, &args))
+ return false;
+
+ Sig sig(Move(args), ret.canonicalToExprType());
+
+ uint32_t funcIndex;
+ if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &funcIndex))
+ return false;
+
+ if (!f.writeCall(callNode, Op::Call))
+ return false;
+
+ if (!f.encoder().writeVarU32(funcIndex))
+ return false;
+
+ *type = Type::ret(ret);
+ return true;
+}
+
+static bool
+CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType)
+{
+ if (inputType.isMaybeDouble())
+ return f.encoder().writeOp(Op::F32DemoteF64);
+ if (inputType.isSigned())
+ return f.encoder().writeOp(Op::F32ConvertSI32);
+ if (inputType.isUnsigned())
+ return f.encoder().writeOp(Op::F32ConvertUI32);
+ if (inputType.isFloatish())
+ return true;
+
+ return f.failf(inputNode, "%s is not a subtype of signed, unsigned, double? or floatish",
+ inputType.toChars());
+}
+
+static bool
+CheckCoercedCall(FunctionValidator& f, ParseNode* call, Type ret, Type* type);
+
+static bool
+CheckCoercionArg(FunctionValidator& f, ParseNode* arg, Type expected, Type* type)
+{
+ MOZ_ASSERT(expected.isCanonicalValType());
+
+ if (arg->isKind(PNK_CALL))
+ return CheckCoercedCall(f, arg, expected, type);
+
+ Type argType;
+ if (!CheckExpr(f, arg, &argType))
+ return false;
+
+ if (expected.isFloat()) {
+ if (!CheckFloatCoercionArg(f, arg, argType))
+ return false;
+ } else if (expected.isSimd()) {
+ if (!(argType <= expected))
+ return f.fail(arg, "argument to SIMD coercion isn't from the correct SIMD type");
+ } else {
+ MOZ_CRASH("not call coercions");
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+static bool
+CheckMathFRound(FunctionValidator& f, ParseNode* callNode, Type* type)
+{
+ if (CallArgListLength(callNode) != 1)
+ return f.fail(callNode, "Math.fround must be passed 1 argument");
+
+ ParseNode* argNode = CallArgList(callNode);
+ Type argType;
+ if (!CheckCoercionArg(f, argNode, Type::Float, &argType))
+ return false;
+
+ MOZ_ASSERT(argType == Type::Float);
+ *type = Type::Float;
+ return true;
+}
+
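+// Check a call to a standard Math.* builtin. imul, clz32, abs, sqrt, fround
+// and min/max have dedicated checks; the remaining builtins map directly to
+// a unary or binary float/double opcode (float variants only where one
+// exists).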
+static bool
+CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltinFunction func,
+ Type* type)
+{
+ unsigned arity = 0;
+ Op f32;
+ Op f64;
+ switch (func) {
+ case AsmJSMathBuiltin_imul: return CheckMathIMul(f, callNode, type);
+ case AsmJSMathBuiltin_clz32: return CheckMathClz32(f, callNode, type);
+ case AsmJSMathBuiltin_abs: return CheckMathAbs(f, callNode, type);
+ case AsmJSMathBuiltin_sqrt: return CheckMathSqrt(f, callNode, type);
+ case AsmJSMathBuiltin_fround: return CheckMathFRound(f, callNode, type);
+ case AsmJSMathBuiltin_min: return CheckMathMinMax(f, callNode, /* isMax = */ false, type);
+ case AsmJSMathBuiltin_max: return CheckMathMinMax(f, callNode, /* isMax = */ true, type);
+ case AsmJSMathBuiltin_ceil: arity = 1; f64 = Op::F64Ceil; f32 = Op::F32Ceil; break;
+ case AsmJSMathBuiltin_floor: arity = 1; f64 = Op::F64Floor; f32 = Op::F32Floor; break;
+ case AsmJSMathBuiltin_sin: arity = 1; f64 = Op::F64Sin; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_cos: arity = 1; f64 = Op::F64Cos; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_tan: arity = 1; f64 = Op::F64Tan; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_asin: arity = 1; f64 = Op::F64Asin; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_acos: arity = 1; f64 = Op::F64Acos; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_atan: arity = 1; f64 = Op::F64Atan; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_exp: arity = 1; f64 = Op::F64Exp; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_log: arity = 1; f64 = Op::F64Log; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_pow: arity = 2; f64 = Op::F64Pow; f32 = Op::Unreachable; break;
+ case AsmJSMathBuiltin_atan2: arity = 2; f64 = Op::F64Atan2; f32 = Op::Unreachable; break;
+ default: MOZ_CRASH("unexpected mathBuiltin function");
+ }
+
+ unsigned actualArity = CallArgListLength(callNode);
+ if (actualArity != arity)
+ return f.failf(callNode, "call passed %u arguments, expected %u", actualArity, arity);
+
+ if (!f.prepareCall(callNode))
+ return false;
+
+ Type firstType;
+ ParseNode* argNode = CallArgList(callNode);
+ if (!CheckExpr(f, argNode, &firstType))
+ return false;
+
+ if (!firstType.isMaybeFloat() && !firstType.isMaybeDouble())
+        return f.fail(argNode, "arguments to math call should be subtypes of double? or float?");
+
+ bool opIsDouble = firstType.isMaybeDouble();
+ if (!opIsDouble && f32 == Op::Unreachable)
+ return f.fail(callNode, "math builtin cannot be used as float");
+
+ if (arity == 2) {
+ Type secondType;
+ argNode = NextNode(argNode);
+ if (!CheckExpr(f, argNode, &secondType))
+ return false;
+
+ if (firstType.isMaybeDouble() && !secondType.isMaybeDouble())
+ return f.fail(argNode, "both arguments to math builtin call should be the same type");
+ if (firstType.isMaybeFloat() && !secondType.isMaybeFloat())
+ return f.fail(argNode, "both arguments to math builtin call should be the same type");
+ }
+
+ if (opIsDouble) {
+ if (!f.encoder().writeOp(f64))
+ return false;
+ } else {
+ if (!f.encoder().writeOp(f32))
+ return false;
+ }
+
+ *type = opIsDouble ? Type::Double : Type::Floatish;
+ return true;
+}
+
+namespace {
+// Include CheckSimdCallArgs in unnamed namespace to avoid MSVC name lookup bug.
+
+template<class CheckArgOp>
+static bool
+CheckSimdCallArgs(FunctionValidator& f, ParseNode* call, unsigned expectedArity,
+ const CheckArgOp& checkArg)
+{
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != expectedArity)
+ return f.failf(call, "expected %u arguments to SIMD call, got %u", expectedArity, numArgs);
+
+ ParseNode* arg = CallArgList(call);
+ for (size_t i = 0; i < numArgs; i++, arg = NextNode(arg)) {
+ MOZ_ASSERT(!!arg);
+ Type argType;
+ if (!CheckExpr(f, arg, &argType))
+ return false;
+ if (!checkArg(f, arg, i, argType))
+ return false;
+ }
+
+ return true;
+}
+
+class CheckArgIsSubtypeOf
+{
+ Type formalType_;
+
+ public:
+ explicit CheckArgIsSubtypeOf(SimdType t) : formalType_(t) {}
+
+ bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+ {
+ if (!(actualType <= formalType_)) {
+ return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+ formalType_.toChars());
+ }
+ return true;
+ }
+};
+
+static inline Type
+SimdToCoercedScalarType(SimdType t)
+{
+ switch (t) {
+ case SimdType::Int8x16:
+ case SimdType::Int16x8:
+ case SimdType::Int32x4:
+ case SimdType::Uint8x16:
+ case SimdType::Uint16x8:
+ case SimdType::Uint32x4:
+ case SimdType::Bool8x16:
+ case SimdType::Bool16x8:
+ case SimdType::Bool32x4:
+ return Type::Intish;
+ case SimdType::Float32x4:
+ return Type::Floatish;
+ default:
+ break;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected SIMD type");
+}
+
+class CheckSimdScalarArgs
+{
+ SimdType simdType_;
+ Type formalType_;
+
+ public:
+ explicit CheckSimdScalarArgs(SimdType simdType)
+ : simdType_(simdType), formalType_(SimdToCoercedScalarType(simdType))
+ {}
+
+ bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+ {
+ if (!(actualType <= formalType_)) {
+            // As a special case, accept doublelit arguments to float32x4 ops
+            // by demoting them to float32.
+ if (simdType_ != SimdType::Float32x4 || !actualType.isDoubleLit()) {
+ return f.failf(arg, "%s is not a subtype of %s%s",
+ actualType.toChars(), formalType_.toChars(),
+ simdType_ == SimdType::Float32x4 ? " or doublelit" : "");
+ }
+
+ // We emitted a double literal and actually want a float32.
+ return f.encoder().writeOp(Op::F32DemoteF64);
+ }
+
+ return true;
+ }
+};
+
+class CheckSimdSelectArgs
+{
+ Type formalType_;
+ Type maskType_;
+
+ public:
+ explicit CheckSimdSelectArgs(SimdType t) : formalType_(t), maskType_(GetBooleanSimdType(t)) {}
+
+ bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+ {
+ // The first argument is the boolean selector, the next two are the
+ // values to choose from.
+ Type wantedType = argIndex == 0 ? maskType_ : formalType_;
+
+ if (!(actualType <= wantedType)) {
+ return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+ wantedType.toChars());
+ }
+ return true;
+ }
+};
+
+class CheckSimdVectorScalarArgs
+{
+ SimdType formalSimdType_;
+
+ public:
+ explicit CheckSimdVectorScalarArgs(SimdType t) : formalSimdType_(t) {}
+
+ bool operator()(FunctionValidator& f, ParseNode* arg, unsigned argIndex, Type actualType) const
+ {
+ MOZ_ASSERT(argIndex < 2);
+ if (argIndex == 0) {
+ // First argument is the vector
+ if (!(actualType <= Type(formalSimdType_))) {
+ return f.failf(arg, "%s is not a subtype of %s", actualType.toChars(),
+ Type(formalSimdType_).toChars());
+ }
+
+ return true;
+ }
+
+ // Second argument is the scalar
+ return CheckSimdScalarArgs(formalSimdType_)(f, arg, argIndex, actualType);
+ }
+};
+
+} // namespace
+
+static bool
+CheckSimdUnary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, op))
+ return false;
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdBinaryShift(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type *type)
+{
+ if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, op))
+ return false;
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdBinaryComp(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type *type)
+{
+ if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, op))
+ return false;
+ *type = GetBooleanSimdType(opType);
+ return true;
+}
+
+static bool
+CheckSimdBinary(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 2, CheckArgIsSubtypeOf(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, op))
+ return false;
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ switch (opType) {
+ case SimdType::Int8x16:
+ case SimdType::Int16x8:
+ case SimdType::Int32x4: *type = Type::Signed; break;
+ case SimdType::Uint8x16:
+ case SimdType::Uint16x8:
+ case SimdType::Uint32x4: *type = Type::Unsigned; break;
+ case SimdType::Float32x4: *type = Type::Float; break;
+ case SimdType::Bool8x16:
+ case SimdType::Bool16x8:
+ case SimdType::Bool32x4: *type = Type::Int; break;
+ default: MOZ_CRASH("unhandled simd type");
+ }
+
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 2)
+ return f.failf(call, "expected 2 arguments to SIMD extract, got %u", numArgs);
+
+ ParseNode* arg = CallArgList(call);
+
+ // First argument is the vector
+ Type vecType;
+ if (!CheckExpr(f, arg, &vecType))
+ return false;
+ if (!(vecType <= Type(opType))) {
+ return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
+ Type(opType).toChars());
+ }
+
+ arg = NextNode(arg);
+
+ // Second argument is the lane < vector length
+ uint32_t lane;
+ if (!IsLiteralOrConstInt(f, arg, &lane))
+ return f.failf(arg, "lane selector should be a constant integer literal");
+ if (lane >= GetSimdLanes(opType))
+ return f.failf(arg, "lane selector should be in bounds");
+
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_extractLane))
+ return false;
+ if (!f.encoder().writeVarU32(lane))
+ return false;
+ return true;
+}
+
+static bool
+CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 3)
+ return f.failf(call, "expected 2 arguments to SIMD replace, got %u", numArgs);
+
+ ParseNode* arg = CallArgList(call);
+
+ // First argument is the vector
+ Type vecType;
+ if (!CheckExpr(f, arg, &vecType))
+ return false;
+ if (!(vecType <= Type(opType))) {
+ return f.failf(arg, "%s is not a subtype of %s", vecType.toChars(),
+ Type(opType).toChars());
+ }
+
+ arg = NextNode(arg);
+
+ // Second argument is the lane < vector length
+ uint32_t lane;
+ if (!IsLiteralOrConstInt(f, arg, &lane))
+ return f.failf(arg, "lane selector should be a constant integer literal");
+ if (lane >= GetSimdLanes(opType))
+ return f.failf(arg, "lane selector should be in bounds");
+
+ arg = NextNode(arg);
+
+ // Third argument is the scalar
+ Type scalarType;
+ if (!CheckExpr(f, arg, &scalarType))
+ return false;
+ if (!(scalarType <= SimdToCoercedScalarType(opType))) {
+ if (opType == SimdType::Float32x4 && scalarType.isDoubleLit()) {
+ if (!f.encoder().writeOp(Op::F32DemoteF64))
+ return false;
+ } else {
+ return f.failf(arg, "%s is not the correct type to replace an element of %s",
+ scalarType.toChars(), vecType.toChars());
+ }
+ }
+
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_replaceLane))
+ return false;
+ if (!f.encoder().writeVarU32(lane))
+ return false;
+ *type = opType;
+ return true;
+}
+
+typedef bool Bitcast;
+
+namespace {
+// Include CheckSimdCast in unnamed namespace to avoid MSVC name lookup bug (due to the use of Type).
+
+static bool
+CheckSimdCast(FunctionValidator& f, ParseNode* call, SimdType fromType, SimdType toType,
+ SimdOperation op, Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(fromType)))
+ return false;
+ if (!f.writeSimdOp(toType, op))
+ return false;
+ *type = toType;
+ return true;
+}
+
+} // namespace
+
+static bool
+CheckSimdShuffleSelectors(FunctionValidator& f, ParseNode* lane,
+ mozilla::Array<uint8_t, 16>& lanes, unsigned numLanes, unsigned maxLane)
+{
+ for (unsigned i = 0; i < numLanes; i++, lane = NextNode(lane)) {
+ uint32_t u32;
+ if (!IsLiteralInt(f.m(), lane, &u32))
+ return f.failf(lane, "lane selector should be a constant integer literal");
+ if (u32 >= maxLane)
+ return f.failf(lane, "lane selector should be less than %u", maxLane);
+ lanes[i] = uint8_t(u32);
+ }
+ return true;
+}
+
+static bool
+CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ const unsigned numLanes = GetSimdLanes(opType);
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 1 + numLanes)
+ return f.failf(call, "expected %u arguments to SIMD swizzle, got %u", 1 + numLanes,
+ numArgs);
+
+ Type retType = opType;
+ ParseNode* vec = CallArgList(call);
+ Type vecType;
+ if (!CheckExpr(f, vec, &vecType))
+ return false;
+ if (!(vecType <= retType))
+ return f.failf(vec, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
+
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_swizzle))
+ return false;
+
+ mozilla::Array<uint8_t, 16> lanes;
+ if (!CheckSimdShuffleSelectors(f, NextNode(vec), lanes, numLanes, numLanes))
+ return false;
+
+ for (unsigned i = 0; i < numLanes; i++) {
+ if (!f.encoder().writeFixedU8(lanes[i]))
+ return false;
+ }
+
+ *type = retType;
+ return true;
+}
+
+static bool
+CheckSimdShuffle(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ const unsigned numLanes = GetSimdLanes(opType);
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 2 + numLanes)
+ return f.failf(call, "expected %u arguments to SIMD shuffle, got %u", 2 + numLanes,
+ numArgs);
+
+ Type retType = opType;
+ ParseNode* arg = CallArgList(call);
+ for (unsigned i = 0; i < 2; i++, arg = NextNode(arg)) {
+ Type type;
+ if (!CheckExpr(f, arg, &type))
+ return false;
+ if (!(type <= retType))
+ return f.failf(arg, "%s is not a subtype of %s", type.toChars(), retType.toChars());
+ }
+
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_shuffle))
+ return false;
+
+ mozilla::Array<uint8_t, 16> lanes;
+ if (!CheckSimdShuffleSelectors(f, arg, lanes, numLanes, 2 * numLanes))
+ return false;
+
+ for (unsigned i = 0; i < numLanes; i++) {
+ if (!f.encoder().writeFixedU8(uint8_t(lanes[i])))
+ return false;
+ }
+
+ *type = retType;
+ return true;
+}
+
+static bool
+CheckSimdLoadStoreArgs(FunctionValidator& f, ParseNode* call, Scalar::Type* viewType)
+{
+ ParseNode* view = CallArgList(call);
+ if (!view->isKind(PNK_NAME))
+ return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
+
+ ParseNode* indexExpr = NextNode(view);
+
+ if (!CheckAndPrepareArrayAccess(f, view, indexExpr, YesSimd, viewType))
+ return false;
+
+ if (*viewType != Scalar::Uint8)
+ return f.fail(view, "expected Uint8Array view as SIMD.*.load/store first argument");
+
+ return true;
+}
+
+static bool
+CheckSimdLoad(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type* type)
+{
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 2)
+ return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs);
+
+ Scalar::Type viewType;
+ if (!CheckSimdLoadStoreArgs(f, call, &viewType))
+ return false;
+
+ if (!f.writeSimdOp(opType, op))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdStore(FunctionValidator& f, ParseNode* call, SimdType opType, SimdOperation op,
+ Type* type)
+{
+ unsigned numArgs = CallArgListLength(call);
+ if (numArgs != 3)
+ return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs);
+
+ Scalar::Type viewType;
+ if (!CheckSimdLoadStoreArgs(f, call, &viewType))
+ return false;
+
+ Type retType = opType;
+ ParseNode* vecExpr = NextNode(NextNode(CallArgList(call)));
+ Type vecType;
+ if (!CheckExpr(f, vecExpr, &vecType))
+ return false;
+
+ if (!f.writeSimdOp(opType, op))
+ return false;
+
+ if (!WriteArrayAccessFlags(f, viewType))
+ return false;
+
+ if (!(vecType <= retType))
+ return f.failf(vecExpr, "%s is not a subtype of %s", vecType.toChars(), retType.toChars());
+
+ *type = vecType;
+ return true;
+}
+
+static bool
+CheckSimdSelect(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_select))
+ return false;
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_allTrue))
+ return false;
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckSimdAnyTrue(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 1, CheckArgIsSubtypeOf(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_anyTrue))
+ return false;
+ *type = Type::Int;
+ return true;
+}
+
+static bool
+CheckSimdCheck(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ Type coerceTo;
+ ParseNode* argNode;
+ if (!IsCoercionCall(f.m(), call, &coerceTo, &argNode))
+ return f.failf(call, "expected 1 argument in call to check");
+ return CheckCoercionArg(f, argNode, coerceTo, type);
+}
+
+static bool
+CheckSimdSplat(FunctionValidator& f, ParseNode* call, SimdType opType, Type* type)
+{
+ if (!CheckSimdCallArgs(f, call, 1, CheckSimdScalarArgs(opType)))
+ return false;
+ if (!f.writeSimdOp(opType, SimdOperation::Fn_splat))
+ return false;
+ *type = opType;
+ return true;
+}
+
+static bool
+CheckSimdOperationCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+ Type* type)
+{
+ f.setUsesSimd();
+
+ MOZ_ASSERT(global->isSimdOperation());
+
+ SimdType opType = global->simdOperationType();
+
+ switch (SimdOperation op = global->simdOperation()) {
+ case SimdOperation::Fn_check:
+ return CheckSimdCheck(f, call, opType, type);
+
+#define _CASE(OP) case SimdOperation::Fn_##OP:
+ FOREACH_SHIFT_SIMD_OP(_CASE)
+ return CheckSimdBinaryShift(f, call, opType, op, type);
+
+ FOREACH_COMP_SIMD_OP(_CASE)
+ return CheckSimdBinaryComp(f, call, opType, op, type);
+
+ FOREACH_NUMERIC_SIMD_BINOP(_CASE)
+ FOREACH_FLOAT_SIMD_BINOP(_CASE)
+ FOREACH_BITWISE_SIMD_BINOP(_CASE)
+ FOREACH_SMINT_SIMD_BINOP(_CASE)
+ return CheckSimdBinary(f, call, opType, op, type);
+#undef _CASE
+
+ case SimdOperation::Fn_extractLane:
+ return CheckSimdExtractLane(f, call, opType, type);
+ case SimdOperation::Fn_replaceLane:
+ return CheckSimdReplaceLane(f, call, opType, type);
+
+ case SimdOperation::Fn_fromInt8x16Bits:
+ return CheckSimdCast(f, call, SimdType::Int8x16, opType, op, type);
+ case SimdOperation::Fn_fromUint8x16Bits:
+ return CheckSimdCast(f, call, SimdType::Uint8x16, opType, op, type);
+ case SimdOperation::Fn_fromInt16x8Bits:
+ return CheckSimdCast(f, call, SimdType::Int16x8, opType, op, type);
+ case SimdOperation::Fn_fromUint16x8Bits:
+ return CheckSimdCast(f, call, SimdType::Uint16x8, opType, op, type);
+ case SimdOperation::Fn_fromInt32x4:
+ case SimdOperation::Fn_fromInt32x4Bits:
+ return CheckSimdCast(f, call, SimdType::Int32x4, opType, op, type);
+ case SimdOperation::Fn_fromUint32x4:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ return CheckSimdCast(f, call, SimdType::Uint32x4, opType, op, type);
+ case SimdOperation::Fn_fromFloat32x4:
+ case SimdOperation::Fn_fromFloat32x4Bits:
+ return CheckSimdCast(f, call, SimdType::Float32x4, opType, op, type);
+
+ case SimdOperation::Fn_abs:
+ case SimdOperation::Fn_neg:
+ case SimdOperation::Fn_not:
+ case SimdOperation::Fn_sqrt:
+ case SimdOperation::Fn_reciprocalApproximation:
+ case SimdOperation::Fn_reciprocalSqrtApproximation:
+ return CheckSimdUnary(f, call, opType, op, type);
+
+ case SimdOperation::Fn_swizzle:
+ return CheckSimdSwizzle(f, call, opType, type);
+ case SimdOperation::Fn_shuffle:
+ return CheckSimdShuffle(f, call, opType, type);
+
+ case SimdOperation::Fn_load:
+ case SimdOperation::Fn_load1:
+ case SimdOperation::Fn_load2:
+ return CheckSimdLoad(f, call, opType, op, type);
+ case SimdOperation::Fn_store:
+ case SimdOperation::Fn_store1:
+ case SimdOperation::Fn_store2:
+ return CheckSimdStore(f, call, opType, op, type);
+
+ case SimdOperation::Fn_select:
+ return CheckSimdSelect(f, call, opType, type);
+
+ case SimdOperation::Fn_splat:
+ return CheckSimdSplat(f, call, opType, type);
+
+ case SimdOperation::Fn_allTrue:
+ return CheckSimdAllTrue(f, call, opType, type);
+ case SimdOperation::Fn_anyTrue:
+ return CheckSimdAnyTrue(f, call, opType, type);
+
+ case SimdOperation::Fn_load3:
+ case SimdOperation::Fn_store3:
+ return f.fail(call, "asm.js does not support 3-element SIMD loads or stores");
+
+ case SimdOperation::Constructor:
+ MOZ_CRASH("constructors are handled in CheckSimdCtorCall");
+ case SimdOperation::Fn_fromFloat64x2Bits:
+ MOZ_CRASH("NYI");
+ }
+ MOZ_CRASH("unexpected simd operation in CheckSimdOperationCall");
+}
+
+static bool
+CheckSimdCtorCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+ Type* type)
+{
+ f.setUsesSimd();
+
+ MOZ_ASSERT(call->isKind(PNK_CALL));
+
+ SimdType simdType = global->simdCtorType();
+ unsigned length = GetSimdLanes(simdType);
+ if (!CheckSimdCallArgs(f, call, length, CheckSimdScalarArgs(simdType)))
+ return false;
+
+ if (!f.writeSimdOp(simdType, SimdOperation::Constructor))
+ return false;
+
+ *type = simdType;
+ return true;
+}
+
+static bool
+CheckUncoercedCall(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ MOZ_ASSERT(expr->isKind(PNK_CALL));
+
+ const ModuleValidator::Global* global;
+ if (IsCallToGlobal(f.m(), expr, &global)) {
+ if (global->isMathFunction())
+ return CheckMathBuiltinCall(f, expr, global->mathBuiltinFunction(), type);
+ if (global->isAtomicsFunction())
+ return CheckAtomicsBuiltinCall(f, expr, global->atomicsBuiltinFunction(), type);
+ if (global->isSimdCtor())
+ return CheckSimdCtorCall(f, expr, global, type);
+ if (global->isSimdOperation())
+ return CheckSimdOperationCall(f, expr, global, type);
+ }
+
+ return f.fail(expr, "all function calls must either be calls to standard lib math functions, "
+ "standard atomic functions, standard SIMD constructors or operations, "
+ "ignored (via f(); or comma-expression), coerced to signed (via f()|0), "
+ "coerced to float (via fround(f())) or coerced to double (via +f())");
+}
+
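+// Coerce the value just emitted, of type 'actual', to the canonical type
+// 'expected', inserting a drop, promotion, demotion or int-to-float/double
+// conversion as required.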
+static bool
+CoerceResult(FunctionValidator& f, ParseNode* expr, Type expected, Type actual,
+ Type* type)
+{
+ MOZ_ASSERT(expected.isCanonical());
+
+ // At this point, the bytecode resembles this:
+ // | the thing we wanted to coerce | current position |>
+ switch (expected.which()) {
+ case Type::Void:
+ if (!actual.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop))
+ return false;
+ }
+ break;
+ case Type::Int:
+ if (!actual.isIntish())
+ return f.failf(expr, "%s is not a subtype of intish", actual.toChars());
+ break;
+ case Type::Float:
+ if (!CheckFloatCoercionArg(f, expr, actual))
+ return false;
+ break;
+ case Type::Double:
+ if (actual.isMaybeDouble()) {
+ // No conversion necessary.
+ } else if (actual.isMaybeFloat()) {
+ if (!f.encoder().writeOp(Op::F64PromoteF32))
+ return false;
+ } else if (actual.isSigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertSI32))
+ return false;
+ } else if (actual.isUnsigned()) {
+ if (!f.encoder().writeOp(Op::F64ConvertUI32))
+ return false;
+ } else {
+ return f.failf(expr, "%s is not a subtype of double?, float?, signed or unsigned", actual.toChars());
+ }
+ break;
+ default:
+ MOZ_ASSERT(expected.isSimd(), "Incomplete switch");
+ if (actual != expected)
+ return f.failf(expr, "got type %s, expected %s", actual.toChars(), expected.toChars());
+ break;
+ }
+
+ *type = Type::ret(expected);
+ return true;
+}
+
+static bool
+CheckCoercedMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltinFunction func,
+ Type ret, Type* type)
+{
+ Type actual;
+ if (!CheckMathBuiltinCall(f, callNode, func, &actual))
+ return false;
+ return CoerceResult(f, callNode, ret, actual, type);
+}
+
+static bool
+CheckCoercedSimdCall(FunctionValidator& f, ParseNode* call, const ModuleValidator::Global* global,
+ Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ Type actual;
+ if (global->isSimdCtor()) {
+ if (!CheckSimdCtorCall(f, call, global, &actual))
+ return false;
+ MOZ_ASSERT(actual.isSimd());
+ } else {
+ MOZ_ASSERT(global->isSimdOperation());
+ if (!CheckSimdOperationCall(f, call, global, &actual))
+ return false;
+ }
+
+ return CoerceResult(f, call, ret, actual, type);
+}
+
+static bool
+CheckCoercedAtomicsBuiltinCall(FunctionValidator& f, ParseNode* callNode,
+ AsmJSAtomicsBuiltinFunction func, Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ Type actual;
+ if (!CheckAtomicsBuiltinCall(f, callNode, func, &actual))
+ return false;
+ return CoerceResult(f, callNode, ret, actual, type);
+}
+
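+// Check a call appearing in a coerced context (f()|0, +f(), fround(f()), or
+// a dropped statement) and coerce its result to 'ret'. Dispatches on the
+// callee: function-pointer tables, FFI imports, Math/Atomics/SIMD builtins,
+// or internal functions.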
+static bool
+CheckCoercedCall(FunctionValidator& f, ParseNode* call, Type ret, Type* type)
+{
+ MOZ_ASSERT(ret.isCanonical());
+
+ JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
+
+ bool isSimd = false;
+ if (IsNumericLiteral(f.m(), call, &isSimd)) {
+ if (isSimd)
+ f.setUsesSimd();
+ NumLit lit = ExtractNumericLiteral(f.m(), call);
+ if (!f.writeConstExpr(lit))
+ return false;
+ return CoerceResult(f, call, ret, Type::lit(lit), type);
+ }
+
+ ParseNode* callee = CallCallee(call);
+
+ if (callee->isKind(PNK_ELEM))
+ return CheckFuncPtrCall(f, call, ret, type);
+
+ if (!callee->isKind(PNK_NAME))
+ return f.fail(callee, "unexpected callee expression type");
+
+ PropertyName* calleeName = callee->name();
+
+ if (const ModuleValidator::Global* global = f.lookupGlobal(calleeName)) {
+ switch (global->which()) {
+ case ModuleValidator::Global::FFI:
+ return CheckFFICall(f, call, global->ffiIndex(), ret, type);
+ case ModuleValidator::Global::MathBuiltinFunction:
+ return CheckCoercedMathBuiltinCall(f, call, global->mathBuiltinFunction(), ret, type);
+ case ModuleValidator::Global::AtomicsBuiltinFunction:
+ return CheckCoercedAtomicsBuiltinCall(f, call, global->atomicsBuiltinFunction(), ret, type);
+ case ModuleValidator::Global::ConstantLiteral:
+ case ModuleValidator::Global::ConstantImport:
+ case ModuleValidator::Global::Variable:
+ case ModuleValidator::Global::FuncPtrTable:
+ case ModuleValidator::Global::ArrayView:
+ case ModuleValidator::Global::ArrayViewCtor:
+            return f.failName(callee, "'%s' is not a callable function", callee->name());
+ case ModuleValidator::Global::SimdCtor:
+ case ModuleValidator::Global::SimdOp:
+ return CheckCoercedSimdCall(f, call, global, ret, type);
+ case ModuleValidator::Global::Function:
+ break;
+ }
+ }
+
+ return CheckInternalCall(f, call, calleeName, ret, type);
+}
+
+static bool
+CheckPos(FunctionValidator& f, ParseNode* pos, Type* type)
+{
+ MOZ_ASSERT(pos->isKind(PNK_POS));
+ ParseNode* operand = UnaryKid(pos);
+
+ if (operand->isKind(PNK_CALL))
+ return CheckCoercedCall(f, operand, Type::Double, type);
+
+ Type actual;
+ if (!CheckExpr(f, operand, &actual))
+ return false;
+
+ return CoerceResult(f, operand, Type::Double, actual, type);
+}
+
+static bool
+CheckNot(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ MOZ_ASSERT(expr->isKind(PNK_NOT));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType))
+ return false;
+
+ if (!operandType.isInt())
+ return f.failf(operand, "%s is not a subtype of int", operandType.toChars());
+
+ *type = Type::Int;
+ return f.encoder().writeOp(Op::I32Eqz);
+}
+
+static bool
+CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ MOZ_ASSERT(expr->isKind(PNK_NEG));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType))
+ return false;
+
+ if (operandType.isInt()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(Op::I32Neg);
+ }
+
+ if (operandType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Neg);
+ }
+
+ if (operandType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Neg);
+ }
+
+ return f.failf(operand, "%s is not a subtype of int, float? or double?", operandType.toChars());
+}
+
+static bool
+CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ MOZ_ASSERT(expr->isKind(PNK_BITNOT));
+ ParseNode* operand = UnaryKid(expr);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType))
+ return false;
+
+ if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) {
+ *type = Type::Signed;
+ Op opcode = operandType.isMaybeDouble() ? Op::I32TruncSF64 : Op::I32TruncSF32;
+ return f.encoder().writeOp(opcode);
+ }
+
+ if (!operandType.isIntish())
+ return f.failf(operand, "%s is not a subtype of double?, float? or intish", operandType.toChars());
+
+ *type = Type::Signed;
+ return true;
+}
+
+static bool
+CheckBitNot(FunctionValidator& f, ParseNode* neg, Type* type)
+{
+ MOZ_ASSERT(neg->isKind(PNK_BITNOT));
+ ParseNode* operand = UnaryKid(neg);
+
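+ // A doubled ~ (the ~~x idiom) is asm.js's coercion of a double or float to
+ // signed int: both tildes are handed to CheckCoerceToInt below so a single
+ // truncation is emitted instead of two bitwise-nots.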
+ if (operand->isKind(PNK_BITNOT))
+ return CheckCoerceToInt(f, operand, type);
+
+ Type operandType;
+ if (!CheckExpr(f, operand, &operandType))
+ return false;
+
+ if (!operandType.isIntish())
+ return f.failf(operand, "%s is not a subtype of intish", operandType.toChars());
+
+ if (!f.encoder().writeOp(Op::I32BitNot))
+ return false;
+
+ *type = Type::Signed;
+ return true;
+}
+
+static bool
+CheckAsExprStatement(FunctionValidator& f, ParseNode* exprStmt);
+
+static bool
+CheckComma(FunctionValidator& f, ParseNode* comma, Type* type)
+{
+ MOZ_ASSERT(comma->isKind(PNK_COMMA));
+ ParseNode* operands = ListHead(comma);
+
+ // The block depth isn't taken into account here, because a comma list can't
+ // contain breaks, continues, or nested control flow structures.
+ if (!f.encoder().writeOp(Op::Block))
+ return false;
+
+ size_t typeAt;
+ if (!f.encoder().writePatchableFixedU7(&typeAt))
+ return false;
+
+ ParseNode* pn = operands;
+ for (; NextNode(pn); pn = NextNode(pn)) {
+ if (!CheckAsExprStatement(f, pn))
+ return false;
+ }
+
+ if (!CheckExpr(f, pn, type))
+ return false;
+
+ f.encoder().patchFixedU7(typeAt, uint8_t(type->toWasmBlockSignatureType()));
+
+ return f.encoder().writeOp(Op::End);
+}
+
+static bool
+CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type)
+{
+ MOZ_ASSERT(ternary->isKind(PNK_CONDITIONAL));
+
+ ParseNode* cond = TernaryKid1(ternary);
+ ParseNode* thenExpr = TernaryKid2(ternary);
+ ParseNode* elseExpr = TernaryKid3(ternary);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType))
+ return false;
+
+ if (!condType.isInt())
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt))
+ return false;
+
+ Type thenType;
+ if (!CheckExpr(f, thenExpr, &thenType))
+ return false;
+
+ if (!f.switchToElse())
+ return false;
+
+ Type elseType;
+ if (!CheckExpr(f, elseExpr, &elseType))
+ return false;
+
+ if (thenType.isInt() && elseType.isInt()) {
+ *type = Type::Int;
+ } else if (thenType.isDouble() && elseType.isDouble()) {
+ *type = Type::Double;
+ } else if (thenType.isFloat() && elseType.isFloat()) {
+ *type = Type::Float;
+ } else if (thenType.isSimd() && elseType == thenType) {
+ *type = thenType;
+ } else {
+ return f.failf(ternary, "then/else branches of conditional must both produce int, float, "
+ "double or SIMD types, current types are %s and %s",
+ thenType.toChars(), elseType.toChars());
+ }
+
+ if (!f.popIf(typeAt, type->toWasmBlockSignatureType()))
+ return false;
+
+ return true;
+}
+
+static bool
+IsValidIntMultiplyConstant(ModuleValidator& m, ParseNode* expr)
+{
+ if (!IsNumericLiteral(m, expr))
+ return false;
+
+ NumLit lit = ExtractNumericLiteral(m, expr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ if (abs(lit.toInt32()) < (1<<20))
+ return true;
+ return false;
+ case NumLit::BigUnsigned:
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::OutOfRangeInt:
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ case NumLit::Float32x4:
+ case NumLit::Bool8x16:
+ case NumLit::Bool16x8:
+ case NumLit::Bool32x4:
+ return false;
+ }
+
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("Bad literal");
+}
+
+static bool
+CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type)
+{
+ MOZ_ASSERT(star->isKind(PNK_STAR));
+ ParseNode* lhs = MultiplyLeft(star);
+ ParseNode* rhs = MultiplyRight(star);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (lhsType.isInt() && rhsType.isInt()) {
+ if (!IsValidIntMultiplyConstant(f.m(), lhs) && !IsValidIntMultiplyConstant(f.m(), rhs))
+ return f.fail(star, "one arg to int multiply must be a small (-2^20, 2^20) int literal");
+ *type = Type::Intish;
+ return f.encoder().writeOp(Op::I32Mul);
+ }
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(Op::F64Mul);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ return f.encoder().writeOp(Op::F32Mul);
+ }
+
+ return f.fail(star, "multiply operands must be both int, both double? or both float?");
+}
+
+static bool
+CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAddOrSubOut = nullptr)
+{
+ JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
+
+ MOZ_ASSERT(expr->isKind(PNK_ADD) || expr->isKind(PNK_SUB));
+ ParseNode* lhs = AddSubLeft(expr);
+ ParseNode* rhs = AddSubRight(expr);
+
+ Type lhsType, rhsType;
+ unsigned lhsNumAddOrSub, rhsNumAddOrSub;
+
+ if (lhs->isKind(PNK_ADD) || lhs->isKind(PNK_SUB)) {
+ if (!CheckAddOrSub(f, lhs, &lhsType, &lhsNumAddOrSub))
+ return false;
+ if (lhsType == Type::Intish)
+ lhsType = Type::Int;
+ } else {
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+ lhsNumAddOrSub = 0;
+ }
+
+ if (rhs->isKind(PNK_ADD) || rhs->isKind(PNK_SUB)) {
+ if (!CheckAddOrSub(f, rhs, &rhsType, &rhsNumAddOrSub))
+ return false;
+ if (rhsType == Type::Intish)
+ rhsType = Type::Int;
+ } else {
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+ rhsNumAddOrSub = 0;
+ }
+
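+ // The 2^20 cap bounds uncoerced additive chains. A rough motivation (not
+ // stated here): each step can grow the magnitude by at most 2^31, so 2^20
+ // steps stay below 2^53 and the intermediate sums remain exact in doubles.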
+ unsigned numAddOrSub = lhsNumAddOrSub + rhsNumAddOrSub + 1;
+ if (numAddOrSub > (1<<20))
+ return f.fail(expr, "too many + or - without intervening coercion");
+
+ if (lhsType.isInt() && rhsType.isInt()) {
+ if (!f.encoder().writeOp(expr->isKind(PNK_ADD) ? Op::I32Add : Op::I32Sub))
+ return false;
+ *type = Type::Intish;
+ } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ if (!f.encoder().writeOp(expr->isKind(PNK_ADD) ? Op::F64Add : Op::F64Sub))
+ return false;
+ *type = Type::Double;
+ } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ if (!f.encoder().writeOp(expr->isKind(PNK_ADD) ? Op::F32Add : Op::F32Sub))
+ return false;
+ *type = Type::Floatish;
+ } else {
+ return f.failf(expr, "operands to + or - must both be int, float? or double?, got %s and %s",
+ lhsType.toChars(), rhsType.toChars());
+ }
+
+ if (numAddOrSubOut)
+ *numAddOrSubOut = numAddOrSub;
+ return true;
+}
+
+static bool
+CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ MOZ_ASSERT(expr->isKind(PNK_DIV) || expr->isKind(PNK_MOD));
+
+ ParseNode* lhs = DivOrModLeft(expr);
+ ParseNode* rhs = DivOrModRight(expr);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) {
+ *type = Type::Double;
+ return f.encoder().writeOp(expr->isKind(PNK_DIV) ? Op::F64Div : Op::F64Mod);
+ }
+
+ if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) {
+ *type = Type::Floatish;
+ if (expr->isKind(PNK_DIV))
+ return f.encoder().writeOp(Op::F32Div);
+ else
+ return f.fail(expr, "modulo cannot receive float arguments");
+ }
+
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(expr->isKind(PNK_DIV) ? Op::I32DivS : Op::I32RemS);
+ }
+
+ if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ *type = Type::Intish;
+ return f.encoder().writeOp(expr->isKind(PNK_DIV) ? Op::I32DivU : Op::I32RemU);
+ }
+
+ return f.failf(expr, "arguments to / or %% must both be double?, float?, signed, or unsigned; "
+ "%s and %s are given", lhsType.toChars(), rhsType.toChars());
+}
+
+static bool
+CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type)
+{
+ MOZ_ASSERT(comp->isKind(PNK_LT) || comp->isKind(PNK_LE) || comp->isKind(PNK_GT) ||
+ comp->isKind(PNK_GE) || comp->isKind(PNK_EQ) || comp->isKind(PNK_NE));
+
+ ParseNode* lhs = ComparisonLeft(comp);
+ ParseNode* rhs = ComparisonRight(comp);
+
+ Type lhsType, rhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (!(lhsType.isSigned() && rhsType.isSigned()) &&
+ !(lhsType.isUnsigned() && rhsType.isUnsigned()) &&
+ !(lhsType.isDouble() && rhsType.isDouble()) &&
+ !(lhsType.isFloat() && rhsType.isFloat()))
+ {
+ return f.failf(comp, "arguments to a comparison must both be signed, unsigned, floats or doubles; "
+ "%s and %s are given", lhsType.toChars(), rhsType.toChars());
+ }
+
+ Op stmt;
+ if (lhsType.isSigned() && rhsType.isSigned()) {
+ switch (comp->getOp()) {
+ case JSOP_EQ: stmt = Op::I32Eq; break;
+ case JSOP_NE: stmt = Op::I32Ne; break;
+ case JSOP_LT: stmt = Op::I32LtS; break;
+ case JSOP_LE: stmt = Op::I32LeS; break;
+ case JSOP_GT: stmt = Op::I32GtS; break;
+ case JSOP_GE: stmt = Op::I32GeS; break;
+ default: MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isUnsigned() && rhsType.isUnsigned()) {
+ switch (comp->getOp()) {
+ case JSOP_EQ: stmt = Op::I32Eq; break;
+ case JSOP_NE: stmt = Op::I32Ne; break;
+ case JSOP_LT: stmt = Op::I32LtU; break;
+ case JSOP_LE: stmt = Op::I32LeU; break;
+ case JSOP_GT: stmt = Op::I32GtU; break;
+ case JSOP_GE: stmt = Op::I32GeU; break;
+ default: MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isDouble()) {
+ switch (comp->getOp()) {
+ case JSOP_EQ: stmt = Op::F64Eq; break;
+ case JSOP_NE: stmt = Op::F64Ne; break;
+ case JSOP_LT: stmt = Op::F64Lt; break;
+ case JSOP_LE: stmt = Op::F64Le; break;
+ case JSOP_GT: stmt = Op::F64Gt; break;
+ case JSOP_GE: stmt = Op::F64Ge; break;
+ default: MOZ_CRASH("unexpected comparison op");
+ }
+ } else if (lhsType.isFloat()) {
+ switch (comp->getOp()) {
+ case JSOP_EQ: stmt = Op::F32Eq; break;
+ case JSOP_NE: stmt = Op::F32Ne; break;
+ case JSOP_LT: stmt = Op::F32Lt; break;
+ case JSOP_LE: stmt = Op::F32Le; break;
+ case JSOP_GT: stmt = Op::F32Gt; break;
+ case JSOP_GE: stmt = Op::F32Ge; break;
+ default: MOZ_CRASH("unexpected comparison op");
+ }
+ } else {
+ MOZ_CRASH("unexpected type");
+ }
+
+ *type = Type::Int;
+ return f.encoder().writeOp(stmt);
+}
+
+static bool
+CheckBitwise(FunctionValidator& f, ParseNode* bitwise, Type* type)
+{
+ ParseNode* lhs = BitwiseLeft(bitwise);
+ ParseNode* rhs = BitwiseRight(bitwise);
+
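+ // Bitwise ops double as asm.js coercions: applying the op with its identity
+ // element (x|0, x&-1, x^0, x<<0, x>>0, x>>>0) just coerces an intish value
+ // without emitting an opcode, and f()|0 marks a coerced call. The two
+ // literal-operand cases below peel those forms off first.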
+ int32_t identityElement;
+ bool onlyOnRight;
+ switch (bitwise->getKind()) {
+ case PNK_BITOR: identityElement = 0; onlyOnRight = false; *type = Type::Signed; break;
+ case PNK_BITAND: identityElement = -1; onlyOnRight = false; *type = Type::Signed; break;
+ case PNK_BITXOR: identityElement = 0; onlyOnRight = false; *type = Type::Signed; break;
+ case PNK_LSH: identityElement = 0; onlyOnRight = true; *type = Type::Signed; break;
+ case PNK_RSH: identityElement = 0; onlyOnRight = true; *type = Type::Signed; break;
+ case PNK_URSH: identityElement = 0; onlyOnRight = true; *type = Type::Unsigned; break;
+ default: MOZ_CRASH("not a bitwise op");
+ }
+
+ uint32_t i;
+ if (!onlyOnRight && IsLiteralInt(f.m(), lhs, &i) && i == uint32_t(identityElement)) {
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+ if (!rhsType.isIntish())
+ return f.failf(bitwise, "%s is not a subtype of intish", rhsType.toChars());
+ return true;
+ }
+
+ if (IsLiteralInt(f.m(), rhs, &i) && i == uint32_t(identityElement)) {
+ if (bitwise->isKind(PNK_BITOR) && lhs->isKind(PNK_CALL))
+ return CheckCoercedCall(f, lhs, Type::Int, type);
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+ if (!lhsType.isIntish())
+ return f.failf(bitwise, "%s is not a subtype of intish", lhsType.toChars());
+ return true;
+ }
+
+ Type lhsType;
+ if (!CheckExpr(f, lhs, &lhsType))
+ return false;
+
+ Type rhsType;
+ if (!CheckExpr(f, rhs, &rhsType))
+ return false;
+
+ if (!lhsType.isIntish())
+ return f.failf(lhs, "%s is not a subtype of intish", lhsType.toChars());
+ if (!rhsType.isIntish())
+ return f.failf(rhs, "%s is not a subtype of intish", rhsType.toChars());
+
+ switch (bitwise->getKind()) {
+ case PNK_BITOR: if (!f.encoder().writeOp(Op::I32Or)) return false; break;
+ case PNK_BITAND: if (!f.encoder().writeOp(Op::I32And)) return false; break;
+ case PNK_BITXOR: if (!f.encoder().writeOp(Op::I32Xor)) return false; break;
+ case PNK_LSH: if (!f.encoder().writeOp(Op::I32Shl)) return false; break;
+ case PNK_RSH: if (!f.encoder().writeOp(Op::I32ShrS)) return false; break;
+ case PNK_URSH: if (!f.encoder().writeOp(Op::I32ShrU)) return false; break;
+ default: MOZ_CRASH("not a bitwise op");
+ }
+
+ return true;
+}
+
+static bool
+CheckExpr(FunctionValidator& f, ParseNode* expr, Type* type)
+{
+ JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
+
+ bool isSimd = false;
+ if (IsNumericLiteral(f.m(), expr, &isSimd)) {
+ if (isSimd)
+ f.setUsesSimd();
+ return CheckNumericLiteral(f, expr, type);
+ }
+
+ switch (expr->getKind()) {
+ case PNK_NAME: return CheckVarRef(f, expr, type);
+ case PNK_ELEM: return CheckLoadArray(f, expr, type);
+ case PNK_ASSIGN: return CheckAssign(f, expr, type);
+ case PNK_POS: return CheckPos(f, expr, type);
+ case PNK_NOT: return CheckNot(f, expr, type);
+ case PNK_NEG: return CheckNeg(f, expr, type);
+ case PNK_BITNOT: return CheckBitNot(f, expr, type);
+ case PNK_COMMA: return CheckComma(f, expr, type);
+ case PNK_CONDITIONAL: return CheckConditional(f, expr, type);
+ case PNK_STAR: return CheckMultiply(f, expr, type);
+ case PNK_CALL: return CheckUncoercedCall(f, expr, type);
+
+ case PNK_ADD:
+ case PNK_SUB: return CheckAddOrSub(f, expr, type);
+
+ case PNK_DIV:
+ case PNK_MOD: return CheckDivOrMod(f, expr, type);
+
+ case PNK_LT:
+ case PNK_LE:
+ case PNK_GT:
+ case PNK_GE:
+ case PNK_EQ:
+ case PNK_NE: return CheckComparison(f, expr, type);
+
+ case PNK_BITOR:
+ case PNK_BITAND:
+ case PNK_BITXOR:
+ case PNK_LSH:
+ case PNK_RSH:
+ case PNK_URSH: return CheckBitwise(f, expr, type);
+
+ default:;
+ }
+
+ return f.fail(expr, "unsupported expression");
+}
+
+static bool
+CheckStatement(FunctionValidator& f, ParseNode* stmt);
+
+static bool
+CheckAsExprStatement(FunctionValidator& f, ParseNode* expr)
+{
+ if (expr->isKind(PNK_CALL)) {
+ Type ignored;
+ return CheckCoercedCall(f, expr, Type::Void, &ignored);
+ }
+
+ Type resultType;
+ if (!CheckExpr(f, expr, &resultType))
+ return false;
+
+ if (!resultType.isVoid()) {
+ if (!f.encoder().writeOp(Op::Drop))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+CheckExprStatement(FunctionValidator& f, ParseNode* exprStmt)
+{
+ MOZ_ASSERT(exprStmt->isKind(PNK_SEMI));
+ ParseNode* expr = UnaryKid(exprStmt);
+ if (!expr)
+ return true;
+ return CheckAsExprStatement(f, expr);
+}
+
+static bool
+CheckLoopConditionOnEntry(FunctionValidator& f, ParseNode* cond)
+{
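+ // If the condition is a non-zero integer literal the loop is always entered,
+ // so no entry test is emitted; otherwise the condition is compared against 0
+ // and a brIf exits the enclosing block when the comparison succeeds.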
+ uint32_t maybeLit;
+ if (IsLiteralInt(f.m(), cond, &maybeLit) && maybeLit)
+ return true;
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType))
+ return false;
+ if (!condType.isInt())
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+
+ // TODO change this to i32.eqz
+ // i32.eq 0 $f
+ if (!f.writeInt32Lit(0))
+ return false;
+ if (!f.encoder().writeOp(Op::I32Eq))
+ return false;
+
+ // brIf (i32.eq 0 $f) $out
+ if (!f.writeBreakIf())
+ return false;
+
+ return true;
+}
+
+static bool
+CheckWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* labels = nullptr)
+{
+ MOZ_ASSERT(whileStmt->isKind(PNK_WHILE));
+ ParseNode* cond = BinaryLeft(whileStmt);
+ ParseNode* body = BinaryRight(whileStmt);
+
+ // A while loop `while(#cond) #body` is equivalent to:
+ // (block $after_loop
+ // (loop $top
+ // (brIf $after_loop (i32.eq 0 #cond))
+ // #body
+ // (br $top)
+ // )
+ // )
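+ // For example (illustrative sketch only), `while ((i|0) < (n|0)) { ... }`
+ // plugs the i32 comparison in for #cond above: the brIf exits to $after_loop
+ // when the comparison yields 0, and (br $top) re-enters the loop otherwise.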
+ if (labels && !f.addLabels(*labels, 0, 1))
+ return false;
+
+ if (!f.pushLoop())
+ return false;
+
+ if (!CheckLoopConditionOnEntry(f, cond))
+ return false;
+ if (!CheckStatement(f, body))
+ return false;
+ if (!f.writeContinue())
+ return false;
+
+ if (!f.popLoop())
+ return false;
+ if (labels)
+ f.removeLabels(*labels);
+ return true;
+}
+
+static bool
+CheckFor(FunctionValidator& f, ParseNode* forStmt, const NameVector* labels = nullptr)
+{
+ MOZ_ASSERT(forStmt->isKind(PNK_FOR));
+ ParseNode* forHead = BinaryLeft(forStmt);
+ ParseNode* body = BinaryRight(forStmt);
+
+ if (!forHead->isKind(PNK_FORHEAD))
+ return f.fail(forHead, "unsupported for-loop statement");
+
+ ParseNode* maybeInit = TernaryKid1(forHead);
+ ParseNode* maybeCond = TernaryKid2(forHead);
+ ParseNode* maybeInc = TernaryKid3(forHead);
+
+ // A for-loop `for (#init; #cond; #inc) #body` is equivalent to:
+ // (block // depth X
+ // (#init)
+ // (block $after_loop // depth X+1 (block)
+ // (loop $loop_top // depth X+2 (loop)
+ // (brIf $after_loop (eq 0 #cond))
+ // (block $after_body #body) // depth X+3
+ // #inc
+ // (br $loop_top)
+ // )
+ // )
+ // )
+ // A break in the body should break out to $after_loop, i.e. depth + 1.
+ // A continue in the body should break out to $after_body, i.e. depth + 3.
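+ // For example (illustrative sketch only), `for (i = 0; (i|0) < (n|0); i = (i + 1)|0) { ... }`
+ // maps the assignment to #init, the comparison to #cond, the increment to
+ // #inc, and the loop body to #body in the template above.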
+ if (labels && !f.addLabels(*labels, 1, 3))
+ return false;
+
+ if (!f.pushUnbreakableBlock())
+ return false;
+
+ if (maybeInit && !CheckAsExprStatement(f, maybeInit))
+ return false;
+
+ {
+ if (!f.pushLoop())
+ return false;
+
+ if (maybeCond && !CheckLoopConditionOnEntry(f, maybeCond))
+ return false;
+
+ {
+ // Continuing in the body should just break out to the increment.
+ if (!f.pushContinuableBlock())
+ return false;
+ if (!CheckStatement(f, body))
+ return false;
+ if (!f.popContinuableBlock())
+ return false;
+ }
+
+ if (maybeInc && !CheckAsExprStatement(f, maybeInc))
+ return false;
+
+ if (!f.writeContinue())
+ return false;
+ if (!f.popLoop())
+ return false;
+ }
+
+ if (!f.popUnbreakableBlock())
+ return false;
+
+ if (labels)
+ f.removeLabels(*labels);
+
+ return true;
+}
+
+static bool
+CheckDoWhile(FunctionValidator& f, ParseNode* whileStmt, const NameVector* labels = nullptr)
+{
+ MOZ_ASSERT(whileStmt->isKind(PNK_DOWHILE));
+ ParseNode* body = BinaryLeft(whileStmt);
+ ParseNode* cond = BinaryRight(whileStmt);
+
+ // A do-while loop `do { #body } while (#cond)` is equivalent to:
+ // (block $after_loop // depth X
+ // (loop $top // depth X+1
+ // (block #body) // depth X+2
+ // (brIf #cond $top)
+ // )
+ // )
+ // A break should break out of the entire loop, i.e. at depth 0.
+ // A continue should break out to the condition, i.e. at depth 2.
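+ // For example (illustrative sketch only), `do { ... } while ((i|0) < (n|0));`
+ // runs #body once before the comparison is evaluated; the brIf at the bottom
+ // jumps back to $top, while falling through exits the loop.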
+ if (labels && !f.addLabels(*labels, 0, 2))
+ return false;
+
+ if (!f.pushLoop())
+ return false;
+
+ {
+ // An unlabeled continue in the body should break out to the condition.
+ if (!f.pushContinuableBlock())
+ return false;
+ if (!CheckStatement(f, body))
+ return false;
+ if (!f.popContinuableBlock())
+ return false;
+ }
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType))
+ return false;
+ if (!condType.isInt())
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+
+ if (!f.writeContinueIf())
+ return false;
+
+ if (!f.popLoop())
+ return false;
+ if (labels)
+ f.removeLabels(*labels);
+ return true;
+}
+
+static bool CheckStatementList(FunctionValidator& f, ParseNode*, const NameVector* = nullptr);
+
+static bool
+CheckLabel(FunctionValidator& f, ParseNode* labeledStmt)
+{
+ MOZ_ASSERT(labeledStmt->isKind(PNK_LABEL));
+
+ NameVector labels;
+ ParseNode* innermost = labeledStmt;
+ do {
+ if (!labels.append(LabeledStatementLabel(innermost)))
+ return false;
+ innermost = LabeledStatementStatement(innermost);
+ } while (innermost->getKind() == PNK_LABEL);
+
+ switch (innermost->getKind()) {
+ case PNK_FOR:
+ return CheckFor(f, innermost, &labels);
+ case PNK_DOWHILE:
+ return CheckDoWhile(f, innermost, &labels);
+ case PNK_WHILE:
+ return CheckWhile(f, innermost, &labels);
+ case PNK_STATEMENTLIST:
+ return CheckStatementList(f, innermost, &labels);
+ default:
+ break;
+ }
+
+ if (!f.pushUnbreakableBlock(&labels))
+ return false;
+
+ if (!CheckStatement(f, innermost))
+ return false;
+
+ if (!f.popUnbreakableBlock(&labels))
+ return false;
+ return true;
+}
+
+static bool
+CheckIf(FunctionValidator& f, ParseNode* ifStmt)
+{
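+ // An else-if chain is flattened iteratively via the goto below rather than
+ // by recursion: each arm pushes one If, and numIfEnd counts how many matching
+ // popIf() calls the loop at the bottom must perform.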
+ uint32_t numIfEnd = 1;
+
+ recurse:
+ MOZ_ASSERT(ifStmt->isKind(PNK_IF));
+ ParseNode* cond = TernaryKid1(ifStmt);
+ ParseNode* thenStmt = TernaryKid2(ifStmt);
+ ParseNode* elseStmt = TernaryKid3(ifStmt);
+
+ Type condType;
+ if (!CheckExpr(f, cond, &condType))
+ return false;
+ if (!condType.isInt())
+ return f.failf(cond, "%s is not a subtype of int", condType.toChars());
+
+ size_t typeAt;
+ if (!f.pushIf(&typeAt))
+ return false;
+
+ f.setIfType(typeAt, ExprType::Void);
+
+ if (!CheckStatement(f, thenStmt))
+ return false;
+
+ if (elseStmt) {
+ if (!f.switchToElse())
+ return false;
+
+ if (elseStmt->isKind(PNK_IF)) {
+ ifStmt = elseStmt;
+ if (numIfEnd++ == UINT32_MAX)
+ return false;
+ goto recurse;
+ }
+
+ if (!CheckStatement(f, elseStmt))
+ return false;
+ }
+
+ for (uint32_t i = 0; i != numIfEnd; ++i) {
+ if (!f.popIf())
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+CheckCaseExpr(FunctionValidator& f, ParseNode* caseExpr, int32_t* value)
+{
+ if (!IsNumericLiteral(f.m(), caseExpr))
+ return f.fail(caseExpr, "switch case expression must be an integer literal");
+
+ NumLit lit = ExtractNumericLiteral(f.m(), caseExpr);
+ switch (lit.which()) {
+ case NumLit::Fixnum:
+ case NumLit::NegativeInt:
+ *value = lit.toInt32();
+ break;
+ case NumLit::OutOfRangeInt:
+ case NumLit::BigUnsigned:
+ return f.fail(caseExpr, "switch case expression out of integer range");
+ case NumLit::Double:
+ case NumLit::Float:
+ case NumLit::Int8x16:
+ case NumLit::Uint8x16:
+ case NumLit::Int16x8:
+ case NumLit::Uint16x8:
+ case NumLit::Int32x4:
+ case NumLit::Uint32x4:
+ case NumLit::Float32x4:
+ case NumLit::Bool8x16:
+ case NumLit::Bool16x8:
+ case NumLit::Bool32x4:
+ return f.fail(caseExpr, "switch case expression must be an integer literal");
+ }
+
+ return true;
+}
+
+static bool
+CheckDefaultAtEnd(FunctionValidator& f, ParseNode* stmt)
+{
+ for (; stmt; stmt = NextNode(stmt)) {
+ if (IsDefaultCase(stmt) && NextNode(stmt) != nullptr)
+ return f.fail(stmt, "default label must be at the end");
+ }
+
+ return true;
+}
+
+static bool
+CheckSwitchRange(FunctionValidator& f, ParseNode* stmt, int32_t* low, int32_t* high,
+ uint32_t* tableLength)
+{
+ if (IsDefaultCase(stmt)) {
+ *low = 0;
+ *high = -1;
+ *tableLength = 0;
+ return true;
+ }
+
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i))
+ return false;
+
+ *low = *high = i;
+
+ ParseNode* initialStmt = stmt;
+ for (stmt = NextNode(stmt); stmt && !IsDefaultCase(stmt); stmt = NextNode(stmt)) {
+ int32_t i = 0;
+ if (!CheckCaseExpr(f, CaseExpr(stmt), &i))
+ return false;
+
+ *low = Min(*low, i);
+ *high = Max(*high, i);
+ }
+
+ int64_t i64 = (int64_t(*high) - int64_t(*low)) + 1;
+ if (i64 > MaxBrTableElems)
+ return f.fail(initialStmt, "all switch statements generate tables; this table would be too big");
+
+ *tableLength = uint32_t(i64);
+ return true;
+}
+
+static bool
+CheckSwitchExpr(FunctionValidator& f, ParseNode* switchExpr)
+{
+ Type exprType;
+ if (!CheckExpr(f, switchExpr, &exprType))
+ return false;
+ if (!exprType.isSigned())
+ return f.failf(switchExpr, "%s is not a subtype of signed", exprType.toChars());
+ return true;
+}
+
+// A switch will be constructed as:
+// - the default block wrapping all the other blocks, to be able to break
+// out of the switch with an unlabeled break statement. It has two statements
+// (an inner block and the default expr). asm.js rules require default to be at
+// the end, so the default block always encloses all the case blocks.
+// - one block per case between low and high; undefined cases just jump to the
+// default case. Each of these blocks contains two statements: the next case's
+// block and the possibly empty statement list comprising the case body. The
+// last block pushed is the first case so the (relative) branch target therefore
+// matches the sequential order of cases.
+// - one block for the br_table, so that the first break goes to the first
+// case's block.
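+// Illustrative sketch: for `switch (x|0) { case 1: A; case 3: B; default: C; }`,
+// low=1, high=3 and tableLength=3. Case 1 gets relative depth 0, case 3 gets
+// depth 1 and the default gets depth 2, so the br_table entries for values
+// 1, 2, 3 are [0, 2, 1] with default target 2 (value 2 is undefined and
+// dispatches to C).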
+static bool
+CheckSwitch(FunctionValidator& f, ParseNode* switchStmt)
+{
+ MOZ_ASSERT(switchStmt->isKind(PNK_SWITCH));
+
+ ParseNode* switchExpr = BinaryLeft(switchStmt);
+ ParseNode* switchBody = BinaryRight(switchStmt);
+
+ if (switchBody->isKind(PNK_LEXICALSCOPE)) {
+ if (!switchBody->isEmptyScope())
+ return f.fail(switchBody, "switch body may not contain lexical declarations");
+ switchBody = switchBody->scopeBody();
+ }
+
+ ParseNode* stmt = ListHead(switchBody);
+ if (!stmt) {
+ if (!CheckSwitchExpr(f, switchExpr))
+ return false;
+ if (!f.encoder().writeOp(Op::Drop))
+ return false;
+ return true;
+ }
+
+ if (!CheckDefaultAtEnd(f, stmt))
+ return false;
+
+ int32_t low = 0, high = 0;
+ uint32_t tableLength = 0;
+ if (!CheckSwitchRange(f, stmt, &low, &high, &tableLength))
+ return false;
+
+ static const uint32_t CASE_NOT_DEFINED = UINT32_MAX;
+
+ Uint32Vector caseDepths;
+ if (!caseDepths.appendN(CASE_NOT_DEFINED, tableLength))
+ return false;
+
+ uint32_t numCases = 0;
+ for (ParseNode* s = stmt; s && !IsDefaultCase(s); s = NextNode(s)) {
+ int32_t caseValue = ExtractNumericLiteral(f.m(), CaseExpr(s)).toInt32();
+
+ MOZ_ASSERT(caseValue >= low);
+ unsigned i = caseValue - low;
+ if (caseDepths[i] != CASE_NOT_DEFINED)
+ return f.fail(s, "no duplicate case labels");
+
+ MOZ_ASSERT(numCases != CASE_NOT_DEFINED);
+ caseDepths[i] = numCases++;
+ }
+
+ // Open the wrapping breakable default block.
+ if (!f.pushBreakableBlock())
+ return false;
+
+ // Open all the case blocks.
+ for (uint32_t i = 0; i < numCases; i++) {
+ if (!f.pushUnbreakableBlock())
+ return false;
+ }
+
+ // Open the br_table block.
+ if (!f.pushUnbreakableBlock())
+ return false;
+
+ // The default block is the last one.
+ uint32_t defaultDepth = numCases;
+
+ // Subtract lowest case value, so that all the cases start from 0.
+ if (low) {
+ if (!CheckSwitchExpr(f, switchExpr))
+ return false;
+ if (!f.writeInt32Lit(low))
+ return false;
+ if (!f.encoder().writeOp(Op::I32Sub))
+ return false;
+ } else {
+ if (!CheckSwitchExpr(f, switchExpr))
+ return false;
+ }
+
+ // Start the br_table block.
+ if (!f.encoder().writeOp(Op::BrTable))
+ return false;
+
+ // Write the number of cases (tableLength - 1 + 1 (default)).
+ if (!f.encoder().writeVarU32(tableLength))
+ return false;
+
+ // Each case value describes the relative depth to the actual block. When
+ // a case is not explicitly defined, it goes to the default.
+ for (size_t i = 0; i < tableLength; i++) {
+ uint32_t target = caseDepths[i] == CASE_NOT_DEFINED ? defaultDepth : caseDepths[i];
+ if (!f.encoder().writeVarU32(target))
+ return false;
+ }
+
+ // Write the default depth.
+ if (!f.encoder().writeVarU32(defaultDepth))
+ return false;
+
+ // Our br_table is done. Close its block, write the cases down in order.
+ if (!f.popUnbreakableBlock())
+ return false;
+
+ for (; stmt && !IsDefaultCase(stmt); stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt)))
+ return false;
+ if (!f.popUnbreakableBlock())
+ return false;
+ }
+
+ // Write the default block.
+ if (stmt && IsDefaultCase(stmt)) {
+ if (!CheckStatement(f, CaseBody(stmt)))
+ return false;
+ }
+
+ // Close the wrapping block.
+ if (!f.popBreakableBlock())
+ return false;
+ return true;
+}
+
+static bool
+CheckReturnType(FunctionValidator& f, ParseNode* usepn, Type ret)
+{
+ if (!f.hasAlreadyReturned()) {
+ f.setReturnedType(ret.canonicalToExprType());
+ return true;
+ }
+
+ if (f.returnedType() != ret.canonicalToExprType()) {
+ return f.failf(usepn, "%s incompatible with previous return of type %s",
+ Type::ret(ret).toChars(), ToCString(f.returnedType()));
+ }
+
+ return true;
+}
+
+static bool
+CheckReturn(FunctionValidator& f, ParseNode* returnStmt)
+{
+ ParseNode* expr = ReturnExpr(returnStmt);
+
+ if (!expr) {
+ if (!CheckReturnType(f, returnStmt, Type::Void))
+ return false;
+ } else {
+ Type type;
+ if (!CheckExpr(f, expr, &type))
+ return false;
+
+ if (!type.isReturnType())
+ return f.failf(expr, "%s is not a valid return type", type.toChars());
+
+ if (!CheckReturnType(f, expr, Type::canonicalize(type)))
+ return false;
+ }
+
+ if (!f.encoder().writeOp(Op::Return))
+ return false;
+
+ return true;
+}
+
+static bool
+CheckStatementList(FunctionValidator& f, ParseNode* stmtList, const NameVector* labels /*= nullptr */)
+{
+ MOZ_ASSERT(stmtList->isKind(PNK_STATEMENTLIST));
+
+ if (!f.pushUnbreakableBlock(labels))
+ return false;
+
+ for (ParseNode* stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) {
+ if (!CheckStatement(f, stmt))
+ return false;
+ }
+
+ if (!f.popUnbreakableBlock(labels))
+ return false;
+ return true;
+}
+
+static bool
+CheckLexicalScope(FunctionValidator& f, ParseNode* lexicalScope)
+{
+ MOZ_ASSERT(lexicalScope->isKind(PNK_LEXICALSCOPE));
+
+ if (!lexicalScope->isEmptyScope())
+ return f.fail(lexicalScope, "cannot have 'let' or 'const' declarations");
+
+ return CheckStatement(f, lexicalScope->scopeBody());
+}
+
+static bool
+CheckBreakOrContinue(FunctionValidator& f, bool isBreak, ParseNode* stmt)
+{
+ if (PropertyName* maybeLabel = LoopControlMaybeLabel(stmt))
+ return f.writeLabeledBreakOrContinue(maybeLabel, isBreak);
+ return f.writeUnlabeledBreakOrContinue(isBreak);
+}
+
+static bool
+CheckStatement(FunctionValidator& f, ParseNode* stmt)
+{
+ JS_CHECK_RECURSION_DONT_REPORT(f.cx(), return f.m().failOverRecursed());
+
+ switch (stmt->getKind()) {
+ case PNK_SEMI: return CheckExprStatement(f, stmt);
+ case PNK_WHILE: return CheckWhile(f, stmt);
+ case PNK_FOR: return CheckFor(f, stmt);
+ case PNK_DOWHILE: return CheckDoWhile(f, stmt);
+ case PNK_LABEL: return CheckLabel(f, stmt);
+ case PNK_IF: return CheckIf(f, stmt);
+ case PNK_SWITCH: return CheckSwitch(f, stmt);
+ case PNK_RETURN: return CheckReturn(f, stmt);
+ case PNK_STATEMENTLIST: return CheckStatementList(f, stmt);
+ case PNK_BREAK: return CheckBreakOrContinue(f, true, stmt);
+ case PNK_CONTINUE: return CheckBreakOrContinue(f, false, stmt);
+ case PNK_LEXICALSCOPE: return CheckLexicalScope(f, stmt);
+ default:;
+ }
+
+ return f.fail(stmt, "unexpected statement kind");
+}
+
+static bool
+ParseFunction(ModuleValidator& m, ParseNode** fnOut, unsigned* line)
+{
+ TokenStream& tokenStream = m.tokenStream();
+
+ tokenStream.consumeKnownToken(TOK_FUNCTION, TokenStream::Operand);
+ *line = tokenStream.srcCoords.lineNum(tokenStream.currentToken().pos.end);
+
+ TokenKind tk;
+ if (!tokenStream.getToken(&tk, TokenStream::Operand))
+ return false;
+ if (tk != TOK_NAME && tk != TOK_YIELD)
+ return false; // The regular parser will throw a SyntaxError, no need to m.fail.
+
+ RootedPropertyName name(m.cx(), m.parser().bindingIdentifier(YieldIsName));
+ if (!name)
+ return false;
+
+ ParseNode* fn = m.parser().handler.newFunctionDefinition();
+ if (!fn)
+ return false;
+
+ RootedFunction& fun = m.dummyFunction();
+ fun->setAtom(name);
+ fun->setArgCount(0);
+
+ ParseContext* outerpc = m.parser().pc;
+ Directives directives(outerpc);
+ FunctionBox* funbox = m.parser().newFunctionBox(fn, fun, directives, NotGenerator,
+ SyncFunction, /* tryAnnexB = */ false);
+ if (!funbox)
+ return false;
+ funbox->initWithEnclosingParseContext(outerpc, frontend::Statement);
+
+ Directives newDirectives = directives;
+ ParseContext funpc(&m.parser(), funbox, &newDirectives);
+ if (!funpc.init())
+ return false;
+
+ if (!m.parser().functionFormalParametersAndBody(InAllowed, YieldIsName, fn, Statement)) {
+ if (tokenStream.hadError() || directives == newDirectives)
+ return false;
+
+ return m.fail(fn, "encountered new directive in function");
+ }
+
+ MOZ_ASSERT(!tokenStream.hadError());
+ MOZ_ASSERT(directives == newDirectives);
+
+ *fnOut = fn;
+ return true;
+}
+
+static bool
+CheckFunction(ModuleValidator& m)
+{
+ // asm.js modules can be quite large when represented as parse trees so pop
+ // the backing LifoAlloc after parsing/compiling each function.
+ AsmJSParser::Mark mark = m.parser().mark();
+
+ ParseNode* fn = nullptr;
+ unsigned line = 0;
+ if (!ParseFunction(m, &fn, &line))
+ return false;
+
+ if (!CheckFunctionHead(m, fn))
+ return false;
+
+ FunctionValidator f(m, fn);
+ if (!f.init(FunctionName(fn), line))
+ return m.fail(fn, "internal compiler failure (probably out of memory)");
+
+ ParseNode* stmtIter = ListHead(FunctionStatementList(fn));
+
+ if (!CheckProcessingDirectives(m, &stmtIter))
+ return false;
+
+ ValTypeVector args;
+ if (!CheckArguments(f, &stmtIter, &args))
+ return false;
+
+ if (!CheckVariables(f, &stmtIter))
+ return false;
+
+ ParseNode* lastNonEmptyStmt = nullptr;
+ for (; stmtIter; stmtIter = NextNonEmptyStatement(stmtIter)) {
+ lastNonEmptyStmt = stmtIter;
+ if (!CheckStatement(f, stmtIter))
+ return false;
+ }
+
+ if (!CheckFinalReturn(f, lastNonEmptyStmt))
+ return false;
+
+ ModuleValidator::Func* func = nullptr;
+ if (!CheckFunctionSignature(m, fn, Sig(Move(args), f.returnedType()), FunctionName(fn), &func))
+ return false;
+
+ if (func->defined())
+ return m.failName(fn, "function '%s' already defined", FunctionName(fn));
+
+ func->define(fn);
+
+ if (!f.finish(func->index()))
+ return m.fail(fn, "internal compiler failure (probably out of memory)");
+
+ // Release the parser's lifo memory only after the last use of a parse node.
+ m.parser().release(mark);
+ return true;
+}
+
+static bool
+CheckAllFunctionsDefined(ModuleValidator& m)
+{
+ for (unsigned i = 0; i < m.numFunctions(); i++) {
+ ModuleValidator::Func& f = m.function(i);
+ if (!f.defined())
+ return m.failNameOffset(f.firstUse(), "missing definition of function %s", f.name());
+ }
+
+ return true;
+}
+
+static bool
+CheckFunctions(ModuleValidator& m)
+{
+ while (true) {
+ TokenKind tk;
+ if (!PeekToken(m.parser(), &tk))
+ return false;
+
+ if (tk != TOK_FUNCTION)
+ break;
+
+ if (!CheckFunction(m))
+ return false;
+ }
+
+ return CheckAllFunctionsDefined(m);
+}
+
+static bool
+CheckFuncPtrTable(ModuleValidator& m, ParseNode* var)
+{
+ if (!var->isKind(PNK_NAME))
+ return m.fail(var, "function-pointer table name is not a plain name");
+
+ ParseNode* arrayLiteral = MaybeInitializer(var);
+ if (!arrayLiteral || !arrayLiteral->isKind(PNK_ARRAY))
+ return m.fail(var, "function-pointer table's initializer must be an array literal");
+
+ unsigned length = ListLength(arrayLiteral);
+
+ if (!IsPowerOfTwo(length))
+ return m.failf(arrayLiteral, "function-pointer table length must be a power of 2 (is %u)", length);
+
+ unsigned mask = length - 1;
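+ // Call sites index the table as table[expr & mask] (that form is validated by
+ // CheckFuncPtrCall), so a power-of-two length lets a single mask keep every
+ // index within the table.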
+
+ Uint32Vector elemFuncIndices;
+ const Sig* sig = nullptr;
+ for (ParseNode* elem = ListHead(arrayLiteral); elem; elem = NextNode(elem)) {
+ if (!elem->isKind(PNK_NAME))
+ return m.fail(elem, "function-pointer table's elements must be names of functions");
+
+ PropertyName* funcName = elem->name();
+ const ModuleValidator::Func* func = m.lookupFunction(funcName);
+ if (!func)
+ return m.fail(elem, "function-pointer table's elements must be names of functions");
+
+ const Sig& funcSig = m.mg().funcSig(func->index());
+ if (sig) {
+ if (*sig != funcSig)
+ return m.fail(elem, "all functions in table must have same signature");
+ } else {
+ sig = &funcSig;
+ }
+
+ if (!elemFuncIndices.append(func->index()))
+ return false;
+ }
+
+ Sig copy;
+ if (!copy.clone(*sig))
+ return false;
+
+ uint32_t tableIndex;
+ if (!CheckFuncPtrTableAgainstExisting(m, var, var->name(), Move(copy), mask, &tableIndex))
+ return false;
+
+ if (!m.defineFuncPtrTable(tableIndex, Move(elemFuncIndices)))
+ return m.fail(var, "duplicate function-pointer definition");
+
+ return true;
+}
+
+static bool
+CheckFuncPtrTables(ModuleValidator& m)
+{
+ while (true) {
+ ParseNode* varStmt;
+ if (!ParseVarOrConstStatement(m.parser(), &varStmt))
+ return false;
+ if (!varStmt)
+ break;
+ for (ParseNode* var = VarListHead(varStmt); var; var = NextNode(var)) {
+ if (!CheckFuncPtrTable(m, var))
+ return false;
+ }
+ }
+
+ for (unsigned i = 0; i < m.numFuncPtrTables(); i++) {
+ ModuleValidator::FuncPtrTable& funcPtrTable = m.funcPtrTable(i);
+ if (!funcPtrTable.defined()) {
+ return m.failNameOffset(funcPtrTable.firstUse(),
+ "function-pointer table %s wasn't defined",
+ funcPtrTable.name());
+ }
+ }
+
+ return true;
+}
+
+static bool
+CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybeFieldName = nullptr)
+{
+ if (!pn->isKind(PNK_NAME))
+ return m.fail(pn, "expected name of exported function");
+
+ PropertyName* funcName = pn->name();
+ const ModuleValidator::Func* func = m.lookupFunction(funcName);
+ if (!func)
+ return m.failName(pn, "function '%s' not found", funcName);
+
+ return m.addExportField(pn, *func, maybeFieldName);
+}
+
+static bool
+CheckModuleExportObject(ModuleValidator& m, ParseNode* object)
+{
+ MOZ_ASSERT(object->isKind(PNK_OBJECT));
+
+ for (ParseNode* pn = ListHead(object); pn; pn = NextNode(pn)) {
+ if (!IsNormalObjectField(m.cx(), pn))
+ return m.fail(pn, "only normal object properties may be used in the export object literal");
+
+ PropertyName* fieldName = ObjectNormalFieldName(m.cx(), pn);
+
+ ParseNode* initNode = ObjectNormalFieldInitializer(m.cx(), pn);
+ if (!initNode->isKind(PNK_NAME))
+ return m.fail(initNode, "initializer of exported object literal must be name of function");
+
+ if (!CheckModuleExportFunction(m, initNode, fieldName))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+CheckModuleReturn(ModuleValidator& m)
+{
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk))
+ return false;
+ TokenStream& ts = m.parser().tokenStream;
+ if (tk != TOK_RETURN) {
+ return m.failCurrentOffset((tk == TOK_RC || tk == TOK_EOF)
+ ? "expecting return statement"
+ : "invalid asm.js. statement");
+ }
+ ts.ungetToken();
+
+ ParseNode* returnStmt = m.parser().statementListItem(YieldIsName);
+ if (!returnStmt)
+ return false;
+
+ ParseNode* returnExpr = ReturnExpr(returnStmt);
+ if (!returnExpr)
+ return m.fail(returnStmt, "export statement must return something");
+
+ if (returnExpr->isKind(PNK_OBJECT)) {
+ if (!CheckModuleExportObject(m, returnExpr))
+ return false;
+ } else {
+ if (!CheckModuleExportFunction(m, returnExpr))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+CheckModuleEnd(ModuleValidator &m)
+{
+ TokenKind tk;
+ if (!GetToken(m.parser(), &tk))
+ return false;
+
+ if (tk != TOK_EOF && tk != TOK_RC)
+ return m.failCurrentOffset("top-level export (return) must be the last statement");
+
+ m.parser().tokenStream.ungetToken();
+ return true;
+}
+
+static SharedModule
+CheckModule(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, unsigned* time)
+{
+ int64_t before = PRMJ_Now();
+
+ ParseNode* moduleFunctionNode = parser.pc->functionBox()->functionNode;
+ MOZ_ASSERT(moduleFunctionNode);
+
+ ModuleValidator m(cx, parser, moduleFunctionNode);
+ if (!m.init())
+ return nullptr;
+
+ if (!CheckFunctionHead(m, moduleFunctionNode))
+ return nullptr;
+
+ if (!CheckModuleArguments(m, moduleFunctionNode))
+ return nullptr;
+
+ if (!CheckPrecedingStatements(m, stmtList))
+ return nullptr;
+
+ if (!CheckModuleProcessingDirectives(m))
+ return nullptr;
+
+ if (!CheckModuleGlobals(m))
+ return nullptr;
+
+ if (!m.startFunctionBodies())
+ return nullptr;
+
+ if (!CheckFunctions(m))
+ return nullptr;
+
+ if (!m.finishFunctionBodies())
+ return nullptr;
+
+ if (!CheckFuncPtrTables(m))
+ return nullptr;
+
+ if (!CheckModuleReturn(m))
+ return nullptr;
+
+ if (!CheckModuleEnd(m))
+ return nullptr;
+
+ SharedModule module = m.finish();
+ if (!module)
+ return nullptr;
+
+ *time = (PRMJ_Now() - before) / PRMJ_USEC_PER_MSEC;
+ return module;
+}
+
+/*****************************************************************************/
+// Link-time validation
+
+static bool
+LinkFail(JSContext* cx, const char* str)
+{
+ JS_ReportErrorFlagsAndNumberASCII(cx, JSREPORT_WARNING, GetErrorMessage, nullptr,
+ JSMSG_USE_ASM_LINK_FAIL, str);
+ return false;
+}
+
+static bool
+IsMaybeWrappedScriptedProxy(JSObject* obj)
+{
+ JSObject* unwrapped = UncheckedUnwrap(obj);
+ return unwrapped && IsScriptedProxy(unwrapped);
+}
+
+static bool
+GetDataProperty(JSContext* cx, HandleValue objVal, HandleAtom field, MutableHandleValue v)
+{
+ if (!objVal.isObject())
+ return LinkFail(cx, "accessing property of non-object");
+
+ RootedObject obj(cx, &objVal.toObject());
+ if (IsMaybeWrappedScriptedProxy(obj))
+ return LinkFail(cx, "accessing property of a Proxy");
+
+ Rooted<PropertyDescriptor> desc(cx);
+ RootedId id(cx, AtomToId(field));
+ if (!GetPropertyDescriptor(cx, obj, id, &desc))
+ return false;
+
+ if (!desc.object())
+ return LinkFail(cx, "property not present on object");
+
+ if (!desc.isDataDescriptor())
+ return LinkFail(cx, "property is not a data property");
+
+ v.set(desc.value());
+ return true;
+}
+
+static bool
+GetDataProperty(JSContext* cx, HandleValue objVal, const char* fieldChars, MutableHandleValue v)
+{
+ RootedAtom field(cx, AtomizeUTF8Chars(cx, fieldChars, strlen(fieldChars)));
+ if (!field)
+ return false;
+
+ return GetDataProperty(cx, objVal, field, v);
+}
+
+static bool
+GetDataProperty(JSContext* cx, HandleValue objVal, ImmutablePropertyNamePtr field, MutableHandleValue v)
+{
+ // Help the conversion along for all the cx->names().* users.
+ HandlePropertyName fieldHandle = field;
+ return GetDataProperty(cx, objVal, fieldHandle, v);
+}
+
+static bool
+HasPureCoercion(JSContext* cx, HandleValue v)
+{
+ // Unsigned SIMD types are not allowed in function signatures.
+ if (IsVectorObject<Int32x4>(v) || IsVectorObject<Float32x4>(v) || IsVectorObject<Bool32x4>(v))
+ return true;
+
+ // Ideally, we'd reject all non-SIMD non-primitives, but Emscripten has a
+ // bug that generates code that passes functions for some imports. To avoid
+ // breaking all the code that contains this bug, we make an exception for
+ // functions that don't have user-defined valueOf or toString, for their
+ // coercions are not observable and coercion via ToNumber/ToInt32
+ // definitely produces NaN/0. We should remove this special case later once
+ // most apps have been built with newer Emscripten.
+ jsid toString = NameToId(cx->names().toString);
+ if (v.toObject().is<JSFunction>() &&
+ HasObjectValueOf(&v.toObject(), cx) &&
+ ClassMethodIsNative(cx, &v.toObject().as<JSFunction>(), &JSFunction::class_, toString, fun_toString))
+ {
+ return true;
+ }
+
+ return false;
+}
+
+static bool
+ValidateGlobalVariable(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal, Val* val)
+{
+ switch (global.varInitKind()) {
+ case AsmJSGlobal::InitConstant:
+ *val = global.varInitVal();
+ return true;
+
+ case AsmJSGlobal::InitImport: {
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v))
+ return false;
+
+ if (!v.isPrimitive() && !HasPureCoercion(cx, v))
+ return LinkFail(cx, "Imported values must be primitives");
+
+ switch (global.varInitImportType()) {
+ case ValType::I32: {
+ int32_t i32;
+ if (!ToInt32(cx, v, &i32))
+ return false;
+ *val = Val(uint32_t(i32));
+ return true;
+ }
+ case ValType::I64:
+ MOZ_CRASH("int64");
+ case ValType::F32: {
+ float f;
+ if (!RoundFloat32(cx, v, &f))
+ return false;
+ *val = Val(RawF32(f));
+ return true;
+ }
+ case ValType::F64: {
+ double d;
+ if (!ToNumber(cx, v, &d))
+ return false;
+ *val = Val(RawF64(d));
+ return true;
+ }
+ case ValType::I8x16: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Int8x16>(cx, v, &simdConstant))
+ return false;
+ *val = Val(simdConstant.asInt8x16());
+ return true;
+ }
+ case ValType::I16x8: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Int16x8>(cx, v, &simdConstant))
+ return false;
+ *val = Val(simdConstant.asInt16x8());
+ return true;
+ }
+ case ValType::I32x4: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Int32x4>(cx, v, &simdConstant))
+ return false;
+ *val = Val(simdConstant.asInt32x4());
+ return true;
+ }
+ case ValType::F32x4: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Float32x4>(cx, v, &simdConstant))
+ return false;
+ *val = Val(simdConstant.asFloat32x4());
+ return true;
+ }
+ case ValType::B8x16: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Bool8x16>(cx, v, &simdConstant))
+ return false;
+ // Bool8x16 uses the same data layout as Int8x16.
+ *val = Val(simdConstant.asInt8x16());
+ return true;
+ }
+ case ValType::B16x8: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Bool16x8>(cx, v, &simdConstant))
+ return false;
+ // Bool16x8 uses the same data layout as Int16x8.
+ *val = Val(simdConstant.asInt16x8());
+ return true;
+ }
+ case ValType::B32x4: {
+ SimdConstant simdConstant;
+ if (!ToSimdConstant<Bool32x4>(cx, v, &simdConstant))
+ return false;
+ // Bool32x4 uses the same data layout as Int32x4.
+ *val = Val(simdConstant.asInt32x4());
+ return true;
+ }
+ }
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool
+ValidateFFI(JSContext* cx, const AsmJSGlobal& global, HandleValue importVal,
+ MutableHandle<FunctionVector> ffis)
+{
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, importVal, global.field(), &v))
+ return false;
+
+ if (!IsFunctionObject(v))
+ return LinkFail(cx, "FFI imports must be functions");
+
+ ffis[global.ffiIndex()].set(&v.toObject().as<JSFunction>());
+ return true;
+}
+
+static bool
+ValidateArrayView(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ if (!global.field())
+ return true;
+
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, global.field(), &v))
+ return false;
+
+ bool tac = IsTypedArrayConstructor(v, global.viewType());
+ if (!tac)
+ return LinkFail(cx, "bad typed array constructor");
+
+ return true;
+}
+
+static bool
+ValidateMathBuiltinFunction(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, cx->names().Math, &v))
+ return false;
+
+ if (!GetDataProperty(cx, v, global.field(), &v))
+ return false;
+
+ Native native = nullptr;
+ switch (global.mathBuiltinFunction()) {
+ case AsmJSMathBuiltin_sin: native = math_sin; break;
+ case AsmJSMathBuiltin_cos: native = math_cos; break;
+ case AsmJSMathBuiltin_tan: native = math_tan; break;
+ case AsmJSMathBuiltin_asin: native = math_asin; break;
+ case AsmJSMathBuiltin_acos: native = math_acos; break;
+ case AsmJSMathBuiltin_atan: native = math_atan; break;
+ case AsmJSMathBuiltin_ceil: native = math_ceil; break;
+ case AsmJSMathBuiltin_floor: native = math_floor; break;
+ case AsmJSMathBuiltin_exp: native = math_exp; break;
+ case AsmJSMathBuiltin_log: native = math_log; break;
+ case AsmJSMathBuiltin_pow: native = math_pow; break;
+ case AsmJSMathBuiltin_sqrt: native = math_sqrt; break;
+ case AsmJSMathBuiltin_min: native = math_min; break;
+ case AsmJSMathBuiltin_max: native = math_max; break;
+ case AsmJSMathBuiltin_abs: native = math_abs; break;
+ case AsmJSMathBuiltin_atan2: native = math_atan2; break;
+ case AsmJSMathBuiltin_imul: native = math_imul; break;
+ case AsmJSMathBuiltin_clz32: native = math_clz32; break;
+ case AsmJSMathBuiltin_fround: native = math_fround; break;
+ }
+
+ if (!IsNativeFunction(v, native))
+ return LinkFail(cx, "bad Math.* builtin function");
+
+ return true;
+}
+
+static bool
+ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal,
+ MutableHandleValue out)
+{
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, cx->names().SIMD, &v))
+ return false;
+
+ SimdType type;
+ if (global.which() == AsmJSGlobal::SimdCtor)
+ type = global.simdCtorType();
+ else
+ type = global.simdOperationType();
+
+ RootedPropertyName simdTypeName(cx, SimdTypeToName(cx->names(), type));
+ if (!GetDataProperty(cx, v, simdTypeName, &v))
+ return false;
+
+ if (!v.isObject())
+ return LinkFail(cx, "bad SIMD type");
+
+ RootedObject simdDesc(cx, &v.toObject());
+ if (!simdDesc->is<SimdTypeDescr>())
+ return LinkFail(cx, "bad SIMD type");
+
+ if (type != simdDesc->as<SimdTypeDescr>().type())
+ return LinkFail(cx, "bad SIMD type");
+
+ out.set(v);
+ return true;
+}
+
+static bool
+ValidateSimdType(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ RootedValue _(cx);
+ return ValidateSimdType(cx, global, globalVal, &_);
+}
+
+static bool
+ValidateSimdOperation(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ RootedValue v(cx);
+ JS_ALWAYS_TRUE(ValidateSimdType(cx, global, globalVal, &v));
+
+ if (!GetDataProperty(cx, v, global.field(), &v))
+ return false;
+
+ Native native = nullptr;
+ switch (global.simdOperationType()) {
+#define SET_NATIVE_INT8X16(op) case SimdOperation::Fn_##op: native = simd_int8x16_##op; break;
+#define SET_NATIVE_INT16X8(op) case SimdOperation::Fn_##op: native = simd_int16x8_##op; break;
+#define SET_NATIVE_INT32X4(op) case SimdOperation::Fn_##op: native = simd_int32x4_##op; break;
+#define SET_NATIVE_UINT8X16(op) case SimdOperation::Fn_##op: native = simd_uint8x16_##op; break;
+#define SET_NATIVE_UINT16X8(op) case SimdOperation::Fn_##op: native = simd_uint16x8_##op; break;
+#define SET_NATIVE_UINT32X4(op) case SimdOperation::Fn_##op: native = simd_uint32x4_##op; break;
+#define SET_NATIVE_FLOAT32X4(op) case SimdOperation::Fn_##op: native = simd_float32x4_##op; break;
+#define SET_NATIVE_BOOL8X16(op) case SimdOperation::Fn_##op: native = simd_bool8x16_##op; break;
+#define SET_NATIVE_BOOL16X8(op) case SimdOperation::Fn_##op: native = simd_bool16x8_##op; break;
+#define SET_NATIVE_BOOL32X4(op) case SimdOperation::Fn_##op: native = simd_bool32x4_##op; break;
+#define FALLTHROUGH(op) case SimdOperation::Fn_##op:
+ case SimdType::Int8x16:
+ switch (global.simdOperation()) {
+ FORALL_INT8X16_ASMJS_OP(SET_NATIVE_INT8X16)
+ SET_NATIVE_INT8X16(fromUint8x16Bits)
+ SET_NATIVE_INT8X16(fromUint16x8Bits)
+ SET_NATIVE_INT8X16(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Int16x8:
+ switch (global.simdOperation()) {
+ FORALL_INT16X8_ASMJS_OP(SET_NATIVE_INT16X8)
+ SET_NATIVE_INT16X8(fromUint8x16Bits)
+ SET_NATIVE_INT16X8(fromUint16x8Bits)
+ SET_NATIVE_INT16X8(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Int32x4:
+ switch (global.simdOperation()) {
+ FORALL_INT32X4_ASMJS_OP(SET_NATIVE_INT32X4)
+ SET_NATIVE_INT32X4(fromUint8x16Bits)
+ SET_NATIVE_INT32X4(fromUint16x8Bits)
+ SET_NATIVE_INT32X4(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Uint8x16:
+ switch (global.simdOperation()) {
+ FORALL_INT8X16_ASMJS_OP(SET_NATIVE_UINT8X16)
+ SET_NATIVE_UINT8X16(fromInt8x16Bits)
+ SET_NATIVE_UINT8X16(fromUint16x8Bits)
+ SET_NATIVE_UINT8X16(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Uint16x8:
+ switch (global.simdOperation()) {
+ FORALL_INT16X8_ASMJS_OP(SET_NATIVE_UINT16X8)
+ SET_NATIVE_UINT16X8(fromUint8x16Bits)
+ SET_NATIVE_UINT16X8(fromInt16x8Bits)
+ SET_NATIVE_UINT16X8(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Uint32x4:
+ switch (global.simdOperation()) {
+ FORALL_INT32X4_ASMJS_OP(SET_NATIVE_UINT32X4)
+ SET_NATIVE_UINT32X4(fromUint8x16Bits)
+ SET_NATIVE_UINT32X4(fromUint16x8Bits)
+ SET_NATIVE_UINT32X4(fromInt32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Float32x4:
+ switch (global.simdOperation()) {
+ FORALL_FLOAT32X4_ASMJS_OP(SET_NATIVE_FLOAT32X4)
+ SET_NATIVE_FLOAT32X4(fromUint8x16Bits)
+ SET_NATIVE_FLOAT32X4(fromUint16x8Bits)
+ SET_NATIVE_FLOAT32X4(fromUint32x4Bits)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Bool8x16:
+ switch (global.simdOperation()) {
+ FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL8X16)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Bool16x8:
+ switch (global.simdOperation()) {
+ FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL16X8)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ case SimdType::Bool32x4:
+ switch (global.simdOperation()) {
+ FORALL_BOOL_SIMD_OP(SET_NATIVE_BOOL32X4)
+ default: MOZ_CRASH("shouldn't have been validated in the first place");
+ }
+ break;
+ default: MOZ_CRASH("unhandled simd type");
+#undef FALLTHROUGH
+#undef SET_NATIVE_INT8X16
+#undef SET_NATIVE_INT16X8
+#undef SET_NATIVE_INT32X4
+#undef SET_NATIVE_UINT8X16
+#undef SET_NATIVE_UINT16X8
+#undef SET_NATIVE_UINT32X4
+#undef SET_NATIVE_FLOAT32X4
+#undef SET_NATIVE_BOOL8X16
+#undef SET_NATIVE_BOOL16X8
+#undef SET_NATIVE_BOOL32X4
+#undef SET_NATIVE
+ }
+ if (!native || !IsNativeFunction(v, native))
+ return LinkFail(cx, "bad SIMD.type.* operation");
+ return true;
+}
+
+static bool
+ValidateAtomicsBuiltinFunction(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ RootedValue v(cx);
+ if (!GetDataProperty(cx, globalVal, cx->names().Atomics, &v))
+ return false;
+
+ if (!GetDataProperty(cx, v, global.field(), &v))
+ return false;
+
+ Native native = nullptr;
+ switch (global.atomicsBuiltinFunction()) {
+ case AsmJSAtomicsBuiltin_compareExchange: native = atomics_compareExchange; break;
+ case AsmJSAtomicsBuiltin_exchange: native = atomics_exchange; break;
+ case AsmJSAtomicsBuiltin_load: native = atomics_load; break;
+ case AsmJSAtomicsBuiltin_store: native = atomics_store; break;
+ case AsmJSAtomicsBuiltin_add: native = atomics_add; break;
+ case AsmJSAtomicsBuiltin_sub: native = atomics_sub; break;
+ case AsmJSAtomicsBuiltin_and: native = atomics_and; break;
+ case AsmJSAtomicsBuiltin_or: native = atomics_or; break;
+ case AsmJSAtomicsBuiltin_xor: native = atomics_xor; break;
+ case AsmJSAtomicsBuiltin_isLockFree: native = atomics_isLockFree; break;
+ }
+
+ if (!IsNativeFunction(v, native))
+ return LinkFail(cx, "bad Atomics.* builtin function");
+
+ return true;
+}
+
+static bool
+ValidateConstant(JSContext* cx, const AsmJSGlobal& global, HandleValue globalVal)
+{
+ RootedValue v(cx, globalVal);
+
+ if (global.constantKind() == AsmJSGlobal::MathConstant) {
+ if (!GetDataProperty(cx, v, cx->names().Math, &v))
+ return false;
+ }
+
+ if (!GetDataProperty(cx, v, global.field(), &v))
+ return false;
+
+ if (!v.isNumber())
+ return LinkFail(cx, "math / global constant value needs to be a number");
+
+ // NaN != NaN
+ if (IsNaN(global.constantValue())) {
+ if (!IsNaN(v.toNumber()))
+ return LinkFail(cx, "global constant value needs to be NaN");
+ } else {
+ if (v.toNumber() != global.constantValue())
+ return LinkFail(cx, "global constant value mismatch");
+ }
+
+ return true;
+}
+
+static bool
+CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata, HandleValue bufferVal,
+ MutableHandle<ArrayBufferObjectMaybeShared*> buffer)
+{
+ if (metadata.memoryUsage == MemoryUsage::Shared) {
+ if (!IsSharedArrayBuffer(bufferVal))
+ return LinkFail(cx, "shared views can only be constructed onto SharedArrayBuffer");
+ } else {
+ if (!IsArrayBuffer(bufferVal))
+ return LinkFail(cx, "unshared views can only be constructed onto ArrayBuffer");
+ }
+
+ buffer.set(&AsAnyArrayBuffer(bufferVal));
+ uint32_t memoryLength = buffer->byteLength();
+
+ if (!IsValidAsmJSHeapLength(memoryLength)) {
+ UniqueChars msg(
+ JS_smprintf("ArrayBuffer byteLength 0x%x is not a valid heap length. The next "
+ "valid length is 0x%x",
+ memoryLength,
+ RoundUpToNextValidAsmJSHeapLength(memoryLength)));
+ if (!msg)
+ return false;
+ return LinkFail(cx, msg.get());
+ }
+
+ // This check is sufficient without considering the size of the loaded datum because heap
+ // loads and stores start on an aligned boundary and the heap byteLength has larger alignment.
+ MOZ_ASSERT((metadata.minMemoryLength - 1) <= INT32_MAX);
+ if (memoryLength < metadata.minMemoryLength) {
+ UniqueChars msg(
+ JS_smprintf("ArrayBuffer byteLength of 0x%x is less than 0x%x (the size implied "
+ "by const heap accesses).",
+ memoryLength,
+ metadata.minMemoryLength));
+ if (!msg)
+ return false;
+ return LinkFail(cx, msg.get());
+ }
+
+ if (buffer->is<ArrayBufferObject>()) {
+ // On 64-bit, bounds checks are statically removed so the huge guard
+ // region is always necessary. On 32-bit, allocating a guard page
+ // requires reallocating the incoming ArrayBuffer which could trigger
+ // OOM. Thus, only ask for a guard page when SIMD is used since SIMD
+        // allows unaligned memory access (see MaxMemoryAccessSize comment).
+#ifdef WASM_HUGE_MEMORY
+ bool needGuard = true;
+#else
+ bool needGuard = metadata.usesSimd;
+#endif
+ Rooted<ArrayBufferObject*> arrayBuffer(cx, &buffer->as<ArrayBufferObject>());
+ if (!ArrayBufferObject::prepareForAsmJS(cx, arrayBuffer, needGuard))
+ return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
+ } else {
+ if (!buffer->as<SharedArrayBufferObject>().isPreparedForAsmJS())
+ return LinkFail(cx, "SharedArrayBuffer must be created with wasm test mode enabled");
+ }
+
+ MOZ_ASSERT(buffer->isPreparedForAsmJS());
+ return true;
+}
+
+static bool
+GetImports(JSContext* cx, const AsmJSMetadata& metadata, HandleValue globalVal,
+ HandleValue importVal, MutableHandle<FunctionVector> funcImports, ValVector* valImports)
+{
+ Rooted<FunctionVector> ffis(cx, FunctionVector(cx));
+ if (!ffis.resize(metadata.numFFIs))
+ return false;
+
+ for (const AsmJSGlobal& global : metadata.asmJSGlobals) {
+ switch (global.which()) {
+ case AsmJSGlobal::Variable: {
+ Val val;
+ if (!ValidateGlobalVariable(cx, global, importVal, &val))
+ return false;
+ if (!valImports->append(val))
+ return false;
+ break;
+ }
+ case AsmJSGlobal::FFI:
+ if (!ValidateFFI(cx, global, importVal, &ffis))
+ return false;
+ break;
+ case AsmJSGlobal::ArrayView:
+ case AsmJSGlobal::ArrayViewCtor:
+ if (!ValidateArrayView(cx, global, globalVal))
+ return false;
+ break;
+ case AsmJSGlobal::MathBuiltinFunction:
+ if (!ValidateMathBuiltinFunction(cx, global, globalVal))
+ return false;
+ break;
+ case AsmJSGlobal::AtomicsBuiltinFunction:
+ if (!ValidateAtomicsBuiltinFunction(cx, global, globalVal))
+ return false;
+ break;
+ case AsmJSGlobal::Constant:
+ if (!ValidateConstant(cx, global, globalVal))
+ return false;
+ break;
+ case AsmJSGlobal::SimdCtor:
+ if (!ValidateSimdType(cx, global, globalVal))
+ return false;
+ break;
+ case AsmJSGlobal::SimdOp:
+ if (!ValidateSimdOperation(cx, global, globalVal))
+ return false;
+ break;
+ }
+ }
+
+ for (const AsmJSImport& import : metadata.asmJSImports) {
+ if (!funcImports.append(ffis[import.ffiIndex()]))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+TryInstantiate(JSContext* cx, CallArgs args, Module& module, const AsmJSMetadata& metadata,
+ MutableHandleWasmInstanceObject instanceObj, MutableHandleObject exportObj)
+{
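+    // An asm.js module function is linked by calling it with up to three
+    // arguments, conventionally written at the JS level as (illustrative
+    // sketch; the identifier names are arbitrary):
+    //
+    //   var exports = asmModule(stdlib, foreign, heap);
+    //
+    // which supply globalVal, importVal and bufferVal below.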
+ HandleValue globalVal = args.get(0);
+ HandleValue importVal = args.get(1);
+ HandleValue bufferVal = args.get(2);
+
+ RootedArrayBufferObjectMaybeShared buffer(cx);
+ RootedWasmMemoryObject memory(cx);
+ if (module.metadata().usesMemory()) {
+ if (!CheckBuffer(cx, metadata, bufferVal, &buffer))
+ return false;
+
+ memory = WasmMemoryObject::create(cx, buffer, nullptr);
+ if (!memory)
+ return false;
+ }
+
+ ValVector valImports;
+ Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+ if (!GetImports(cx, metadata, globalVal, importVal, &funcs, &valImports))
+ return false;
+
+ RootedWasmTableObject table(cx);
+ if (!module.instantiate(cx, funcs, table, memory, valImports, nullptr, instanceObj))
+ return false;
+
+ RootedValue exportObjVal(cx);
+ if (!JS_GetProperty(cx, instanceObj, InstanceExportField, &exportObjVal))
+ return false;
+
+ MOZ_RELEASE_ASSERT(exportObjVal.isObject());
+ exportObj.set(&exportObjVal.toObject());
+ return true;
+}
+
+static MOZ_MUST_USE bool
+MaybeAppendUTF8Name(JSContext* cx, const char* utf8Chars, MutableHandle<PropertyNameVector> names)
+{
+ if (!utf8Chars)
+ return true;
+
+ UTF8Chars utf8(utf8Chars, strlen(utf8Chars));
+
+ JSAtom* atom = AtomizeUTF8Chars(cx, utf8Chars, strlen(utf8Chars));
+ if (!atom)
+ return false;
+
+ return names.append(atom->asPropertyName());
+}
+
+static bool
+HandleInstantiationFailure(JSContext* cx, CallArgs args, const AsmJSMetadata& metadata)
+{
+ RootedAtom name(cx, args.callee().as<JSFunction>().name());
+
+ if (cx->isExceptionPending())
+ return false;
+
+ ScriptSource* source = metadata.scriptSource.get();
+
+ // Source discarding is allowed to affect JS semantics because it is never
+ // enabled for normal JS content.
+ bool haveSource = source->hasSourceData();
+ if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
+ return false;
+ if (!haveSource) {
+ JS_ReportErrorASCII(cx, "asm.js link failure with source discarding enabled");
+ return false;
+ }
+
+ uint32_t begin = metadata.srcBodyStart; // starts right after 'use asm'
+ uint32_t end = metadata.srcEndBeforeCurly();
+ Rooted<JSFlatString*> src(cx, source->substringDontDeflate(cx, begin, end));
+ if (!src)
+ return false;
+
+ RootedFunction fun(cx, NewScriptedFunction(cx, 0, JSFunction::INTERPRETED_NORMAL,
+ name, /* proto = */ nullptr, gc::AllocKind::FUNCTION,
+ TenuredObject));
+ if (!fun)
+ return false;
+
+ Rooted<PropertyNameVector> formals(cx, PropertyNameVector(cx));
+ if (!MaybeAppendUTF8Name(cx, metadata.globalArgumentName.get(), &formals))
+ return false;
+ if (!MaybeAppendUTF8Name(cx, metadata.importArgumentName.get(), &formals))
+ return false;
+ if (!MaybeAppendUTF8Name(cx, metadata.bufferArgumentName.get(), &formals))
+ return false;
+
+ CompileOptions options(cx);
+ options.setMutedErrors(source->mutedErrors())
+ .setFile(source->filename())
+ .setNoScriptRval(false);
+
+    // The exported function inherits an implicit strict context if the module
+    // itself inherited one (i.e. it is strict without an explicit 'use strict').
+ if (metadata.strict)
+ options.strictOption = true;
+
+ AutoStableStringChars stableChars(cx);
+ if (!stableChars.initTwoByte(cx, src))
+ return false;
+
+ const char16_t* chars = stableChars.twoByteRange().begin().get();
+ SourceBufferHolder::Ownership ownership = stableChars.maybeGiveOwnershipToCaller()
+ ? SourceBufferHolder::GiveOwnership
+ : SourceBufferHolder::NoOwnership;
+ SourceBufferHolder srcBuf(chars, end - begin, ownership);
+ if (!frontend::CompileFunctionBody(cx, &fun, options, formals, srcBuf))
+ return false;
+
+ // Call the function we just recompiled.
+ args.setCallee(ObjectValue(*fun));
+ return InternalCallOrConstruct(cx, args, args.isConstructing() ? CONSTRUCT : NO_CONSTRUCT);
+}
+
+static Module&
+AsmJSModuleFunctionToModule(JSFunction* fun)
+{
+ MOZ_ASSERT(IsAsmJSModule(fun));
+ const Value& v = fun->getExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT);
+ return v.toObject().as<WasmModuleObject>().module();
+}
+
+// Implements the semantics of an asm.js module function that has been successfully validated.
+static bool
+InstantiateAsmJS(JSContext* cx, unsigned argc, JS::Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ JSFunction* callee = &args.callee().as<JSFunction>();
+ Module& module = AsmJSModuleFunctionToModule(callee);
+ const AsmJSMetadata& metadata = module.metadata().asAsmJS();
+
+ RootedWasmInstanceObject instanceObj(cx);
+ RootedObject exportObj(cx);
+ if (!TryInstantiate(cx, args, module, metadata, &instanceObj, &exportObj)) {
+ // Link-time validation checks failed, so reparse the entire asm.js
+ // module from scratch to get normal interpreted bytecode which we can
+ // simply Invoke. Very slow.
+ return HandleInstantiationFailure(cx, args, metadata);
+ }
+
+ args.rval().set(ObjectValue(*exportObj));
+ return true;
+}
+
+static JSFunction*
+NewAsmJSModuleFunction(ExclusiveContext* cx, JSFunction* origFun, HandleObject moduleObj)
+{
+ RootedAtom name(cx, origFun->name());
+
+ JSFunction::Flags flags = origFun->isLambda() ? JSFunction::ASMJS_LAMBDA_CTOR
+ : JSFunction::ASMJS_CTOR;
+ JSFunction* moduleFun =
+ NewNativeConstructor(cx, InstantiateAsmJS, origFun->nargs(), name,
+ gc::AllocKind::FUNCTION_EXTENDED, TenuredObject,
+ flags);
+ if (!moduleFun)
+ return nullptr;
+
+ moduleFun->setExtendedSlot(FunctionExtended::ASMJS_MODULE_SLOT, ObjectValue(*moduleObj));
+
+ MOZ_ASSERT(IsAsmJSModule(moduleFun));
+ return moduleFun;
+}
+
+/*****************************************************************************/
+// Caching and cloning
+
+size_t
+AsmJSGlobal::serializedSize() const
+{
+ return sizeof(pod) +
+ field_.serializedSize();
+}
+
+uint8_t*
+AsmJSGlobal::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ cursor = field_.serialize(cursor);
+ return cursor;
+}
+
+const uint8_t*
+AsmJSGlobal::deserialize(const uint8_t* cursor)
+{
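+    // Each step returns nullptr on failure, so the && chain short-circuits
+    // and the null cursor is returned below to signal the error.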
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod))) &&
+ (cursor = field_.deserialize(cursor));
+ return cursor;
+}
+
+size_t
+AsmJSGlobal::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return field_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+AsmJSMetadata::serializedSize() const
+{
+ return Metadata::serializedSize() +
+ sizeof(pod()) +
+ SerializedVectorSize(asmJSGlobals) +
+ SerializedPodVectorSize(asmJSImports) +
+ SerializedPodVectorSize(asmJSExports) +
+ SerializedVectorSize(asmJSFuncNames) +
+ globalArgumentName.serializedSize() +
+ importArgumentName.serializedSize() +
+ bufferArgumentName.serializedSize();
+}
+
+uint8_t*
+AsmJSMetadata::serialize(uint8_t* cursor) const
+{
+ cursor = Metadata::serialize(cursor);
+ cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+ cursor = SerializeVector(cursor, asmJSGlobals);
+ cursor = SerializePodVector(cursor, asmJSImports);
+ cursor = SerializePodVector(cursor, asmJSExports);
+ cursor = SerializeVector(cursor, asmJSFuncNames);
+ cursor = globalArgumentName.serialize(cursor);
+ cursor = importArgumentName.serialize(cursor);
+ cursor = bufferArgumentName.serialize(cursor);
+ return cursor;
+}
+
+const uint8_t*
+AsmJSMetadata::deserialize(const uint8_t* cursor)
+{
+ (cursor = Metadata::deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+ (cursor = DeserializeVector(cursor, &asmJSGlobals)) &&
+ (cursor = DeserializePodVector(cursor, &asmJSImports)) &&
+ (cursor = DeserializePodVector(cursor, &asmJSExports)) &&
+ (cursor = DeserializeVector(cursor, &asmJSFuncNames)) &&
+ (cursor = globalArgumentName.deserialize(cursor)) &&
+ (cursor = importArgumentName.deserialize(cursor)) &&
+ (cursor = bufferArgumentName.deserialize(cursor));
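+    // asm.js metadata is only deserialized when a module is restored from
+    // the cache, so record the hit (observable via
+    // isAsmJSModuleLoadedFromCache).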
+ cacheResult = CacheResult::Hit;
+ return cursor;
+}
+
+size_t
+AsmJSMetadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return Metadata::sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(asmJSGlobals, mallocSizeOf) +
+ asmJSImports.sizeOfExcludingThis(mallocSizeOf) +
+ asmJSExports.sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(asmJSFuncNames, mallocSizeOf) +
+ globalArgumentName.sizeOfExcludingThis(mallocSizeOf) +
+ importArgumentName.sizeOfExcludingThis(mallocSizeOf) +
+ bufferArgumentName.sizeOfExcludingThis(mallocSizeOf);
+}
+
+namespace {
+
+class ModuleChars
+{
+ protected:
+ uint32_t isFunCtor_;
+ Vector<CacheableChars, 0, SystemAllocPolicy> funCtorArgs_;
+
+ public:
+ static uint32_t beginOffset(AsmJSParser& parser) {
+ return parser.pc->functionBox()->functionNode->pn_pos.begin;
+ }
+
+ static uint32_t endOffset(AsmJSParser& parser) {
+ TokenPos pos(0, 0); // initialize to silence GCC warning
+ MOZ_ALWAYS_TRUE(parser.tokenStream.peekTokenPos(&pos, TokenStream::Operand));
+ return pos.end;
+ }
+};
+
+class ModuleCharsForStore : ModuleChars
+{
+ uint32_t uncompressedSize_;
+ uint32_t compressedSize_;
+ Vector<char, 0, SystemAllocPolicy> compressedBuffer_;
+
+ public:
+ bool init(AsmJSParser& parser) {
+ MOZ_ASSERT(beginOffset(parser) < endOffset(parser));
+
+ uncompressedSize_ = (endOffset(parser) - beginOffset(parser)) * sizeof(char16_t);
+ size_t maxCompressedSize = LZ4::maxCompressedSize(uncompressedSize_);
+ if (maxCompressedSize < uncompressedSize_)
+ return false;
+
+ if (!compressedBuffer_.resize(maxCompressedSize))
+ return false;
+
+ const char16_t* chars = parser.tokenStream.rawCharPtrAt(beginOffset(parser));
+ const char* source = reinterpret_cast<const char*>(chars);
+ size_t compressedSize = LZ4::compress(source, uncompressedSize_, compressedBuffer_.begin());
+ if (!compressedSize || compressedSize > UINT32_MAX)
+ return false;
+
+ compressedSize_ = compressedSize;
+
+ // For a function statement or named function expression:
+ // function f(x,y,z) { abc }
+ // the range [beginOffset, endOffset) captures the source:
+ // f(x,y,z) { abc }
+ // An unnamed function expression captures the same thing, sans 'f'.
+ // Since asm.js modules do not contain any free variables, equality of
+ // [beginOffset, endOffset) is sufficient to guarantee identical code
+ // generation, modulo Assumptions.
+ //
+        // For functions created with 'new Function', function arguments are
+        // not present in the source, so we must explicitly serialize and
+        // match the formals as a Vector of PropertyName.
+ isFunCtor_ = parser.pc->isStandaloneFunctionBody();
+ if (isFunCtor_) {
+ unsigned numArgs;
+ ParseNode* functionNode = parser.pc->functionBox()->functionNode;
+ ParseNode* arg = FunctionFormalParametersList(functionNode, &numArgs);
+ for (unsigned i = 0; i < numArgs; i++, arg = arg->pn_next) {
+ UniqueChars name = StringToNewUTF8CharsZ(nullptr, *arg->name());
+ if (!name || !funCtorArgs_.append(Move(name)))
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ size_t serializedSize() const {
+ return sizeof(uint32_t) +
+ sizeof(uint32_t) +
+ compressedSize_ +
+ sizeof(uint32_t) +
+ (isFunCtor_ ? SerializedVectorSize(funCtorArgs_) : 0);
+ }
+
+ uint8_t* serialize(uint8_t* cursor) const {
+ cursor = WriteScalar<uint32_t>(cursor, uncompressedSize_);
+ cursor = WriteScalar<uint32_t>(cursor, compressedSize_);
+ cursor = WriteBytes(cursor, compressedBuffer_.begin(), compressedSize_);
+ cursor = WriteScalar<uint32_t>(cursor, isFunCtor_);
+ if (isFunCtor_)
+ cursor = SerializeVector(cursor, funCtorArgs_);
+ return cursor;
+ }
+};
+
+class ModuleCharsForLookup : ModuleChars
+{
+ Vector<char16_t, 0, SystemAllocPolicy> chars_;
+
+ public:
+ const uint8_t* deserialize(const uint8_t* cursor) {
+ uint32_t uncompressedSize;
+ cursor = ReadScalar<uint32_t>(cursor, &uncompressedSize);
+
+ uint32_t compressedSize;
+ cursor = ReadScalar<uint32_t>(cursor, &compressedSize);
+
+ if (!chars_.resize(uncompressedSize / sizeof(char16_t)))
+ return nullptr;
+
+ const char* source = reinterpret_cast<const char*>(cursor);
+ char* dest = reinterpret_cast<char*>(chars_.begin());
+ if (!LZ4::decompress(source, dest, uncompressedSize))
+ return nullptr;
+
+ cursor += compressedSize;
+
+ cursor = ReadScalar<uint32_t>(cursor, &isFunCtor_);
+ if (isFunCtor_)
+ cursor = DeserializeVector(cursor, &funCtorArgs_);
+
+ return cursor;
+ }
+
+ bool match(AsmJSParser& parser) const {
+ const char16_t* parseBegin = parser.tokenStream.rawCharPtrAt(beginOffset(parser));
+ const char16_t* parseLimit = parser.tokenStream.rawLimit();
+ MOZ_ASSERT(parseLimit >= parseBegin);
+ if (uint32_t(parseLimit - parseBegin) < chars_.length())
+ return false;
+ if (!PodEqual(chars_.begin(), parseBegin, chars_.length()))
+ return false;
+ if (isFunCtor_ != parser.pc->isStandaloneFunctionBody())
+ return false;
+ if (isFunCtor_) {
+            // For function statements, the closing } is included as the last
+            // character of the matched source. For the Function constructor,
+            // parsing terminates at EOF, which we must check explicitly. This
+            // prevents
+ // new Function('"use asm"; function f() {} return f')
+ // from incorrectly matching
+ // new Function('"use asm"; function f() {} return ff')
+ if (parseBegin + chars_.length() != parseLimit)
+ return false;
+ unsigned numArgs;
+ ParseNode* functionNode = parser.pc->functionBox()->functionNode;
+ ParseNode* arg = FunctionFormalParametersList(functionNode, &numArgs);
+ if (funCtorArgs_.length() != numArgs)
+ return false;
+ for (unsigned i = 0; i < funCtorArgs_.length(); i++, arg = arg->pn_next) {
+ UniqueChars name = StringToNewUTF8CharsZ(nullptr, *arg->name());
+ if (!name || strcmp(funCtorArgs_[i].get(), name.get()))
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+struct ScopedCacheEntryOpenedForWrite
+{
+ ExclusiveContext* cx;
+ const size_t serializedSize;
+ uint8_t* memory;
+ intptr_t handle;
+
+ ScopedCacheEntryOpenedForWrite(ExclusiveContext* cx, size_t serializedSize)
+ : cx(cx), serializedSize(serializedSize), memory(nullptr), handle(-1)
+ {}
+
+ ~ScopedCacheEntryOpenedForWrite() {
+ if (memory)
+ cx->asmJSCacheOps().closeEntryForWrite(serializedSize, memory, handle);
+ }
+};
+
+struct ScopedCacheEntryOpenedForRead
+{
+ ExclusiveContext* cx;
+ size_t serializedSize;
+ const uint8_t* memory;
+ intptr_t handle;
+
+ explicit ScopedCacheEntryOpenedForRead(ExclusiveContext* cx)
+ : cx(cx), serializedSize(0), memory(nullptr), handle(0)
+ {}
+
+ ~ScopedCacheEntryOpenedForRead() {
+ if (memory)
+ cx->asmJSCacheOps().closeEntryForRead(serializedSize, memory, handle);
+ }
+};
+
+} // unnamed namespace
+
+static JS::AsmJSCacheResult
+StoreAsmJSModuleInCache(AsmJSParser& parser, Module& module, ExclusiveContext* cx)
+{
+ ModuleCharsForStore moduleChars;
+ if (!moduleChars.init(parser))
+ return JS::AsmJSCache_InternalError;
+
+ size_t bytecodeSize, compiledSize;
+ module.serializedSize(&bytecodeSize, &compiledSize);
+ MOZ_RELEASE_ASSERT(bytecodeSize == 0);
+ MOZ_RELEASE_ASSERT(compiledSize <= UINT32_MAX);
+
+ size_t serializedSize = sizeof(uint32_t) +
+ compiledSize +
+ moduleChars.serializedSize();
+
+ JS::OpenAsmJSCacheEntryForWriteOp open = cx->asmJSCacheOps().openEntryForWrite;
+ if (!open)
+ return JS::AsmJSCache_Disabled_Internal;
+
+ const char16_t* begin = parser.tokenStream.rawCharPtrAt(ModuleChars::beginOffset(parser));
+ const char16_t* end = parser.tokenStream.rawCharPtrAt(ModuleChars::endOffset(parser));
+ bool installed = parser.options().installedFile;
+
+ ScopedCacheEntryOpenedForWrite entry(cx, serializedSize);
+ JS::AsmJSCacheResult openResult =
+ open(cx->global(), installed, begin, end, serializedSize, &entry.memory, &entry.handle);
+ if (openResult != JS::AsmJSCache_Success)
+ return openResult;
+
+ uint8_t* cursor = entry.memory;
+
+    // Everything serialized before the Module must not change incompatibly
+    // between any two builds (regardless of platform, architecture, ...).
+    // (Module::assumptionsMatch() guards everything in the Module and
+    // afterwards.)
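+    //
+    // The entry written below is laid out roughly as:
+    //   uint32_t compiledSize
+    //   uint8_t  module[compiledSize]   (the serialized compiled Module)
+    //   ModuleCharsForStore             (sizes, LZ4 data, optional formals)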
+ cursor = WriteScalar<uint32_t>(cursor, compiledSize);
+
+ module.serialize(/* bytecodeBegin = */ nullptr, /* bytecodeSize = */ 0, cursor, compiledSize);
+ cursor += compiledSize;
+
+ cursor = moduleChars.serialize(cursor);
+
+ MOZ_RELEASE_ASSERT(cursor == entry.memory + serializedSize);
+
+ return JS::AsmJSCache_Success;
+}
+
+static bool
+LookupAsmJSModuleInCache(ExclusiveContext* cx, AsmJSParser& parser, bool* loadedFromCache,
+ SharedModule* module, UniqueChars* compilationTimeReport)
+{
+ int64_t before = PRMJ_Now();
+
+ *loadedFromCache = false;
+
+ JS::OpenAsmJSCacheEntryForReadOp open = cx->asmJSCacheOps().openEntryForRead;
+ if (!open)
+ return true;
+
+ const char16_t* begin = parser.tokenStream.rawCharPtrAt(ModuleChars::beginOffset(parser));
+ const char16_t* limit = parser.tokenStream.rawLimit();
+
+ ScopedCacheEntryOpenedForRead entry(cx);
+ if (!open(cx->global(), begin, limit, &entry.serializedSize, &entry.memory, &entry.handle))
+ return true;
+
+ size_t remain = entry.serializedSize;
+ const uint8_t* cursor = entry.memory;
+
+ uint32_t compiledSize;
+ cursor = ReadScalarChecked<uint32_t>(cursor, &remain, &compiledSize);
+ if (!cursor)
+ return true;
+
+ Assumptions assumptions;
+ if (!assumptions.initBuildIdFromContext(cx))
+ return false;
+
+ if (!Module::assumptionsMatch(assumptions, cursor, remain))
+ return true;
+
+ MutableAsmJSMetadata asmJSMetadata = cx->new_<AsmJSMetadata>();
+ if (!asmJSMetadata)
+ return false;
+
+ *module = Module::deserialize(/* bytecodeBegin = */ nullptr, /* bytecodeSize = */ 0,
+ cursor, compiledSize, asmJSMetadata.get());
+ if (!*module) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ cursor += compiledSize;
+
+ // Due to the hash comparison made by openEntryForRead, this should succeed
+ // with high probability.
+ ModuleCharsForLookup moduleChars;
+ cursor = moduleChars.deserialize(cursor);
+ if (!moduleChars.match(parser))
+ return true;
+
+    // Don't punish release users by crashing if there is a programmer error
+    // here; just gracefully return with a cache miss.
+#ifdef NIGHTLY_BUILD
+ MOZ_RELEASE_ASSERT(cursor == entry.memory + entry.serializedSize);
+#endif
+ if (cursor != entry.memory + entry.serializedSize)
+ return true;
+
+ // See AsmJSMetadata comment as well as ModuleValidator::init().
+ asmJSMetadata->srcStart = parser.pc->functionBox()->functionNode->pn_body->pn_pos.begin;
+ asmJSMetadata->srcBodyStart = parser.tokenStream.currentToken().pos.end;
+ asmJSMetadata->strict = parser.pc->sc()->strict() && !parser.pc->sc()->hasExplicitUseStrict();
+ asmJSMetadata->scriptSource.reset(parser.ss);
+
+ if (!parser.tokenStream.advance(asmJSMetadata->srcEndBeforeCurly()))
+ return false;
+
+ int64_t after = PRMJ_Now();
+ int ms = (after - before) / PRMJ_USEC_PER_MSEC;
+ *compilationTimeReport = UniqueChars(JS_smprintf("loaded from cache in %dms", ms));
+ if (!*compilationTimeReport)
+ return false;
+
+ *loadedFromCache = true;
+ return true;
+}
+
+/*****************************************************************************/
+// Top-level js::CompileAsmJS
+
+static bool
+NoExceptionPending(ExclusiveContext* cx)
+{
+ return !cx->isJSContext() || !cx->asJSContext()->isExceptionPending();
+}
+
+static bool
+Warn(AsmJSParser& parser, int errorNumber, const char* str)
+{
+ ParseReportKind reportKind = parser.options().throwOnAsmJSValidationFailureOption &&
+ errorNumber == JSMSG_USE_ASM_TYPE_FAIL
+ ? ParseError
+ : ParseWarning;
+ parser.reportNoOffset(reportKind, /* strict = */ false, errorNumber, str ? str : "");
+ return false;
+}
+
+static bool
+EstablishPreconditions(ExclusiveContext* cx, AsmJSParser& parser)
+{
+ if (!HasCompilerSupport(cx))
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by lack of compiler support");
+
+ switch (parser.options().asmJSOption) {
+ case AsmJSOption::Disabled:
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by 'asmjs' runtime option");
+ case AsmJSOption::DisabledByDebugger:
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by debugger");
+ case AsmJSOption::Enabled:
+ break;
+ }
+
+ if (parser.pc->isGenerator())
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by generator context");
+
+ if (parser.pc->isArrowFunction())
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by arrow function context");
+
+ // Class constructors are also methods
+ if (parser.pc->isMethod())
+ return Warn(parser, JSMSG_USE_ASM_TYPE_FAIL, "Disabled by class constructor or method context");
+
+ return true;
+}
+
+static UniqueChars
+BuildConsoleMessage(ExclusiveContext* cx, unsigned time, JS::AsmJSCacheResult cacheResult)
+{
+#ifndef JS_MORE_DETERMINISTIC
+ const char* cacheString = "";
+ switch (cacheResult) {
+ case JS::AsmJSCache_Success:
+ cacheString = "stored in cache";
+ break;
+ case JS::AsmJSCache_ModuleTooSmall:
+ cacheString = "not stored in cache (too small to benefit)";
+ break;
+ case JS::AsmJSCache_SynchronousScript:
+ cacheString = "unable to cache asm.js in synchronous scripts; try loading "
+ "asm.js via <script async> or createElement('script')";
+ break;
+ case JS::AsmJSCache_QuotaExceeded:
+ cacheString = "not enough temporary storage quota to store in cache";
+ break;
+ case JS::AsmJSCache_StorageInitFailure:
+ cacheString = "storage initialization failed (consider filing a bug)";
+ break;
+ case JS::AsmJSCache_Disabled_Internal:
+ cacheString = "caching disabled by internal configuration (consider filing a bug)";
+ break;
+ case JS::AsmJSCache_Disabled_ShellFlags:
+ cacheString = "caching disabled by missing command-line arguments";
+ break;
+ case JS::AsmJSCache_Disabled_JitInspector:
+ cacheString = "caching disabled by active JIT inspector";
+ break;
+ case JS::AsmJSCache_InternalError:
+ cacheString = "unable to store in cache due to internal error (consider filing a bug)";
+ break;
+ case JS::AsmJSCache_Disabled_PrivateBrowsing:
+ cacheString = "caching disabled by private browsing mode";
+ break;
+ case JS::AsmJSCache_ESR52:
+ cacheString = "caching disabled in Firefox ESR52";
+ break;
+ case JS::AsmJSCache_LIMIT:
+ MOZ_CRASH("bad AsmJSCacheResult");
+ break;
+ }
+
+ return UniqueChars(JS_smprintf("total compilation time %dms; %s", time, cacheString));
+#else
+ return DuplicateString("");
+#endif
+}
+
+bool
+js::CompileAsmJS(ExclusiveContext* cx, AsmJSParser& parser, ParseNode* stmtList, bool* validated)
+{
+ *validated = false;
+
+ // Various conditions disable asm.js optimizations.
+ if (!EstablishPreconditions(cx, parser))
+ return NoExceptionPending(cx);
+
+ // Before spending any time parsing the module, try to look it up in the
+ // embedding's cache using the chars about to be parsed as the key.
+ bool loadedFromCache;
+ SharedModule module;
+ UniqueChars message;
+ if (!LookupAsmJSModuleInCache(cx, parser, &loadedFromCache, &module, &message))
+ return false;
+
+ // If not present in the cache, parse, validate and generate code in a
+ // single linear pass over the chars of the asm.js module.
+ if (!loadedFromCache) {
+ // "Checking" parses, validates and compiles, producing a fully compiled
+ // WasmModuleObject as result.
+ unsigned time;
+ module = CheckModule(cx, parser, stmtList, &time);
+ if (!module)
+ return NoExceptionPending(cx);
+
+ // Try to store the AsmJSModule in the embedding's cache. The
+ // AsmJSModule must be stored before static linking since static linking
+ // specializes the AsmJSModule to the current process's address space
+ // and therefore must be executed after a cache hit.
+ JS::AsmJSCacheResult cacheResult = StoreAsmJSModuleInCache(parser, *module, cx);
+
+ // Build the string message to display in the developer console.
+ message = BuildConsoleMessage(cx, time, cacheResult);
+ if (!message)
+ return NoExceptionPending(cx);
+ }
+
+ // Hand over ownership to a GC object wrapper which can then be referenced
+ // from the module function.
+ Rooted<WasmModuleObject*> moduleObj(cx, WasmModuleObject::create(cx, *module));
+ if (!moduleObj)
+ return false;
+
+ // The module function dynamically links the AsmJSModule when called and
+ // generates a set of functions wrapping all the exports.
+ FunctionBox* funbox = parser.pc->functionBox();
+ RootedFunction moduleFun(cx, NewAsmJSModuleFunction(cx, funbox->function(), moduleObj));
+ if (!moduleFun)
+ return false;
+
+ // Finished! Clobber the default function created by the parser with the new
+ // asm.js module function. Special cases in the bytecode emitter avoid
+ // generating bytecode for asm.js functions, allowing this asm.js module
+ // function to be the finished result.
+ MOZ_ASSERT(funbox->function()->isInterpreted());
+ funbox->object = moduleFun;
+
+ // Success! Write to the console with a "warning" message.
+ *validated = true;
+ Warn(parser, JSMSG_USE_ASM_TYPE_OK, message.get());
+ return NoExceptionPending(cx);
+}
+
+/*****************************************************************************/
+// asm.js testing functions
+
+bool
+js::IsAsmJSModuleNative(Native native)
+{
+ return native == InstantiateAsmJS;
+}
+
+bool
+js::IsAsmJSModule(JSFunction* fun)
+{
+ return fun->maybeNative() == InstantiateAsmJS;
+}
+
+bool
+js::IsAsmJSFunction(JSFunction* fun)
+{
+ if (IsExportedFunction(fun))
+ return ExportedFunctionToInstance(fun).metadata().isAsmJS();
+ return false;
+}
+
+bool
+js::IsAsmJSStrictModeModuleOrFunction(JSFunction* fun)
+{
+ if (IsAsmJSModule(fun))
+ return AsmJSModuleFunctionToModule(fun).metadata().asAsmJS().strict;
+
+ if (IsAsmJSFunction(fun))
+ return ExportedFunctionToInstance(fun).metadata().asAsmJS().strict;
+
+ return false;
+}
+
+bool
+js::IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ // See EstablishPreconditions.
+ bool available = HasCompilerSupport(cx) && cx->options().asmJS();
+
+ args.rval().set(BooleanValue(available));
+ return true;
+}
+
+static JSFunction*
+MaybeWrappedNativeFunction(const Value& v)
+{
+ if (!v.isObject())
+ return nullptr;
+
+ JSObject* obj = CheckedUnwrap(&v.toObject());
+ if (!obj)
+ return nullptr;
+
+ if (!obj->is<JSFunction>())
+ return nullptr;
+
+ return &obj->as<JSFunction>();
+}
+
+bool
+js::IsAsmJSModule(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0)))
+ rval = IsAsmJSModule(fun);
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+bool
+js::IsAsmJSFunction(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ bool rval = false;
+ if (JSFunction* fun = MaybeWrappedNativeFunction(args.get(0)))
+ rval = IsAsmJSFunction(fun);
+
+ args.rval().set(BooleanValue(rval));
+ return true;
+}
+
+bool
+js::IsAsmJSModuleLoadedFromCache(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ JSFunction* fun = MaybeWrappedNativeFunction(args.get(0));
+ if (!fun || !IsAsmJSModule(fun)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_USE_ASM_TYPE_FAIL,
+ "argument passed to isAsmJSModuleLoadedFromCache is not a "
+ "validated asm.js module");
+ return false;
+ }
+
+ bool loadedFromCache =
+ AsmJSModuleFunctionToModule(fun).metadata().asAsmJS().cacheResult == CacheResult::Hit;
+
+ args.rval().set(BooleanValue(loadedFromCache));
+ return true;
+}
+
+/*****************************************************************************/
+// asm.js toString/toSource support
+
+static MOZ_MUST_USE bool
+MaybeAppendUTF8Chars(JSContext* cx, const char* sep, const char* utf8Chars, StringBuffer* sb)
+{
+ if (!utf8Chars)
+ return true;
+
+ UTF8Chars utf8(utf8Chars, strlen(utf8Chars));
+
+ size_t length;
+ UniqueTwoByteChars twoByteChars(UTF8CharsToNewTwoByteCharsZ(cx, utf8, &length).get());
+ if (!twoByteChars)
+ return false;
+
+ return sb->append(sep, strlen(sep)) &&
+ sb->append(twoByteChars.get(), length);
+}
+
+JSString*
+js::AsmJSModuleToString(JSContext* cx, HandleFunction fun, bool addParenToLambda)
+{
+ MOZ_ASSERT(IsAsmJSModule(fun));
+
+ const AsmJSMetadata& metadata = AsmJSModuleFunctionToModule(fun).metadata().asAsmJS();
+ uint32_t begin = metadata.srcStart;
+ uint32_t end = metadata.srcEndAfterCurly();
+ ScriptSource* source = metadata.scriptSource.get();
+
+ StringBuffer out(cx);
+
+ if (addParenToLambda && fun->isLambda() && !out.append("("))
+ return nullptr;
+
+ if (!out.append("function "))
+ return nullptr;
+
+ if (fun->name() && !out.append(fun->name()))
+ return nullptr;
+
+ bool haveSource = source->hasSourceData();
+ if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
+ return nullptr;
+
+ if (!haveSource) {
+ if (!out.append("() {\n [sourceless code]\n}"))
+ return nullptr;
+ } else {
+ // Whether the function has been created with a Function ctor
+ bool funCtor = begin == 0 && end == source->length() && source->argumentsNotIncluded();
+ if (funCtor) {
+            // Functions created with the Function constructor don't have arguments in their source.
+ if (!out.append("("))
+ return nullptr;
+
+ if (!MaybeAppendUTF8Chars(cx, "", metadata.globalArgumentName.get(), &out))
+ return nullptr;
+ if (!MaybeAppendUTF8Chars(cx, ", ", metadata.importArgumentName.get(), &out))
+ return nullptr;
+ if (!MaybeAppendUTF8Chars(cx, ", ", metadata.bufferArgumentName.get(), &out))
+ return nullptr;
+
+ if (!out.append(") {\n"))
+ return nullptr;
+ }
+
+ Rooted<JSFlatString*> src(cx, source->substring(cx, begin, end));
+ if (!src)
+ return nullptr;
+
+ if (!out.append(src))
+ return nullptr;
+
+ if (funCtor && !out.append("\n}"))
+ return nullptr;
+ }
+
+ if (addParenToLambda && fun->isLambda() && !out.append(")"))
+ return nullptr;
+
+ return out.finishString();
+}
+
+JSString*
+js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
+{
+ MOZ_ASSERT(IsAsmJSFunction(fun));
+
+ const AsmJSMetadata& metadata = ExportedFunctionToInstance(fun).metadata().asAsmJS();
+ const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToFuncIndex(fun));
+
+ uint32_t begin = metadata.srcStart + f.startOffsetInModule();
+ uint32_t end = metadata.srcStart + f.endOffsetInModule();
+
+ ScriptSource* source = metadata.scriptSource.get();
+ StringBuffer out(cx);
+
+ if (!out.append("function "))
+ return nullptr;
+
+ bool haveSource = source->hasSourceData();
+ if (!haveSource && !JSScript::loadSource(cx, source, &haveSource))
+ return nullptr;
+
+ if (!haveSource) {
+ // asm.js functions can't be anonymous
+ MOZ_ASSERT(fun->name());
+ if (!out.append(fun->name()))
+ return nullptr;
+ if (!out.append("() {\n [sourceless code]\n}"))
+ return nullptr;
+ } else {
+        // asm.js functions cannot have been created with the Function
+        // constructor, since they belong within a module.
+ MOZ_ASSERT(!(begin == 0 && end == source->length() && source->argumentsNotIncluded()));
+
+ Rooted<JSFlatString*> src(cx, source->substring(cx, begin, end));
+ if (!src)
+ return nullptr;
+ if (!out.append(src))
+ return nullptr;
+ }
+
+ return out.finishString();
+}
+
+bool
+js::IsValidAsmJSHeapLength(uint32_t length)
+{
+ if (length < MinHeapLength)
+ return false;
+
+ return wasm::IsValidARMImmediate(length);
+}
diff --git a/js/src/wasm/AsmJS.h b/js/src/wasm/AsmJS.h
new file mode 100644
index 0000000000..1395966d21
--- /dev/null
+++ b/js/src/wasm/AsmJS.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef asmjs_asmjs_h
+#define asmjs_asmjs_h
+
+#include "NamespaceImports.h"
+
+namespace js {
+
+class ExclusiveContext;
+namespace frontend {
+ template <typename ParseHandler> class Parser;
+ class ParseContext;
+ class FullParseHandler;
+ class ParseNode;
+}
+typedef frontend::Parser<frontend::FullParseHandler> AsmJSParser;
+
+// This function takes over parsing of a function starting with "use asm". The
+// return value indicates whether an error was reported; if so, the caller
+// should propagate it. Even if no error was reported, the function may still
+// have failed to validate as asm.js. In that case, the parser.tokenStream has
+// been advanced an indeterminate amount and the entire function should be
+// reparsed from the beginning.
+
+extern MOZ_MUST_USE bool
+CompileAsmJS(ExclusiveContext* cx, AsmJSParser& parser, frontend::ParseNode* stmtList,
+ bool* validated);
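+
+// A typical caller checks the outcome roughly as follows (illustrative
+// sketch; the surrounding parser state is assumed, not shown):
+//
+//   bool validated;
+//   if (!CompileAsmJS(cx, parser, stmtList, &validated))
+//       return false;   // an error was reported; propagate it
+//   if (!validated)
+//       ...             // fall back: reparse the function as ordinary JS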
+
+// asm.js module/export queries:
+
+extern bool
+IsAsmJSModuleNative(JSNative native);
+
+extern bool
+IsAsmJSModule(JSFunction* fun);
+
+extern bool
+IsAsmJSFunction(JSFunction* fun);
+
+extern bool
+IsAsmJSStrictModeModuleOrFunction(JSFunction* fun);
+
+// asm.js testing natives:
+
+extern bool
+IsAsmJSCompilationAvailable(JSContext* cx, unsigned argc, JS::Value* vp);
+
+extern bool
+IsAsmJSModule(JSContext* cx, unsigned argc, JS::Value* vp);
+
+extern bool
+IsAsmJSModuleLoadedFromCache(JSContext* cx, unsigned argc, Value* vp);
+
+extern bool
+IsAsmJSFunction(JSContext* cx, unsigned argc, JS::Value* vp);
+
+// asm.js toString/toSource support:
+
+extern JSString*
+AsmJSFunctionToString(JSContext* cx, HandleFunction fun);
+
+extern JSString*
+AsmJSModuleToString(JSContext* cx, HandleFunction fun, bool addParenToLambda);
+
+// asm.js heap:
+
+extern bool
+IsValidAsmJSHeapLength(uint32_t length);
+
+} // namespace js
+
+#endif // asmjs_asmjs_h
diff --git a/js/src/wasm/WasmAST.h b/js/src/wasm/WasmAST.h
new file mode 100644
index 0000000000..d442fa19a0
--- /dev/null
+++ b/js/src/wasm/WasmAST.h
@@ -0,0 +1,1038 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasmast_h
+#define wasmast_h
+
+#include "ds/LifoAlloc.h"
+#include "js/HashTable.h"
+#include "js/Vector.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+const uint32_t AstNoIndex = UINT32_MAX;
+const unsigned AST_LIFO_DEFAULT_CHUNK_SIZE = 4096;
+
+/*****************************************************************************/
+// wasm AST
+
+class AstExpr;
+
+template <class T>
+using AstVector = mozilla::Vector<T, 0, LifoAllocPolicy<Fallible>>;
+
+template <class K, class V, class HP>
+using AstHashMap = HashMap<K, V, HP, LifoAllocPolicy<Fallible>>;
+
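+// AstName is a non-owning view of a name: it points either at a char16_t
+// string literal (e.g. AstName(u"type")) or at text that must outlive the
+// AST, so values are cheap to copy and pass by value.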
+class AstName
+{
+ const char16_t* begin_;
+ const char16_t* end_;
+ public:
+ template <size_t Length>
+ explicit AstName(const char16_t (&str)[Length]) : begin_(str), end_(str + Length - 1) {
+ MOZ_ASSERT(str[Length - 1] == u'\0');
+ }
+
+ AstName(const char16_t* begin, size_t length) : begin_(begin), end_(begin + length) {}
+ AstName() : begin_(nullptr), end_(nullptr) {}
+ const char16_t* begin() const { return begin_; }
+ const char16_t* end() const { return end_; }
+ size_t length() const { return end_ - begin_; }
+ bool empty() const { return begin_ == nullptr; }
+
+ bool operator==(AstName rhs) const {
+ if (length() != rhs.length())
+ return false;
+ if (begin() == rhs.begin())
+ return true;
+ return EqualChars(begin(), rhs.begin(), length());
+ }
+ bool operator!=(AstName rhs) const {
+ return !(*this == rhs);
+ }
+};
+
+class AstRef
+{
+ AstName name_;
+ uint32_t index_;
+
+ public:
+ AstRef() : index_(AstNoIndex) {
+ MOZ_ASSERT(isInvalid());
+ }
+ explicit AstRef(AstName name) : name_(name), index_(AstNoIndex) {
+ MOZ_ASSERT(!isInvalid());
+ }
+ explicit AstRef(uint32_t index) : index_(index) {
+ MOZ_ASSERT(!isInvalid());
+ }
+ bool isInvalid() const {
+ return name_.empty() && index_ == AstNoIndex;
+ }
+ AstName name() const {
+ return name_;
+ }
+ size_t index() const {
+ MOZ_ASSERT(index_ != AstNoIndex);
+ return index_;
+ }
+ void setIndex(uint32_t index) {
+ MOZ_ASSERT(index_ == AstNoIndex);
+ index_ = index;
+ }
+};
+
+struct AstNameHasher
+{
+ typedef const AstName Lookup;
+ static js::HashNumber hash(Lookup l) {
+ return mozilla::HashString(l.begin(), l.length());
+ }
+ static bool match(const AstName key, Lookup lookup) {
+ return key == lookup;
+ }
+};
+
+using AstNameMap = AstHashMap<AstName, uint32_t, AstNameHasher>;
+
+typedef AstVector<ValType> AstValTypeVector;
+typedef AstVector<AstExpr*> AstExprVector;
+typedef AstVector<AstName> AstNameVector;
+typedef AstVector<AstRef> AstRefVector;
+
+struct AstBase
+{
+ void* operator new(size_t numBytes, LifoAlloc& astLifo) throw() {
+ return astLifo.alloc(numBytes);
+ }
+};
+
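+// AstSig doubles as its own hash policy (see the hash/match statics below),
+// so an AstHashMap keyed on signatures can deduplicate structurally equal
+// signatures; AstModule's SigMap uses it this way.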
+class AstSig : public AstBase
+{
+ AstName name_;
+ AstValTypeVector args_;
+ ExprType ret_;
+
+ public:
+ explicit AstSig(LifoAlloc& lifo)
+ : args_(lifo),
+ ret_(ExprType::Void)
+ {}
+ AstSig(AstValTypeVector&& args, ExprType ret)
+ : args_(Move(args)),
+ ret_(ret)
+ {}
+ AstSig(AstName name, AstSig&& rhs)
+ : name_(name),
+ args_(Move(rhs.args_)),
+ ret_(rhs.ret_)
+ {}
+ const AstValTypeVector& args() const {
+ return args_;
+ }
+ ExprType ret() const {
+ return ret_;
+ }
+ AstName name() const {
+ return name_;
+ }
+ bool operator==(const AstSig& rhs) const {
+ return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
+ }
+
+ typedef const AstSig& Lookup;
+ static HashNumber hash(Lookup sig) {
+ return AddContainerToHash(sig.args(), HashNumber(sig.ret()));
+ }
+ static bool match(const AstSig* lhs, Lookup rhs) {
+ return *lhs == rhs;
+ }
+};
+
+const uint32_t AstNodeUnknownOffset = 0;
+
+class AstNode : public AstBase
+{
+ uint32_t offset_; // if applicable, offset in the binary format file
+
+ public:
+ AstNode() : offset_(AstNodeUnknownOffset) {}
+
+ uint32_t offset() const { return offset_; }
+ void setOffset(uint32_t offset) { offset_ = offset; }
+};
+
+enum class AstExprKind
+{
+ BinaryOperator,
+ Block,
+ Branch,
+ BranchTable,
+ Call,
+ CallIndirect,
+ ComparisonOperator,
+ Const,
+ ConversionOperator,
+ CurrentMemory,
+ Drop,
+ First,
+ GetGlobal,
+ GetLocal,
+ GrowMemory,
+ If,
+ Load,
+ Nop,
+ Pop,
+ Return,
+ SetGlobal,
+ SetLocal,
+ TeeLocal,
+ Store,
+ TernaryOperator,
+ UnaryOperator,
+ Unreachable
+};
+
+class AstExpr : public AstNode
+{
+ const AstExprKind kind_;
+ ExprType type_;
+
+ protected:
+ AstExpr(AstExprKind kind, ExprType type)
+ : kind_(kind), type_(type)
+ {}
+
+ public:
+ AstExprKind kind() const { return kind_; }
+
+ bool isVoid() const { return IsVoid(type_); }
+
+    // Note that, for nodes other than blocks and block-like things, this
+    // may return ExprType::Limit even when the node's actual type is
+    // non-void.
+ ExprType type() const { return type_; }
+
+ template <class T>
+ T& as() {
+ MOZ_ASSERT(kind() == T::Kind);
+ return static_cast<T&>(*this);
+ }
+};
+
+struct AstNop : AstExpr
+{
+ static const AstExprKind Kind = AstExprKind::Nop;
+ AstNop()
+ : AstExpr(AstExprKind::Nop, ExprType::Void)
+ {}
+};
+
+struct AstUnreachable : AstExpr
+{
+ static const AstExprKind Kind = AstExprKind::Unreachable;
+ AstUnreachable()
+ : AstExpr(AstExprKind::Unreachable, ExprType::Void)
+ {}
+};
+
+class AstDrop : public AstExpr
+{
+ AstExpr& value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Drop;
+ explicit AstDrop(AstExpr& value)
+ : AstExpr(AstExprKind::Drop, ExprType::Void),
+ value_(value)
+ {}
+ AstExpr& value() const {
+ return value_;
+ }
+};
+
+class AstConst : public AstExpr
+{
+ const Val val_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Const;
+ explicit AstConst(Val val)
+ : AstExpr(Kind, ExprType::Limit),
+ val_(val)
+ {}
+ Val val() const { return val_; }
+};
+
+class AstGetLocal : public AstExpr
+{
+ AstRef local_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::GetLocal;
+ explicit AstGetLocal(AstRef local)
+ : AstExpr(Kind, ExprType::Limit),
+ local_(local)
+ {}
+ AstRef& local() {
+ return local_;
+ }
+};
+
+class AstSetLocal : public AstExpr
+{
+ AstRef local_;
+ AstExpr& value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::SetLocal;
+ AstSetLocal(AstRef local, AstExpr& value)
+ : AstExpr(Kind, ExprType::Void),
+ local_(local),
+ value_(value)
+ {}
+ AstRef& local() {
+ return local_;
+ }
+ AstExpr& value() const {
+ return value_;
+ }
+};
+
+class AstGetGlobal : public AstExpr
+{
+ AstRef global_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::GetGlobal;
+ explicit AstGetGlobal(AstRef global)
+ : AstExpr(Kind, ExprType::Limit),
+ global_(global)
+ {}
+ AstRef& global() {
+ return global_;
+ }
+};
+
+class AstSetGlobal : public AstExpr
+{
+ AstRef global_;
+ AstExpr& value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::SetGlobal;
+ AstSetGlobal(AstRef global, AstExpr& value)
+ : AstExpr(Kind, ExprType::Void),
+ global_(global),
+ value_(value)
+ {}
+ AstRef& global() {
+ return global_;
+ }
+ AstExpr& value() const {
+ return value_;
+ }
+};
+
+class AstTeeLocal : public AstExpr
+{
+ AstRef local_;
+ AstExpr& value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::TeeLocal;
+ AstTeeLocal(AstRef local, AstExpr& value)
+ : AstExpr(Kind, ExprType::Limit),
+ local_(local),
+ value_(value)
+ {}
+ AstRef& local() {
+ return local_;
+ }
+ AstExpr& value() const {
+ return value_;
+ }
+};
+
+class AstBlock : public AstExpr
+{
+ Op op_;
+ AstName name_;
+ AstExprVector exprs_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Block;
+ explicit AstBlock(Op op, ExprType type, AstName name, AstExprVector&& exprs)
+ : AstExpr(Kind, type),
+ op_(op),
+ name_(name),
+ exprs_(Move(exprs))
+ {}
+
+ Op op() const { return op_; }
+ AstName name() const { return name_; }
+ const AstExprVector& exprs() const { return exprs_; }
+};
+
+class AstBranch : public AstExpr
+{
+ Op op_;
+ AstExpr* cond_;
+ AstRef target_;
+ AstExpr* value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Branch;
+ explicit AstBranch(Op op, ExprType type,
+ AstExpr* cond, AstRef target, AstExpr* value)
+ : AstExpr(Kind, type),
+ op_(op),
+ cond_(cond),
+ target_(target),
+ value_(value)
+ {}
+
+ Op op() const { return op_; }
+ AstRef& target() { return target_; }
+ AstExpr& cond() const { MOZ_ASSERT(cond_); return *cond_; }
+ AstExpr* maybeValue() const { return value_; }
+};
+
+class AstCall : public AstExpr
+{
+ Op op_;
+ AstRef func_;
+ AstExprVector args_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Call;
+ AstCall(Op op, ExprType type, AstRef func, AstExprVector&& args)
+ : AstExpr(Kind, type), op_(op), func_(func), args_(Move(args))
+ {}
+
+ Op op() const { return op_; }
+ AstRef& func() { return func_; }
+ const AstExprVector& args() const { return args_; }
+};
+
+class AstCallIndirect : public AstExpr
+{
+ AstRef sig_;
+ AstExprVector args_;
+ AstExpr* index_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::CallIndirect;
+ AstCallIndirect(AstRef sig, ExprType type, AstExprVector&& args, AstExpr* index)
+ : AstExpr(Kind, type), sig_(sig), args_(Move(args)), index_(index)
+ {}
+ AstRef& sig() { return sig_; }
+ const AstExprVector& args() const { return args_; }
+ AstExpr* index() const { return index_; }
+};
+
+class AstReturn : public AstExpr
+{
+ AstExpr* maybeExpr_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Return;
+ explicit AstReturn(AstExpr* maybeExpr)
+ : AstExpr(Kind, ExprType::Void),
+ maybeExpr_(maybeExpr)
+ {}
+ AstExpr* maybeExpr() const { return maybeExpr_; }
+};
+
+class AstIf : public AstExpr
+{
+ AstExpr* cond_;
+ AstName name_;
+ AstExprVector thenExprs_;
+ AstExprVector elseExprs_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::If;
+ AstIf(ExprType type, AstExpr* cond, AstName name,
+ AstExprVector&& thenExprs, AstExprVector&& elseExprs)
+ : AstExpr(Kind, type),
+ cond_(cond),
+ name_(name),
+ thenExprs_(Move(thenExprs)),
+ elseExprs_(Move(elseExprs))
+ {}
+
+ AstExpr& cond() const { return *cond_; }
+ const AstExprVector& thenExprs() const { return thenExprs_; }
+ bool hasElse() const { return elseExprs_.length(); }
+ const AstExprVector& elseExprs() const { MOZ_ASSERT(hasElse()); return elseExprs_; }
+ AstName name() const { return name_; }
+};
+
+class AstLoadStoreAddress
+{
+ AstExpr* base_;
+ int32_t flags_;
+ int32_t offset_;
+
+ public:
+ explicit AstLoadStoreAddress(AstExpr* base, int32_t flags, int32_t offset)
+ : base_(base),
+ flags_(flags),
+ offset_(offset)
+ {}
+
+ AstExpr& base() const { return *base_; }
+ int32_t flags() const { return flags_; }
+ int32_t offset() const { return offset_; }
+};
+
+class AstLoad : public AstExpr
+{
+ Op op_;
+ AstLoadStoreAddress address_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Load;
+ explicit AstLoad(Op op, const AstLoadStoreAddress &address)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op),
+ address_(address)
+ {}
+
+ Op op() const { return op_; }
+ const AstLoadStoreAddress& address() const { return address_; }
+};
+
+class AstStore : public AstExpr
+{
+ Op op_;
+ AstLoadStoreAddress address_;
+ AstExpr* value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::Store;
+ explicit AstStore(Op op, const AstLoadStoreAddress &address, AstExpr* value)
+ : AstExpr(Kind, ExprType::Void),
+ op_(op),
+ address_(address),
+ value_(value)
+ {}
+
+ Op op() const { return op_; }
+ const AstLoadStoreAddress& address() const { return address_; }
+ AstExpr& value() const { return *value_; }
+};
+
+class AstCurrentMemory final : public AstExpr
+{
+ public:
+ static const AstExprKind Kind = AstExprKind::CurrentMemory;
+ explicit AstCurrentMemory()
+ : AstExpr(Kind, ExprType::I32)
+ {}
+};
+
+class AstGrowMemory final : public AstExpr
+{
+ AstExpr* operand_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::GrowMemory;
+ explicit AstGrowMemory(AstExpr* operand)
+ : AstExpr(Kind, ExprType::I32), operand_(operand)
+ {}
+
+ AstExpr* operand() const { return operand_; }
+};
+
+class AstBranchTable : public AstExpr
+{
+ AstExpr& index_;
+ AstRef default_;
+ AstRefVector table_;
+ AstExpr* value_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::BranchTable;
+ explicit AstBranchTable(AstExpr& index, AstRef def, AstRefVector&& table,
+ AstExpr* maybeValue)
+ : AstExpr(Kind, ExprType::Void),
+ index_(index),
+ default_(def),
+ table_(Move(table)),
+ value_(maybeValue)
+ {}
+ AstExpr& index() const { return index_; }
+ AstRef& def() { return default_; }
+ AstRefVector& table() { return table_; }
+ AstExpr* maybeValue() { return value_; }
+};
+
+class AstFunc : public AstNode
+{
+ AstName name_;
+ AstRef sig_;
+ AstValTypeVector vars_;
+ AstNameVector localNames_;
+ AstExprVector body_;
+
+ public:
+ AstFunc(AstName name, AstRef sig, AstValTypeVector&& vars,
+ AstNameVector&& locals, AstExprVector&& body)
+ : name_(name),
+ sig_(sig),
+ vars_(Move(vars)),
+ localNames_(Move(locals)),
+ body_(Move(body))
+ {}
+ AstRef& sig() { return sig_; }
+ const AstValTypeVector& vars() const { return vars_; }
+ const AstNameVector& locals() const { return localNames_; }
+ const AstExprVector& body() const { return body_; }
+ AstName name() const { return name_; }
+};
+
+class AstGlobal : public AstNode
+{
+ AstName name_;
+ bool isMutable_;
+ ValType type_;
+ Maybe<AstExpr*> init_;
+
+ public:
+ AstGlobal() : isMutable_(false), type_(ValType(TypeCode::Limit))
+ {}
+
+ explicit AstGlobal(AstName name, ValType type, bool isMutable,
+ Maybe<AstExpr*> init = Maybe<AstExpr*>())
+ : name_(name), isMutable_(isMutable), type_(type), init_(init)
+ {}
+
+ AstName name() const { return name_; }
+ bool isMutable() const { return isMutable_; }
+ ValType type() const { return type_; }
+
+ bool hasInit() const { return !!init_; }
+ AstExpr& init() const { MOZ_ASSERT(hasInit()); return **init_; }
+};
+
+typedef AstVector<AstGlobal*> AstGlobalVector;
+
+class AstImport : public AstNode
+{
+ AstName name_;
+ AstName module_;
+ AstName field_;
+ DefinitionKind kind_;
+
+ AstRef funcSig_;
+ Limits limits_;
+ AstGlobal global_;
+
+ public:
+ AstImport(AstName name, AstName module, AstName field, AstRef funcSig)
+ : name_(name), module_(module), field_(field), kind_(DefinitionKind::Function), funcSig_(funcSig)
+ {}
+ AstImport(AstName name, AstName module, AstName field, DefinitionKind kind, Limits limits)
+ : name_(name), module_(module), field_(field), kind_(kind), limits_(limits)
+ {}
+ AstImport(AstName name, AstName module, AstName field, AstGlobal global)
+ : name_(name), module_(module), field_(field), kind_(DefinitionKind::Global), global_(global)
+ {}
+
+ AstName name() const { return name_; }
+ AstName module() const { return module_; }
+ AstName field() const { return field_; }
+
+ DefinitionKind kind() const { return kind_; }
+ AstRef& funcSig() {
+ MOZ_ASSERT(kind_ == DefinitionKind::Function);
+ return funcSig_;
+ }
+ Limits limits() const {
+ MOZ_ASSERT(kind_ == DefinitionKind::Memory || kind_ == DefinitionKind::Table);
+ return limits_;
+ }
+ const AstGlobal& global() const {
+ MOZ_ASSERT(kind_ == DefinitionKind::Global);
+ return global_;
+ }
+};
+
+class AstExport : public AstNode
+{
+ AstName name_;
+ DefinitionKind kind_;
+ AstRef ref_;
+
+ public:
+ AstExport(AstName name, DefinitionKind kind, AstRef ref)
+ : name_(name), kind_(kind), ref_(ref)
+ {}
+ explicit AstExport(AstName name, DefinitionKind kind)
+ : name_(name), kind_(kind)
+ {}
+ AstName name() const { return name_; }
+ DefinitionKind kind() const { return kind_; }
+ AstRef& ref() { return ref_; }
+};
+
+class AstDataSegment : public AstNode
+{
+ AstExpr* offset_;
+ AstNameVector fragments_;
+
+ public:
+ AstDataSegment(AstExpr* offset, AstNameVector&& fragments)
+ : offset_(offset), fragments_(Move(fragments))
+ {}
+
+ AstExpr* offset() const { return offset_; }
+ const AstNameVector& fragments() const { return fragments_; }
+};
+
+typedef AstVector<AstDataSegment*> AstDataSegmentVector;
+
+class AstElemSegment : public AstNode
+{
+ AstExpr* offset_;
+ AstRefVector elems_;
+
+ public:
+ AstElemSegment(AstExpr* offset, AstRefVector&& elems)
+ : offset_(offset), elems_(Move(elems))
+ {}
+
+ AstExpr* offset() const { return offset_; }
+ AstRefVector& elems() { return elems_; }
+ const AstRefVector& elems() const { return elems_; }
+};
+
+typedef AstVector<AstElemSegment*> AstElemSegmentVector;
+
+class AstStartFunc : public AstNode
+{
+ AstRef func_;
+
+ public:
+ explicit AstStartFunc(AstRef func)
+ : func_(func)
+ {}
+
+ AstRef& func() {
+ return func_;
+ }
+};
+
+struct AstResizable
+{
+ AstName name;
+ Limits limits;
+ bool imported;
+
+ AstResizable(Limits limits, bool imported, AstName name = AstName())
+ : name(name),
+ limits(limits),
+ imported(imported)
+ {}
+};
+
+class AstModule : public AstNode
+{
+ public:
+ typedef AstVector<AstFunc*> FuncVector;
+ typedef AstVector<AstImport*> ImportVector;
+ typedef AstVector<AstExport*> ExportVector;
+ typedef AstVector<AstSig*> SigVector;
+ typedef AstVector<AstName> NameVector;
+ typedef AstVector<AstResizable> AstResizableVector;
+
+ private:
+ typedef AstHashMap<AstSig*, uint32_t, AstSig> SigMap;
+
+ LifoAlloc& lifo_;
+ SigVector sigs_;
+ SigMap sigMap_;
+ ImportVector imports_;
+ NameVector funcImportNames_;
+ AstResizableVector tables_;
+ AstResizableVector memories_;
+ ExportVector exports_;
+ Maybe<AstStartFunc> startFunc_;
+ FuncVector funcs_;
+ AstDataSegmentVector dataSegments_;
+ AstElemSegmentVector elemSegments_;
+ AstGlobalVector globals_;
+
+ public:
+ explicit AstModule(LifoAlloc& lifo)
+ : lifo_(lifo),
+ sigs_(lifo),
+ sigMap_(lifo),
+ imports_(lifo),
+ funcImportNames_(lifo),
+ tables_(lifo),
+ memories_(lifo),
+ exports_(lifo),
+ funcs_(lifo),
+ dataSegments_(lifo),
+ elemSegments_(lifo),
+ globals_(lifo)
+ {}
+ bool init() {
+ return sigMap_.init();
+ }
+ bool addMemory(AstName name, Limits memory) {
+ return memories_.append(AstResizable(memory, false, name));
+ }
+ bool hasMemory() const {
+ return !!memories_.length();
+ }
+ const AstResizableVector& memories() const {
+ return memories_;
+ }
+ bool addTable(AstName name, Limits table) {
+ return tables_.append(AstResizable(table, false, name));
+ }
+ bool hasTable() const {
+ return !!tables_.length();
+ }
+ const AstResizableVector& tables() const {
+ return tables_;
+ }
+ bool append(AstDataSegment* seg) {
+ return dataSegments_.append(seg);
+ }
+ const AstDataSegmentVector& dataSegments() const {
+ return dataSegments_;
+ }
+ bool append(AstElemSegment* seg) {
+ return elemSegments_.append(seg);
+ }
+ const AstElemSegmentVector& elemSegments() const {
+ return elemSegments_;
+ }
+ bool hasStartFunc() const {
+ return !!startFunc_;
+ }
+ bool setStartFunc(AstStartFunc startFunc) {
+ if (startFunc_)
+ return false;
+ startFunc_.emplace(startFunc);
+ return true;
+ }
+ AstStartFunc& startFunc() {
+ return *startFunc_;
+ }
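+    // Interns a signature: if a structurally equal signature already exists,
+    // returns its index; otherwise moves sig into the module and assigns it
+    // the next free index.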
+ bool declare(AstSig&& sig, uint32_t* sigIndex) {
+ SigMap::AddPtr p = sigMap_.lookupForAdd(sig);
+ if (p) {
+ *sigIndex = p->value();
+ return true;
+ }
+ *sigIndex = sigs_.length();
+ auto* lifoSig = new (lifo_) AstSig(AstName(), Move(sig));
+ return lifoSig &&
+ sigs_.append(lifoSig) &&
+ sigMap_.add(p, sigs_.back(), *sigIndex);
+ }
+ bool append(AstSig* sig) {
+ uint32_t sigIndex = sigs_.length();
+ if (!sigs_.append(sig))
+ return false;
+ SigMap::AddPtr p = sigMap_.lookupForAdd(*sig);
+ return p || sigMap_.add(p, sig, sigIndex);
+ }
+ const SigVector& sigs() const {
+ return sigs_;
+ }
+ bool append(AstFunc* func) {
+ return funcs_.append(func);
+ }
+ const FuncVector& funcs() const {
+ return funcs_;
+ }
+ bool append(AstImport* imp) {
+ switch (imp->kind()) {
+ case DefinitionKind::Function:
+ if (!funcImportNames_.append(imp->name()))
+ return false;
+ break;
+ case DefinitionKind::Table:
+ if (!tables_.append(AstResizable(imp->limits(), true)))
+ return false;
+ break;
+ case DefinitionKind::Memory:
+ if (!memories_.append(AstResizable(imp->limits(), true)))
+ return false;
+ break;
+ case DefinitionKind::Global:
+ break;
+ }
+
+ return imports_.append(imp);
+ }
+ const ImportVector& imports() const {
+ return imports_;
+ }
+ const NameVector& funcImportNames() const {
+ return funcImportNames_;
+ }
+ size_t numFuncImports() const {
+ return funcImportNames_.length();
+ }
+ bool append(AstExport* exp) {
+ return exports_.append(exp);
+ }
+ const ExportVector& exports() const {
+ return exports_;
+ }
+ bool append(AstGlobal* glob) {
+ return globals_.append(glob);
+ }
+ const AstGlobalVector& globals() const {
+ return globals_;
+ }
+};
+
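+// Illustrative sketch (not part of the original source): a text parser or
+// binary decoder would typically populate an AstModule roughly as follows,
+// where `lifo`, `memory`, and `func` are hypothetical placeholders for a
+// LifoAlloc, a Limits descriptor, and an already-built AstFunc.
+//
+//     auto* m = new (lifo) AstModule(lifo);
+//     if (!m || !m->init())
+//         return false;
+//     if (!m->addMemory(AstName(), memory))
+//         return false;
+//     if (!m->append(func))
+//         return false;
+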
+class AstUnaryOperator final : public AstExpr
+{
+ Op op_;
+ AstExpr* operand_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::UnaryOperator;
+ explicit AstUnaryOperator(Op op, AstExpr* operand)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op), operand_(operand)
+ {}
+
+ Op op() const { return op_; }
+ AstExpr* operand() const { return operand_; }
+};
+
+class AstBinaryOperator final : public AstExpr
+{
+ Op op_;
+ AstExpr* lhs_;
+ AstExpr* rhs_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::BinaryOperator;
+ explicit AstBinaryOperator(Op op, AstExpr* lhs, AstExpr* rhs)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op), lhs_(lhs), rhs_(rhs)
+ {}
+
+ Op op() const { return op_; }
+ AstExpr* lhs() const { return lhs_; }
+ AstExpr* rhs() const { return rhs_; }
+};
+
+class AstTernaryOperator : public AstExpr
+{
+ Op op_;
+ AstExpr* op0_;
+ AstExpr* op1_;
+ AstExpr* op2_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::TernaryOperator;
+ AstTernaryOperator(Op op, AstExpr* op0, AstExpr* op1, AstExpr* op2)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op), op0_(op0), op1_(op1), op2_(op2)
+ {}
+
+ Op op() const { return op_; }
+ AstExpr* op0() const { return op0_; }
+ AstExpr* op1() const { return op1_; }
+ AstExpr* op2() const { return op2_; }
+};
+
+class AstComparisonOperator final : public AstExpr
+{
+ Op op_;
+ AstExpr* lhs_;
+ AstExpr* rhs_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::ComparisonOperator;
+ explicit AstComparisonOperator(Op op, AstExpr* lhs, AstExpr* rhs)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op), lhs_(lhs), rhs_(rhs)
+ {}
+
+ Op op() const { return op_; }
+ AstExpr* lhs() const { return lhs_; }
+ AstExpr* rhs() const { return rhs_; }
+};
+
+class AstConversionOperator final : public AstExpr
+{
+ Op op_;
+ AstExpr* operand_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::ConversionOperator;
+ explicit AstConversionOperator(Op op, AstExpr* operand)
+ : AstExpr(Kind, ExprType::Limit),
+ op_(op), operand_(operand)
+ {}
+
+ Op op() const { return op_; }
+ AstExpr* operand() const { return operand_; }
+};
+
+// This is an artificial AST node which can fill operand slots in an AST
+// constructed from parsing or decoding stack-machine code that doesn't have
+// an inherent AST structure.
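+// (Purely as an illustration: when a decoded operator consumes a value that
+// was left on the implicit operand stack by earlier code, an AstPop node can
+// stand in for that operand slot in the reconstructed tree.)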
+class AstPop final : public AstExpr
+{
+ public:
+ static const AstExprKind Kind = AstExprKind::Pop;
+ AstPop()
+ : AstExpr(Kind, ExprType::Void)
+ {}
+};
+
+// This is an artificial AST node which can be used to represent some forms
+// of stack-machine code in an AST form. It is similar to Block, but returns the
+// value of its first operand, rather than the last.
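+// (As a hypothetical illustration: First(e1, e2) evaluates e1 and then e2 but
+// yields the value of e1, whereas a Block containing the same two expressions
+// would yield the value of e2.)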
+class AstFirst : public AstExpr
+{
+ AstExprVector exprs_;
+
+ public:
+ static const AstExprKind Kind = AstExprKind::First;
+ explicit AstFirst(AstExprVector&& exprs)
+ : AstExpr(Kind, ExprType::Limit),
+ exprs_(Move(exprs))
+ {}
+
+ AstExprVector& exprs() { return exprs_; }
+ const AstExprVector& exprs() const { return exprs_; }
+};
+
+} // end wasm namespace
+} // end js namespace
+
+#endif // wasmast_h
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
new file mode 100644
index 0000000000..564b81f683
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -0,0 +1,7480 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* WebAssembly baseline compiler ("RabaldrMonkey")
+ *
+ * General status notes:
+ *
+ * "FIXME" indicates a known or suspected bug. Always has a bug#.
+ *
+ * "TODO" indicates an opportunity for a general improvement, with an additional
+ * tag to indicate the area of improvement. Usually has a bug#.
+ *
+ * Unimplemented functionality:
+ *
+ * - Tiered compilation (bug 1277562)
+ * - profiler support / devtools (bug 1286948)
+ * - SIMD
+ * - Atomics
+ *
+ * There are lots of machine dependencies here but they are pretty well isolated
+ * to a segment of the compiler. Many dependencies will eventually be factored
+ * into the MacroAssembler layer and shared with other code generators.
+ *
+ *
+ * High-value compiler performance improvements:
+ *
+ * - (Bug 1316802) The specific-register allocator (the needI32(r), needI64(r)
+ * etc methods) can avoid syncing the value stack if the specific register is
+ * in use but there is a free register to shuffle the specific register into.
+ * (This will also improve the generated code.) The sync happens often enough
+ * here to show up in profiles, because it is triggered by integer multiply
+ * and divide.
+ *
+ *
+ * High-value code generation improvements:
+ *
+ * - (Bug 1316803) Opportunities for cheaply folding in a constant rhs to
+ *   arithmetic operations; we already do this for I32 add and shift operators,
+ *   and it reduces register pressure and instruction count.
+ *
+ * - (Bug 1286816) Opportunities for cheaply folding in a constant rhs to
+ * conditionals.
+ *
+ * - (Bug 1286816) Boolean evaluation for control can be optimized by pushing a
+ * bool-generating operation onto the value stack in the same way that we now
+ * push latent constants and local lookups, or (easier) by remembering the
+ * operation in a side location if the next Op will consume it.
+ *
+ * - (Bug 1286816) brIf pessimizes by branching over code that performs stack
+ * cleanup and a branch. If no cleanup is needed we can just branch
+ * conditionally to the target.
+ *
+ * - (Bug 1316804) brTable pessimizes by always dispatching to code that pops
+ * the stack and then jumps to the code for the target case. If no cleanup is
+ * needed we could just branch conditionally to the target; if the same amount
+ * of cleanup is needed for all cases then the cleanup can be done before the
+ * dispatch. Both are highly likely.
+ *
+ * - (Bug 1316806) Register management around calls: At the moment we sync the
+ * value stack unconditionally (this is simple) but there are probably many
+ * common cases where we could instead save/restore live caller-saves
+ * registers and perform parallel assignment into argument registers. This
+ * may be important if we keep some locals in registers.
+ *
+ * - (Bug 1316808) Allocate some locals to registers on machines where there are
+ * enough registers. This is probably hard to do well in a one-pass compiler
+ * but it might be that just keeping register arguments and the first few
+ * locals in registers is a viable strategy; another (more general) strategy
+ * is caching locals in registers in straight-line code. Such caching could
+ * also track constant values in registers, if that is deemed valuable. A
+ * combination of techniques may be desirable: parameters and the first few
+ * locals could be cached on entry to the function but not statically assigned
+ * to registers throughout.
+ *
+ * (On a large corpus of code it should be possible to compute, for every
+ * signature comprising the types of parameters and locals, and using a static
+ * weight for loops, a list in priority order of which parameters and locals
+ *   should be assigned to registers. Or something like that. Wasm makes
+ * this simple. Static assignments are desirable because they are not flushed
+ * to memory by the pre-block sync() call.)
+ */
+
+#include "wasm/WasmBaselineCompile.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/AtomicOp.h"
+#include "jit/IonTypes.h"
+#include "jit/JitAllocPolicy.h"
+#include "jit/Label.h"
+#include "jit/MacroAssembler.h"
+#include "jit/MIR.h"
+#include "jit/Registers.h"
+#include "jit/RegisterSets.h"
+#if defined(JS_CODEGEN_ARM)
+# include "jit/arm/Assembler-arm.h"
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+# include "jit/x86-shared/Architecture-x86-shared.h"
+# include "jit/x86-shared/Assembler-x86-shared.h"
+#endif
+
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmBinaryIterator.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using mozilla::DebugOnly;
+using mozilla::FloatingPoint;
+using mozilla::IsPowerOfTwo;
+using mozilla::SpecificNaN;
+
+namespace js {
+namespace wasm {
+
+using namespace js::jit;
+using JS::GenericNaN;
+
+struct BaseCompilePolicy : OpIterPolicy
+{
+ static const bool Output = true;
+
+ // The baseline compiler tracks values on a stack of its own -- it
+ // needs to scan that stack for spilling -- and thus has no need
+ // for the values maintained by the iterator.
+ //
+ // The baseline compiler tracks control items on a stack of its
+ // own as well.
+ //
+ // TODO / REDUNDANT (Bug 1316814): It would be nice if we could
+ // make use of the iterator's ControlItems and not require our own
+ // stack for that.
+};
+
+typedef OpIter<BaseCompilePolicy> BaseOpIter;
+
+typedef bool IsUnsigned;
+typedef bool IsSigned;
+typedef bool ZeroOnOverflow;
+typedef bool IsKnownNotZero;
+typedef bool HandleNaNSpecially;
+typedef unsigned ByteSize;
+typedef unsigned BitSize;
+
+// UseABI::Wasm implies that the Tls/Heap/Global registers are nonvolatile,
+// except when InterModule::True is also set, in which case they are volatile.
+//
+// UseABI::System implies that the Tls/Heap/Global registers are volatile.
+// Additionally, the parameter passing mechanism may be slightly different from
+// the UseABI::Wasm convention.
+//
+// When the Tls/Heap/Global registers are not volatile, the baseline compiler
+// will restore the Tls register from its save slot before the call, since the
+// baseline compiler uses the Tls register for other things.
+//
+// When those registers are volatile, the baseline compiler will reload them
+// after the call (it will restore the Tls register from the save slot and load
+// the other two from the Tls data).
+
+enum class UseABI { Wasm, System };
+enum class InterModule { False = false, True = true };
+
+#ifdef JS_CODEGEN_ARM64
+// FIXME: This is not correct, indeed for ARM64 there is no reliable
+// StackPointer and we'll need to change the abstractions that use
+// SP-relative addressing. There's a guard in emitFunction() below to
+// prevent this workaround from having any consequence. This hack
+// exists only as a stopgap; there is no ARM64 JIT support yet.
+static const Register StackPointer = RealStackPointer;
+#endif
+
+#ifdef JS_CODEGEN_X86
+// The selection of EBX here steps gingerly around: the need for EDX
+// to be allocatable for multiply/divide; ECX to be allocatable for
+// shift/rotate; EAX (= ReturnReg) to be allocatable as the joinreg;
+// EBX not being one of the WasmTableCall registers; and needing a
+// temp register for load/store that has a single-byte persona.
+static const Register ScratchRegX86 = ebx;
+
+# define INT_DIV_I64_CALLOUT
+#endif
+
+#ifdef JS_CODEGEN_ARM
+// We need a temp for funcPtrCall. It can't be any of the
+// WasmTableCall registers, an argument register, or a scratch
+// register, and probably should not be ReturnReg.
+static const Register FuncPtrCallTemp = CallTempReg1;
+
+// We use our own scratch register, because the macro assembler uses
+// the regular scratch register(s) pretty liberally. We could
+// work around that in several cases but the mess does not seem
+// worth it yet. CallTempReg2 seems safe.
+static const Register ScratchRegARM = CallTempReg2;
+
+# define INT_DIV_I64_CALLOUT
+# define I64_TO_FLOAT_CALLOUT
+# define FLOAT_TO_I64_CALLOUT
+#endif
+
+class BaseCompiler
+{
+ // We define our own ScratchRegister abstractions, deferring to
+ // the platform's when possible.
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ typedef ScratchDoubleScope ScratchF64;
+#else
+ class ScratchF64
+ {
+ public:
+ ScratchF64(BaseCompiler& b) {}
+ operator FloatRegister() const {
+ MOZ_CRASH("BaseCompiler platform hook - ScratchF64");
+ }
+ };
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ typedef ScratchFloat32Scope ScratchF32;
+#else
+ class ScratchF32
+ {
+ public:
+ ScratchF32(BaseCompiler& b) {}
+ operator FloatRegister() const {
+ MOZ_CRASH("BaseCompiler platform hook - ScratchF32");
+ }
+ };
+#endif
+
+#if defined(JS_CODEGEN_X64)
+ typedef ScratchRegisterScope ScratchI32;
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ class ScratchI32
+ {
+# ifdef DEBUG
+ BaseCompiler& bc;
+ public:
+ explicit ScratchI32(BaseCompiler& bc) : bc(bc) {
+ MOZ_ASSERT(!bc.scratchRegisterTaken());
+ bc.setScratchRegisterTaken(true);
+ }
+ ~ScratchI32() {
+ MOZ_ASSERT(bc.scratchRegisterTaken());
+ bc.setScratchRegisterTaken(false);
+ }
+# else
+ public:
+ explicit ScratchI32(BaseCompiler& bc) {}
+# endif
+ operator Register() const {
+# ifdef JS_CODEGEN_X86
+ return ScratchRegX86;
+# else
+ return ScratchRegARM;
+# endif
+ }
+ };
+#else
+ class ScratchI32
+ {
+ public:
+ ScratchI32(BaseCompiler& bc) {}
+ operator Register() const {
+ MOZ_CRASH("BaseCompiler platform hook - ScratchI32");
+ }
+ };
+#endif
+
+ // A Label in the code, allocated out of a temp pool in the
+ // TempAllocator attached to the compilation.
+
+ struct PooledLabel : public Label, public TempObject, public InlineListNode<PooledLabel>
+ {
+ PooledLabel() : f(nullptr) {}
+ explicit PooledLabel(BaseCompiler* f) : f(f) {}
+ BaseCompiler* f;
+ };
+
+ typedef Vector<PooledLabel*, 8, SystemAllocPolicy> LabelVector;
+
+ struct UniquePooledLabelFreePolicy
+ {
+ void operator()(PooledLabel* p) {
+ p->f->freeLabel(p);
+ }
+ };
+
+ typedef UniquePtr<PooledLabel, UniquePooledLabelFreePolicy> UniquePooledLabel;
+
+ // The strongly typed register wrappers have saved my bacon a few
+ // times; though they are largely redundant they stay, for now.
+
+ // TODO / INVESTIGATE (Bug 1316815): Things would probably be
+ // simpler if these inherited from Register, Register64, and
+ // FloatRegister.
+
+ struct RegI32
+ {
+ RegI32() : reg(Register::Invalid()) {}
+ explicit RegI32(Register reg) : reg(reg) {}
+ Register reg;
+ bool operator==(const RegI32& that) { return reg == that.reg; }
+ bool operator!=(const RegI32& that) { return reg != that.reg; }
+ };
+
+ struct RegI64
+ {
+ RegI64() : reg(Register64::Invalid()) {}
+ explicit RegI64(Register64 reg) : reg(reg) {}
+ Register64 reg;
+ bool operator==(const RegI64& that) { return reg == that.reg; }
+ bool operator!=(const RegI64& that) { return reg != that.reg; }
+ };
+
+ struct RegF32
+ {
+ RegF32() {}
+ explicit RegF32(FloatRegister reg) : reg(reg) {}
+ FloatRegister reg;
+ bool operator==(const RegF32& that) { return reg == that.reg; }
+ bool operator!=(const RegF32& that) { return reg != that.reg; }
+ };
+
+ struct RegF64
+ {
+ RegF64() {}
+ explicit RegF64(FloatRegister reg) : reg(reg) {}
+ FloatRegister reg;
+ bool operator==(const RegF64& that) { return reg == that.reg; }
+ bool operator!=(const RegF64& that) { return reg != that.reg; }
+ };
+
+ struct AnyReg
+ {
+ AnyReg() { tag = NONE; }
+ explicit AnyReg(RegI32 r) { tag = I32; i32_ = r; }
+ explicit AnyReg(RegI64 r) { tag = I64; i64_ = r; }
+ explicit AnyReg(RegF32 r) { tag = F32; f32_ = r; }
+ explicit AnyReg(RegF64 r) { tag = F64; f64_ = r; }
+
+ RegI32 i32() {
+ MOZ_ASSERT(tag == I32);
+ return i32_;
+ }
+ RegI64 i64() {
+ MOZ_ASSERT(tag == I64);
+ return i64_;
+ }
+ RegF32 f32() {
+ MOZ_ASSERT(tag == F32);
+ return f32_;
+ }
+ RegF64 f64() {
+ MOZ_ASSERT(tag == F64);
+ return f64_;
+ }
+ AnyRegister any() {
+ switch (tag) {
+ case F32: return AnyRegister(f32_.reg);
+ case F64: return AnyRegister(f64_.reg);
+ case I32: return AnyRegister(i32_.reg);
+ case I64:
+#ifdef JS_PUNBOX64
+ return AnyRegister(i64_.reg.reg);
+#else
+ // The compiler is written so that this is never needed: any() is called
+ // on arbitrary registers for asm.js but asm.js does not have 64-bit ints.
+ // For wasm, any() is called on arbitrary registers only on 64-bit platforms.
+ MOZ_CRASH("AnyReg::any() on 32-bit platform");
+#endif
+ case NONE:
+ MOZ_CRASH("AnyReg::any() on NONE");
+ }
+ // Work around GCC 5 analysis/warning bug.
+ MOZ_CRASH("AnyReg::any(): impossible case");
+ }
+
+ union {
+ RegI32 i32_;
+ RegI64 i64_;
+ RegF32 f32_;
+ RegF64 f64_;
+ };
+ enum { NONE, I32, I64, F32, F64 } tag;
+ };
+
+ struct Local
+ {
+ Local() : type_(MIRType::None), offs_(UINT32_MAX) {}
+ Local(MIRType type, uint32_t offs) : type_(type), offs_(offs) {}
+
+ void init(MIRType type_, uint32_t offs_) {
+ this->type_ = type_;
+ this->offs_ = offs_;
+ }
+
+ MIRType type_; // Type of the value, or MIRType::None
+ uint32_t offs_; // Zero-based frame offset of value, or UINT32_MAX
+
+ MIRType type() const { MOZ_ASSERT(type_ != MIRType::None); return type_; }
+ uint32_t offs() const { MOZ_ASSERT(offs_ != UINT32_MAX); return offs_; }
+ };
+
+ // Control node, representing labels and stack heights at join points.
+
+ struct Control
+ {
+ Control(uint32_t framePushed, uint32_t stackSize)
+ : label(nullptr),
+ otherLabel(nullptr),
+ framePushed(framePushed),
+ stackSize(stackSize),
+ deadOnArrival(false),
+ deadThenBranch(false)
+ {}
+
+ PooledLabel* label;
+ PooledLabel* otherLabel; // Used for the "else" branch of if-then-else
+ uint32_t framePushed; // From masm
+ uint32_t stackSize; // Value stack height
+ bool deadOnArrival; // deadCode_ was set on entry to the region
+ bool deadThenBranch; // deadCode_ was set on exit from "then"
+ };
+
+ // Volatile registers except ReturnReg.
+
+ static LiveRegisterSet VolatileReturnGPR;
+
+ // The baseline compiler will use OOL code more sparingly than
+ // Baldr since our code is not high performance and frills like
+ // code density and branch prediction friendliness will be less
+ // important.
+
+ class OutOfLineCode : public TempObject
+ {
+ private:
+ Label entry_;
+ Label rejoin_;
+ uint32_t framePushed_;
+
+ public:
+ OutOfLineCode() : framePushed_(UINT32_MAX) {}
+
+ Label* entry() { return &entry_; }
+ Label* rejoin() { return &rejoin_; }
+
+ void setFramePushed(uint32_t framePushed) {
+ MOZ_ASSERT(framePushed_ == UINT32_MAX);
+ framePushed_ = framePushed;
+ }
+
+ void bind(MacroAssembler& masm) {
+ MOZ_ASSERT(framePushed_ != UINT32_MAX);
+ masm.bind(&entry_);
+ masm.setFramePushed(framePushed_);
+ }
+
+ // Save volatile registers but not ReturnReg.
+
+ void saveVolatileReturnGPR(MacroAssembler& masm) {
+ masm.PushRegsInMask(BaseCompiler::VolatileReturnGPR);
+ }
+
+ // Restore volatile registers but not ReturnReg.
+
+ void restoreVolatileReturnGPR(MacroAssembler& masm) {
+ masm.PopRegsInMask(BaseCompiler::VolatileReturnGPR);
+ }
+
+ // The generate() method must be careful about register use
+ // because it will be invoked when there is a register
+ // assignment in the BaseCompiler that does not correspond
+ // to the available registers when the generated OOL code is
+ // executed. The register allocator *must not* be called.
+ //
+ // The best strategy is for the creator of the OOL object to
+ // allocate all temps that the OOL code will need.
+ //
+ // Input, output, and temp registers are embedded in the OOL
+ // object and are known to the code generator.
+ //
+ // Scratch registers are available to use in OOL code.
+ //
+ // All other registers must be explicitly saved and restored
+ // by the OOL code before being used.
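+        //
+        // A minimal subclass might look like this (hypothetical sketch, not
+        // one of the subclasses defined elsewhere in this file):
+        //
+        //     struct OutOfLineExample : OutOfLineCode {
+        //         Register temp;                    // allocated by the creator
+        //         explicit OutOfLineExample(Register temp) : temp(temp) {}
+        //         void generate(MacroAssembler& masm) override {
+        //             // ... emit the slow path, using only 'temp' and scratch ...
+        //             masm.jump(rejoin());          // resume the fast path
+        //         }
+        //     };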
+
+ virtual void generate(MacroAssembler& masm) = 0;
+ };
+
+ const ModuleGeneratorData& mg_;
+ BaseOpIter iter_;
+ const FuncBytes& func_;
+ size_t lastReadCallSite_;
+ TempAllocator& alloc_;
+ const ValTypeVector& locals_; // Types of parameters and locals
+ int32_t localSize_; // Size of local area in bytes (stable after beginFunction)
+ int32_t varLow_; // Low byte offset of local area for true locals (not parameters)
+ int32_t varHigh_; // High byte offset + 1 of local area for true locals
+ int32_t maxFramePushed_; // Max value of masm.framePushed() observed
+ bool deadCode_; // Flag indicating we should decode & discard the opcode
+ ValTypeVector SigI64I64_;
+ ValTypeVector SigDD_;
+ ValTypeVector SigD_;
+ ValTypeVector SigF_;
+ ValTypeVector SigI_;
+ ValTypeVector Sig_;
+ Label returnLabel_;
+ Label outOfLinePrologue_;
+ Label bodyLabel_;
+ TrapOffset prologueTrapOffset_;
+
+ FuncCompileResults& compileResults_;
+ MacroAssembler& masm; // No '_' suffix - too tedious...
+
+ AllocatableGeneralRegisterSet availGPR_;
+ AllocatableFloatRegisterSet availFPU_;
+#ifdef DEBUG
+ bool scratchRegisterTaken_;
+#endif
+
+ TempObjectPool<PooledLabel> labelPool_;
+
+ Vector<Local, 8, SystemAllocPolicy> localInfo_;
+ Vector<OutOfLineCode*, 8, SystemAllocPolicy> outOfLine_;
+
+ // Index into localInfo_ of the special local used for saving the TLS
+ // pointer. This follows the function's real arguments and locals.
+ uint32_t tlsSlot_;
+
+ // On specific platforms we sometimes need to use specific registers.
+
+#ifdef JS_CODEGEN_X64
+ RegI64 specific_rax;
+ RegI64 specific_rcx;
+ RegI64 specific_rdx;
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ RegI32 specific_eax;
+ RegI32 specific_ecx;
+ RegI32 specific_edx;
+#endif
+
+#if defined(JS_CODEGEN_X86)
+ AllocatableGeneralRegisterSet singleByteRegs_;
+#endif
+#if defined(JS_NUNBOX32)
+ RegI64 abiReturnRegI64;
+#endif
+
+ // The join registers are used to carry values out of blocks.
+    // joinRegI32 and joinRegI64 must overlap: emitBrIf and
+ // emitBrTable assume that.
+
+ RegI32 joinRegI32;
+ RegI64 joinRegI64;
+ RegF32 joinRegF32;
+ RegF64 joinRegF64;
+
+ // More members: see the stk_ and ctl_ vectors, defined below.
+
+ public:
+ BaseCompiler(const ModuleGeneratorData& mg,
+ Decoder& decoder,
+ const FuncBytes& func,
+ const ValTypeVector& locals,
+ FuncCompileResults& compileResults);
+
+ MOZ_MUST_USE bool init();
+
+ void finish();
+
+ MOZ_MUST_USE bool emitFunction();
+
+ // Used by some of the ScratchRegister implementations.
+ operator MacroAssembler&() const { return masm; }
+
+#ifdef DEBUG
+ bool scratchRegisterTaken() const {
+ return scratchRegisterTaken_;
+ }
+ void setScratchRegisterTaken(bool state) {
+ scratchRegisterTaken_ = state;
+ }
+#endif
+
+ private:
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Out of line code management.
+
+ MOZ_MUST_USE OutOfLineCode* addOutOfLineCode(OutOfLineCode* ool) {
+ if (!ool || !outOfLine_.append(ool))
+ return nullptr;
+ ool->setFramePushed(masm.framePushed());
+ return ool;
+ }
+
+ MOZ_MUST_USE bool generateOutOfLineCode() {
+ for (uint32_t i = 0; i < outOfLine_.length(); i++) {
+ OutOfLineCode* ool = outOfLine_[i];
+ ool->bind(masm);
+ ool->generate(masm);
+ }
+
+ return !masm.oom();
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // The stack frame.
+
+ // SP-relative load and store.
+
+ int32_t localOffsetToSPOffset(int32_t offset) {
+ return masm.framePushed() - offset;
+ }
+
+ void storeToFrameI32(Register r, int32_t offset) {
+ masm.store32(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+ }
+
+ void storeToFrameI64(Register64 r, int32_t offset) {
+ masm.store64(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+ }
+
+ void storeToFramePtr(Register r, int32_t offset) {
+ masm.storePtr(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+ }
+
+ void storeToFrameF64(FloatRegister r, int32_t offset) {
+ masm.storeDouble(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+ }
+
+ void storeToFrameF32(FloatRegister r, int32_t offset) {
+ masm.storeFloat32(r, Address(StackPointer, localOffsetToSPOffset(offset)));
+ }
+
+ void loadFromFrameI32(Register r, int32_t offset) {
+ masm.load32(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+ }
+
+ void loadFromFrameI64(Register64 r, int32_t offset) {
+ masm.load64(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+ }
+
+ void loadFromFramePtr(Register r, int32_t offset) {
+ masm.loadPtr(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+ }
+
+ void loadFromFrameF64(FloatRegister r, int32_t offset) {
+ masm.loadDouble(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+ }
+
+ void loadFromFrameF32(FloatRegister r, int32_t offset) {
+ masm.loadFloat32(Address(StackPointer, localOffsetToSPOffset(offset)), r);
+ }
+
+ // Stack-allocated local slots.
+
+ int32_t pushLocal(size_t nbytes) {
+ if (nbytes == 8)
+ localSize_ = AlignBytes(localSize_, 8u);
+ else if (nbytes == 16)
+ localSize_ = AlignBytes(localSize_, 16u);
+ localSize_ += nbytes;
+ return localSize_; // Locals grow down so capture base address
+ }
+
+ int32_t frameOffsetFromSlot(uint32_t slot, MIRType type) {
+ MOZ_ASSERT(localInfo_[slot].type() == type);
+ return localInfo_[slot].offs();
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Low-level register allocation.
+
+ bool isAvailable(Register r) {
+ return availGPR_.has(r);
+ }
+
+ bool hasGPR() {
+ return !availGPR_.empty();
+ }
+
+ void allocGPR(Register r) {
+ MOZ_ASSERT(isAvailable(r));
+ availGPR_.take(r);
+ }
+
+ Register allocGPR() {
+ MOZ_ASSERT(hasGPR());
+ return availGPR_.takeAny();
+ }
+
+ void freeGPR(Register r) {
+ availGPR_.add(r);
+ }
+
+ bool isAvailable(Register64 r) {
+#ifdef JS_PUNBOX64
+ return isAvailable(r.reg);
+#else
+ return isAvailable(r.low) && isAvailable(r.high);
+#endif
+ }
+
+ bool hasInt64() {
+#ifdef JS_PUNBOX64
+ return !availGPR_.empty();
+#else
+ if (availGPR_.empty())
+ return false;
+ Register r = allocGPR();
+ bool available = !availGPR_.empty();
+ freeGPR(r);
+ return available;
+#endif
+ }
+
+ void allocInt64(Register64 r) {
+ MOZ_ASSERT(isAvailable(r));
+#ifdef JS_PUNBOX64
+ availGPR_.take(r.reg);
+#else
+ availGPR_.take(r.low);
+ availGPR_.take(r.high);
+#endif
+ }
+
+ Register64 allocInt64() {
+ MOZ_ASSERT(hasInt64());
+#ifdef JS_PUNBOX64
+ return Register64(availGPR_.takeAny());
+#else
+ Register high = availGPR_.takeAny();
+ Register low = availGPR_.takeAny();
+ return Register64(high, low);
+#endif
+ }
+
+ void freeInt64(Register64 r) {
+#ifdef JS_PUNBOX64
+ availGPR_.add(r.reg);
+#else
+ availGPR_.add(r.low);
+ availGPR_.add(r.high);
+#endif
+ }
+
+ // Notes on float register allocation.
+ //
+ // The general rule in SpiderMonkey is that float registers can
+ // alias double registers, but there are predicates to handle
+ // exceptions to that rule: hasUnaliasedDouble() and
+ // hasMultiAlias(). The way aliasing actually works is platform
+ // dependent and exposed through the aliased(n, &r) predicate,
+ // etc.
+ //
+ // - hasUnaliasedDouble(): on ARM VFPv3-D32 there are double
+ // registers that cannot be treated as float.
+ // - hasMultiAlias(): on ARM and MIPS a double register aliases
+ // two float registers.
+ // - notes in Architecture-arm.h indicate that when we use a
+ // float register that aliases a double register we only use
+ // the low float register, never the high float register. I
+ // think those notes lie, or at least are confusing.
+ // - notes in Architecture-mips32.h suggest that the MIPS port
+ // will use both low and high float registers except on the
+    //   Loongson, which may be the only MIPS that's being tested, so
+ // who knows what's working.
+ // - SIMD is not yet implemented on ARM or MIPS so constraints
+ // may change there.
+ //
+ // On some platforms (x86, x64, ARM64) but not all (ARM)
+ // ScratchFloat32Register is the same as ScratchDoubleRegister.
+ //
+ // It's a basic invariant of the AllocatableRegisterSet that it
+ // deals properly with aliasing of registers: if s0 or s1 are
+ // allocated then d0 is not allocatable; if s0 and s1 are freed
+ // individually then d0 becomes allocatable.
+
+ template<MIRType t>
+ FloatRegisters::SetType maskFromTypeFPU() {
+ static_assert(t == MIRType::Float32 || t == MIRType::Double, "Float mask type");
+ if (t == MIRType::Float32)
+ return FloatRegisters::AllSingleMask;
+ return FloatRegisters::AllDoubleMask;
+ }
+
+ template<MIRType t>
+ bool hasFPU() {
+ return !!(availFPU_.bits() & maskFromTypeFPU<t>());
+ }
+
+ bool isAvailable(FloatRegister r) {
+ return availFPU_.has(r);
+ }
+
+ void allocFPU(FloatRegister r) {
+ MOZ_ASSERT(isAvailable(r));
+ availFPU_.take(r);
+ }
+
+ template<MIRType t>
+ FloatRegister allocFPU() {
+ MOZ_ASSERT(hasFPU<t>());
+ FloatRegister r =
+ FloatRegisterSet::Intersect(FloatRegisterSet(availFPU_.bits()),
+ FloatRegisterSet(maskFromTypeFPU<t>())).getAny();
+ availFPU_.take(r);
+ return r;
+ }
+
+ void freeFPU(FloatRegister r) {
+ availFPU_.add(r);
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Value stack and high-level register allocation.
+ //
+ // The value stack facilitates some on-the-fly register allocation
+ // and immediate-constant use. It tracks constants, latent
+ // references to locals, register contents, and values on the CPU
+ // stack.
+ //
+ // The stack can be flushed to memory using sync(). This is handy
+ // to avoid problems with control flow and messy register usage
+ // patterns.
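+    //
+    // Illustrative sketch (not taken verbatim from the emitters below):
+    // compiling (i32.add (i32.const 1) (get_local 0)) proceeds roughly as
+    //
+    //     pushI32(1);             // latent constant, no code emitted yet
+    //     pushLocalI32(0);        // latent reference to local 0, no code yet
+    //     RegI32 r1 = popI32();   // materializes the local into a register
+    //     RegI32 r0 = popI32();   // materializes the constant
+    //     masm.add32(r1.reg, r0.reg);
+    //     freeI32(r1);
+    //     pushI32(r0);            // the result stays latent in a register
+    //
+    // Only register pressure or control flow forces sync() to flush such
+    // latent values to the CPU stack.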
+
+ struct Stk
+ {
+ enum Kind
+ {
+ // The Mem opcodes are all clustered at the beginning to
+ // allow for a quick test within sync().
+ MemI32, // 32-bit integer stack value ("offs")
+ MemI64, // 64-bit integer stack value ("offs")
+ MemF32, // 32-bit floating stack value ("offs")
+ MemF64, // 64-bit floating stack value ("offs")
+
+ // The Local opcodes follow the Mem opcodes for a similar
+ // quick test within hasLocal().
+ LocalI32, // Local int32 var ("slot")
+ LocalI64, // Local int64 var ("slot")
+ LocalF32, // Local float32 var ("slot")
+ LocalF64, // Local double var ("slot")
+
+ RegisterI32, // 32-bit integer register ("i32reg")
+ RegisterI64, // 64-bit integer register ("i64reg")
+ RegisterF32, // 32-bit floating register ("f32reg")
+ RegisterF64, // 64-bit floating register ("f64reg")
+
+ ConstI32, // 32-bit integer constant ("i32val")
+ ConstI64, // 64-bit integer constant ("i64val")
+ ConstF32, // 32-bit floating constant ("f32val")
+ ConstF64, // 64-bit floating constant ("f64val")
+
+ None // Uninitialized or void
+ };
+
+ Kind kind_;
+
+ static const Kind MemLast = MemF64;
+ static const Kind LocalLast = LocalF64;
+
+ union {
+ RegI32 i32reg_;
+ RegI64 i64reg_;
+ RegF32 f32reg_;
+ RegF64 f64reg_;
+ int32_t i32val_;
+ int64_t i64val_;
+ RawF32 f32val_;
+ RawF64 f64val_;
+ uint32_t slot_;
+ uint32_t offs_;
+ };
+
+ Stk() { kind_ = None; }
+
+ Kind kind() const { return kind_; }
+ bool isMem() const { return kind_ <= MemLast; }
+
+ RegI32 i32reg() const { MOZ_ASSERT(kind_ == RegisterI32); return i32reg_; }
+ RegI64 i64reg() const { MOZ_ASSERT(kind_ == RegisterI64); return i64reg_; }
+ RegF32 f32reg() const { MOZ_ASSERT(kind_ == RegisterF32); return f32reg_; }
+ RegF64 f64reg() const { MOZ_ASSERT(kind_ == RegisterF64); return f64reg_; }
+ int32_t i32val() const { MOZ_ASSERT(kind_ == ConstI32); return i32val_; }
+ int64_t i64val() const { MOZ_ASSERT(kind_ == ConstI64); return i64val_; }
+ RawF32 f32val() const { MOZ_ASSERT(kind_ == ConstF32); return f32val_; }
+ RawF64 f64val() const { MOZ_ASSERT(kind_ == ConstF64); return f64val_; }
+ uint32_t slot() const { MOZ_ASSERT(kind_ > MemLast && kind_ <= LocalLast); return slot_; }
+ uint32_t offs() const { MOZ_ASSERT(isMem()); return offs_; }
+
+ void setI32Reg(RegI32 r) { kind_ = RegisterI32; i32reg_ = r; }
+ void setI64Reg(RegI64 r) { kind_ = RegisterI64; i64reg_ = r; }
+ void setF32Reg(RegF32 r) { kind_ = RegisterF32; f32reg_ = r; }
+ void setF64Reg(RegF64 r) { kind_ = RegisterF64; f64reg_ = r; }
+ void setI32Val(int32_t v) { kind_ = ConstI32; i32val_ = v; }
+ void setI64Val(int64_t v) { kind_ = ConstI64; i64val_ = v; }
+ void setF32Val(RawF32 v) { kind_ = ConstF32; f32val_ = v; }
+ void setF64Val(RawF64 v) { kind_ = ConstF64; f64val_ = v; }
+ void setSlot(Kind k, uint32_t v) { MOZ_ASSERT(k > MemLast && k <= LocalLast); kind_ = k; slot_ = v; }
+ void setOffs(Kind k, uint32_t v) { MOZ_ASSERT(k <= MemLast); kind_ = k; offs_ = v; }
+ };
+
+ Vector<Stk, 8, SystemAllocPolicy> stk_;
+
+ Stk& push() {
+ stk_.infallibleEmplaceBack(Stk());
+ return stk_.back();
+ }
+
+ Register64 invalidRegister64() {
+ return Register64::Invalid();
+ }
+
+ RegI32 invalidI32() {
+ return RegI32(Register::Invalid());
+ }
+
+ RegI64 invalidI64() {
+ return RegI64(invalidRegister64());
+ }
+
+ RegF64 invalidF64() {
+ return RegF64(InvalidFloatReg);
+ }
+
+ RegI32 fromI64(RegI64 r) {
+ return RegI32(lowPart(r));
+ }
+
+ RegI64 widenI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailable(r.reg));
+#ifdef JS_PUNBOX64
+ return RegI64(Register64(r.reg));
+#else
+ RegI32 high = needI32();
+ return RegI64(Register64(high.reg, r.reg));
+#endif
+ }
+
+ Register lowPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return r.reg.reg;
+#else
+ return r.reg.low;
+#endif
+ }
+
+ Register maybeHighPart(RegI64 r) {
+#ifdef JS_PUNBOX64
+ return Register::Invalid();
+#else
+ return r.reg.high;
+#endif
+ }
+
+ void maybeClearHighPart(RegI64 r) {
+#ifdef JS_NUNBOX32
+ masm.move32(Imm32(0), r.reg.high);
+#endif
+ }
+
+ void freeI32(RegI32 r) {
+ freeGPR(r.reg);
+ }
+
+ void freeI64(RegI64 r) {
+ freeInt64(r.reg);
+ }
+
+ void freeI64Except(RegI64 r, RegI32 except) {
+#ifdef JS_PUNBOX64
+ MOZ_ASSERT(r.reg.reg == except.reg);
+#else
+ MOZ_ASSERT(r.reg.high == except.reg || r.reg.low == except.reg);
+ freeI64(r);
+ needI32(except);
+#endif
+ }
+
+ void freeF64(RegF64 r) {
+ freeFPU(r.reg);
+ }
+
+ void freeF32(RegF32 r) {
+ freeFPU(r.reg);
+ }
+
+ MOZ_MUST_USE RegI32 needI32() {
+ if (!hasGPR())
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ return RegI32(allocGPR());
+ }
+
+ void needI32(RegI32 specific) {
+ if (!isAvailable(specific.reg))
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ allocGPR(specific.reg);
+ }
+
+ // TODO / OPTIMIZE: need2xI32() can be optimized along with needI32()
+ // to avoid sync(). (Bug 1316802)
+
+ void need2xI32(RegI32 r0, RegI32 r1) {
+ needI32(r0);
+ needI32(r1);
+ }
+
+ MOZ_MUST_USE RegI64 needI64() {
+ if (!hasInt64())
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ return RegI64(allocInt64());
+ }
+
+ void needI64(RegI64 specific) {
+ if (!isAvailable(specific.reg))
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ allocInt64(specific.reg);
+ }
+
+ void need2xI64(RegI64 r0, RegI64 r1) {
+ needI64(r0);
+ needI64(r1);
+ }
+
+ MOZ_MUST_USE RegF32 needF32() {
+ if (!hasFPU<MIRType::Float32>())
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ return RegF32(allocFPU<MIRType::Float32>());
+ }
+
+ void needF32(RegF32 specific) {
+ if (!isAvailable(specific.reg))
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ allocFPU(specific.reg);
+ }
+
+ MOZ_MUST_USE RegF64 needF64() {
+ if (!hasFPU<MIRType::Double>())
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ return RegF64(allocFPU<MIRType::Double>());
+ }
+
+ void needF64(RegF64 specific) {
+ if (!isAvailable(specific.reg))
+ sync(); // TODO / OPTIMIZE: improve this (Bug 1316802)
+ allocFPU(specific.reg);
+ }
+
+ void moveI32(RegI32 src, RegI32 dest) {
+ if (src != dest)
+ masm.move32(src.reg, dest.reg);
+ }
+
+ void moveI64(RegI64 src, RegI64 dest) {
+ if (src != dest)
+ masm.move64(src.reg, dest.reg);
+ }
+
+ void moveF64(RegF64 src, RegF64 dest) {
+ if (src != dest)
+ masm.moveDouble(src.reg, dest.reg);
+ }
+
+ void moveF32(RegF32 src, RegF32 dest) {
+ if (src != dest)
+ masm.moveFloat32(src.reg, dest.reg);
+ }
+
+ void setI64(int64_t v, RegI64 r) {
+ masm.move64(Imm64(v), r.reg);
+ }
+
+ void loadConstI32(Register r, Stk& src) {
+ masm.mov(ImmWord((uint32_t)src.i32val() & 0xFFFFFFFFU), r);
+ }
+
+ void loadMemI32(Register r, Stk& src) {
+ loadFromFrameI32(r, src.offs());
+ }
+
+ void loadLocalI32(Register r, Stk& src) {
+ loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int32));
+ }
+
+ void loadRegisterI32(Register r, Stk& src) {
+ if (src.i32reg().reg != r)
+ masm.move32(src.i32reg().reg, r);
+ }
+
+ void loadI32(Register r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(r, src);
+ break;
+ case Stk::MemI32:
+ loadMemI32(r, src);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(r, src);
+ break;
+ case Stk::RegisterI32:
+ loadRegisterI32(r, src);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected int on stack");
+ }
+ }
+
+ // TODO / OPTIMIZE: Refactor loadI64, loadF64, and loadF32 in the
+ // same way as loadI32 to avoid redundant dispatch in callers of
+ // these load() functions. (Bug 1316816, also see annotations on
+ // popI64 et al below.)
+
+ void loadI64(Register64 r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ masm.move64(Imm64(src.i64val()), r);
+ break;
+ case Stk::MemI64:
+ loadFromFrameI64(r, src.offs());
+ break;
+ case Stk::LocalI64:
+ loadFromFrameI64(r, frameOffsetFromSlot(src.slot(), MIRType::Int64));
+ break;
+ case Stk::RegisterI64:
+ if (src.i64reg().reg != r)
+ masm.move64(src.i64reg().reg, r);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected int on stack");
+ }
+ }
+
+#ifdef JS_NUNBOX32
+ void loadI64Low(Register r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ masm.move32(Imm64(src.i64val()).low(), r);
+ break;
+ case Stk::MemI64:
+ loadFromFrameI32(r, src.offs() - INT64LOW_OFFSET);
+ break;
+ case Stk::LocalI64:
+ loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int64) - INT64LOW_OFFSET);
+ break;
+ case Stk::RegisterI64:
+ if (src.i64reg().reg.low != r)
+ masm.move32(src.i64reg().reg.low, r);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected int on stack");
+ }
+ }
+
+ void loadI64High(Register r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstI64:
+ masm.move32(Imm64(src.i64val()).hi(), r);
+ break;
+ case Stk::MemI64:
+ loadFromFrameI32(r, src.offs() - INT64HIGH_OFFSET);
+ break;
+ case Stk::LocalI64:
+ loadFromFrameI32(r, frameOffsetFromSlot(src.slot(), MIRType::Int64) - INT64HIGH_OFFSET);
+ break;
+ case Stk::RegisterI64:
+ if (src.i64reg().reg.high != r)
+ masm.move32(src.i64reg().reg.high, r);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Expected int on stack");
+ }
+ }
+#endif
+
+ void loadF64(FloatRegister r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstF64:
+ masm.loadConstantDouble(src.f64val(), r);
+ break;
+ case Stk::MemF64:
+ loadFromFrameF64(r, src.offs());
+ break;
+ case Stk::LocalF64:
+ loadFromFrameF64(r, frameOffsetFromSlot(src.slot(), MIRType::Double));
+ break;
+ case Stk::RegisterF64:
+ if (src.f64reg().reg != r)
+ masm.moveDouble(src.f64reg().reg, r);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected double on stack");
+ }
+ }
+
+ void loadF32(FloatRegister r, Stk& src) {
+ switch (src.kind()) {
+ case Stk::ConstF32:
+ masm.loadConstantFloat32(src.f32val(), r);
+ break;
+ case Stk::MemF32:
+ loadFromFrameF32(r, src.offs());
+ break;
+ case Stk::LocalF32:
+ loadFromFrameF32(r, frameOffsetFromSlot(src.slot(), MIRType::Float32));
+ break;
+ case Stk::RegisterF32:
+ if (src.f32reg().reg != r)
+ masm.moveFloat32(src.f32reg().reg, r);
+ break;
+ case Stk::None:
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected float on stack");
+ }
+ }
+
+ // Flush all local and register value stack elements to memory.
+ //
+ // TODO / OPTIMIZE: As this is fairly expensive and causes worse
+ // code to be emitted subsequently, it is useful to avoid calling
+ // it. (Bug 1316802)
+ //
+ // Some optimization has been done already. Remaining
+ // opportunities:
+ //
+ // - It would be interesting to see if we can specialize it
+ // before calls with particularly simple signatures, or where
+ // we can do parallel assignment of register arguments, or
+ // similar. See notes in emitCall().
+ //
+ // - Operations that need specific registers: multiply, quotient,
+ // remainder, will tend to sync because the registers we need
+ // will tend to be allocated. We may be able to avoid that by
+ // prioritizing registers differently (takeLast instead of
+ // takeFirst) but we may also be able to allocate an unused
+ // register on demand to free up one we need, thus avoiding the
+ // sync. That type of fix would go into needI32().
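+    //
+    //   (For example, on x86 an i32 quotient needs eax and edx specifically,
+    //   so a divide in the middle of a large expression frequently forces a
+    //   sync.)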
+
+ void sync() {
+ size_t start = 0;
+ size_t lim = stk_.length();
+
+ for (size_t i = lim; i > 0; i--) {
+ // Memory opcodes are first in the enum, single check against MemLast is fine.
+ if (stk_[i - 1].kind() <= Stk::MemLast) {
+ start = i;
+ break;
+ }
+ }
+
+ for (size_t i = start; i < lim; i++) {
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::LocalI32: {
+ ScratchI32 scratch(*this);
+ loadLocalI32(scratch, v);
+ masm.Push(scratch);
+ v.setOffs(Stk::MemI32, masm.framePushed());
+ break;
+ }
+ case Stk::RegisterI32: {
+ masm.Push(v.i32reg().reg);
+ freeI32(v.i32reg());
+ v.setOffs(Stk::MemI32, masm.framePushed());
+ break;
+ }
+ case Stk::LocalI64: {
+ ScratchI32 scratch(*this);
+#ifdef JS_PUNBOX64
+ loadI64(Register64(scratch), v);
+ masm.Push(scratch);
+#else
+ int32_t offset = frameOffsetFromSlot(v.slot(), MIRType::Int64);
+ loadFromFrameI32(scratch, offset - INT64HIGH_OFFSET);
+ masm.Push(scratch);
+ loadFromFrameI32(scratch, offset - INT64LOW_OFFSET);
+ masm.Push(scratch);
+#endif
+ v.setOffs(Stk::MemI64, masm.framePushed());
+ break;
+ }
+ case Stk::RegisterI64: {
+#ifdef JS_PUNBOX64
+ masm.Push(v.i64reg().reg.reg);
+ freeI64(v.i64reg());
+#else
+ masm.Push(v.i64reg().reg.high);
+ masm.Push(v.i64reg().reg.low);
+ freeI64(v.i64reg());
+#endif
+ v.setOffs(Stk::MemI64, masm.framePushed());
+ break;
+ }
+ case Stk::LocalF64: {
+ ScratchF64 scratch(*this);
+ loadF64(scratch, v);
+ masm.Push(scratch);
+ v.setOffs(Stk::MemF64, masm.framePushed());
+ break;
+ }
+ case Stk::RegisterF64: {
+ masm.Push(v.f64reg().reg);
+ freeF64(v.f64reg());
+ v.setOffs(Stk::MemF64, masm.framePushed());
+ break;
+ }
+ case Stk::LocalF32: {
+ ScratchF32 scratch(*this);
+ loadF32(scratch, v);
+ masm.Push(scratch);
+ v.setOffs(Stk::MemF32, masm.framePushed());
+ break;
+ }
+ case Stk::RegisterF32: {
+ masm.Push(v.f32reg().reg);
+ freeF32(v.f32reg());
+ v.setOffs(Stk::MemF32, masm.framePushed());
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ maxFramePushed_ = Max(maxFramePushed_, int32_t(masm.framePushed()));
+ }
+
+ // This is an optimization used to avoid calling sync() for
+ // setLocal(): if the local does not exist unresolved on the stack
+ // then we can skip the sync.
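+    //
+    // (Illustrative example: before a store to local 0, a latent LocalI32(0)
+    // entry still on the value stack must be flushed, since it refers to the
+    // slot's old value; if no such entry exists the flush can be skipped.)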
+
+ bool hasLocal(uint32_t slot) {
+ for (size_t i = stk_.length(); i > 0; i--) {
+ // Memory opcodes are first in the enum, single check against MemLast is fine.
+ Stk::Kind kind = stk_[i-1].kind();
+ if (kind <= Stk::MemLast)
+ return false;
+
+ // Local opcodes follow memory opcodes in the enum, single check against
+ // LocalLast is sufficient.
+ if (kind <= Stk::LocalLast && stk_[i-1].slot() == slot)
+ return true;
+ }
+ return false;
+ }
+
+ void syncLocal(uint32_t slot) {
+ if (hasLocal(slot))
+ sync(); // TODO / OPTIMIZE: Improve this? (Bug 1316817)
+ }
+
+ // Push the register r onto the stack.
+
+ void pushI32(RegI32 r) {
+ MOZ_ASSERT(!isAvailable(r.reg));
+ Stk& x = push();
+ x.setI32Reg(r);
+ }
+
+ void pushI64(RegI64 r) {
+ MOZ_ASSERT(!isAvailable(r.reg));
+ Stk& x = push();
+ x.setI64Reg(r);
+ }
+
+ void pushF64(RegF64 r) {
+ MOZ_ASSERT(!isAvailable(r.reg));
+ Stk& x = push();
+ x.setF64Reg(r);
+ }
+
+ void pushF32(RegF32 r) {
+ MOZ_ASSERT(!isAvailable(r.reg));
+ Stk& x = push();
+ x.setF32Reg(r);
+ }
+
+ // Push the value onto the stack.
+
+ void pushI32(int32_t v) {
+ Stk& x = push();
+ x.setI32Val(v);
+ }
+
+ void pushI64(int64_t v) {
+ Stk& x = push();
+ x.setI64Val(v);
+ }
+
+ void pushF64(RawF64 v) {
+ Stk& x = push();
+ x.setF64Val(v);
+ }
+
+ void pushF32(RawF32 v) {
+ Stk& x = push();
+ x.setF32Val(v);
+ }
+
+ // Push the local slot onto the stack. The slot will not be read
+ // here; it will be read when it is consumed, or when a side
+ // effect to the slot forces its value to be saved.
+
+ void pushLocalI32(uint32_t slot) {
+ Stk& x = push();
+ x.setSlot(Stk::LocalI32, slot);
+ }
+
+ void pushLocalI64(uint32_t slot) {
+ Stk& x = push();
+ x.setSlot(Stk::LocalI64, slot);
+ }
+
+ void pushLocalF64(uint32_t slot) {
+ Stk& x = push();
+ x.setSlot(Stk::LocalF64, slot);
+ }
+
+ void pushLocalF32(uint32_t slot) {
+ Stk& x = push();
+ x.setSlot(Stk::LocalF32, slot);
+ }
+
+ // PRIVATE. Call only from other popI32() variants.
+ // v must be the stack top.
+
+ void popI32(Stk& v, RegI32 r) {
+ switch (v.kind()) {
+ case Stk::ConstI32:
+ loadConstI32(r.reg, v);
+ break;
+ case Stk::LocalI32:
+ loadLocalI32(r.reg, v);
+ break;
+ case Stk::MemI32:
+ masm.Pop(r.reg);
+ break;
+ case Stk::RegisterI32:
+ moveI32(v.i32reg(), r);
+ break;
+ case Stk::None:
+ // This case crops up in situations where there's unreachable code that
+ // the type system interprets as "generating" a value of the correct type:
+ //
+ // (if (return) E1 E2) type is type(E1) meet type(E2)
+ // (if E (unreachable) (i32.const 1)) type is int
+ // (if E (i32.const 1) (unreachable)) type is int
+ //
+ // It becomes silly to handle this throughout the code, so just handle it
+ // here even if that means weaker run-time checking.
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected int on stack");
+ }
+ }
+
+ MOZ_MUST_USE RegI32 popI32() {
+ Stk& v = stk_.back();
+ RegI32 r;
+ if (v.kind() == Stk::RegisterI32)
+ r = v.i32reg();
+ else
+ popI32(v, (r = needI32()));
+ stk_.popBack();
+ return r;
+ }
+
+ RegI32 popI32(RegI32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI32 && v.i32reg() == specific)) {
+ needI32(specific);
+ popI32(v, specific);
+ if (v.kind() == Stk::RegisterI32)
+ freeI32(v.i32reg());
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ // PRIVATE. Call only from other popI64() variants.
+ // v must be the stack top.
+
+ void popI64(Stk& v, RegI64 r) {
+ // TODO / OPTIMIZE: avoid loadI64() here. (Bug 1316816)
+ switch (v.kind()) {
+ case Stk::ConstI64:
+ case Stk::LocalI64:
+ loadI64(r.reg, v);
+ break;
+ case Stk::MemI64:
+#ifdef JS_PUNBOX64
+ masm.Pop(r.reg.reg);
+#else
+ masm.Pop(r.reg.low);
+ masm.Pop(r.reg.high);
+#endif
+ break;
+ case Stk::RegisterI64:
+ moveI64(v.i64reg(), r);
+ break;
+ case Stk::None:
+ // See popI32()
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected long on stack");
+ }
+ }
+
+ MOZ_MUST_USE RegI64 popI64() {
+ Stk& v = stk_.back();
+ RegI64 r;
+ if (v.kind() == Stk::RegisterI64)
+ r = v.i64reg();
+ else
+ popI64(v, (r = needI64()));
+ stk_.popBack();
+ return r;
+ }
+
+ // Note, the stack top can be in one half of "specific" on 32-bit
+ // systems. We can optimize, but for simplicity, if the register
+ // does not match exactly, then just force the stack top to memory
+ // and then read it back in.
+
+ RegI64 popI64(RegI64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterI64 && v.i64reg() == specific)) {
+ needI64(specific);
+ popI64(v, specific);
+ if (v.kind() == Stk::RegisterI64)
+ freeI64(v.i64reg());
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ // PRIVATE. Call only from other popF64() variants.
+ // v must be the stack top.
+
+ void popF64(Stk& v, RegF64 r) {
+ // TODO / OPTIMIZE: avoid loadF64 here. (Bug 1316816)
+ switch (v.kind()) {
+ case Stk::ConstF64:
+ case Stk::LocalF64:
+ loadF64(r.reg, v);
+ break;
+ case Stk::MemF64:
+ masm.Pop(r.reg);
+ break;
+ case Stk::RegisterF64:
+ moveF64(v.f64reg(), r);
+ break;
+ case Stk::None:
+ // See popI32()
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected double on stack");
+ }
+ }
+
+ MOZ_MUST_USE RegF64 popF64() {
+ Stk& v = stk_.back();
+ RegF64 r;
+ if (v.kind() == Stk::RegisterF64)
+ r = v.f64reg();
+ else
+ popF64(v, (r = needF64()));
+ stk_.popBack();
+ return r;
+ }
+
+ RegF64 popF64(RegF64 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF64 && v.f64reg() == specific)) {
+ needF64(specific);
+ popF64(v, specific);
+ if (v.kind() == Stk::RegisterF64)
+ freeF64(v.f64reg());
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ // PRIVATE. Call only from other popF32() variants.
+ // v must be the stack top.
+
+ void popF32(Stk& v, RegF32 r) {
+ // TODO / OPTIMIZE: avoid loadF32 here. (Bug 1316816)
+ switch (v.kind()) {
+ case Stk::ConstF32:
+ case Stk::LocalF32:
+ loadF32(r.reg, v);
+ break;
+ case Stk::MemF32:
+ masm.Pop(r.reg);
+ break;
+ case Stk::RegisterF32:
+ moveF32(v.f32reg(), r);
+ break;
+ case Stk::None:
+ // See popI32()
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: expected float on stack");
+ }
+ }
+
+ MOZ_MUST_USE RegF32 popF32() {
+ Stk& v = stk_.back();
+ RegF32 r;
+ if (v.kind() == Stk::RegisterF32)
+ r = v.f32reg();
+ else
+ popF32(v, (r = needF32()));
+ stk_.popBack();
+ return r;
+ }
+
+ RegF32 popF32(RegF32 specific) {
+ Stk& v = stk_.back();
+
+ if (!(v.kind() == Stk::RegisterF32 && v.f32reg() == specific)) {
+ needF32(specific);
+ popF32(v, specific);
+ if (v.kind() == Stk::RegisterF32)
+ freeF32(v.f32reg());
+ }
+
+ stk_.popBack();
+ return specific;
+ }
+
+ MOZ_MUST_USE bool popConstI32(int32_t& c) {
+ Stk& v = stk_.back();
+ if (v.kind() != Stk::ConstI32)
+ return false;
+ c = v.i32val();
+ stk_.popBack();
+ return true;
+ }
+
+ // TODO / OPTIMIZE (Bug 1316818): At the moment we use ReturnReg
+ // for JoinReg. It is possible other choices would lead to better
+ // register allocation, as ReturnReg is often first in the
+ // register set and will be heavily wanted by the register
+ // allocator that uses takeFirst().
+ //
+ // Obvious options:
+ // - pick a register at the back of the register set
+ // - pick a random register per block (different blocks have
+ // different join regs)
+ //
+ // On the other hand, we sync() before every block and only the
+ // JoinReg is live out of the block. But on the way out, we
+ // currently pop the JoinReg before freeing regs to be discarded,
+ // so there is a real risk of some pointless shuffling there. If
+ // we instead integrate the popping of the join reg into the
+ // popping of the stack we can just use the JoinReg as it will
+ // become available in that process.
+
+ MOZ_MUST_USE AnyReg popJoinReg() {
+ switch (stk_.back().kind()) {
+ case Stk::RegisterI32:
+ case Stk::ConstI32:
+ case Stk::MemI32:
+ case Stk::LocalI32:
+ return AnyReg(popI32(joinRegI32));
+ case Stk::RegisterI64:
+ case Stk::ConstI64:
+ case Stk::MemI64:
+ case Stk::LocalI64:
+ return AnyReg(popI64(joinRegI64));
+ case Stk::RegisterF64:
+ case Stk::ConstF64:
+ case Stk::MemF64:
+ case Stk::LocalF64:
+ return AnyReg(popF64(joinRegF64));
+ case Stk::RegisterF32:
+ case Stk::ConstF32:
+ case Stk::MemF32:
+ case Stk::LocalF32:
+ return AnyReg(popF32(joinRegF32));
+ case Stk::None:
+ stk_.popBack();
+ return AnyReg();
+ default:
+ MOZ_CRASH("Compiler bug: unexpected value on stack");
+ }
+ }
+
+ MOZ_MUST_USE AnyReg allocJoinReg(ExprType type) {
+ switch (type) {
+ case ExprType::I32:
+ allocGPR(joinRegI32.reg);
+ return AnyReg(joinRegI32);
+ case ExprType::I64:
+ allocInt64(joinRegI64.reg);
+ return AnyReg(joinRegI64);
+ case ExprType::F32:
+ allocFPU(joinRegF32.reg);
+ return AnyReg(joinRegF32);
+ case ExprType::F64:
+ allocFPU(joinRegF64.reg);
+ return AnyReg(joinRegF64);
+ case ExprType::Void:
+ MOZ_CRASH("Compiler bug: allocating void join reg");
+ default:
+ MOZ_CRASH("Compiler bug: unexpected type");
+ }
+ }
+
+ void pushJoinReg(AnyReg r) {
+ switch (r.tag) {
+ case AnyReg::NONE:
+ MOZ_CRASH("Compile bug: attempting to push void");
+ break;
+ case AnyReg::I32:
+ pushI32(r.i32());
+ break;
+ case AnyReg::I64:
+ pushI64(r.i64());
+ break;
+ case AnyReg::F64:
+ pushF64(r.f64());
+ break;
+ case AnyReg::F32:
+ pushF32(r.f32());
+ break;
+ }
+ }
+
+ void freeJoinReg(AnyReg r) {
+ switch (r.tag) {
+ case AnyReg::NONE:
+ MOZ_CRASH("Compile bug: attempting to free void reg");
+ break;
+ case AnyReg::I32:
+ freeI32(r.i32());
+ break;
+ case AnyReg::I64:
+ freeI64(r.i64());
+ break;
+ case AnyReg::F64:
+ freeF64(r.f64());
+ break;
+ case AnyReg::F32:
+ freeF32(r.f32());
+ break;
+ }
+ }
+
+ void maybeReserveJoinRegI(ExprType type) {
+ if (type == ExprType::I32)
+ needI32(joinRegI32);
+ else if (type == ExprType::I64)
+ needI64(joinRegI64);
+ }
+
+ void maybeUnreserveJoinRegI(ExprType type) {
+ if (type == ExprType::I32)
+ freeI32(joinRegI32);
+ else if (type == ExprType::I64)
+ freeI64(joinRegI64);
+ }
+
+ // Return the amount of execution stack consumed by the top numval
+ // values on the value stack.
+
+ size_t stackConsumed(size_t numval) {
+ size_t size = 0;
+ MOZ_ASSERT(numval <= stk_.length());
+ for (uint32_t i = stk_.length() - 1; numval > 0; numval--, i--) {
+ // The size computations come from the implementation of Push() in
+ // MacroAssembler-x86-shared.cpp and MacroAssembler-arm-shared.cpp,
+ // and from VFPRegister::size() in Architecture-arm.h.
+ //
+            // On ARM, unlike on x86, we push a single (4 bytes) for a float.
+
+ Stk& v = stk_[i];
+ switch (v.kind()) {
+ case Stk::MemI32:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ size += sizeof(intptr_t);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: stackConsumed I32");
+#endif
+ break;
+ case Stk::MemI64:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ size += sizeof(int64_t);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: stackConsumed I64");
+#endif
+ break;
+ case Stk::MemF64:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ size += sizeof(double);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: stackConsumed F64");
+#endif
+ break;
+ case Stk::MemF32:
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ size += sizeof(double);
+#elif defined(JS_CODEGEN_ARM)
+ size += sizeof(float);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: stackConsumed F32");
+#endif
+ break;
+ default:
+ break;
+ }
+ }
+ return size;
+ }
+
+ void popValueStackTo(uint32_t stackSize) {
+ for (uint32_t i = stk_.length(); i > stackSize; i--) {
+ Stk& v = stk_[i-1];
+ switch (v.kind()) {
+ case Stk::RegisterI32:
+ freeI32(v.i32reg());
+ break;
+ case Stk::RegisterI64:
+ freeI64(v.i64reg());
+ break;
+ case Stk::RegisterF64:
+ freeF64(v.f64reg());
+ break;
+ case Stk::RegisterF32:
+ freeF32(v.f32reg());
+ break;
+ default:
+ break;
+ }
+ }
+ stk_.shrinkTo(stackSize);
+ }
+
+ void popValueStackBy(uint32_t items) {
+ popValueStackTo(stk_.length() - items);
+ }
+
+ // Before branching to an outer control label, pop the execution
+ // stack to the level expected by that region, but do not free the
+ // stack as that will happen as compilation leaves the block.
+
+ void popStackBeforeBranch(uint32_t framePushed) {
+ uint32_t frameHere = masm.framePushed();
+ if (frameHere > framePushed)
+ masm.addPtr(ImmWord(frameHere - framePushed), StackPointer);
+ }
+
+ // Before exiting a nested control region, pop the execution stack
+ // to the level expected by the nesting region, and free the
+ // stack.
+
+ void popStackOnBlockExit(uint32_t framePushed) {
+ uint32_t frameHere = masm.framePushed();
+ if (frameHere > framePushed) {
+ if (deadCode_)
+ masm.adjustStack(frameHere - framePushed);
+ else
+ masm.freeStack(frameHere - framePushed);
+ }
+ }
+
+ void popStackIfMemory() {
+ if (peek(0).isMem())
+ masm.freeStack(stackConsumed(1));
+ }
+
+ // Peek at the stack, for calls.
+
+ Stk& peek(uint32_t relativeDepth) {
+ return stk_[stk_.length()-1-relativeDepth];
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Control stack
+
+ Vector<Control, 8, SystemAllocPolicy> ctl_;
+
+ MOZ_MUST_USE bool pushControl(UniquePooledLabel* label, UniquePooledLabel* otherLabel = nullptr)
+ {
+ uint32_t framePushed = masm.framePushed();
+ uint32_t stackSize = stk_.length();
+
+ if (!ctl_.emplaceBack(Control(framePushed, stackSize)))
+ return false;
+ if (label)
+ ctl_.back().label = label->release();
+ if (otherLabel)
+ ctl_.back().otherLabel = otherLabel->release();
+ ctl_.back().deadOnArrival = deadCode_;
+ return true;
+ }
+
+ void popControl() {
+ Control last = ctl_.popCopy();
+ if (last.label)
+ freeLabel(last.label);
+ if (last.otherLabel)
+ freeLabel(last.otherLabel);
+
+ if (deadCode_ && !ctl_.empty())
+ popValueStackTo(ctl_.back().stackSize);
+ }
+
+ Control& controlItem(uint32_t relativeDepth) {
+ return ctl_[ctl_.length() - 1 - relativeDepth];
+ }
+
+ MOZ_MUST_USE PooledLabel* newLabel() {
+ // TODO / INVESTIGATE (Bug 1316819): allocate() is fallible, but we can
+ // probably rely on an infallible allocator here. That would simplify
+ // code later.
+ PooledLabel* candidate = labelPool_.allocate();
+ if (!candidate)
+ return nullptr;
+ return new (candidate) PooledLabel(this);
+ }
+
+ void freeLabel(PooledLabel* label) {
+ label->~PooledLabel();
+ labelPool_.free(label);
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Function prologue and epilogue.
+
+ void beginFunction() {
+ JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
+
+ SigIdDesc sigId = mg_.funcSigs[func_.index()]->id;
+ GenerateFunctionPrologue(masm, localSize_, sigId, &compileResults_.offsets());
+
+ MOZ_ASSERT(masm.framePushed() == uint32_t(localSize_));
+
+ maxFramePushed_ = localSize_;
+
+ // We won't know until after we've generated code how big the
+ // frame will be (we may need arbitrary spill slots and
+ // outgoing param slots) so branch to code emitted after the
+ // function body that will perform the check.
+ //
+ // Code there will also assume that the fixed-size stack frame
+ // has been allocated.
+
+ masm.jump(&outOfLinePrologue_);
+ masm.bind(&bodyLabel_);
+
+ // Copy arguments from registers to stack.
+
+ const ValTypeVector& args = func_.sig().args();
+
+ for (ABIArgIter<const ValTypeVector> i(args); !i.done(); i++) {
+ Local& l = localInfo_[i.index()];
+ switch (i.mirType()) {
+ case MIRType::Int32:
+ if (i->argInRegister())
+ storeToFrameI32(i->gpr(), l.offs());
+ break;
+ case MIRType::Int64:
+ if (i->argInRegister())
+ storeToFrameI64(i->gpr64(), l.offs());
+ break;
+ case MIRType::Double:
+ if (i->argInRegister())
+ storeToFrameF64(i->fpu(), l.offs());
+ break;
+ case MIRType::Float32:
+ if (i->argInRegister())
+ storeToFrameF32(i->fpu(), l.offs());
+ break;
+ default:
+ MOZ_CRASH("Function argument type");
+ }
+ }
+
+ // The TLS pointer is always passed as a hidden argument in WasmTlsReg.
+ // Save it into its assigned local slot.
+ storeToFramePtr(WasmTlsReg, localInfo_[tlsSlot_].offs());
+
+ // Initialize the stack locals to zero.
+ //
+ // The following are all Bug 1316820:
+ //
+ // TODO / OPTIMIZE: on x64, at least, scratch will be a 64-bit
+ // register and we can move 64 bits at a time.
+ //
+ // TODO / OPTIMIZE: On SSE2 or better SIMD systems we may be
+ // able to store 128 bits at a time. (I suppose on some
+ // systems we have 512-bit SIMD for that matter.)
+ //
+ // TODO / OPTIMIZE: if we have only one initializing store
+ // then it's better to store a zero literal, probably.
+
+ if (varLow_ < varHigh_) {
+ ScratchI32 scratch(*this);
+ masm.mov(ImmWord(0), scratch);
+ for (int32_t i = varLow_ ; i < varHigh_ ; i += 4)
+ storeToFrameI32(scratch, i + 4);
+ }
+ }
+
+ bool endFunction() {
+ // Out-of-line prologue. Assumes that the in-line prologue has
+ // been executed and that a frame of size = localSize_ + sizeof(Frame)
+ // has been allocated.
+
+ masm.bind(&outOfLinePrologue_);
+
+ MOZ_ASSERT(maxFramePushed_ >= localSize_);
+
+ // ABINonArgReg0 != ScratchReg, which can be used by branchPtr().
+
+ masm.movePtr(masm.getStackPointer(), ABINonArgReg0);
+ if (maxFramePushed_ - localSize_)
+ masm.subPtr(Imm32(maxFramePushed_ - localSize_), ABINonArgReg0);
+ masm.branchPtr(Assembler::Below,
+ Address(WasmTlsReg, offsetof(TlsData, stackLimit)),
+ ABINonArgReg0,
+ &bodyLabel_);
+
+ // Since we just overflowed the stack, to be on the safe side, pop the
+ // stack so that, when the trap exit stub executes, it is a safe
+ // distance away from the end of the native stack.
+ if (localSize_)
+ masm.addToStackPtr(Imm32(localSize_));
+ masm.jump(TrapDesc(prologueTrapOffset_, Trap::StackOverflow, /* framePushed = */ 0));
+
+ masm.bind(&returnLabel_);
+
+ // Restore the TLS register in case it was overwritten by the function.
+ loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+
+ GenerateFunctionEpilogue(masm, localSize_, &compileResults_.offsets());
+
+#if defined(JS_ION_PERF)
+ // FIXME - profiling code missing. Bug 1286948.
+
+ // Note the end of the inline code and start of the OOL code.
+ //gen->perfSpewer().noteEndInlineCode(masm);
+#endif
+
+ if (!generateOutOfLineCode())
+ return false;
+
+ masm.wasmEmitTrapOutOfLineCode();
+
+ compileResults_.offsets().end = masm.currentOffset();
+
+ // A frame greater than 256KB is implausible, probably an attack,
+ // so fail the compilation.
+
+ if (maxFramePushed_ > 256 * 1024)
+ return false;
+
+ return true;
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Calls.
+
+ struct FunctionCall
+ {
+ explicit FunctionCall(uint32_t lineOrBytecode)
+ : lineOrBytecode(lineOrBytecode),
+ reloadMachineStateAfter(false),
+ usesSystemAbi(false),
+ loadTlsBefore(false),
+#ifdef JS_CODEGEN_ARM
+ hardFP(true),
+#endif
+ frameAlignAdjustment(0),
+ stackArgAreaSize(0)
+ {}
+
+ uint32_t lineOrBytecode;
+ ABIArgGenerator abi;
+ bool reloadMachineStateAfter;
+ bool usesSystemAbi;
+ bool loadTlsBefore;
+#ifdef JS_CODEGEN_ARM
+ bool hardFP;
+#endif
+ size_t frameAlignAdjustment;
+ size_t stackArgAreaSize;
+ };
+
+ void beginCall(FunctionCall& call, UseABI useABI, InterModule interModule)
+ {
+ call.reloadMachineStateAfter = interModule == InterModule::True || useABI == UseABI::System;
+ call.usesSystemAbi = useABI == UseABI::System;
+ call.loadTlsBefore = useABI == UseABI::Wasm;
+
+ if (call.usesSystemAbi) {
+ // Call-outs need to use the appropriate system ABI.
+#if defined(JS_CODEGEN_ARM)
+# if defined(JS_SIMULATOR_ARM)
+ call.hardFP = UseHardFpABI();
+# elif defined(JS_CODEGEN_ARM_HARDFP)
+ call.hardFP = true;
+# else
+ call.hardFP = false;
+# endif
+ call.abi.setUseHardFp(call.hardFP);
+#endif
+ }
+
+ call.frameAlignAdjustment = ComputeByteAlignment(masm.framePushed() + sizeof(Frame),
+ JitStackAlignment);
+ }
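+
+    // A sketch of beginCall()'s alignment computation, with made-up numbers:
+    // assuming ComputeByteAlignment(n, a) yields the padding needed to round n
+    // up to a multiple of a, then with masm.framePushed() == 40,
+    // sizeof(Frame) == 8 and JitStackAlignment == 16 the adjustment is
+    // ComputeByteAlignment(48, 16) == 0, while framePushed() == 44 would give
+    // ComputeByteAlignment(52, 16) == 12.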
+
+ void endCall(FunctionCall& call)
+ {
+ size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
+ if (adjustment)
+ masm.freeStack(adjustment);
+
+ if (call.reloadMachineStateAfter) {
+ loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+ masm.loadWasmPinnedRegsFromTls();
+ }
+ }
+
+ // TODO / OPTIMIZE (Bug 1316820): This is expensive; let's roll the iterator
+ // walking into the walking done for passArg. See comments in passArg.
+
+ size_t stackArgAreaSize(const ValTypeVector& args) {
+ ABIArgIter<const ValTypeVector> i(args);
+ while (!i.done())
+ i++;
+ return AlignBytes(i.stackBytesConsumedSoFar(), 16u);
+ }
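+
+    // For illustration (the register/stack split is hypothetical): if the ABI
+    // iterator assigns the leading arguments to registers and spills the rest
+    // so that stackBytesConsumedSoFar() ends up at 20, stackArgAreaSize()
+    // returns AlignBytes(20, 16) == 32 bytes of outgoing area.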
+
+ void startCallArgs(FunctionCall& call, size_t stackArgAreaSize)
+ {
+ call.stackArgAreaSize = stackArgAreaSize;
+
+ size_t adjustment = call.stackArgAreaSize + call.frameAlignAdjustment;
+ if (adjustment)
+ masm.reserveStack(adjustment);
+ }
+
+ const ABIArg reservePointerArgument(FunctionCall& call) {
+ return call.abi.next(MIRType::Pointer);
+ }
+
+ // TODO / OPTIMIZE (Bug 1316820): Note passArg is used only in one place.
+ // (Or it was, until Luke wandered through, but that can be fixed again.)
+ // I'm not saying we should manually inline it, but we could hoist the
+ // dispatch into the caller and have type-specific implementations of
+ // passArg: passArgI32(), etc. Then those might be inlined, at least in PGO
+ // builds.
+ //
+ // The bulk of the work here (60%) is in the next() call, though.
+ //
+ // Notably, since next() is so expensive, stackArgAreaSize() becomes
+ // expensive too.
+ //
+ // Somehow there could be a trick here where the sequence of
+ // argument types (read from the input stream) leads to a cached
+ // entry for stackArgAreaSize() and for how to pass arguments...
+ //
+ // But at least we could reduce the cost of stackArgAreaSize() by
+ // first reading the argument types into a (reusable) vector, then
+ // we have the outgoing size at low cost, and then we can pass
+ // args based on the info we read.
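+    //
+    // A rough sketch of that last idea (not implemented; argTypes_, numArgs and
+    // readValType() are hypothetical names, fallibility elided):
+    //
+    //   argTypes_.clear();
+    //   for (uint32_t i = 0; i < numArgs; i++)
+    //       argTypes_.append(readValType());
+    //   startCallArgs(call, stackArgAreaSize(argTypes_));
+    //   for (uint32_t i = 0; i < numArgs; i++)
+    //       passArg(call, argTypes_[i], peek(numArgs - 1 - i));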
+
+ void passArg(FunctionCall& call, ValType type, Stk& arg) {
+ switch (type) {
+ case ValType::I32: {
+ ABIArg argLoc = call.abi.next(MIRType::Int32);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+ loadI32(scratch, arg);
+ masm.store32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+ } else {
+ loadI32(argLoc.gpr(), arg);
+ }
+ break;
+ }
+ case ValType::I64: {
+ ABIArg argLoc = call.abi.next(MIRType::Int64);
+ if (argLoc.kind() == ABIArg::Stack) {
+ ScratchI32 scratch(*this);
+#if defined(JS_CODEGEN_X64)
+ loadI64(Register64(scratch), arg);
+ masm.movq(scratch, Operand(StackPointer, argLoc.offsetFromArgBase()));
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ loadI64Low(scratch, arg);
+ masm.store32(scratch, Address(StackPointer, argLoc.offsetFromArgBase() + INT64LOW_OFFSET));
+ loadI64High(scratch, arg);
+ masm.store32(scratch, Address(StackPointer, argLoc.offsetFromArgBase() + INT64HIGH_OFFSET));
+#else
+ MOZ_CRASH("BaseCompiler platform hook: passArg I64");
+#endif
+ } else {
+ loadI64(argLoc.gpr64(), arg);
+ }
+ break;
+ }
+ case ValType::F64: {
+ ABIArg argLoc = call.abi.next(MIRType::Double);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF64 scratch(*this);
+ loadF64(scratch, arg);
+ masm.storeDouble(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+# ifdef JS_CODEGEN_ARM
+ ScratchF64 scratch(*this);
+ loadF64(scratch, arg);
+ masm.ma_vxfer(scratch, argLoc.evenGpr(), argLoc.oddGpr());
+ break;
+# else
+ MOZ_CRASH("BaseCompiler platform hook: passArg F64 pair");
+# endif
+ }
+#endif
+ case ABIArg::FPU: {
+ loadF64(argLoc.fpu(), arg);
+ break;
+ }
+ case ABIArg::GPR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+ }
+ break;
+ }
+ case ValType::F32: {
+ ABIArg argLoc = call.abi.next(MIRType::Float32);
+ switch (argLoc.kind()) {
+ case ABIArg::Stack: {
+ ScratchF32 scratch(*this);
+ loadF32(scratch, arg);
+ masm.storeFloat32(scratch, Address(StackPointer, argLoc.offsetFromArgBase()));
+ break;
+ }
+ case ABIArg::GPR: {
+ ScratchF32 scratch(*this);
+ loadF32(scratch, arg);
+ masm.moveFloat32ToGPR(scratch, argLoc.gpr());
+ break;
+ }
+ case ABIArg::FPU: {
+ loadF32(argLoc.fpu(), arg);
+ break;
+ }
+#if defined(JS_CODEGEN_REGISTER_PAIR)
+ case ABIArg::GPR_PAIR: {
+ MOZ_CRASH("Unexpected parameter passing discipline");
+ }
+#endif
+ }
+ break;
+ }
+ default:
+ MOZ_CRASH("Function argument type");
+ }
+ }
+
+ void callDefinition(uint32_t funcIndex, const FunctionCall& call)
+ {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
+ masm.call(desc, funcIndex);
+ }
+
+ void callSymbolic(SymbolicAddress callee, const FunctionCall& call) {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+ masm.call(callee);
+ }
+
+ // Precondition: sync()
+
+ void callIndirect(uint32_t sigIndex, Stk& indexVal, const FunctionCall& call)
+ {
+ loadI32(WasmTableCallIndexReg, indexVal);
+
+ const SigWithId& sig = mg_.sigs[sigIndex];
+
+ CalleeDesc callee;
+ if (isCompilingAsmJS()) {
+ MOZ_ASSERT(sig.id.kind() == SigIdDesc::Kind::None);
+ const TableDesc& table = mg_.tables[mg_.asmJSSigToTableIndex[sigIndex]];
+
+ MOZ_ASSERT(IsPowerOfTwo(table.limits.initial));
+ masm.andPtr(Imm32((table.limits.initial - 1)), WasmTableCallIndexReg);
+
+ callee = CalleeDesc::asmJSTable(table);
+ } else {
+ MOZ_ASSERT(sig.id.kind() != SigIdDesc::Kind::None);
+ MOZ_ASSERT(mg_.tables.length() == 1);
+ const TableDesc& table = mg_.tables[0];
+
+ callee = CalleeDesc::wasmTable(table, sig.id);
+ }
+
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+ masm.wasmCallIndirect(desc, callee);
+ }
+
+ // Precondition: sync()
+
+ void callImport(unsigned globalDataOffset, const FunctionCall& call)
+ {
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Dynamic);
+ CalleeDesc callee = CalleeDesc::import(globalDataOffset);
+ masm.wasmCallImport(desc, callee);
+ }
+
+ void builtinCall(SymbolicAddress builtin, const FunctionCall& call)
+ {
+ callSymbolic(builtin, call);
+ }
+
+ void builtinInstanceMethodCall(SymbolicAddress builtin, const ABIArg& instanceArg,
+ const FunctionCall& call)
+ {
+ // Builtin method calls assume the TLS register has been set.
+ loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+
+ CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Symbolic);
+ masm.wasmCallBuiltinInstanceMethod(instanceArg, builtin);
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Sundry low-level code generators.
+
+ void addInterruptCheck()
+ {
+ // Always use signals for interrupts with Asm.JS/Wasm
+ MOZ_RELEASE_ASSERT(HaveSignalHandlers());
+ }
+
+ void jumpTable(LabelVector& labels) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ for (uint32_t i = 0; i < labels.length(); i++) {
+ CodeLabel cl;
+ masm.writeCodePointer(cl.patchAt());
+ cl.target()->bind(labels[i]->offset());
+ masm.addCodeLabel(cl);
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: jumpTable");
+#endif
+ }
+
+ void tableSwitch(Label* theTable, RegI32 switchValue) {
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ ScratchI32 scratch(*this);
+ CodeLabel tableCl;
+
+ masm.mov(tableCl.patchAt(), scratch);
+
+ tableCl.target()->bind(theTable->offset());
+ masm.addCodeLabel(tableCl);
+
+ masm.jmp(Operand(scratch, switchValue.reg, ScalePointer));
+#elif defined(JS_CODEGEN_ARM)
+ ScratchI32 scratch(*this);
+
+ // Compute the offset from the next instruction to the jump table
+ Label here;
+ masm.bind(&here);
+ uint32_t offset = here.offset() - theTable->offset();
+
+ // Read PC+8
+ masm.ma_mov(pc, scratch);
+
+ // Required by ma_sub.
+ ScratchRegisterScope arm_scratch(*this);
+
+ // Compute the table base pointer
+ masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch);
+
+ // Jump indirect via table element
+ masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue.reg, LSL, 2)), pc, Offset,
+ Assembler::Always);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: tableSwitch");
+#endif
+ }
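+
+    // Working through the ARM arithmetic in tableSwitch() with made-up offsets:
+    // if the jump table sits at offset 100 and `here` (the ma_mov) at offset
+    // 140, then offset == 40 and the pc read yields 140 + 8 == 148; subtracting
+    // offset + 8 == 48 recovers the table base at 100.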
+
+ RegI32 captureReturnedI32() {
+ RegI32 rv = RegI32(ReturnReg);
+ MOZ_ASSERT(isAvailable(rv.reg));
+ needI32(rv);
+ return rv;
+ }
+
+ RegI64 captureReturnedI64() {
+ RegI64 rv = RegI64(ReturnReg64);
+ MOZ_ASSERT(isAvailable(rv.reg));
+ needI64(rv);
+ return rv;
+ }
+
+ RegF32 captureReturnedF32(const FunctionCall& call) {
+ RegF32 rv = RegF32(ReturnFloat32Reg);
+ MOZ_ASSERT(isAvailable(rv.reg));
+ needF32(rv);
+#if defined(JS_CODEGEN_X86)
+ if (call.usesSystemAbi) {
+ masm.reserveStack(sizeof(float));
+ Operand op(esp, 0);
+ masm.fstp32(op);
+ masm.loadFloat32(op, rv.reg);
+ masm.freeStack(sizeof(float));
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP)
+ masm.ma_vxfer(r0, rv.reg);
+#endif
+ return rv;
+ }
+
+ RegF64 captureReturnedF64(const FunctionCall& call) {
+ RegF64 rv = RegF64(ReturnDoubleReg);
+ MOZ_ASSERT(isAvailable(rv.reg));
+ needF64(rv);
+#if defined(JS_CODEGEN_X86)
+ if (call.usesSystemAbi) {
+ masm.reserveStack(sizeof(double));
+ Operand op(esp, 0);
+ masm.fstp(op);
+ masm.loadDouble(op, rv.reg);
+ masm.freeStack(sizeof(double));
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (call.usesSystemAbi && !call.hardFP)
+ masm.ma_vxfer(r0, r1, rv.reg);
+#endif
+ return rv;
+ }
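+
+    // Background for the x86 branches in captureReturnedF32()/F64() above: the
+    // 32-bit system ABI returns floating-point values on the x87 stack, so the
+    // result is spilled through memory (fstp) and reloaded into the result
+    // register; on ARM softfp calls the result comes back in r0 (r0/r1 for a
+    // double) and is moved over with ma_vxfer.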
+
+ void returnCleanup() {
+ popStackBeforeBranch(ctl_[0].framePushed);
+ masm.jump(&returnLabel_);
+ }
+
+ void pop2xI32ForIntMulDiv(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // srcDest must be eax, and edx will be clobbered.
+ need2xI32(specific_eax, specific_edx);
+ *r1 = popI32();
+ *r0 = popI32ToSpecific(specific_eax);
+ freeI32(specific_edx);
+#else
+ pop2xI32(r0, r1);
+#endif
+ }
+
+ void pop2xI64ForIntDiv(RegI64* r0, RegI64* r1) {
+#ifdef JS_CODEGEN_X64
+ // srcDest must be rax, and rdx will be clobbered.
+ need2xI64(specific_rax, specific_rdx);
+ *r1 = popI64();
+ *r0 = popI64ToSpecific(specific_rax);
+ freeI64(specific_rdx);
+#else
+ pop2xI64(r0, r1);
+#endif
+ }
+
+ void checkDivideByZeroI32(RegI32 rhs, RegI32 srcDest, Label* done) {
+ if (isCompilingAsmJS()) {
+ // Truncated division by zero is zero (Infinity|0 == 0)
+ Label notDivByZero;
+ masm.branchTest32(Assembler::NonZero, rhs.reg, rhs.reg, &notDivByZero);
+ masm.move32(Imm32(0), srcDest.reg);
+ masm.jump(done);
+ masm.bind(&notDivByZero);
+ } else {
+ masm.branchTest32(Assembler::Zero, rhs.reg, rhs.reg, trap(Trap::IntegerDivideByZero));
+ }
+ }
+
+ void checkDivideByZeroI64(RegI64 r) {
+ MOZ_ASSERT(!isCompilingAsmJS());
+ ScratchI32 scratch(*this);
+ masm.branchTest64(Assembler::Zero, r.reg, r.reg, scratch, trap(Trap::IntegerDivideByZero));
+ }
+
+ void checkDivideSignedOverflowI32(RegI32 rhs, RegI32 srcDest, Label* done, bool zeroOnOverflow) {
+ Label notMin;
+ masm.branch32(Assembler::NotEqual, srcDest.reg, Imm32(INT32_MIN), &notMin);
+ if (zeroOnOverflow) {
+ masm.branch32(Assembler::NotEqual, rhs.reg, Imm32(-1), &notMin);
+ masm.move32(Imm32(0), srcDest.reg);
+ masm.jump(done);
+ } else if (isCompilingAsmJS()) {
+ // (-INT32_MIN)|0 == INT32_MIN and INT32_MIN is already in srcDest.
+ masm.branch32(Assembler::Equal, rhs.reg, Imm32(-1), done);
+ } else {
+ masm.branch32(Assembler::Equal, rhs.reg, Imm32(-1), trap(Trap::IntegerOverflow));
+ }
+ masm.bind(&notMin);
+ }
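+
+    // Concretely, the only case checkDivideSignedOverflowI32() must handle is
+    // INT32_MIN / -1: wasm division traps with IntegerOverflow, asm.js division
+    // yields INT32_MIN (matching (-INT32_MIN)|0), and remainder (the
+    // zeroOnOverflow path) yields 0.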
+
+ void checkDivideSignedOverflowI64(RegI64 rhs, RegI64 srcDest, Label* done, bool zeroOnOverflow) {
+ MOZ_ASSERT(!isCompilingAsmJS());
+ Label notmin;
+ masm.branch64(Assembler::NotEqual, srcDest.reg, Imm64(INT64_MIN), &notmin);
+ masm.branch64(Assembler::NotEqual, rhs.reg, Imm64(-1), &notmin);
+ if (zeroOnOverflow) {
+ masm.xor64(srcDest.reg, srcDest.reg);
+ masm.jump(done);
+ } else {
+ masm.jump(trap(Trap::IntegerOverflow));
+ }
+ masm.bind(&notmin);
+ }
+
+#ifndef INT_DIV_I64_CALLOUT
+ void quotientI64(RegI64 rhs, RegI64 srcDest, IsUnsigned isUnsigned) {
+ Label done;
+
+ checkDivideByZeroI64(rhs);
+
+ if (!isUnsigned)
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg.reg == rax);
+ MOZ_ASSERT(isAvailable(rdx));
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg.reg);
+ }
+# else
+ MOZ_CRASH("BaseCompiler platform hook: quotientI64");
+# endif
+ masm.bind(&done);
+ }
+
+ void remainderI64(RegI64 rhs, RegI64 srcDest, IsUnsigned isUnsigned) {
+ Label done;
+
+ checkDivideByZeroI64(rhs);
+
+ if (!isUnsigned)
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+
+# if defined(JS_CODEGEN_X64)
+ // The caller must set up the following situation.
+ MOZ_ASSERT(srcDest.reg.reg == rax);
+ MOZ_ASSERT(isAvailable(rdx));
+
+ if (isUnsigned) {
+ masm.xorq(rdx, rdx);
+ masm.udivq(rhs.reg.reg);
+ } else {
+ masm.cqo();
+ masm.idivq(rhs.reg.reg);
+ }
+ masm.movq(rdx, rax);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: remainderI64");
+# endif
+ masm.bind(&done);
+ }
+#endif // INT_DIV_I64_CALLOUT
+
+ void pop2xI32ForShiftOrRotate(RegI32* r0, RegI32* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ *r1 = popI32(specific_ecx);
+ *r0 = popI32();
+#else
+ pop2xI32(r0, r1);
+#endif
+ }
+
+ void pop2xI64ForShiftOrRotate(RegI64* r0, RegI64* r1) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ needI32(specific_ecx);
+ *r1 = widenI32(specific_ecx);
+ *r1 = popI64ToSpecific(*r1);
+ *r0 = popI64();
+#else
+ pop2xI64(r0, r1);
+#endif
+ }
+
+ void maskShiftCount32(RegI32 r) {
+#if defined(JS_CODEGEN_ARM)
+ masm.and32(Imm32(31), r.reg);
+#endif
+ }
+
+ bool popcnt32NeedsTemp() const {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return !AssemblerX86Shared::HasPOPCNT();
+#elif defined(JS_CODEGEN_ARM)
+ return true;
+#else
+ MOZ_CRASH("BaseCompiler platform hook: popcnt32NeedsTemp");
+#endif
+ }
+
+ bool popcnt64NeedsTemp() const {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ return !AssemblerX86Shared::HasPOPCNT();
+#elif defined(JS_CODEGEN_ARM)
+ return true;
+#else
+ MOZ_CRASH("BaseCompiler platform hook: popcnt64NeedsTemp");
+#endif
+ }
+
+ void reinterpretI64AsF64(RegI64 src, RegF64 dest) {
+#if defined(JS_CODEGEN_X64)
+ masm.vmovq(src.reg.reg, dest.reg);
+#elif defined(JS_CODEGEN_X86)
+ masm.Push(src.reg.high);
+ masm.Push(src.reg.low);
+ masm.vmovq(Operand(esp, 0), dest.reg);
+ masm.freeStack(sizeof(uint64_t));
+#elif defined(JS_CODEGEN_ARM)
+ masm.ma_vxfer(src.reg.low, src.reg.high, dest.reg);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: reinterpretI64AsF64");
+#endif
+ }
+
+ void reinterpretF64AsI64(RegF64 src, RegI64 dest) {
+#if defined(JS_CODEGEN_X64)
+ masm.vmovq(src.reg, dest.reg.reg);
+#elif defined(JS_CODEGEN_X86)
+ masm.reserveStack(sizeof(uint64_t));
+ masm.vmovq(src.reg, Operand(esp, 0));
+ masm.Pop(dest.reg.low);
+ masm.Pop(dest.reg.high);
+#elif defined(JS_CODEGEN_ARM)
+ masm.ma_vxfer(src.reg, dest.reg.low, dest.reg.high);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: reinterpretF64AsI64");
+#endif
+ }
+
+ void wrapI64ToI32(RegI64 src, RegI32 dest) {
+#if defined(JS_CODEGEN_X64)
+        // movl zero-extends into the high bits, even when the two registers are the same.
+ masm.movl(src.reg.reg, dest.reg);
+#elif defined(JS_NUNBOX32)
+ masm.move32(src.reg.low, dest.reg);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: wrapI64ToI32");
+#endif
+ }
+
+ RegI64 popI32ForSignExtendI64() {
+#if defined(JS_CODEGEN_X86)
+ need2xI32(specific_edx, specific_eax);
+ RegI32 r0 = popI32ToSpecific(specific_eax);
+ RegI64 x0 = RegI64(Register64(specific_edx.reg, specific_eax.reg));
+ (void)r0; // x0 is the widening of r0
+#else
+ RegI32 r0 = popI32();
+ RegI64 x0 = widenI32(r0);
+#endif
+ return x0;
+ }
+
+ void signExtendI32ToI64(RegI32 src, RegI64 dest) {
+#if defined(JS_CODEGEN_X64)
+ masm.movslq(src.reg, dest.reg.reg);
+#elif defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(dest.reg.low == src.reg);
+ MOZ_ASSERT(dest.reg.low == eax);
+ MOZ_ASSERT(dest.reg.high == edx);
+ masm.cdq();
+#elif defined(JS_CODEGEN_ARM)
+ masm.ma_mov(src.reg, dest.reg.low);
+ masm.ma_asr(Imm32(31), src.reg, dest.reg.high);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: signExtendI32ToI64");
+#endif
+ }
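+
+    // For example: on x86, popI32ForSignExtendI64() pins the value in eax with
+    // edx free, so the cdq above sign-extends eax into edx:eax (0x80000000
+    // becomes 0xFFFFFFFF80000000); on x64 a single movslq does the same job.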
+
+ void extendU32ToI64(RegI32 src, RegI64 dest) {
+#if defined(JS_CODEGEN_X64)
+ masm.movl(src.reg, dest.reg.reg);
+#elif defined(JS_NUNBOX32)
+ masm.move32(src.reg, dest.reg.low);
+ masm.move32(Imm32(0), dest.reg.high);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: extendU32ToI64");
+#endif
+ }
+
+ class OutOfLineTruncateF32OrF64ToI32 : public OutOfLineCode
+ {
+ AnyReg src;
+ RegI32 dest;
+ bool isAsmJS;
+ bool isUnsigned;
+ TrapOffset off;
+
+ public:
+ OutOfLineTruncateF32OrF64ToI32(AnyReg src, RegI32 dest, bool isAsmJS, bool isUnsigned,
+ TrapOffset off)
+ : src(src),
+ dest(dest),
+ isAsmJS(isAsmJS),
+ isUnsigned(isUnsigned),
+ off(off)
+ {
+ MOZ_ASSERT_IF(isAsmJS, !isUnsigned);
+ }
+
+ virtual void generate(MacroAssembler& masm) {
+ bool isFloat = src.tag == AnyReg::F32;
+ FloatRegister fsrc = isFloat ? src.f32().reg : src.f64().reg;
+ if (isAsmJS) {
+ saveVolatileReturnGPR(masm);
+ masm.outOfLineTruncateSlow(fsrc, dest.reg, isFloat, /* isAsmJS */ true);
+ restoreVolatileReturnGPR(masm);
+ masm.jump(rejoin());
+ } else {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (isFloat)
+ masm.outOfLineWasmTruncateFloat32ToInt32(fsrc, isUnsigned, off, rejoin());
+ else
+ masm.outOfLineWasmTruncateDoubleToInt32(fsrc, isUnsigned, off, rejoin());
+#elif defined(JS_CODEGEN_ARM)
+ masm.outOfLineWasmTruncateToIntCheck(fsrc,
+ isFloat ? MIRType::Float32 : MIRType::Double,
+ MIRType::Int32, isUnsigned, rejoin(), off);
+#else
+ (void)isUnsigned; // Suppress warnings
+ (void)off; // for unused private
+ MOZ_CRASH("BaseCompiler platform hook: OutOfLineTruncateF32OrF64ToI32 wasm");
+#endif
+ }
+ }
+ };
+
+ MOZ_MUST_USE bool truncateF32ToI32(RegF32 src, RegI32 dest, bool isUnsigned) {
+ TrapOffset off = trapOffset();
+ OutOfLineCode* ool;
+ if (isCompilingAsmJS()) {
+ ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, true, false, off);
+ ool = addOutOfLineCode(ool);
+ if (!ool)
+ return false;
+ masm.branchTruncateFloat32ToInt32(src.reg, dest.reg, ool->entry());
+ } else {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+ ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, false, isUnsigned,
+ off);
+ ool = addOutOfLineCode(ool);
+ if (!ool)
+ return false;
+ if (isUnsigned)
+ masm.wasmTruncateFloat32ToUInt32(src.reg, dest.reg, ool->entry());
+ else
+ masm.wasmTruncateFloat32ToInt32(src.reg, dest.reg, ool->entry());
+#else
+ MOZ_CRASH("BaseCompiler platform hook: truncateF32ToI32 wasm");
+#endif
+ }
+ masm.bind(ool->rejoin());
+ return true;
+ }
+
+ MOZ_MUST_USE bool truncateF64ToI32(RegF64 src, RegI32 dest, bool isUnsigned) {
+ TrapOffset off = trapOffset();
+ OutOfLineCode* ool;
+ if (isCompilingAsmJS()) {
+ ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, true, false, off);
+ ool = addOutOfLineCode(ool);
+ if (!ool)
+ return false;
+ masm.branchTruncateDoubleToInt32(src.reg, dest.reg, ool->entry());
+ } else {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM)
+ ool = new(alloc_) OutOfLineTruncateF32OrF64ToI32(AnyReg(src), dest, false, isUnsigned,
+ off);
+ ool = addOutOfLineCode(ool);
+ if (!ool)
+ return false;
+ if (isUnsigned)
+ masm.wasmTruncateDoubleToUInt32(src.reg, dest.reg, ool->entry());
+ else
+ masm.wasmTruncateDoubleToInt32(src.reg, dest.reg, ool->entry());
+#else
+ MOZ_CRASH("BaseCompiler platform hook: truncateF64ToI32 wasm");
+#endif
+ }
+ masm.bind(ool->rejoin());
+ return true;
+ }
+
+ // This does not generate a value; if the truncation failed then it traps.
+
+ class OutOfLineTruncateCheckF32OrF64ToI64 : public OutOfLineCode
+ {
+ AnyReg src;
+ bool isUnsigned;
+ TrapOffset off;
+
+ public:
+ OutOfLineTruncateCheckF32OrF64ToI64(AnyReg src, bool isUnsigned, TrapOffset off)
+ : src(src),
+ isUnsigned(isUnsigned),
+ off(off)
+ {}
+
+ virtual void generate(MacroAssembler& masm) {
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ if (src.tag == AnyReg::F32)
+ masm.outOfLineWasmTruncateFloat32ToInt64(src.f32().reg, isUnsigned, off, rejoin());
+ else if (src.tag == AnyReg::F64)
+ masm.outOfLineWasmTruncateDoubleToInt64(src.f64().reg, isUnsigned, off, rejoin());
+ else
+ MOZ_CRASH("unexpected type");
+#elif defined(JS_CODEGEN_ARM)
+ if (src.tag == AnyReg::F32)
+ masm.outOfLineWasmTruncateToIntCheck(src.f32().reg, MIRType::Float32,
+ MIRType::Int64, isUnsigned, rejoin(), off);
+ else if (src.tag == AnyReg::F64)
+ masm.outOfLineWasmTruncateToIntCheck(src.f64().reg, MIRType::Double, MIRType::Int64,
+ isUnsigned, rejoin(), off);
+ else
+ MOZ_CRASH("unexpected type");
+#else
+ (void)src;
+ (void)isUnsigned;
+ (void)off;
+ MOZ_CRASH("BaseCompiler platform hook: OutOfLineTruncateCheckF32OrF64ToI64");
+#endif
+ }
+ };
+
+#ifndef FLOAT_TO_I64_CALLOUT
+ MOZ_MUST_USE bool truncateF32ToI64(RegF32 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
+ isUnsigned,
+ trapOffset()));
+ if (!ool)
+ return false;
+ if (isUnsigned)
+ masm.wasmTruncateFloat32ToUInt64(src.reg, dest.reg, ool->entry(),
+ ool->rejoin(), temp.reg);
+ else
+ masm.wasmTruncateFloat32ToInt64(src.reg, dest.reg, ool->entry(),
+ ool->rejoin(), temp.reg);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: truncateF32ToI64");
+# endif
+ return true;
+ }
+
+ MOZ_MUST_USE bool truncateF64ToI64(RegF64 src, RegI64 dest, bool isUnsigned, RegF64 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ OutOfLineCode* ool =
+ addOutOfLineCode(new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(src),
+ isUnsigned,
+ trapOffset()));
+ if (!ool)
+ return false;
+ if (isUnsigned)
+ masm.wasmTruncateDoubleToUInt64(src.reg, dest.reg, ool->entry(),
+ ool->rejoin(), temp.reg);
+ else
+ masm.wasmTruncateDoubleToInt64(src.reg, dest.reg, ool->entry(),
+ ool->rejoin(), temp.reg);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: truncateF64ToI64");
+# endif
+ return true;
+ }
+#endif // FLOAT_TO_I64_CALLOUT
+
+#ifndef I64_TO_FLOAT_CALLOUT
+ bool convertI64ToFloatNeedsTemp(bool isUnsigned) const {
+# if defined(JS_CODEGEN_X86)
+ return isUnsigned && AssemblerX86Shared::HasSSE3();
+# else
+ return false;
+# endif
+ }
+
+ void convertI64ToF32(RegI64 src, bool isUnsigned, RegF32 dest, RegI32 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ if (isUnsigned)
+ masm.convertUInt64ToFloat32(src.reg, dest.reg, temp.reg);
+ else
+ masm.convertInt64ToFloat32(src.reg, dest.reg);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: convertI64ToF32");
+# endif
+ }
+
+ void convertI64ToF64(RegI64 src, bool isUnsigned, RegF64 dest, RegI32 temp) {
+# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ if (isUnsigned)
+ masm.convertUInt64ToDouble(src.reg, dest.reg, temp.reg);
+ else
+ masm.convertInt64ToDouble(src.reg, dest.reg);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: convertI64ToF64");
+# endif
+ }
+#endif // I64_TO_FLOAT_CALLOUT
+
+ void cmp64Set(Assembler::Condition cond, RegI64 lhs, RegI64 rhs, RegI32 dest) {
+#if defined(JS_CODEGEN_X64)
+ masm.cmpq(rhs.reg.reg, lhs.reg.reg);
+ masm.emitSet(cond, dest.reg);
+#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+        // TODO / OPTIMIZE (Bug 1316822): This is pretty branchy; we should be
+        // able to do better.
+ Label done, condTrue;
+ masm.branch64(cond, lhs.reg, rhs.reg, &condTrue);
+ masm.move32(Imm32(0), dest.reg);
+ masm.jump(&done);
+ masm.bind(&condTrue);
+ masm.move32(Imm32(1), dest.reg);
+ masm.bind(&done);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: cmp64Set");
+#endif
+ }
+
+ void unreachableTrap()
+ {
+ masm.jump(trap(Trap::Unreachable));
+#ifdef DEBUG
+ masm.breakpoint();
+#endif
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Global variable access.
+
+ // CodeGenerator{X86,X64}::visitWasmLoadGlobal()
+
+ void loadGlobalVarI32(unsigned globalDataOffset, RegI32 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.loadRipRelativeInt32(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr), r.reg, scratch);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarI32");
+#endif
+ }
+
+ void loadGlobalVarI64(unsigned globalDataOffset, RegI64 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.loadRipRelativeInt64(r.reg.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset labelLow = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg.low);
+ masm.append(GlobalAccess(labelLow, globalDataOffset + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(PatchedAbsoluteAddress(), r.reg.high);
+ masm.append(GlobalAccess(labelHigh, globalDataOffset + INT64HIGH_OFFSET));
+#elif defined(JS_CODEGEN_ARM)
+ ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr + INT64LOW_OFFSET), r.reg.low, scratch);
+ masm.ma_dtr(js::jit::IsLoad, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), r.reg.high,
+ scratch);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarI64");
+#endif
+ }
+
+ void loadGlobalVarF32(unsigned globalDataOffset, RegF32 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.loadRipRelativeFloat32(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.vmovssWithPatch(PatchedAbsoluteAddress(), r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ VFPRegister vd(r.reg);
+ masm.ma_vldr(VFPAddr(GlobalReg, VFPOffImm(addr)), vd.singleOverlay());
+#else
+ MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF32");
+#endif
+ }
+
+ void loadGlobalVarF64(unsigned globalDataOffset, RegF64 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.loadRipRelativeDouble(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.vmovsdWithPatch(PatchedAbsoluteAddress(), r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_vldr(VFPAddr(GlobalReg, VFPOffImm(addr)), r.reg);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: loadGlobalVarF64");
+#endif
+ }
+
+ // CodeGeneratorX64::visitWasmStoreGlobal()
+
+ void storeGlobalVarI32(unsigned globalDataOffset, RegI32 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.storeRipRelativeInt32(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.movlWithPatch(r.reg, PatchedAbsoluteAddress());
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr), r.reg, scratch);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarI32");
+#endif
+ }
+
+ void storeGlobalVarI64(unsigned globalDataOffset, RegI64 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.storeRipRelativeInt64(r.reg.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset labelLow = masm.movlWithPatch(r.reg.low, PatchedAbsoluteAddress());
+ masm.append(GlobalAccess(labelLow, globalDataOffset + INT64LOW_OFFSET));
+ CodeOffset labelHigh = masm.movlWithPatch(r.reg.high, PatchedAbsoluteAddress());
+ masm.append(GlobalAccess(labelHigh, globalDataOffset + INT64HIGH_OFFSET));
+#elif defined(JS_CODEGEN_ARM)
+ ScratchRegisterScope scratch(*this); // Really must be the ARM scratchreg
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr + INT64LOW_OFFSET), r.reg.low, scratch);
+ masm.ma_dtr(js::jit::IsStore, GlobalReg, Imm32(addr + INT64HIGH_OFFSET), r.reg.high,
+ scratch);
+#else
+ MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarI64");
+#endif
+ }
+
+ void storeGlobalVarF32(unsigned globalDataOffset, RegF32 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.storeRipRelativeFloat32(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.vmovssWithPatch(r.reg, PatchedAbsoluteAddress());
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ VFPRegister vd(r.reg);
+ masm.ma_vstr(vd.singleOverlay(), VFPAddr(GlobalReg, VFPOffImm(addr)));
+#else
+ MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF32");
+#endif
+ }
+
+ void storeGlobalVarF64(unsigned globalDataOffset, RegF64 r)
+ {
+#if defined(JS_CODEGEN_X64)
+ CodeOffset label = masm.storeRipRelativeDouble(r.reg);
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_X86)
+ CodeOffset label = masm.vmovsdWithPatch(r.reg, PatchedAbsoluteAddress());
+ masm.append(GlobalAccess(label, globalDataOffset));
+#elif defined(JS_CODEGEN_ARM)
+ unsigned addr = globalDataOffset - WasmGlobalRegBias;
+ masm.ma_vstr(r.reg, VFPAddr(GlobalReg, VFPOffImm(addr)));
+#else
+ MOZ_CRASH("BaseCompiler platform hook: storeGlobalVarF64");
+#endif
+ }
+
+ //////////////////////////////////////////////////////////////////////
+ //
+ // Heap access.
+
+#ifndef WASM_HUGE_MEMORY
+ class AsmJSLoadOOB : public OutOfLineCode
+ {
+ Scalar::Type viewType;
+ AnyRegister dest;
+
+ public:
+ AsmJSLoadOOB(Scalar::Type viewType, AnyRegister dest)
+ : viewType(viewType),
+ dest(dest)
+ {}
+
+ void generate(MacroAssembler& masm) {
+# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ switch (viewType) {
+ case Scalar::Float32x4:
+ case Scalar::Int32x4:
+ case Scalar::Int8x16:
+ case Scalar::Int16x8:
+ case Scalar::MaxTypedArrayViewType:
+ MOZ_CRASH("unexpected array type");
+ case Scalar::Float32:
+ masm.loadConstantFloat32(float(GenericNaN()), dest.fpu());
+ break;
+ case Scalar::Float64:
+ masm.loadConstantDouble(GenericNaN(), dest.fpu());
+ break;
+ case Scalar::Int8:
+ case Scalar::Uint8:
+ case Scalar::Int16:
+ case Scalar::Uint16:
+ case Scalar::Int32:
+ case Scalar::Uint32:
+ case Scalar::Uint8Clamped:
+ masm.movePtr(ImmWord(0), dest.gpr());
+ break;
+ case Scalar::Int64:
+ MOZ_CRASH("unexpected array type");
+ }
+ masm.jump(rejoin());
+# else
+ Unused << viewType;
+ Unused << dest;
+ MOZ_CRASH("Compiler bug: Unexpected platform.");
+# endif
+ }
+ };
+#endif
+
+ void checkOffset(MemoryAccessDesc* access, RegI32 ptr) {
+ if (access->offset() >= OffsetGuardLimit) {
+ masm.branchAdd32(Assembler::CarrySet, Imm32(access->offset()), ptr.reg,
+ trap(Trap::OutOfBounds));
+ access->clearOffset();
+ }
+ }
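+
+    // Example with hypothetical numbers: if OffsetGuardLimit were 0x1000 and
+    // access->offset() were 0x2000, checkOffset() folds the offset into ptr up
+    // front (branching to the OutOfBounds trap on 32-bit wrap-around via
+    // CarrySet) and clears it from the descriptor, so the later bounds check
+    // covers the combined address.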
+
+    // Returns the number of temp registers that callers must allocate and
+    // pass as the trailing arguments to load() and store().
+ MOZ_MUST_USE size_t loadStoreTemps(MemoryAccessDesc& access) {
+#if defined(JS_CODEGEN_ARM)
+ if (access.isUnaligned()) {
+ switch (access.type()) {
+ case Scalar::Float32:
+ return 1;
+ case Scalar::Float64:
+ return 2;
+ default:
+ break;
+ }
+ }
+ return 0;
+#else
+ return 0;
+#endif
+ }
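+
+    // For example: an unaligned Float64 access on ARM reports 2, so callers
+    // allocate tmp1 and tmp2 for the word-at-a-time transfer before invoking
+    // load()/store(); on x86 and x64 the answer is always 0.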
+
+ // ptr and dest may be the same iff dest is I32.
+ // This may destroy ptr even if ptr and dest are not the same.
+ MOZ_MUST_USE bool load(MemoryAccessDesc& access, RegI32 ptr, AnyReg dest, RegI32 tmp1,
+ RegI32 tmp2)
+ {
+ checkOffset(&access, ptr);
+
+ OutOfLineCode* ool = nullptr;
+#ifndef WASM_HUGE_MEMORY
+ if (access.isPlainAsmJS()) {
+ ool = new (alloc_) AsmJSLoadOOB(access.type(), dest.any());
+ if (!addOutOfLineCode(ool))
+ return false;
+
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, ool->entry());
+ } else {
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, trap(Trap::OutOfBounds));
+ }
+#endif
+
+#if defined(JS_CODEGEN_X64)
+ Operand srcAddr(HeapReg, ptr.reg, TimesOne, access.offset());
+
+ if (dest.tag == AnyReg::I64)
+ masm.wasmLoadI64(access, srcAddr, dest.i64().reg);
+ else
+ masm.wasmLoad(access, srcAddr, dest.any());
+#elif defined(JS_CODEGEN_X86)
+ Operand srcAddr(ptr.reg, access.offset());
+
+ if (dest.tag == AnyReg::I64) {
+ masm.wasmLoadI64(access, srcAddr, dest.i64().reg);
+ } else {
+ bool byteRegConflict = access.byteSize() == 1 && !singleByteRegs_.has(dest.i32().reg);
+ AnyRegister out = byteRegConflict ? AnyRegister(ScratchRegX86) : dest.any();
+
+ masm.wasmLoad(access, srcAddr, out);
+
+ if (byteRegConflict)
+ masm.mov(ScratchRegX86, dest.i32().reg);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (access.offset() != 0)
+ masm.add32(Imm32(access.offset()), ptr.reg);
+
+ bool isSigned = true;
+ switch (access.type()) {
+ case Scalar::Uint8:
+ case Scalar::Uint16:
+ case Scalar::Uint32: {
+ isSigned = false;
+ MOZ_FALLTHROUGH;
+ case Scalar::Int8:
+ case Scalar::Int16:
+ case Scalar::Int32:
+ Register rt = dest.tag == AnyReg::I64 ? dest.i64().reg.low : dest.i32().reg;
+ loadI32(access, isSigned, ptr, rt);
+ if (dest.tag == AnyReg::I64) {
+ if (isSigned)
+ masm.ma_asr(Imm32(31), rt, dest.i64().reg.high);
+ else
+ masm.move32(Imm32(0), dest.i64().reg.high);
+ }
+ break;
+ }
+ case Scalar::Int64:
+ loadI64(access, ptr, dest.i64());
+ break;
+ case Scalar::Float32:
+ loadF32(access, ptr, dest.f32(), tmp1);
+ break;
+ case Scalar::Float64:
+ loadF64(access, ptr, dest.f64(), tmp1, tmp2);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: unexpected array type");
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: load");
+#endif
+
+ if (ool)
+ masm.bind(ool->rejoin());
+ return true;
+ }
+
+ // ptr and src must not be the same register.
+ // This may destroy ptr.
+ MOZ_MUST_USE bool store(MemoryAccessDesc access, RegI32 ptr, AnyReg src, RegI32 tmp1,
+ RegI32 tmp2)
+ {
+ checkOffset(&access, ptr);
+
+ Label rejoin;
+#ifndef WASM_HUGE_MEMORY
+ if (access.isPlainAsmJS())
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, &rejoin);
+ else
+ masm.wasmBoundsCheck(Assembler::AboveOrEqual, ptr.reg, trap(Trap::OutOfBounds));
+#endif
+
+ // Emit the store
+#if defined(JS_CODEGEN_X64)
+ Operand dstAddr(HeapReg, ptr.reg, TimesOne, access.offset());
+
+ masm.wasmStore(access, src.any(), dstAddr);
+#elif defined(JS_CODEGEN_X86)
+ Operand dstAddr(ptr.reg, access.offset());
+
+ if (access.type() == Scalar::Int64) {
+ masm.wasmStoreI64(access, src.i64().reg, dstAddr);
+ } else {
+ AnyRegister value;
+ if (src.tag == AnyReg::I64) {
+ value = AnyRegister(src.i64().reg.low);
+ } else if (access.byteSize() == 1 && !singleByteRegs_.has(src.i32().reg)) {
+ masm.mov(src.i32().reg, ScratchRegX86);
+ value = AnyRegister(ScratchRegX86);
+ } else {
+ value = src.any();
+ }
+
+ masm.wasmStore(access, value, dstAddr);
+ }
+#elif defined(JS_CODEGEN_ARM)
+ if (access.offset() != 0)
+ masm.add32(Imm32(access.offset()), ptr.reg);
+
+ switch (access.type()) {
+ case Scalar::Uint8:
+ MOZ_FALLTHROUGH;
+ case Scalar::Uint16:
+ MOZ_FALLTHROUGH;
+ case Scalar::Int8:
+ MOZ_FALLTHROUGH;
+ case Scalar::Int16:
+ MOZ_FALLTHROUGH;
+ case Scalar::Int32:
+ MOZ_FALLTHROUGH;
+ case Scalar::Uint32: {
+ Register rt = src.tag == AnyReg::I64 ? src.i64().reg.low : src.i32().reg;
+ storeI32(access, ptr, rt);
+ break;
+ }
+ case Scalar::Int64:
+ storeI64(access, ptr, src.i64());
+ break;
+ case Scalar::Float32:
+ storeF32(access, ptr, src.f32(), tmp1);
+ break;
+ case Scalar::Float64:
+ storeF64(access, ptr, src.f64(), tmp1, tmp2);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: unexpected array type");
+ }
+#else
+ MOZ_CRASH("BaseCompiler platform hook: store");
+#endif
+
+ if (rejoin.used())
+ masm.bind(&rejoin);
+
+ return true;
+ }
+
+#ifdef JS_CODEGEN_ARM
+ void
+ loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
+ if (access.byteSize() > 1 && access.isUnaligned()) {
+ masm.add32(HeapReg, ptr.reg);
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
+ } else {
+ BufferOffset ld =
+ masm.ma_dataTransferN(js::jit::IsLoad, BitSize(access.byteSize()*8),
+ isSigned, HeapReg, ptr.reg, rt, Offset, Assembler::Always);
+ masm.append(access, ld.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
+ if (access.byteSize() > 1 && access.isUnaligned()) {
+ masm.add32(HeapReg, ptr.reg);
+ masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
+ } else {
+ BufferOffset st =
+ masm.ma_dataTransferN(js::jit::IsStore, BitSize(access.byteSize()*8),
+ IsSigned(false), ptr.reg, HeapReg, rt, Offset,
+ Assembler::Always);
+ masm.append(access, st.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
+ if (access.isUnaligned()) {
+ masm.add32(HeapReg, ptr.reg);
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
+ 0);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.high,
+ 4);
+ } else {
+ BufferOffset ld;
+ ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
+ ptr.reg, dest.reg.low, Offset, Assembler::Always);
+ masm.append(access, ld.getOffset(), masm.framePushed());
+ masm.add32(Imm32(4), ptr.reg);
+ ld = masm.ma_dataTransferN(js::jit::IsLoad, BitSize(32), IsSigned(false), HeapReg,
+ ptr.reg, dest.reg.high, Offset, Assembler::Always);
+ masm.append(access, ld.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
+ if (access.isUnaligned()) {
+ masm.add32(HeapReg, ptr.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
+ } else {
+ BufferOffset st;
+ st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
+ ptr.reg, src.reg.low, Offset, Assembler::Always);
+ masm.append(access, st.getOffset(), masm.framePushed());
+ masm.add32(Imm32(4), ptr.reg);
+ st = masm.ma_dataTransferN(js::jit::IsStore, BitSize(32), IsSigned(false), HeapReg,
+ ptr.reg, src.reg.high, Offset, Assembler::Always);
+ masm.append(access, st.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
+ masm.add32(HeapReg, ptr.reg);
+ if (access.isUnaligned()) {
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+ masm.ma_vxfer(tmp1.reg, dest.reg);
+ } else {
+ BufferOffset ld = masm.ma_vldr(VFPAddr(ptr.reg, VFPOffImm(0)), dest.reg,
+ Assembler::Always);
+ masm.append(access, ld.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
+ masm.add32(HeapReg, ptr.reg);
+ if (access.isUnaligned()) {
+ masm.ma_vxfer(src.reg, tmp1.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+ } else {
+ BufferOffset st =
+ masm.ma_vstr(src.reg, VFPAddr(ptr.reg, VFPOffImm(0)), Assembler::Always);
+ masm.append(access, st.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
+ masm.add32(HeapReg, ptr.reg);
+ if (access.isUnaligned()) {
+ SecondScratchRegisterScope scratch(*this);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
+ masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
+ masm.ma_vxfer(tmp1.reg, tmp2.reg, dest.reg);
+ } else {
+ BufferOffset ld = masm.ma_vldr(VFPAddr(ptr.reg, VFPOffImm(0)), dest.reg,
+ Assembler::Always);
+ masm.append(access, ld.getOffset(), masm.framePushed());
+ }
+ }
+
+ void
+ storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
+ masm.add32(HeapReg, ptr.reg);
+ if (access.isUnaligned()) {
+ masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
+ masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);
+ } else {
+ BufferOffset st =
+ masm.ma_vstr(src.reg, VFPAddr(ptr.reg, VFPOffImm(0)), Assembler::Always);
+ masm.append(access, st.getOffset(), masm.framePushed());
+ }
+ }
+#endif // JS_CODEGEN_ARM
+
+ ////////////////////////////////////////////////////////////
+
+ // Generally speaking, ABOVE this point there should be no value
+ // stack manipulation (calls to popI32 etc).
+
+ // Generally speaking, BELOW this point there should be no
+ // platform dependencies. We make an exception for x86 register
+ // targeting, which is not too hard to keep clean.
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Sundry wrappers.
+
+ void pop2xI32(RegI32* r0, RegI32* r1) {
+ *r1 = popI32();
+ *r0 = popI32();
+ }
+
+ RegI32 popI32ToSpecific(RegI32 specific) {
+ freeI32(specific);
+ return popI32(specific);
+ }
+
+ void pop2xI64(RegI64* r0, RegI64* r1) {
+ *r1 = popI64();
+ *r0 = popI64();
+ }
+
+ RegI64 popI64ToSpecific(RegI64 specific) {
+ freeI64(specific);
+ return popI64(specific);
+ }
+
+ void pop2xF32(RegF32* r0, RegF32* r1) {
+ *r1 = popF32();
+ *r0 = popF32();
+ }
+
+ void pop2xF64(RegF64* r0, RegF64* r1) {
+ *r1 = popF64();
+ *r0 = popF64();
+ }
+
+ ////////////////////////////////////////////////////////////
+ //
+ // Sundry helpers.
+
+ uint32_t readCallSiteLineOrBytecode() {
+ if (!func_.callSiteLineNums().empty())
+ return func_.callSiteLineNums()[lastReadCallSite_++];
+ return trapOffset().bytecodeOffset;
+ }
+
+ bool done() const {
+ return iter_.done();
+ }
+
+ bool isCompilingAsmJS() const {
+ return mg_.kind == ModuleKind::AsmJS;
+ }
+
+ TrapOffset trapOffset() const {
+ return iter_.trapOffset();
+ }
+ Maybe<TrapOffset> trapIfNotAsmJS() const {
+ return isCompilingAsmJS() ? Nothing() : Some(trapOffset());
+ }
+ TrapDesc trap(Trap t) const {
+ return TrapDesc(trapOffset(), t, masm.framePushed());
+ }
+
+ //////////////////////////////////////////////////////////////////////
+
+ MOZ_MUST_USE bool emitBody();
+ MOZ_MUST_USE bool emitBlock();
+ MOZ_MUST_USE bool emitLoop();
+ MOZ_MUST_USE bool emitIf();
+ MOZ_MUST_USE bool emitElse();
+ MOZ_MUST_USE bool emitEnd();
+ MOZ_MUST_USE bool emitBr();
+ MOZ_MUST_USE bool emitBrIf();
+ MOZ_MUST_USE bool emitBrTable();
+ MOZ_MUST_USE bool emitDrop();
+ MOZ_MUST_USE bool emitReturn();
+ MOZ_MUST_USE bool emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall);
+ MOZ_MUST_USE bool emitCall();
+ MOZ_MUST_USE bool emitCallIndirect(bool oldStyle);
+ MOZ_MUST_USE bool emitCommonMathCall(uint32_t lineOrBytecode, SymbolicAddress callee,
+ ValTypeVector& signature, ExprType retType);
+ MOZ_MUST_USE bool emitUnaryMathBuiltinCall(SymbolicAddress callee, ValType operandType);
+ MOZ_MUST_USE bool emitBinaryMathBuiltinCall(SymbolicAddress callee, ValType operandType);
+#ifdef INT_DIV_I64_CALLOUT
+ MOZ_MUST_USE bool emitDivOrModI64BuiltinCall(SymbolicAddress callee, ValType operandType);
+#endif
+ MOZ_MUST_USE bool emitGetLocal();
+ MOZ_MUST_USE bool emitSetLocal();
+ MOZ_MUST_USE bool emitTeeLocal();
+ MOZ_MUST_USE bool emitGetGlobal();
+ MOZ_MUST_USE bool emitSetGlobal();
+ MOZ_MUST_USE bool emitTeeGlobal();
+ MOZ_MUST_USE bool emitLoad(ValType type, Scalar::Type viewType);
+ MOZ_MUST_USE bool emitStore(ValType resultType, Scalar::Type viewType);
+ MOZ_MUST_USE bool emitTeeStore(ValType resultType, Scalar::Type viewType);
+ MOZ_MUST_USE bool emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType);
+ MOZ_MUST_USE bool emitSelect();
+
+ void endBlock(ExprType type, bool isFunctionBody);
+ void endLoop(ExprType type);
+ void endIfThen();
+ void endIfThenElse(ExprType type);
+
+ void doReturn(ExprType returnType);
+ void pushReturned(const FunctionCall& call, ExprType type);
+
+ void emitCompareI32(JSOp compareOp, MCompare::CompareType compareType);
+ void emitCompareI64(JSOp compareOp, MCompare::CompareType compareType);
+ void emitCompareF32(JSOp compareOp, MCompare::CompareType compareType);
+ void emitCompareF64(JSOp compareOp, MCompare::CompareType compareType);
+
+ void emitAddI32();
+ void emitAddI64();
+ void emitAddF64();
+ void emitAddF32();
+ void emitSubtractI32();
+ void emitSubtractI64();
+ void emitSubtractF32();
+ void emitSubtractF64();
+ void emitMultiplyI32();
+ void emitMultiplyI64();
+ void emitMultiplyF32();
+ void emitMultiplyF64();
+ void emitQuotientI32();
+ void emitQuotientU32();
+ void emitRemainderI32();
+ void emitRemainderU32();
+#ifndef INT_DIV_I64_CALLOUT
+ void emitQuotientI64();
+ void emitQuotientU64();
+ void emitRemainderI64();
+ void emitRemainderU64();
+#endif
+ void emitDivideF32();
+ void emitDivideF64();
+ void emitMinI32();
+ void emitMaxI32();
+ void emitMinMaxI32(Assembler::Condition cond);
+ void emitMinF32();
+ void emitMaxF32();
+ void emitMinF64();
+ void emitMaxF64();
+ void emitCopysignF32();
+ void emitCopysignF64();
+ void emitOrI32();
+ void emitOrI64();
+ void emitAndI32();
+ void emitAndI64();
+ void emitXorI32();
+ void emitXorI64();
+ void emitShlI32();
+ void emitShlI64();
+ void emitShrI32();
+ void emitShrI64();
+ void emitShrU32();
+ void emitShrU64();
+ void emitRotrI32();
+ void emitRotrI64();
+ void emitRotlI32();
+ void emitRotlI64();
+ void emitEqzI32();
+ void emitEqzI64();
+ void emitClzI32();
+ void emitClzI64();
+ void emitCtzI32();
+ void emitCtzI64();
+ void emitPopcntI32();
+ void emitPopcntI64();
+ void emitBitNotI32();
+ void emitAbsI32();
+ void emitAbsF32();
+ void emitAbsF64();
+ void emitNegateI32();
+ void emitNegateF32();
+ void emitNegateF64();
+ void emitSqrtF32();
+ void emitSqrtF64();
+ template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF32ToI32();
+ template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF64ToI32();
+#ifdef FLOAT_TO_I64_CALLOUT
+ MOZ_MUST_USE bool emitConvertFloatingToInt64Callout(SymbolicAddress callee, ValType operandType,
+ ValType resultType);
+#else
+ template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF32ToI64();
+ template<bool isUnsigned> MOZ_MUST_USE bool emitTruncateF64ToI64();
+#endif
+ void emitWrapI64ToI32();
+ void emitExtendI32ToI64();
+ void emitExtendU32ToI64();
+ void emitReinterpretF32AsI32();
+ void emitReinterpretF64AsI64();
+ void emitConvertF64ToF32();
+ void emitConvertI32ToF32();
+ void emitConvertU32ToF32();
+ void emitConvertF32ToF64();
+ void emitConvertI32ToF64();
+ void emitConvertU32ToF64();
+#ifdef I64_TO_FLOAT_CALLOUT
+ MOZ_MUST_USE bool emitConvertInt64ToFloatingCallout(SymbolicAddress callee, ValType operandType,
+ ValType resultType);
+#else
+ void emitConvertI64ToF32();
+ void emitConvertU64ToF32();
+ void emitConvertI64ToF64();
+ void emitConvertU64ToF64();
+#endif
+ void emitReinterpretI32AsF32();
+ void emitReinterpretI64AsF64();
+ MOZ_MUST_USE bool emitGrowMemory();
+ MOZ_MUST_USE bool emitCurrentMemory();
+};
+
+void
+BaseCompiler::emitAddI32()
+{
+ int32_t c;
+ if (popConstI32(c)) {
+ RegI32 r = popI32();
+ masm.add32(Imm32(c), r.reg);
+ pushI32(r);
+ } else {
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.add32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+ }
+}
+
+void
+BaseCompiler::emitAddI64()
+{
+ // TODO / OPTIMIZE: Ditto check for constant here (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.add64(r1.reg, r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitAddF64()
+{
+ // TODO / OPTIMIZE: Ditto check for constant here (Bug 1316803)
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ masm.addDouble(r1.reg, r0.reg);
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitAddF32()
+{
+ // TODO / OPTIMIZE: Ditto check for constant here (Bug 1316803)
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ masm.addFloat32(r1.reg, r0.reg);
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitSubtractI32()
+{
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.sub32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitSubtractI64()
+{
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.sub64(r1.reg, r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitSubtractF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ masm.subFloat32(r1.reg, r0.reg);
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitSubtractF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ masm.subDouble(r1.reg, r0.reg);
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitMultiplyI32()
+{
+ // TODO / OPTIMIZE: Multiplication by constant is common (Bug 1275442, 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForIntMulDiv(&r0, &r1);
+ masm.mul32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitMultiplyI64()
+{
+ // TODO / OPTIMIZE: Multiplication by constant is common (Bug 1275442, 1316803)
+ RegI64 r0, r1;
+ RegI32 temp;
+#if defined(JS_CODEGEN_X64)
+ // srcDest must be rax, and rdx will be clobbered.
+ need2xI64(specific_rax, specific_rdx);
+ r1 = popI64();
+ r0 = popI64ToSpecific(specific_rax);
+ freeI64(specific_rdx);
+#elif defined(JS_CODEGEN_X86)
+ need2xI32(specific_eax, specific_edx);
+ r1 = popI64();
+ r0 = popI64ToSpecific(RegI64(Register64(specific_edx.reg, specific_eax.reg)));
+ temp = needI32();
+#else
+ pop2xI64(&r0, &r1);
+ temp = needI32();
+#endif
+ masm.mul64(r1.reg, r0.reg, temp.reg);
+ if (temp.reg != Register::Invalid())
+ freeI32(temp);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitMultiplyF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ masm.mulFloat32(r1.reg, r0.reg);
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitMultiplyF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ masm.mulDouble(r1.reg, r0.reg);
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitQuotientI32()
+{
+ // TODO / OPTIMIZE: Fast case if lhs >= 0 and rhs is power of two (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForIntMulDiv(&r0, &r1);
+
+ Label done;
+ checkDivideByZeroI32(r1, r0, &done);
+ checkDivideSignedOverflowI32(r1, r0, &done, ZeroOnOverflow(false));
+ masm.quotient32(r1.reg, r0.reg, IsUnsigned(false));
+ masm.bind(&done);
+
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitQuotientU32()
+{
+ // TODO / OPTIMIZE: Fast case if lhs >= 0 and rhs is power of two (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForIntMulDiv(&r0, &r1);
+
+ Label done;
+ checkDivideByZeroI32(r1, r0, &done);
+ masm.quotient32(r1.reg, r0.reg, IsUnsigned(true));
+ masm.bind(&done);
+
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitRemainderI32()
+{
+ // TODO / OPTIMIZE: Fast case if lhs >= 0 and rhs is power of two (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForIntMulDiv(&r0, &r1);
+
+ Label done;
+ checkDivideByZeroI32(r1, r0, &done);
+ checkDivideSignedOverflowI32(r1, r0, &done, ZeroOnOverflow(true));
+ masm.remainder32(r1.reg, r0.reg, IsUnsigned(false));
+ masm.bind(&done);
+
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitRemainderU32()
+{
+ // TODO / OPTIMIZE: Fast case if lhs >= 0 and rhs is power of two (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForIntMulDiv(&r0, &r1);
+
+ Label done;
+ checkDivideByZeroI32(r1, r0, &done);
+ masm.remainder32(r1.reg, r0.reg, IsUnsigned(true));
+ masm.bind(&done);
+
+ freeI32(r1);
+ pushI32(r0);
+}
+
+#ifndef INT_DIV_I64_CALLOUT
+void
+BaseCompiler::emitQuotientI64()
+{
+# ifdef JS_PUNBOX64
+ RegI64 r0, r1;
+ pop2xI64ForIntDiv(&r0, &r1);
+ quotientI64(r1, r0, IsUnsigned(false));
+ freeI64(r1);
+ pushI64(r0);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitQuotientI64");
+# endif
+}
+
+void
+BaseCompiler::emitQuotientU64()
+{
+# ifdef JS_PUNBOX64
+ RegI64 r0, r1;
+ pop2xI64ForIntDiv(&r0, &r1);
+ quotientI64(r1, r0, IsUnsigned(true));
+ freeI64(r1);
+ pushI64(r0);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitQuotientU64");
+# endif
+}
+
+void
+BaseCompiler::emitRemainderI64()
+{
+# ifdef JS_PUNBOX64
+ RegI64 r0, r1;
+ pop2xI64ForIntDiv(&r0, &r1);
+ remainderI64(r1, r0, IsUnsigned(false));
+ freeI64(r1);
+ pushI64(r0);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitRemainderI64");
+# endif
+}
+
+void
+BaseCompiler::emitRemainderU64()
+{
+# ifdef JS_PUNBOX64
+ RegI64 r0, r1;
+ pop2xI64ForIntDiv(&r0, &r1);
+ remainderI64(r1, r0, IsUnsigned(true));
+ freeI64(r1);
+ pushI64(r0);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitRemainderU64");
+# endif
+}
+#endif // INT_DIV_I64_CALLOUT
+
+void
+BaseCompiler::emitDivideF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ masm.divFloat32(r1.reg, r0.reg);
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitDivideF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ masm.divDouble(r1.reg, r0.reg);
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitMinI32()
+{
+ emitMinMaxI32(Assembler::LessThan);
+}
+
+void
+BaseCompiler::emitMaxI32()
+{
+ emitMinMaxI32(Assembler::GreaterThan);
+}
+
+void
+BaseCompiler::emitMinMaxI32(Assembler::Condition cond)
+{
+ Label done;
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ // TODO / OPTIMIZE (bug 1316823): Use conditional move on some platforms?
+ masm.branch32(cond, r0.reg, r1.reg, &done);
+ moveI32(r1, r0);
+ masm.bind(&done);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitMinF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ if (!isCompilingAsmJS()) {
+        // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): Don't do this if one of the operands
+ // is known to be a constant.
+ ScratchF32 zero(*this);
+ masm.loadConstantFloat32(0.f, zero);
+ masm.subFloat32(zero, r0.reg);
+ masm.subFloat32(zero, r1.reg);
+ }
+ masm.minFloat32(r1.reg, r0.reg, HandleNaNSpecially(true));
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitMaxF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ if (!isCompilingAsmJS()) {
+        // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF32 zero(*this);
+ masm.loadConstantFloat32(0.f, zero);
+ masm.subFloat32(zero, r0.reg);
+ masm.subFloat32(zero, r1.reg);
+ }
+ masm.maxFloat32(r1.reg, r0.reg, HandleNaNSpecially(true));
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitMinF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ if (!isCompilingAsmJS()) {
+        // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF64 zero(*this);
+ masm.loadConstantDouble(0, zero);
+ masm.subDouble(zero, r0.reg);
+ masm.subDouble(zero, r1.reg);
+ }
+ masm.minDouble(r1.reg, r0.reg, HandleNaNSpecially(true));
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitMaxF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ if (!isCompilingAsmJS()) {
+        // Convert signaling NaNs to quiet NaNs.
+ //
+ // TODO / OPTIMIZE (bug 1316824): see comment in emitMinF32.
+ ScratchF64 zero(*this);
+ masm.loadConstantDouble(0, zero);
+ masm.subDouble(zero, r0.reg);
+ masm.subDouble(zero, r1.reg);
+ }
+ masm.maxDouble(r1.reg, r0.reg, HandleNaNSpecially(true));
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitCopysignF32()
+{
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
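+    // Do the bit manipulation in integer registers: keep the magnitude bits
+    // of r0 (mask with INT32_MAX), keep only the sign bit of r1 (mask with
+    // INT32_MIN), then OR them together and move the result back into r0.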
+ RegI32 i0 = needI32();
+ RegI32 i1 = needI32();
+ masm.moveFloat32ToGPR(r0.reg, i0.reg);
+ masm.moveFloat32ToGPR(r1.reg, i1.reg);
+ masm.and32(Imm32(INT32_MAX), i0.reg);
+ masm.and32(Imm32(INT32_MIN), i1.reg);
+ masm.or32(i1.reg, i0.reg);
+ masm.moveGPRToFloat32(i0.reg, r0.reg);
+ freeI32(i0);
+ freeI32(i1);
+ freeF32(r1);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitCopysignF64()
+{
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ RegI64 x0 = needI64();
+ RegI64 x1 = needI64();
+ reinterpretF64AsI64(r0, x0);
+ reinterpretF64AsI64(r1, x1);
+ masm.and64(Imm64(INT64_MAX), x0.reg);
+ masm.and64(Imm64(INT64_MIN), x1.reg);
+ masm.or64(x1.reg, x0.reg);
+ reinterpretI64AsF64(x0, r0);
+ freeI64(x0);
+ freeI64(x1);
+ freeF64(r1);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitOrI32()
+{
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.or32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitOrI64()
+{
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.or64(r1.reg, r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitAndI32()
+{
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.and32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitAndI64()
+{
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.and64(r1.reg, r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitXorI32()
+{
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.xor32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitXorI64()
+{
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.xor64(r1.reg, r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitShlI32()
+{
+ int32_t c;
+ if (popConstI32(c)) {
+ RegI32 r = popI32();
+ masm.lshift32(Imm32(c & 31), r.reg);
+ pushI32(r);
+ } else {
+ RegI32 r0, r1;
+ pop2xI32ForShiftOrRotate(&r0, &r1);
+ maskShiftCount32(r1);
+ masm.lshift32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+ }
+}
+
+void
+BaseCompiler::emitShlI64()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64ForShiftOrRotate(&r0, &r1);
+ masm.lshift64(lowPart(r1), r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitShrI32()
+{
+ int32_t c;
+ if (popConstI32(c)) {
+ RegI32 r = popI32();
+ masm.rshift32Arithmetic(Imm32(c & 31), r.reg);
+ pushI32(r);
+ } else {
+ RegI32 r0, r1;
+ pop2xI32ForShiftOrRotate(&r0, &r1);
+ maskShiftCount32(r1);
+ masm.rshift32Arithmetic(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+ }
+}
+
+void
+BaseCompiler::emitShrI64()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64ForShiftOrRotate(&r0, &r1);
+ masm.rshift64Arithmetic(lowPart(r1), r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitShrU32()
+{
+ int32_t c;
+ if (popConstI32(c)) {
+ RegI32 r = popI32();
+ masm.rshift32(Imm32(c & 31), r.reg);
+ pushI32(r);
+ } else {
+ RegI32 r0, r1;
+ pop2xI32ForShiftOrRotate(&r0, &r1);
+ maskShiftCount32(r1);
+ masm.rshift32(r1.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+ }
+}
+
+void
+BaseCompiler::emitShrU64()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64ForShiftOrRotate(&r0, &r1);
+ masm.rshift64(lowPart(r1), r0.reg);
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitRotrI32()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForShiftOrRotate(&r0, &r1);
+ masm.rotateRight(r1.reg, r0.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitRotrI64()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64ForShiftOrRotate(&r0, &r1);
+ masm.rotateRight64(lowPart(r1), r0.reg, r0.reg, maybeHighPart(r1));
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitRotlI32()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI32 r0, r1;
+ pop2xI32ForShiftOrRotate(&r0, &r1);
+ masm.rotateLeft(r1.reg, r0.reg, r0.reg);
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitRotlI64()
+{
+ // TODO / OPTIMIZE: Constant rhs (Bug 1316803)
+ RegI64 r0, r1;
+ pop2xI64ForShiftOrRotate(&r0, &r1);
+ masm.rotateLeft64(lowPart(r1), r0.reg, r0.reg, maybeHighPart(r1));
+ freeI64(r1);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitEqzI32()
+{
+ // TODO / OPTIMIZE: Boolean evaluation for control (Bug 1286816)
+ RegI32 r0 = popI32();
+ masm.cmp32Set(Assembler::Equal, r0.reg, Imm32(0), r0.reg);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitEqzI64()
+{
+ // TODO / OPTIMIZE: Boolean evaluation for control (Bug 1286816)
+ // TODO / OPTIMIZE: Avoid the temp register (Bug 1316848)
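+    // Compare the operand against a zeroed temporary and narrow the boolean
+    // result into a 32-bit register carved out of r0.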
+ RegI64 r0 = popI64();
+ RegI64 r1 = needI64();
+ setI64(0, r1);
+ RegI32 i0 = fromI64(r0);
+ cmp64Set(Assembler::Equal, r0, r1, i0);
+ freeI64(r1);
+ freeI64Except(r0, i0);
+ pushI32(i0);
+}
+
+void
+BaseCompiler::emitClzI32()
+{
+ RegI32 r0 = popI32();
+ masm.clz32(r0.reg, r0.reg, IsKnownNotZero(false));
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitClzI64()
+{
+ RegI64 r0 = popI64();
+ masm.clz64(r0.reg, lowPart(r0));
+ maybeClearHighPart(r0);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitCtzI32()
+{
+ RegI32 r0 = popI32();
+ masm.ctz32(r0.reg, r0.reg, IsKnownNotZero(false));
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitCtzI64()
+{
+ RegI64 r0 = popI64();
+ masm.ctz64(r0.reg, lowPart(r0));
+ maybeClearHighPart(r0);
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitPopcntI32()
+{
+ RegI32 r0 = popI32();
+ if (popcnt32NeedsTemp()) {
+ RegI32 tmp = needI32();
+ masm.popcnt32(r0.reg, r0.reg, tmp.reg);
+ freeI32(tmp);
+ } else {
+ masm.popcnt32(r0.reg, r0.reg, invalidI32().reg);
+ }
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitPopcntI64()
+{
+ RegI64 r0 = popI64();
+ if (popcnt64NeedsTemp()) {
+ RegI32 tmp = needI32();
+ masm.popcnt64(r0.reg, r0.reg, tmp.reg);
+ freeI32(tmp);
+ } else {
+ masm.popcnt64(r0.reg, r0.reg, invalidI32().reg);
+ }
+ pushI64(r0);
+}
+
+void
+BaseCompiler::emitBitNotI32()
+{
+ RegI32 r0 = popI32();
+ masm.not32(r0.reg);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitAbsI32()
+{
+ // TODO / OPTIMIZE (bug 1316823): Use conditional move on some platforms?
+ Label nonnegative;
+ RegI32 r0 = popI32();
+ masm.branch32(Assembler::GreaterThanOrEqual, r0.reg, Imm32(0), &nonnegative);
+ masm.neg32(r0.reg);
+ masm.bind(&nonnegative);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitAbsF32()
+{
+ RegF32 r0 = popF32();
+ masm.absFloat32(r0.reg, r0.reg);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitAbsF64()
+{
+ RegF64 r0 = popF64();
+ masm.absDouble(r0.reg, r0.reg);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitNegateI32()
+{
+ RegI32 r0 = popI32();
+ masm.neg32(r0.reg);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitNegateF32()
+{
+ RegF32 r0 = popF32();
+ masm.negateFloat(r0.reg);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitNegateF64()
+{
+ RegF64 r0 = popF64();
+ masm.negateDouble(r0.reg);
+ pushF64(r0);
+}
+
+void
+BaseCompiler::emitSqrtF32()
+{
+ RegF32 r0 = popF32();
+ masm.sqrtFloat32(r0.reg, r0.reg);
+ pushF32(r0);
+}
+
+void
+BaseCompiler::emitSqrtF64()
+{
+ RegF64 r0 = popF64();
+ masm.sqrtDouble(r0.reg, r0.reg);
+ pushF64(r0);
+}
+
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF32ToI32()
+{
+ RegF32 r0 = popF32();
+ RegI32 i0 = needI32();
+ if (!truncateF32ToI32(r0, i0, isUnsigned))
+ return false;
+ freeF32(r0);
+ pushI32(i0);
+ return true;
+}
+
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF64ToI32()
+{
+ RegF64 r0 = popF64();
+ RegI32 i0 = needI32();
+ if (!truncateF64ToI32(r0, i0, isUnsigned))
+ return false;
+ freeF64(r0);
+ pushI32(i0);
+ return true;
+}
+
+#ifndef FLOAT_TO_I64_CALLOUT
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF32ToI64()
+{
+ RegF32 r0 = popF32();
+ RegI64 x0 = needI64();
+ if (isUnsigned) {
+ RegF64 tmp = needF64();
+ if (!truncateF32ToI64(r0, x0, isUnsigned, tmp))
+ return false;
+ freeF64(tmp);
+ } else {
+ if (!truncateF32ToI64(r0, x0, isUnsigned, invalidF64()))
+ return false;
+ }
+ freeF32(r0);
+ pushI64(x0);
+ return true;
+}
+
+template<bool isUnsigned>
+bool
+BaseCompiler::emitTruncateF64ToI64()
+{
+ RegF64 r0 = popF64();
+ RegI64 x0 = needI64();
+ if (isUnsigned) {
+ RegF64 tmp = needF64();
+ if (!truncateF64ToI64(r0, x0, isUnsigned, tmp))
+ return false;
+ freeF64(tmp);
+ } else {
+ if (!truncateF64ToI64(r0, x0, isUnsigned, invalidF64()))
+ return false;
+ }
+ freeF64(r0);
+ pushI64(x0);
+ return true;
+}
+#endif // FLOAT_TO_I64_CALLOUT
+
+void
+BaseCompiler::emitWrapI64ToI32()
+{
+ RegI64 r0 = popI64();
+ RegI32 i0 = fromI64(r0);
+ wrapI64ToI32(r0, i0);
+ freeI64Except(r0, i0);
+ pushI32(i0);
+}
+
+void
+BaseCompiler::emitExtendI32ToI64()
+{
+ RegI64 x0 = popI32ForSignExtendI64();
+ RegI32 r0 = RegI32(lowPart(x0));
+ signExtendI32ToI64(r0, x0);
+ pushI64(x0);
+ // Note: no need to free r0, since it is part of x0
+}
+
+void
+BaseCompiler::emitExtendU32ToI64()
+{
+ RegI32 r0 = popI32();
+ RegI64 x0 = widenI32(r0);
+ extendU32ToI64(r0, x0);
+ pushI64(x0);
+ // Note: no need to free r0, since it is part of x0
+}
+
+void
+BaseCompiler::emitReinterpretF32AsI32()
+{
+ RegF32 r0 = popF32();
+ RegI32 i0 = needI32();
+ masm.moveFloat32ToGPR(r0.reg, i0.reg);
+ freeF32(r0);
+ pushI32(i0);
+}
+
+void
+BaseCompiler::emitReinterpretF64AsI64()
+{
+ RegF64 r0 = popF64();
+ RegI64 x0 = needI64();
+ reinterpretF64AsI64(r0, x0);
+ freeF64(r0);
+ pushI64(x0);
+}
+
+void
+BaseCompiler::emitConvertF64ToF32()
+{
+ RegF64 r0 = popF64();
+ RegF32 f0 = needF32();
+ masm.convertDoubleToFloat32(r0.reg, f0.reg);
+ freeF64(r0);
+ pushF32(f0);
+}
+
+void
+BaseCompiler::emitConvertI32ToF32()
+{
+ RegI32 r0 = popI32();
+ RegF32 f0 = needF32();
+ masm.convertInt32ToFloat32(r0.reg, f0.reg);
+ freeI32(r0);
+ pushF32(f0);
+}
+
+void
+BaseCompiler::emitConvertU32ToF32()
+{
+ RegI32 r0 = popI32();
+ RegF32 f0 = needF32();
+ masm.convertUInt32ToFloat32(r0.reg, f0.reg);
+ freeI32(r0);
+ pushF32(f0);
+}
+
+#ifndef I64_TO_FLOAT_CALLOUT
+void
+BaseCompiler::emitConvertI64ToF32()
+{
+ RegI64 r0 = popI64();
+ RegF32 f0 = needF32();
+ convertI64ToF32(r0, IsUnsigned(false), f0, RegI32());
+ freeI64(r0);
+ pushF32(f0);
+}
+
+void
+BaseCompiler::emitConvertU64ToF32()
+{
+ RegI64 r0 = popI64();
+ RegF32 f0 = needF32();
+ RegI32 temp;
+ if (convertI64ToFloatNeedsTemp(IsUnsigned(true)))
+ temp = needI32();
+ convertI64ToF32(r0, IsUnsigned(true), f0, temp);
+ if (temp.reg != Register::Invalid())
+ freeI32(temp);
+ freeI64(r0);
+ pushF32(f0);
+}
+#endif
+
+void
+BaseCompiler::emitConvertF32ToF64()
+{
+ RegF32 r0 = popF32();
+ RegF64 d0 = needF64();
+ masm.convertFloat32ToDouble(r0.reg, d0.reg);
+ freeF32(r0);
+ pushF64(d0);
+}
+
+void
+BaseCompiler::emitConvertI32ToF64()
+{
+ RegI32 r0 = popI32();
+ RegF64 d0 = needF64();
+ masm.convertInt32ToDouble(r0.reg, d0.reg);
+ freeI32(r0);
+ pushF64(d0);
+}
+
+void
+BaseCompiler::emitConvertU32ToF64()
+{
+ RegI32 r0 = popI32();
+ RegF64 d0 = needF64();
+ masm.convertUInt32ToDouble(r0.reg, d0.reg);
+ freeI32(r0);
+ pushF64(d0);
+}
+
+#ifndef I64_TO_FLOAT_CALLOUT
+void
+BaseCompiler::emitConvertI64ToF64()
+{
+ RegI64 r0 = popI64();
+ RegF64 d0 = needF64();
+ convertI64ToF64(r0, IsUnsigned(false), d0, RegI32());
+ freeI64(r0);
+ pushF64(d0);
+}
+
+void
+BaseCompiler::emitConvertU64ToF64()
+{
+ RegI64 r0 = popI64();
+ RegF64 d0 = needF64();
+ RegI32 temp;
+ if (convertI64ToFloatNeedsTemp(IsUnsigned(true)))
+ temp = needI32();
+ convertI64ToF64(r0, IsUnsigned(true), d0, temp);
+ if (temp.reg != Register::Invalid())
+ freeI32(temp);
+ freeI64(r0);
+ pushF64(d0);
+}
+#endif // I64_TO_FLOAT_CALLOUT
+
+void
+BaseCompiler::emitReinterpretI32AsF32()
+{
+ RegI32 r0 = popI32();
+ RegF32 f0 = needF32();
+ masm.moveGPRToFloat32(r0.reg, f0.reg);
+ freeI32(r0);
+ pushF32(f0);
+}
+
+void
+BaseCompiler::emitReinterpretI64AsF64()
+{
+ RegI64 r0 = popI64();
+ RegF64 d0 = needF64();
+ reinterpretI64AsF64(r0, d0);
+ freeI64(r0);
+ pushF64(d0);
+}
+
+// For blocks and loops and ifs:
+//
+// - Sync the value stack before going into the block in order to simplify exit
+// from the block: all exits from the block can assume that there are no
+// live registers except the one carrying the exit value.
+// - The block can accumulate a number of dead values on the stacks, so when
+// branching out of the block or falling out at the end be sure to
+// pop the appropriate stacks back to where they were on entry, while
+// preserving the exit value.
+// - A continue branch in a loop is much like an exit branch, but the branch
+// value must not be preserved.
+// - The exit value is always in a designated join register (type dependent).
+
+bool
+BaseCompiler::emitBlock()
+{
+ if (!iter_.readBlock())
+ return false;
+
+ UniquePooledLabel blockEnd(newLabel());
+ if (!blockEnd)
+ return false;
+
+ if (!deadCode_)
+ sync(); // Simplifies branching out from block
+
+ return pushControl(&blockEnd);
+}
+
+void
+BaseCompiler::endBlock(ExprType type, bool isFunctionBody)
+{
+ Control& block = controlItem(0);
+
+ // Save the value.
+ AnyReg r;
+ if (!deadCode_ && !IsVoid(type))
+ r = popJoinReg();
+
+ // Leave the block.
+ popStackOnBlockExit(block.framePushed);
+
+ // Bind after cleanup: branches out will have popped the stack.
+ if (block.label->used()) {
+ masm.bind(block.label);
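+        // If the fall-through into the end of the block was dead but some
+        // branch targets this label, that branch left its result in the join
+        // register; allocate that register here so the push below can track it.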
+ if (deadCode_ && !IsVoid(type))
+ r = allocJoinReg(type);
+ deadCode_ = false;
+ }
+
+ MOZ_ASSERT(stk_.length() == block.stackSize);
+
+ // Retain the value stored in joinReg by all paths.
+ if (!deadCode_) {
+ if (!IsVoid(type))
+ pushJoinReg(r);
+
+ if (isFunctionBody)
+ doReturn(func_.sig().ret());
+ }
+
+ popControl();
+}
+
+bool
+BaseCompiler::emitLoop()
+{
+ if (!iter_.readLoop())
+ return false;
+
+ UniquePooledLabel blockCont(newLabel());
+ if (!blockCont)
+ return false;
+
+ if (!deadCode_)
+ sync(); // Simplifies branching out from block
+
+ if (!pushControl(&blockCont))
+ return false;
+
+ if (!deadCode_) {
+ masm.bind(controlItem(0).label);
+ addInterruptCheck();
+ }
+
+ return true;
+}
+
+void
+BaseCompiler::endLoop(ExprType type)
+{
+ Control& block = controlItem(0);
+
+ AnyReg r;
+ if (!deadCode_ && !IsVoid(type))
+ r = popJoinReg();
+
+ popStackOnBlockExit(block.framePushed);
+
+ MOZ_ASSERT(stk_.length() == block.stackSize);
+
+ popControl();
+
+ // Retain the value stored in joinReg by all paths.
+ if (!deadCode_ && !IsVoid(type))
+ pushJoinReg(r);
+}
+
+// The bodies of the "then" and "else" arms can be arbitrary sequences
+// of expressions, they push control and increment the nesting and can
+// even be targeted by jumps. A branch to the "if" block branches to
+// the exit of the if, i.e., it's like "break".  Consider:
+//
+// (func (result i32)
+// (if (i32.const 1)
+// (begin (br 1) (unreachable))
+// (begin (unreachable)))
+// (i32.const 1))
+//
+// The branch causes neither of the unreachable expressions to be
+// evaluated.
+
+bool
+BaseCompiler::emitIf()
+{
+ Nothing unused_cond;
+ if (!iter_.readIf(&unused_cond))
+ return false;
+
+ UniquePooledLabel endLabel(newLabel());
+ if (!endLabel)
+ return false;
+
+ UniquePooledLabel elseLabel(newLabel());
+ if (!elseLabel)
+ return false;
+
+ RegI32 rc;
+ if (!deadCode_) {
+ rc = popI32();
+ sync(); // Simplifies branching out from the arms
+ }
+
+ if (!pushControl(&endLabel, &elseLabel))
+ return false;
+
+ if (!deadCode_) {
+ masm.branch32(Assembler::Equal, rc.reg, Imm32(0), controlItem(0).otherLabel);
+ freeI32(rc);
+ }
+
+ return true;
+}
+
+void
+BaseCompiler::endIfThen()
+{
+ Control& ifThen = controlItem(0);
+
+ popStackOnBlockExit(ifThen.framePushed);
+
+ if (ifThen.otherLabel->used())
+ masm.bind(ifThen.otherLabel);
+
+ if (ifThen.label->used())
+ masm.bind(ifThen.label);
+
+ deadCode_ = ifThen.deadOnArrival;
+
+ MOZ_ASSERT(stk_.length() == ifThen.stackSize);
+
+ popControl();
+}
+
+bool
+BaseCompiler::emitElse()
+{
+ ExprType thenType;
+ Nothing unused_thenValue;
+ if (!iter_.readElse(&thenType, &unused_thenValue))
+ return false;
+
+ Control& ifThenElse = controlItem(0);
+
+ // See comment in endIfThenElse, below.
+
+ // Exit the "then" branch.
+
+ ifThenElse.deadThenBranch = deadCode_;
+
+ AnyReg r;
+ if (!deadCode_ && !IsVoid(thenType))
+ r = popJoinReg();
+
+ popStackOnBlockExit(ifThenElse.framePushed);
+
+ if (!deadCode_)
+ masm.jump(ifThenElse.label);
+
+ if (ifThenElse.otherLabel->used())
+ masm.bind(ifThenElse.otherLabel);
+
+ // Reset to the "else" branch.
+
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize);
+
+ if (!deadCode_ && !IsVoid(thenType))
+ freeJoinReg(r);
+
+ deadCode_ = ifThenElse.deadOnArrival;
+
+ return true;
+}
+
+void
+BaseCompiler::endIfThenElse(ExprType type)
+{
+ Control& ifThenElse = controlItem(0);
+
+ // The expression type is not a reliable guide to what we'll find
+    // on the stack; we could have (if E (i32.const 1) (unreachable))
+ // in which case the "else" arm is AnyType but the type of the
+ // full expression is I32. So restore whatever's there, not what
+ // we want to find there. The "then" arm has the same constraint.
+
+ AnyReg r;
+ if (!deadCode_ && !IsVoid(type))
+ r = popJoinReg();
+
+ popStackOnBlockExit(ifThenElse.framePushed);
+
+ if (ifThenElse.label->used())
+ masm.bind(ifThenElse.label);
+
+ if (!ifThenElse.deadOnArrival &&
+ (!ifThenElse.deadThenBranch || !deadCode_ || ifThenElse.label->bound())) {
+ if (deadCode_ && !IsVoid(type))
+ r = allocJoinReg(type);
+ deadCode_ = false;
+ }
+
+ MOZ_ASSERT(stk_.length() == ifThenElse.stackSize);
+
+ popControl();
+
+ if (!deadCode_ && !IsVoid(type))
+ pushJoinReg(r);
+}
+
+bool
+BaseCompiler::emitEnd()
+{
+ LabelKind kind;
+ ExprType type;
+ Nothing unused_value;
+ if (!iter_.readEnd(&kind, &type, &unused_value))
+ return false;
+
+ switch (kind) {
+ case LabelKind::Block: endBlock(type, iter_.controlStackEmpty()); break;
+ case LabelKind::Loop: endLoop(type); break;
+ case LabelKind::UnreachableThen:
+ case LabelKind::Then: endIfThen(); break;
+ case LabelKind::Else: endIfThenElse(type); break;
+ }
+
+ return true;
+}
+
+bool
+BaseCompiler::emitBr()
+{
+ uint32_t relativeDepth;
+ ExprType type;
+ Nothing unused_value;
+ if (!iter_.readBr(&relativeDepth, &type, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ Control& target = controlItem(relativeDepth);
+
+ // Save any value in the designated join register, where the
+ // normal block exit code will also leave it.
+
+ AnyReg r;
+ if (!IsVoid(type))
+ r = popJoinReg();
+
+ popStackBeforeBranch(target.framePushed);
+ masm.jump(target.label);
+
+ // The register holding the join value is free for the remainder
+ // of this block.
+
+ if (!IsVoid(type))
+ freeJoinReg(r);
+
+ deadCode_ = true;
+
+ popValueStackTo(ctl_.back().stackSize);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitBrIf()
+{
+ uint32_t relativeDepth;
+ ExprType type;
+ Nothing unused_value, unused_condition;
+ if (!iter_.readBrIf(&relativeDepth, &type, &unused_value, &unused_condition))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ Control& target = controlItem(relativeDepth);
+
+ // TODO / OPTIMIZE (Bug 1286816): Optimize boolean evaluation for control by
+ // allowing a conditional expression to be left on the stack and reified
+ // here as part of the branch instruction.
+
+ // Don't use joinReg for rc
+ maybeReserveJoinRegI(type);
+
+ // Condition value is on top, always I32.
+ RegI32 rc = popI32();
+
+ maybeUnreserveJoinRegI(type);
+
+ // Save any value in the designated join register, where the
+ // normal block exit code will also leave it.
+ AnyReg r;
+ if (!IsVoid(type))
+ r = popJoinReg();
+
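+    // Branch only when the condition is nonzero; a zero condition skips over
+    // the stack unwinding and the jump to the target.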
+ Label notTaken;
+ masm.branch32(Assembler::Equal, rc.reg, Imm32(0), &notTaken);
+ popStackBeforeBranch(target.framePushed);
+ masm.jump(target.label);
+ masm.bind(&notTaken);
+
+ // This register is free in the remainder of the block.
+ freeI32(rc);
+
+ // br_if returns its value(s).
+ if (!IsVoid(type))
+ pushJoinReg(r);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitBrTable()
+{
+ uint32_t tableLength;
+ ExprType type;
+ Nothing unused_value, unused_index;
+ if (!iter_.readBrTable(&tableLength, &type, &unused_value, &unused_index))
+ return false;
+
+ LabelVector stubs;
+ if (!stubs.reserve(tableLength+1))
+ return false;
+
+ Uint32Vector depths;
+ if (!depths.reserve(tableLength))
+ return false;
+
+ for (size_t i = 0; i < tableLength; ++i) {
+ uint32_t depth;
+ if (!iter_.readBrTableEntry(&type, &unused_value, &depth))
+ return false;
+ depths.infallibleAppend(depth);
+ }
+
+ uint32_t defaultDepth;
+ if (!iter_.readBrTableDefault(&type, &unused_value, &defaultDepth))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // Don't use joinReg for rc
+ maybeReserveJoinRegI(type);
+
+ // Table switch value always on top.
+ RegI32 rc = popI32();
+
+ maybeUnreserveJoinRegI(type);
+
+ AnyReg r;
+ if (!IsVoid(type))
+ r = popJoinReg();
+
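+    // Code layout from here: a bounds check on the index, then the
+    // out-of-range (default) path, then one stub per table entry that unwinds
+    // the stack to its target, then an inline table of stub addresses, and
+    // finally the indirect dispatch through that table.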
+ Label dispatchCode;
+ masm.branch32(Assembler::Below, rc.reg, Imm32(tableLength), &dispatchCode);
+
+ // This is the out-of-range stub. rc is dead here but we don't need it.
+
+ popStackBeforeBranch(controlItem(defaultDepth).framePushed);
+ masm.jump(controlItem(defaultDepth).label);
+
+ // Emit stubs. rc is dead in all of these but we don't need it.
+ //
+ // TODO / OPTIMIZE (Bug 1316804): Branch directly to the case code if we
+ // can, don't emit an intermediate stub.
+
+ for (uint32_t i = 0; i < tableLength; i++) {
+ PooledLabel* stubLabel = newLabel();
+ // The labels in the vector are in the TempAllocator and will
+ // be freed by and by.
+ if (!stubLabel)
+ return false;
+ stubs.infallibleAppend(stubLabel);
+ masm.bind(stubLabel);
+ uint32_t k = depths[i];
+ popStackBeforeBranch(controlItem(k).framePushed);
+ masm.jump(controlItem(k).label);
+ }
+
+ // Emit table.
+
+ Label theTable;
+ masm.bind(&theTable);
+ jumpTable(stubs);
+
+ // Emit indirect jump. rc is live here.
+
+ masm.bind(&dispatchCode);
+ tableSwitch(&theTable, rc);
+
+ deadCode_ = true;
+
+ // Clean up.
+
+ freeI32(rc);
+ if (!IsVoid(type))
+ freeJoinReg(r);
+
+ for (uint32_t i = 0; i < tableLength; i++)
+ freeLabel(stubs[i]);
+
+ popValueStackTo(ctl_.back().stackSize);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitDrop()
+{
+ if (!iter_.readDrop())
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ popStackIfMemory();
+ popValueStackBy(1);
+ return true;
+}
+
+void
+BaseCompiler::doReturn(ExprType type)
+{
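+    // Pop the return value directly into the ABI return register for its
+    // type, then run the common return cleanup; freeing the register
+    // afterwards just keeps the allocator's bookkeeping consistent.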
+ switch (type) {
+ case ExprType::Void: {
+ returnCleanup();
+ break;
+ }
+ case ExprType::I32: {
+ RegI32 rv = popI32(RegI32(ReturnReg));
+ returnCleanup();
+ freeI32(rv);
+ break;
+ }
+ case ExprType::I64: {
+ RegI64 rv = popI64(RegI64(ReturnReg64));
+ returnCleanup();
+ freeI64(rv);
+ break;
+ }
+ case ExprType::F64: {
+ RegF64 rv = popF64(RegF64(ReturnDoubleReg));
+ returnCleanup();
+ freeF64(rv);
+ break;
+ }
+ case ExprType::F32: {
+ RegF32 rv = popF32(RegF32(ReturnFloat32Reg));
+ returnCleanup();
+ freeF32(rv);
+ break;
+ }
+ default: {
+ MOZ_CRASH("Function return type");
+ }
+ }
+}
+
+bool
+BaseCompiler::emitReturn()
+{
+ Nothing unused_value;
+ if (!iter_.readReturn(&unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ doReturn(func_.sig().ret());
+ deadCode_ = true;
+
+ popValueStackTo(ctl_.back().stackSize);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall)
+{
+ MOZ_ASSERT(!deadCode_);
+
+ startCallArgs(baselineCall, stackArgAreaSize(args));
+
+ uint32_t numArgs = args.length();
+ for (size_t i = 0; i < numArgs; ++i) {
+ ValType argType = args[i];
+ Nothing arg_;
+ if (!iter_.readCallArg(argType, numArgs, i, &arg_))
+ return false;
+ Stk& arg = peek(numArgs - 1 - i);
+ passArg(baselineCall, argType, arg);
+ }
+
+ // Pass the TLS pointer as a hidden argument in WasmTlsReg. Load
+    // it directly out of its stack slot so we don't interfere with
+ // the stk_.
+ if (baselineCall.loadTlsBefore)
+ loadFromFramePtr(WasmTlsReg, frameOffsetFromSlot(tlsSlot_, MIRType::Pointer));
+
+ if (!iter_.readCallArgsEnd(numArgs))
+ return false;
+
+ return true;
+}
+
+void
+BaseCompiler::pushReturned(const FunctionCall& call, ExprType type)
+{
+ switch (type) {
+ case ExprType::Void:
+ MOZ_CRASH("Compiler bug: attempt to push void return");
+ break;
+ case ExprType::I32: {
+ RegI32 rv = captureReturnedI32();
+ pushI32(rv);
+ break;
+ }
+ case ExprType::I64: {
+ RegI64 rv = captureReturnedI64();
+ pushI64(rv);
+ break;
+ }
+ case ExprType::F32: {
+ RegF32 rv = captureReturnedF32(call);
+ pushF32(rv);
+ break;
+ }
+ case ExprType::F64: {
+ RegF64 rv = captureReturnedF64(call);
+ pushF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Function return type");
+ }
+}
+
+// For now, always sync() at the beginning of the call to easily save live
+// values.
+//
+// TODO / OPTIMIZE (Bug 1316806): We may be able to avoid a full sync(), since
+// all we want is to save live registers that won't be saved by the callee or
+// that we need for outgoing args - we don't need to sync the locals. We can
+// just push the necessary registers; it'll be like a lightweight sync.
+//
+// Even some of the pushing may be unnecessary if the registers will be consumed
+// by the call, because then what we want is parallel assignment to the argument
+// registers or onto the stack for outgoing arguments. A sync() is just
+// simpler.
+
+bool
+BaseCompiler::emitCall()
+{
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t funcIndex;
+ if (!iter_.readCall(&funcIndex))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ sync();
+
+ const Sig& sig = *mg_.funcSigs[funcIndex];
+ bool import = mg_.funcIsImport(funcIndex);
+
+ uint32_t numArgs = sig.args().length();
+ size_t stackSpace = stackConsumed(numArgs);
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::Wasm, import ? InterModule::True : InterModule::False);
+
+ if (!emitCallArgs(sig.args(), baselineCall))
+ return false;
+
+ if (!iter_.readCallReturn(sig.ret()))
+ return false;
+
+ if (import)
+ callImport(mg_.funcImportGlobalDataOffsets[funcIndex], baselineCall);
+ else
+ callDefinition(funcIndex, baselineCall);
+
+ endCall(baselineCall);
+
+ // TODO / OPTIMIZE (bug 1316827): It would be better to merge this
+ // freeStack() into the one in endCall, if we can.
+
+ popValueStackBy(numArgs);
+ masm.freeStack(stackSpace);
+
+ if (!IsVoid(sig.ret()))
+ pushReturned(baselineCall, sig.ret());
+
+ return true;
+}
+
+bool
+BaseCompiler::emitCallIndirect(bool oldStyle)
+{
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ uint32_t sigIndex;
+ Nothing callee_;
+ if (oldStyle) {
+ if (!iter_.readOldCallIndirect(&sigIndex))
+ return false;
+ } else {
+ if (!iter_.readCallIndirect(&sigIndex, &callee_))
+ return false;
+ }
+
+ if (deadCode_)
+ return true;
+
+ sync();
+
+ const SigWithId& sig = mg_.sigs[sigIndex];
+
+ // new style: Stack: ... arg1 .. argn callee
+ // old style: Stack: ... callee arg1 .. argn
+
+ uint32_t numArgs = sig.args().length();
+ size_t stackSpace = stackConsumed(numArgs + 1);
+
+ // The arguments must be at the stack top for emitCallArgs, so pop the
+ // callee if it is on top. Note this only pops the compiler's stack,
+ // not the CPU stack.
+
+ Stk callee = oldStyle ? peek(numArgs) : stk_.popCopy();
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::Wasm, InterModule::True);
+
+ if (!emitCallArgs(sig.args(), baselineCall))
+ return false;
+
+ if (oldStyle) {
+ if (!iter_.readOldCallIndirectCallee(&callee_))
+ return false;
+ }
+
+ if (!iter_.readCallReturn(sig.ret()))
+ return false;
+
+ callIndirect(sigIndex, callee, baselineCall);
+
+ endCall(baselineCall);
+
+ // For new style calls, the callee was popped off the compiler's
+ // stack above.
+
+ popValueStackBy(oldStyle ? numArgs + 1 : numArgs);
+
+ // TODO / OPTIMIZE (bug 1316827): It would be better to merge this
+ // freeStack() into the one in endCall, if we can.
+
+ masm.freeStack(stackSpace);
+
+ if (!IsVoid(sig.ret()))
+ pushReturned(baselineCall, sig.ret());
+
+ return true;
+}
+
+bool
+BaseCompiler::emitCommonMathCall(uint32_t lineOrBytecode, SymbolicAddress callee,
+ ValTypeVector& signature, ExprType retType)
+{
+ sync();
+
+ uint32_t numArgs = signature.length();
+ size_t stackSpace = stackConsumed(numArgs);
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::System, InterModule::False);
+
+ if (!emitCallArgs(signature, baselineCall))
+ return false;
+
+ if (!iter_.readCallReturn(retType))
+ return false;
+
+ builtinCall(callee, baselineCall);
+
+ endCall(baselineCall);
+
+ // TODO / OPTIMIZE (bug 1316827): It would be better to merge this
+ // freeStack() into the one in endCall, if we can.
+
+ popValueStackBy(numArgs);
+ masm.freeStack(stackSpace);
+
+ pushReturned(baselineCall, retType);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitUnaryMathBuiltinCall(SymbolicAddress callee, ValType operandType)
+{
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ if (deadCode_)
+ return true;
+
+ return emitCommonMathCall(lineOrBytecode, callee,
+ operandType == ValType::F32 ? SigF_ : SigD_,
+ operandType == ValType::F32 ? ExprType::F32 : ExprType::F64);
+}
+
+bool
+BaseCompiler::emitBinaryMathBuiltinCall(SymbolicAddress callee, ValType operandType)
+{
+ MOZ_ASSERT(operandType == ValType::F64);
+
+ uint32_t lineOrBytecode = 0;
+ if (callee == SymbolicAddress::ModD) {
+ // Not actually a call in the binary representation
+ } else {
+ lineOrBytecode = readCallSiteLineOrBytecode();
+ }
+
+ if (deadCode_)
+ return true;
+
+ return emitCommonMathCall(lineOrBytecode, callee, SigDD_, ExprType::F64);
+}
+
+#ifdef INT_DIV_I64_CALLOUT
+bool
+BaseCompiler::emitDivOrModI64BuiltinCall(SymbolicAddress callee, ValType operandType)
+{
+ MOZ_ASSERT(operandType == ValType::I64);
+
+ if (deadCode_)
+ return true;
+
+ sync();
+
+ needI64(abiReturnRegI64);
+
+ RegI32 temp = needI32();
+ RegI64 rhs = popI64();
+ RegI64 srcDest = popI64ToSpecific(abiReturnRegI64);
+
+ Label done;
+
+ checkDivideByZeroI64(rhs);
+
+ if (callee == SymbolicAddress::DivI64)
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(false));
+ else if (callee == SymbolicAddress::ModI64)
+ checkDivideSignedOverflowI64(rhs, srcDest, &done, ZeroOnOverflow(true));
+
+ masm.setupUnalignedABICall(temp.reg);
+ masm.passABIArg(srcDest.reg.high);
+ masm.passABIArg(srcDest.reg.low);
+ masm.passABIArg(rhs.reg.high);
+ masm.passABIArg(rhs.reg.low);
+ masm.callWithABI(callee);
+
+ masm.bind(&done);
+
+ freeI32(temp);
+ freeI64(rhs);
+ pushI64(srcDest);
+
+ return true;
+}
+#endif // INT_DIV_I64_CALLOUT
+
+#ifdef I64_TO_FLOAT_CALLOUT
+bool
+BaseCompiler::emitConvertInt64ToFloatingCallout(SymbolicAddress callee, ValType operandType,
+ ValType resultType)
+{
+ sync();
+
+ RegI32 temp = needI32();
+ RegI64 input = popI64();
+
+ FunctionCall call(0);
+
+ masm.setupUnalignedABICall(temp.reg);
+# ifdef JS_NUNBOX32
+ masm.passABIArg(input.reg.high);
+ masm.passABIArg(input.reg.low);
+# else
+ MOZ_CRASH("BaseCompiler platform hook: emitConvertInt64ToFloatingCallout");
+# endif
+ masm.callWithABI(callee, MoveOp::DOUBLE);
+
+ freeI32(temp);
+ freeI64(input);
+
+ RegF64 rv = captureReturnedF64(call);
+
+ if (resultType == ValType::F32) {
+ RegF32 rv2 = needF32();
+ masm.convertDoubleToFloat32(rv.reg, rv2.reg);
+ freeF64(rv);
+ pushF32(rv2);
+ } else {
+ pushF64(rv);
+ }
+
+ return true;
+}
+#endif // I64_TO_FLOAT_CALLOUT
+
+#ifdef FLOAT_TO_I64_CALLOUT
+// `callee` always takes a double, so a float32 input must be converted.
+bool
+BaseCompiler::emitConvertFloatingToInt64Callout(SymbolicAddress callee, ValType operandType,
+ ValType resultType)
+{
+ RegF64 doubleInput;
+ if (operandType == ValType::F32) {
+ doubleInput = needF64();
+ RegF32 input = popF32();
+ masm.convertFloat32ToDouble(input.reg, doubleInput.reg);
+ freeF32(input);
+ } else {
+ doubleInput = popF64();
+ }
+
+ // We may need the value after the call for the ool check.
+ RegF64 otherReg = needF64();
+ moveF64(doubleInput, otherReg);
+ pushF64(otherReg);
+
+ sync();
+
+ RegI32 temp = needI32();
+ FunctionCall call(0);
+
+ masm.setupUnalignedABICall(temp.reg);
+ masm.passABIArg(doubleInput.reg, MoveOp::DOUBLE);
+ masm.callWithABI(callee);
+
+ freeI32(temp);
+ freeF64(doubleInput);
+
+ RegI64 rv = captureReturnedI64();
+
+ RegF64 inputVal = popF64();
+
+ bool isUnsigned = callee == SymbolicAddress::TruncateDoubleToUint64;
+
+ // The OOL check just succeeds or fails, it does not generate a value.
+ OutOfLineCode* ool = new (alloc_) OutOfLineTruncateCheckF32OrF64ToI64(AnyReg(inputVal),
+ isUnsigned,
+ trapOffset());
+ ool = addOutOfLineCode(ool);
+ if (!ool)
+ return false;
+
+ masm.branch64(Assembler::Equal, rv.reg, Imm64(0x8000000000000000), ool->entry());
+ masm.bind(ool->rejoin());
+
+ pushI64(rv);
+ freeF64(inputVal);
+
+ return true;
+}
+#endif // FLOAT_TO_I64_CALLOUT
+
+bool
+BaseCompiler::emitGetLocal()
+{
+ uint32_t slot;
+ if (!iter_.readGetLocal(locals_, &slot))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+    // Local loads are pushed unresolved, i.e., they may be deferred
+ // until needed, until they may be affected by a store, or until a
+ // sync. This is intended to reduce register pressure.
+
+ switch (locals_[slot]) {
+ case ValType::I32:
+ pushLocalI32(slot);
+ break;
+ case ValType::I64:
+ pushLocalI64(slot);
+ break;
+ case ValType::F64:
+ pushLocalF64(slot);
+ break;
+ case ValType::F32:
+ pushLocalF32(slot);
+ break;
+ default:
+ MOZ_CRASH("Local variable type");
+ }
+
+ return true;
+}
+
+bool
+BaseCompiler::emitSetLocal()
+{
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readSetLocal(locals_, &slot, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ switch (locals_[slot]) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ syncLocal(slot);
+ storeToFrameI32(rv.reg, frameOffsetFromSlot(slot, MIRType::Int32));
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ syncLocal(slot);
+ storeToFrameI64(rv.reg, frameOffsetFromSlot(slot, MIRType::Int64));
+ freeI64(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ syncLocal(slot);
+ storeToFrameF64(rv.reg, frameOffsetFromSlot(slot, MIRType::Double));
+ freeF64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ syncLocal(slot);
+ storeToFrameF32(rv.reg, frameOffsetFromSlot(slot, MIRType::Float32));
+ freeF32(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Local variable type");
+ }
+
+ return true;
+}
+
+bool
+BaseCompiler::emitTeeLocal()
+{
+ uint32_t slot;
+ Nothing unused_value;
+ if (!iter_.readTeeLocal(locals_, &slot, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ switch (locals_[slot]) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ syncLocal(slot);
+ storeToFrameI32(rv.reg, frameOffsetFromSlot(slot, MIRType::Int32));
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ syncLocal(slot);
+ storeToFrameI64(rv.reg, frameOffsetFromSlot(slot, MIRType::Int64));
+ pushI64(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ syncLocal(slot);
+ storeToFrameF64(rv.reg, frameOffsetFromSlot(slot, MIRType::Double));
+ pushF64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ syncLocal(slot);
+ storeToFrameF32(rv.reg, frameOffsetFromSlot(slot, MIRType::Float32));
+ pushF32(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Local variable type");
+ }
+
+ return true;
+}
+
+bool
+BaseCompiler::emitGetGlobal()
+{
+ uint32_t id;
+ if (!iter_.readGetGlobal(mg_.globals, &id))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ const GlobalDesc& global = mg_.globals[id];
+
+ if (global.isConstant()) {
+ Val value = global.constantValue();
+ switch (value.type()) {
+ case ValType::I32:
+ pushI32(value.i32());
+ break;
+ case ValType::I64:
+ pushI64(value.i64());
+ break;
+ case ValType::F32:
+ pushF32(value.f32());
+ break;
+ case ValType::F64:
+ pushF64(value.f64());
+ break;
+ default:
+ MOZ_CRASH("Global constant type");
+ }
+ return true;
+ }
+
+ switch (global.type()) {
+ case ValType::I32: {
+ RegI32 rv = needI32();
+ loadGlobalVarI32(global.offset(), rv);
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = needI64();
+ loadGlobalVarI64(global.offset(), rv);
+ pushI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = needF32();
+ loadGlobalVarF32(global.offset(), rv);
+ pushF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = needF64();
+ loadGlobalVarF64(global.offset(), rv);
+ pushF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool
+BaseCompiler::emitSetGlobal()
+{
+ uint32_t id;
+ Nothing unused_value;
+ if (!iter_.readSetGlobal(mg_.globals, &id, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ const GlobalDesc& global = mg_.globals[id];
+
+ switch (global.type()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ storeGlobalVarI32(global.offset(), rv);
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ storeGlobalVarI64(global.offset(), rv);
+ freeI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ storeGlobalVarF32(global.offset(), rv);
+ freeF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ storeGlobalVarF64(global.offset(), rv);
+ freeF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool
+BaseCompiler::emitTeeGlobal()
+{
+ uint32_t id;
+ Nothing unused_value;
+ if (!iter_.readTeeGlobal(mg_.globals, &id, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ const GlobalDesc& global = mg_.globals[id];
+
+ switch (global.type()) {
+ case ValType::I32: {
+ RegI32 rv = popI32();
+ storeGlobalVarI32(global.offset(), rv);
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ storeGlobalVarI64(global.offset(), rv);
+ pushI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ storeGlobalVarF32(global.offset(), rv);
+ pushF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ storeGlobalVarF64(global.offset(), rv);
+ pushF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("Global variable type");
+ break;
+ }
+ return true;
+}
+
+bool
+BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
+{
+ LinearMemoryAddress<Nothing> addr;
+ if (!iter_.readLoad(type, Scalar::byteSize(viewType), &addr))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // TODO / OPTIMIZE (bug 1316831): Disable bounds checking on constant
+ // accesses below the minimum heap length.
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
+
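+    // Some platforms need one or two scratch registers for this kind of
+    // access; allocate them up front and free them after the access is
+    // emitted.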
+ size_t temps = loadStoreTemps(access);
+ RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
+ RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+
+ switch (type) {
+ case ValType::I32: {
+ RegI32 rp = popI32();
+#ifdef JS_CODEGEN_ARM
+ RegI32 rv = access.isUnaligned() ? needI32() : rp;
+#else
+ RegI32 rv = rp;
+#endif
+ if (!load(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ pushI32(rv);
+ if (rp != rv)
+ freeI32(rp);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv;
+ RegI32 rp;
+#ifdef JS_CODEGEN_X86
+ rv = abiReturnRegI64;
+ needI64(rv);
+ rp = popI32();
+#else
+ rp = popI32();
+ rv = needI64();
+#endif
+ if (!load(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ pushI64(rv);
+ freeI32(rp);
+ break;
+ }
+ case ValType::F32: {
+ RegI32 rp = popI32();
+ RegF32 rv = needF32();
+ if (!load(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ pushF32(rv);
+ freeI32(rp);
+ break;
+ }
+ case ValType::F64: {
+ RegI32 rp = popI32();
+ RegF64 rv = needF64();
+ if (!load(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ pushF64(rv);
+ freeI32(rp);
+ break;
+ }
+ default:
+ MOZ_CRASH("load type");
+ break;
+ }
+
+ if (temps >= 1)
+ freeI32(tmp1);
+ if (temps >= 2)
+ freeI32(tmp2);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitStore(ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // TODO / OPTIMIZE (bug 1316831): Disable bounds checking on constant
+ // accesses below the minimum heap length.
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
+
+ size_t temps = loadStoreTemps(access);
+ RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
+ RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+
+ switch (resultType) {
+ case ValType::I32: {
+ RegI32 rp, rv;
+ pop2xI32(&rp, &rv);
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ freeI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ freeI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ freeF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ freeF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("store type");
+ break;
+ }
+
+ if (temps >= 1)
+ freeI32(tmp1);
+ if (temps >= 2)
+ freeI32(tmp2);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitTeeStore(ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // TODO / OPTIMIZE (bug 1316831): Disable bounds checking on constant
+ // accesses below the minimum heap length.
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
+
+ size_t temps = loadStoreTemps(access);
+ RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
+ RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+
+ switch (resultType) {
+ case ValType::I32: {
+ RegI32 rp, rv;
+ pop2xI32(&rp, &rv);
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ pushI32(rv);
+ break;
+ }
+ case ValType::I64: {
+ RegI64 rv = popI64();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ pushI64(rv);
+ break;
+ }
+ case ValType::F32: {
+ RegF32 rv = popF32();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ pushF32(rv);
+ break;
+ }
+ case ValType::F64: {
+ RegF64 rv = popF64();
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rv), tmp1, tmp2))
+ return false;
+ freeI32(rp);
+ pushF64(rv);
+ break;
+ }
+ default:
+ MOZ_CRASH("store type");
+ break;
+ }
+
+ if (temps >= 1)
+ freeI32(tmp1);
+ if (temps >= 2)
+ freeI32(tmp2);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitSelect()
+{
+ ValType type;
+ Nothing unused_trueValue;
+ Nothing unused_falseValue;
+ Nothing unused_condition;
+ if (!iter_.readSelect(&type, &unused_trueValue, &unused_falseValue, &unused_condition))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // I32 condition on top, then false, then true.
+
+ RegI32 rc = popI32();
+ switch (type) {
+ case ValType::I32: {
+ Label done;
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ masm.branch32(Assembler::NotEqual, rc.reg, Imm32(0), &done);
+ moveI32(r1, r0);
+ masm.bind(&done);
+ freeI32(r1);
+ pushI32(r0);
+ break;
+ }
+ case ValType::I64: {
+ Label done;
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ masm.branch32(Assembler::NotEqual, rc.reg, Imm32(0), &done);
+ moveI64(r1, r0);
+ masm.bind(&done);
+ freeI64(r1);
+ pushI64(r0);
+ break;
+ }
+ case ValType::F32: {
+ Label done;
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ masm.branch32(Assembler::NotEqual, rc.reg, Imm32(0), &done);
+ moveF32(r1, r0);
+ masm.bind(&done);
+ freeF32(r1);
+ pushF32(r0);
+ break;
+ }
+ case ValType::F64: {
+ Label done;
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ masm.branch32(Assembler::NotEqual, rc.reg, Imm32(0), &done);
+ moveF64(r1, r0);
+ masm.bind(&done);
+ freeF64(r1);
+ pushF64(r0);
+ break;
+ }
+ default: {
+ MOZ_CRASH("select type");
+ }
+ }
+ freeI32(rc);
+
+ return true;
+}
+
+void
+BaseCompiler::emitCompareI32(JSOp compareOp, MCompare::CompareType compareType)
+{
+ // TODO / OPTIMIZE (bug 1286816): if we want to generate good code for
+    // boolean operators for control, it is possible to delay generating code
+    // here by pushing a compare operation on the stack; after all, it is
+    // side-effect free.  The popping code for br_if will handle it differently,
+    // but other popI32() calls will just force code generation.
+ //
+ // TODO / OPTIMIZE (bug 1286816): Comparisons against constants using the
+ // same popConstant pattern as for add().
+
+ MOZ_ASSERT(compareType == MCompare::Compare_Int32 || compareType == MCompare::Compare_UInt32);
+ RegI32 r0, r1;
+ pop2xI32(&r0, &r1);
+ bool u = compareType == MCompare::Compare_UInt32;
+ switch (compareOp) {
+ case JSOP_EQ:
+ masm.cmp32Set(Assembler::Equal, r0.reg, r1.reg, r0.reg);
+ break;
+ case JSOP_NE:
+ masm.cmp32Set(Assembler::NotEqual, r0.reg, r1.reg, r0.reg);
+ break;
+ case JSOP_LE:
+ masm.cmp32Set(u ? Assembler::BelowOrEqual : Assembler::LessThanOrEqual, r0.reg, r1.reg, r0.reg);
+ break;
+ case JSOP_LT:
+ masm.cmp32Set(u ? Assembler::Below : Assembler::LessThan, r0.reg, r1.reg, r0.reg);
+ break;
+ case JSOP_GE:
+ masm.cmp32Set(u ? Assembler::AboveOrEqual : Assembler::GreaterThanOrEqual, r0.reg, r1.reg, r0.reg);
+ break;
+ case JSOP_GT:
+ masm.cmp32Set(u ? Assembler::Above : Assembler::GreaterThan, r0.reg, r1.reg, r0.reg);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected compare opcode");
+ }
+ freeI32(r1);
+ pushI32(r0);
+}
+
+void
+BaseCompiler::emitCompareI64(JSOp compareOp, MCompare::CompareType compareType)
+{
+ MOZ_ASSERT(compareType == MCompare::Compare_Int64 || compareType == MCompare::Compare_UInt64);
+ RegI64 r0, r1;
+ pop2xI64(&r0, &r1);
+ RegI32 i0(fromI64(r0));
+ bool u = compareType == MCompare::Compare_UInt64;
+ switch (compareOp) {
+ case JSOP_EQ:
+ cmp64Set(Assembler::Equal, r0, r1, i0);
+ break;
+ case JSOP_NE:
+ cmp64Set(Assembler::NotEqual, r0, r1, i0);
+ break;
+ case JSOP_LE:
+ cmp64Set(u ? Assembler::BelowOrEqual : Assembler::LessThanOrEqual, r0, r1, i0);
+ break;
+ case JSOP_LT:
+ cmp64Set(u ? Assembler::Below : Assembler::LessThan, r0, r1, i0);
+ break;
+ case JSOP_GE:
+ cmp64Set(u ? Assembler::AboveOrEqual : Assembler::GreaterThanOrEqual, r0, r1, i0);
+ break;
+ case JSOP_GT:
+ cmp64Set(u ? Assembler::Above : Assembler::GreaterThan, r0, r1, i0);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected compare opcode");
+ }
+ freeI64(r1);
+ freeI64Except(r0, i0);
+ pushI32(i0);
+}
+
+void
+BaseCompiler::emitCompareF32(JSOp compareOp, MCompare::CompareType compareType)
+{
+ MOZ_ASSERT(compareType == MCompare::Compare_Float32);
+ Label across;
+ RegF32 r0, r1;
+ pop2xF32(&r0, &r1);
+ RegI32 i0 = needI32();
+ masm.mov(ImmWord(1), i0.reg);
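+    // Materialize the boolean result: preload 1, then branch over the store
+    // of 0 when the condition holds.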
+ switch (compareOp) {
+ case JSOP_EQ:
+ masm.branchFloat(Assembler::DoubleEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_NE:
+ masm.branchFloat(Assembler::DoubleNotEqualOrUnordered, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_LE:
+ masm.branchFloat(Assembler::DoubleLessThanOrEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_LT:
+ masm.branchFloat(Assembler::DoubleLessThan, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_GE:
+ masm.branchFloat(Assembler::DoubleGreaterThanOrEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_GT:
+ masm.branchFloat(Assembler::DoubleGreaterThan, r0.reg, r1.reg, &across);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected compare opcode");
+ }
+ masm.mov(ImmWord(0), i0.reg);
+ masm.bind(&across);
+ freeF32(r0);
+ freeF32(r1);
+ pushI32(i0);
+}
+
+void
+BaseCompiler::emitCompareF64(JSOp compareOp, MCompare::CompareType compareType)
+{
+ MOZ_ASSERT(compareType == MCompare::Compare_Double);
+ Label across;
+ RegF64 r0, r1;
+ pop2xF64(&r0, &r1);
+ RegI32 i0 = needI32();
+ masm.mov(ImmWord(1), i0.reg);
+ switch (compareOp) {
+ case JSOP_EQ:
+ masm.branchDouble(Assembler::DoubleEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_NE:
+ masm.branchDouble(Assembler::DoubleNotEqualOrUnordered, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_LE:
+ masm.branchDouble(Assembler::DoubleLessThanOrEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_LT:
+ masm.branchDouble(Assembler::DoubleLessThan, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_GE:
+ masm.branchDouble(Assembler::DoubleGreaterThanOrEqual, r0.reg, r1.reg, &across);
+ break;
+ case JSOP_GT:
+ masm.branchDouble(Assembler::DoubleGreaterThan, r0.reg, r1.reg, &across);
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected compare opcode");
+ }
+ masm.mov(ImmWord(0), i0.reg);
+ masm.bind(&across);
+ freeF64(r0);
+ freeF64(r1);
+ pushI32(i0);
+}
+
+bool
+BaseCompiler::emitTeeStoreWithCoercion(ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<Nothing> addr;
+ Nothing unused_value;
+ if (!iter_.readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &unused_value))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ // TODO / OPTIMIZE (bug 1316831): Disable bounds checking on constant
+ // accesses below the minimum heap length.
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, trapIfNotAsmJS());
+
+ size_t temps = loadStoreTemps(access);
+ RegI32 tmp1 = temps >= 1 ? needI32() : invalidI32();
+ RegI32 tmp2 = temps >= 2 ? needI32() : invalidI32();
+
+ if (resultType == ValType::F32 && viewType == Scalar::Float64) {
+ RegF32 rv = popF32();
+ RegF64 rw = needF64();
+ masm.convertFloat32ToDouble(rv.reg, rw.reg);
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rw), tmp1, tmp2))
+ return false;
+ pushF32(rv);
+ freeI32(rp);
+ freeF64(rw);
+ }
+ else if (resultType == ValType::F64 && viewType == Scalar::Float32) {
+ RegF64 rv = popF64();
+ RegF32 rw = needF32();
+ masm.convertDoubleToFloat32(rv.reg, rw.reg);
+ RegI32 rp = popI32();
+ if (!store(access, rp, AnyReg(rw), tmp1, tmp2))
+ return false;
+ pushF64(rv);
+ freeI32(rp);
+ freeF32(rw);
+ }
+ else
+ MOZ_CRASH("unexpected coerced store");
+
+ if (temps >= 1)
+ freeI32(tmp1);
+ if (temps >= 2)
+ freeI32(tmp2);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitGrowMemory()
+{
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ Nothing arg;
+ if (!iter_.readGrowMemory(&arg))
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ sync();
+
+ uint32_t numArgs = 1;
+ size_t stackSpace = stackConsumed(numArgs);
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::System, InterModule::True);
+
+ ABIArg instanceArg = reservePointerArgument(baselineCall);
+
+ startCallArgs(baselineCall, stackArgAreaSize(SigI_));
+ passArg(baselineCall, ValType::I32, peek(0));
+ builtinInstanceMethodCall(SymbolicAddress::GrowMemory, instanceArg, baselineCall);
+ endCall(baselineCall);
+
+ popValueStackBy(numArgs);
+ masm.freeStack(stackSpace);
+
+ pushReturned(baselineCall, ExprType::I32);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitCurrentMemory()
+{
+ uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
+
+ if (!iter_.readCurrentMemory())
+ return false;
+
+ if (deadCode_)
+ return true;
+
+ sync();
+
+ FunctionCall baselineCall(lineOrBytecode);
+ beginCall(baselineCall, UseABI::System, InterModule::False);
+
+ ABIArg instanceArg = reservePointerArgument(baselineCall);
+
+ startCallArgs(baselineCall, stackArgAreaSize(Sig_));
+ builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, instanceArg, baselineCall);
+ endCall(baselineCall);
+
+ pushReturned(baselineCall, ExprType::I32);
+
+ return true;
+}
+
+bool
+BaseCompiler::emitBody()
+{
+ uint32_t overhead = 0;
+
+ for (;;) {
+
+ Nothing unused_a, unused_b;
+
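+// The emit* macros below let the iterator validate and consume each opcode's
+// operands first, and dispatch to the emitter only when the code is reachable.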
+#define emitBinary(doEmit, type) \
+ iter_.readBinary(type, &unused_a, &unused_b) && (deadCode_ || (doEmit(), true))
+
+#define emitUnary(doEmit, type) \
+ iter_.readUnary(type, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define emitComparison(doEmit, operandType, compareOp, compareType) \
+ iter_.readComparison(operandType, &unused_a, &unused_b) && \
+ (deadCode_ || (doEmit(compareOp, compareType), true))
+
+#define emitConversion(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || (doEmit(), true))
+
+#define emitConversionOOM(doEmit, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && (deadCode_ || doEmit())
+
+#define emitCalloutConversionOOM(doEmit, symbol, inType, outType) \
+ iter_.readConversion(inType, outType, &unused_a) && \
+ (deadCode_ || doEmit(symbol, inType, outType))
+
+#define CHECK(E) if (!(E)) goto done
+#define NEXT() continue
+#define CHECK_NEXT(E) if (!(E)) goto done; continue
+
+ // TODO / EVALUATE (bug 1316845): Not obvious that this attempt at
+ // reducing overhead is really paying off relative to making the check
+ // every iteration.
+
+ if (overhead == 0) {
+ // Check every 50 expressions -- a happy medium between
+ // memory usage and checking overhead.
+ overhead = 50;
+
+ // Checking every 50 expressions should be safe, as the
+ // baseline JIT does very little allocation per expression.
+ CHECK(alloc_.ensureBallast());
+
+ // The pushiest opcode is LOOP, which pushes two values
+ // per instance.
+ CHECK(stk_.reserve(stk_.length() + overhead * 2));
+ }
+
+ overhead--;
+
+ if (done())
+ return true;
+
+ uint16_t op;
+ CHECK(iter_.readOp(&op));
+
+ switch (op) {
+ // Control opcodes
+ case uint16_t(Op::Nop):
+ CHECK(iter_.readNop());
+ NEXT();
+ case uint16_t(Op::Drop):
+ CHECK_NEXT(emitDrop());
+ case uint16_t(Op::Block):
+ CHECK_NEXT(emitBlock());
+ case uint16_t(Op::Loop):
+ CHECK_NEXT(emitLoop());
+ case uint16_t(Op::If):
+ CHECK_NEXT(emitIf());
+ case uint16_t(Op::Else):
+ CHECK_NEXT(emitElse());
+ case uint16_t(Op::End):
+ CHECK_NEXT(emitEnd());
+ case uint16_t(Op::Br):
+ CHECK_NEXT(emitBr());
+ case uint16_t(Op::BrIf):
+ CHECK_NEXT(emitBrIf());
+ case uint16_t(Op::BrTable):
+ CHECK_NEXT(emitBrTable());
+ case uint16_t(Op::Return):
+ CHECK_NEXT(emitReturn());
+ case uint16_t(Op::Unreachable):
+ CHECK(iter_.readUnreachable());
+ if (!deadCode_) {
+ unreachableTrap();
+ deadCode_ = true;
+ popValueStackTo(ctl_.back().stackSize);
+ }
+ NEXT();
+
+ // Calls
+ case uint16_t(Op::Call):
+ CHECK_NEXT(emitCall());
+ case uint16_t(Op::CallIndirect):
+ CHECK_NEXT(emitCallIndirect(/* oldStyle = */ false));
+ case uint16_t(Op::OldCallIndirect):
+ CHECK_NEXT(emitCallIndirect(/* oldStyle = */ true));
+
+ // Locals and globals
+ case uint16_t(Op::GetLocal):
+ CHECK_NEXT(emitGetLocal());
+ case uint16_t(Op::SetLocal):
+ CHECK_NEXT(emitSetLocal());
+ case uint16_t(Op::TeeLocal):
+ CHECK_NEXT(emitTeeLocal());
+ case uint16_t(Op::GetGlobal):
+ CHECK_NEXT(emitGetGlobal());
+ case uint16_t(Op::SetGlobal):
+ CHECK_NEXT(emitSetGlobal());
+ case uint16_t(Op::TeeGlobal):
+ CHECK_NEXT(emitTeeGlobal());
+
+ // Select
+ case uint16_t(Op::Select):
+ CHECK_NEXT(emitSelect());
+
+ // I32
+ case uint16_t(Op::I32Const): {
+ int32_t i32;
+ CHECK(iter_.readI32Const(&i32));
+ if (!deadCode_)
+ pushI32(i32);
+ NEXT();
+ }
+ case uint16_t(Op::I32Add):
+ CHECK_NEXT(emitBinary(emitAddI32, ValType::I32));
+ case uint16_t(Op::I32Sub):
+ CHECK_NEXT(emitBinary(emitSubtractI32, ValType::I32));
+ case uint16_t(Op::I32Mul):
+ CHECK_NEXT(emitBinary(emitMultiplyI32, ValType::I32));
+ case uint16_t(Op::I32DivS):
+ CHECK_NEXT(emitBinary(emitQuotientI32, ValType::I32));
+ case uint16_t(Op::I32DivU):
+ CHECK_NEXT(emitBinary(emitQuotientU32, ValType::I32));
+ case uint16_t(Op::I32RemS):
+ CHECK_NEXT(emitBinary(emitRemainderI32, ValType::I32));
+ case uint16_t(Op::I32RemU):
+ CHECK_NEXT(emitBinary(emitRemainderU32, ValType::I32));
+ case uint16_t(Op::I32Min):
+ CHECK_NEXT(emitBinary(emitMinI32, ValType::I32));
+ case uint16_t(Op::I32Max):
+ CHECK_NEXT(emitBinary(emitMaxI32, ValType::I32));
+ case uint16_t(Op::I32Eqz):
+ CHECK_NEXT(emitConversion(emitEqzI32, ValType::I32, ValType::I32));
+ case uint16_t(Op::I32TruncSF32):
+ CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI32<false>, ValType::F32, ValType::I32));
+ case uint16_t(Op::I32TruncUF32):
+ CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI32<true>, ValType::F32, ValType::I32));
+ case uint16_t(Op::I32TruncSF64):
+ CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI32<false>, ValType::F64, ValType::I32));
+ case uint16_t(Op::I32TruncUF64):
+ CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI32<true>, ValType::F64, ValType::I32));
+ case uint16_t(Op::I32WrapI64):
+ CHECK_NEXT(emitConversion(emitWrapI64ToI32, ValType::I64, ValType::I32));
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK_NEXT(emitConversion(emitReinterpretF32AsI32, ValType::F32, ValType::I32));
+ case uint16_t(Op::I32Clz):
+ CHECK_NEXT(emitUnary(emitClzI32, ValType::I32));
+ case uint16_t(Op::I32Ctz):
+ CHECK_NEXT(emitUnary(emitCtzI32, ValType::I32));
+ case uint16_t(Op::I32Popcnt):
+ CHECK_NEXT(emitUnary(emitPopcntI32, ValType::I32));
+ case uint16_t(Op::I32Abs):
+ CHECK_NEXT(emitUnary(emitAbsI32, ValType::I32));
+ case uint16_t(Op::I32Neg):
+ CHECK_NEXT(emitUnary(emitNegateI32, ValType::I32));
+ case uint16_t(Op::I32Or):
+ CHECK_NEXT(emitBinary(emitOrI32, ValType::I32));
+ case uint16_t(Op::I32And):
+ CHECK_NEXT(emitBinary(emitAndI32, ValType::I32));
+ case uint16_t(Op::I32Xor):
+ CHECK_NEXT(emitBinary(emitXorI32, ValType::I32));
+ case uint16_t(Op::I32Shl):
+ CHECK_NEXT(emitBinary(emitShlI32, ValType::I32));
+ case uint16_t(Op::I32ShrS):
+ CHECK_NEXT(emitBinary(emitShrI32, ValType::I32));
+ case uint16_t(Op::I32ShrU):
+ CHECK_NEXT(emitBinary(emitShrU32, ValType::I32));
+ case uint16_t(Op::I32BitNot):
+ CHECK_NEXT(emitUnary(emitBitNotI32, ValType::I32));
+ case uint16_t(Op::I32Load8S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Load8U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint8));
+ case uint16_t(Op::I32Load16S):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Load16U):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Uint16));
+ case uint16_t(Op::I32Load):
+ CHECK_NEXT(emitLoad(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Store8):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32TeeStore8):
+ CHECK_NEXT(emitTeeStore(ValType::I32, Scalar::Int8));
+ case uint16_t(Op::I32Store16):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32TeeStore16):
+ CHECK_NEXT(emitTeeStore(ValType::I32, Scalar::Int16));
+ case uint16_t(Op::I32Store):
+ CHECK_NEXT(emitStore(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32TeeStore):
+ CHECK_NEXT(emitTeeStore(ValType::I32, Scalar::Int32));
+ case uint16_t(Op::I32Rotr):
+ CHECK_NEXT(emitBinary(emitRotrI32, ValType::I32));
+ case uint16_t(Op::I32Rotl):
+ CHECK_NEXT(emitBinary(emitRotlI32, ValType::I32));
+
+ // I64
+ case uint16_t(Op::I64Const): {
+ int64_t i64;
+ CHECK(iter_.readI64Const(&i64));
+ if (!deadCode_)
+ pushI64(i64);
+ NEXT();
+ }
+ case uint16_t(Op::I64Add):
+ CHECK_NEXT(emitBinary(emitAddI64, ValType::I64));
+ case uint16_t(Op::I64Sub):
+ CHECK_NEXT(emitBinary(emitSubtractI64, ValType::I64));
+ case uint16_t(Op::I64Mul):
+ CHECK_NEXT(emitBinary(emitMultiplyI64, ValType::I64));
+ case uint16_t(Op::I64DivS):
+#ifdef INT_DIV_I64_CALLOUT
+ CHECK_NEXT(emitDivOrModI64BuiltinCall(SymbolicAddress::DivI64, ValType::I64));
+#else
+ CHECK_NEXT(emitBinary(emitQuotientI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64DivU):
+#ifdef INT_DIV_I64_CALLOUT
+ CHECK_NEXT(emitDivOrModI64BuiltinCall(SymbolicAddress::UDivI64, ValType::I64));
+#else
+ CHECK_NEXT(emitBinary(emitQuotientU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemS):
+#ifdef INT_DIV_I64_CALLOUT
+ CHECK_NEXT(emitDivOrModI64BuiltinCall(SymbolicAddress::ModI64, ValType::I64));
+#else
+ CHECK_NEXT(emitBinary(emitRemainderI64, ValType::I64));
+#endif
+ case uint16_t(Op::I64RemU):
+#ifdef INT_DIV_I64_CALLOUT
+ CHECK_NEXT(emitDivOrModI64BuiltinCall(SymbolicAddress::UModI64, ValType::I64));
+#else
+ CHECK_NEXT(emitBinary(emitRemainderU64, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncSF32):
+#ifdef FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F32, ValType::I64));
+#else
+ CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI64<false>, ValType::F32, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncUF32):
+#ifdef FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64,
+ ValType::F32, ValType::I64));
+#else
+ CHECK_NEXT(emitConversionOOM(emitTruncateF32ToI64<true>, ValType::F32, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncSF64):
+#ifdef FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToInt64,
+ ValType::F64, ValType::I64));
+#else
+ CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI64<false>, ValType::F64, ValType::I64));
+#endif
+ case uint16_t(Op::I64TruncUF64):
+#ifdef FLOAT_TO_I64_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertFloatingToInt64Callout,
+ SymbolicAddress::TruncateDoubleToUint64,
+ ValType::F64, ValType::I64));
+#else
+ CHECK_NEXT(emitConversionOOM(emitTruncateF64ToI64<true>, ValType::F64, ValType::I64));
+#endif
+ case uint16_t(Op::I64ExtendSI32):
+ CHECK_NEXT(emitConversion(emitExtendI32ToI64, ValType::I32, ValType::I64));
+ case uint16_t(Op::I64ExtendUI32):
+ CHECK_NEXT(emitConversion(emitExtendU32ToI64, ValType::I32, ValType::I64));
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK_NEXT(emitConversion(emitReinterpretF64AsI64, ValType::F64, ValType::I64));
+ case uint16_t(Op::I64Or):
+ CHECK_NEXT(emitBinary(emitOrI64, ValType::I64));
+ case uint16_t(Op::I64And):
+ CHECK_NEXT(emitBinary(emitAndI64, ValType::I64));
+ case uint16_t(Op::I64Xor):
+ CHECK_NEXT(emitBinary(emitXorI64, ValType::I64));
+ case uint16_t(Op::I64Shl):
+ CHECK_NEXT(emitBinary(emitShlI64, ValType::I64));
+ case uint16_t(Op::I64ShrS):
+ CHECK_NEXT(emitBinary(emitShrI64, ValType::I64));
+ case uint16_t(Op::I64ShrU):
+ CHECK_NEXT(emitBinary(emitShrU64, ValType::I64));
+ case uint16_t(Op::I64Rotr):
+ CHECK_NEXT(emitBinary(emitRotrI64, ValType::I64));
+ case uint16_t(Op::I64Rotl):
+ CHECK_NEXT(emitBinary(emitRotlI64, ValType::I64));
+ case uint16_t(Op::I64Clz):
+ CHECK_NEXT(emitUnary(emitClzI64, ValType::I64));
+ case uint16_t(Op::I64Ctz):
+ CHECK_NEXT(emitUnary(emitCtzI64, ValType::I64));
+ case uint16_t(Op::I64Popcnt):
+ CHECK_NEXT(emitUnary(emitPopcntI64, ValType::I64));
+ case uint16_t(Op::I64Eqz):
+ CHECK_NEXT(emitConversion(emitEqzI64, ValType::I64, ValType::I32));
+ case uint16_t(Op::I64Load8S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Load16S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Load32S):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Load8U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint8));
+ case uint16_t(Op::I64Load16U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint16));
+ case uint16_t(Op::I64Load32U):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Uint32));
+ case uint16_t(Op::I64Load):
+ CHECK_NEXT(emitLoad(ValType::I64, Scalar::Int64));
+ case uint16_t(Op::I64Store8):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64TeeStore8):
+ CHECK_NEXT(emitTeeStore(ValType::I64, Scalar::Int8));
+ case uint16_t(Op::I64Store16):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64TeeStore16):
+ CHECK_NEXT(emitTeeStore(ValType::I64, Scalar::Int16));
+ case uint16_t(Op::I64Store32):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64TeeStore32):
+ CHECK_NEXT(emitTeeStore(ValType::I64, Scalar::Int32));
+ case uint16_t(Op::I64Store):
+ CHECK_NEXT(emitStore(ValType::I64, Scalar::Int64));
+ case uint16_t(Op::I64TeeStore):
+ CHECK_NEXT(emitTeeStore(ValType::I64, Scalar::Int64));
+
+ // F32
+ case uint16_t(Op::F32Const): {
+ RawF32 f32;
+ CHECK(iter_.readF32Const(&f32));
+ if (!deadCode_)
+ pushF32(f32);
+ NEXT();
+ }
+ case uint16_t(Op::F32Add):
+ CHECK_NEXT(emitBinary(emitAddF32, ValType::F32));
+ case uint16_t(Op::F32Sub):
+ CHECK_NEXT(emitBinary(emitSubtractF32, ValType::F32));
+ case uint16_t(Op::F32Mul):
+ CHECK_NEXT(emitBinary(emitMultiplyF32, ValType::F32));
+ case uint16_t(Op::F32Div):
+ CHECK_NEXT(emitBinary(emitDivideF32, ValType::F32));
+ case uint16_t(Op::F32Min):
+ CHECK_NEXT(emitBinary(emitMinF32, ValType::F32));
+ case uint16_t(Op::F32Max):
+ CHECK_NEXT(emitBinary(emitMaxF32, ValType::F32));
+ case uint16_t(Op::F32Neg):
+ CHECK_NEXT(emitUnary(emitNegateF32, ValType::F32));
+ case uint16_t(Op::F32Abs):
+ CHECK_NEXT(emitUnary(emitAbsF32, ValType::F32));
+ case uint16_t(Op::F32Sqrt):
+ CHECK_NEXT(emitUnary(emitSqrtF32, ValType::F32));
+ case uint16_t(Op::F32Ceil):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::CeilF, ValType::F32));
+ case uint16_t(Op::F32Floor):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::FloorF, ValType::F32));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK_NEXT(emitConversion(emitConvertF64ToF32, ValType::F64, ValType::F32));
+ case uint16_t(Op::F32ConvertSI32):
+ CHECK_NEXT(emitConversion(emitConvertI32ToF32, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertUI32):
+ CHECK_NEXT(emitConversion(emitConvertU32ToF32, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32ConvertSI64):
+#ifdef I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+ SymbolicAddress::Int64ToFloatingPoint,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(emitConversion(emitConvertI64ToF32, ValType::I64, ValType::F32));
+#endif
+ case uint16_t(Op::F32ConvertUI64):
+#ifdef I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+ SymbolicAddress::Uint64ToFloatingPoint,
+ ValType::I64, ValType::F32));
+#else
+ CHECK_NEXT(emitConversion(emitConvertU64ToF32, ValType::I64, ValType::F32));
+#endif
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK_NEXT(emitConversion(emitReinterpretI32AsF32, ValType::I32, ValType::F32));
+ case uint16_t(Op::F32Load):
+ CHECK_NEXT(emitLoad(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32Store):
+ CHECK_NEXT(emitStore(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32TeeStore):
+ CHECK_NEXT(emitTeeStore(ValType::F32, Scalar::Float32));
+ case uint16_t(Op::F32TeeStoreF64):
+ CHECK_NEXT(emitTeeStoreWithCoercion(ValType::F32, Scalar::Float64));
+ case uint16_t(Op::F32CopySign):
+ CHECK_NEXT(emitBinary(emitCopysignF32, ValType::F32));
+ case uint16_t(Op::F32Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntF, ValType::F32));
+ case uint16_t(Op::F32Trunc):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::TruncF, ValType::F32));
+
+ // F64
+ case uint16_t(Op::F64Const): {
+ RawF64 f64;
+ CHECK(iter_.readF64Const(&f64));
+ if (!deadCode_)
+ pushF64(f64);
+ NEXT();
+ }
+ case uint16_t(Op::F64Add):
+ CHECK_NEXT(emitBinary(emitAddF64, ValType::F64));
+ case uint16_t(Op::F64Sub):
+ CHECK_NEXT(emitBinary(emitSubtractF64, ValType::F64));
+ case uint16_t(Op::F64Mul):
+ CHECK_NEXT(emitBinary(emitMultiplyF64, ValType::F64));
+ case uint16_t(Op::F64Div):
+ CHECK_NEXT(emitBinary(emitDivideF64, ValType::F64));
+ case uint16_t(Op::F64Mod):
+ CHECK_NEXT(emitBinaryMathBuiltinCall(SymbolicAddress::ModD, ValType::F64));
+ case uint16_t(Op::F64Min):
+ CHECK_NEXT(emitBinary(emitMinF64, ValType::F64));
+ case uint16_t(Op::F64Max):
+ CHECK_NEXT(emitBinary(emitMaxF64, ValType::F64));
+ case uint16_t(Op::F64Neg):
+ CHECK_NEXT(emitUnary(emitNegateF64, ValType::F64));
+ case uint16_t(Op::F64Abs):
+ CHECK_NEXT(emitUnary(emitAbsF64, ValType::F64));
+ case uint16_t(Op::F64Sqrt):
+ CHECK_NEXT(emitUnary(emitSqrtF64, ValType::F64));
+ case uint16_t(Op::F64Ceil):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::CeilD, ValType::F64));
+ case uint16_t(Op::F64Floor):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::FloorD, ValType::F64));
+ case uint16_t(Op::F64Sin):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::SinD, ValType::F64));
+ case uint16_t(Op::F64Cos):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::CosD, ValType::F64));
+ case uint16_t(Op::F64Tan):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::TanD, ValType::F64));
+ case uint16_t(Op::F64Asin):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::ASinD, ValType::F64));
+ case uint16_t(Op::F64Acos):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::ACosD, ValType::F64));
+ case uint16_t(Op::F64Atan):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::ATanD, ValType::F64));
+ case uint16_t(Op::F64Exp):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::ExpD, ValType::F64));
+ case uint16_t(Op::F64Log):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::LogD, ValType::F64));
+ case uint16_t(Op::F64Pow):
+ CHECK_NEXT(emitBinaryMathBuiltinCall(SymbolicAddress::PowD, ValType::F64));
+ case uint16_t(Op::F64Atan2):
+ CHECK_NEXT(emitBinaryMathBuiltinCall(SymbolicAddress::ATan2D, ValType::F64));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK_NEXT(emitConversion(emitConvertF32ToF64, ValType::F32, ValType::F64));
+ case uint16_t(Op::F64ConvertSI32):
+ CHECK_NEXT(emitConversion(emitConvertI32ToF64, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertUI32):
+ CHECK_NEXT(emitConversion(emitConvertU32ToF64, ValType::I32, ValType::F64));
+ case uint16_t(Op::F64ConvertSI64):
+#ifdef I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+ SymbolicAddress::Int64ToFloatingPoint,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(emitConversion(emitConvertI64ToF64, ValType::I64, ValType::F64));
+#endif
+ case uint16_t(Op::F64ConvertUI64):
+#ifdef I64_TO_FLOAT_CALLOUT
+ CHECK_NEXT(emitCalloutConversionOOM(emitConvertInt64ToFloatingCallout,
+ SymbolicAddress::Uint64ToFloatingPoint,
+ ValType::I64, ValType::F64));
+#else
+ CHECK_NEXT(emitConversion(emitConvertU64ToF64, ValType::I64, ValType::F64));
+#endif
+ case uint16_t(Op::F64Load):
+ CHECK_NEXT(emitLoad(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64Store):
+ CHECK_NEXT(emitStore(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64TeeStore):
+ CHECK_NEXT(emitTeeStore(ValType::F64, Scalar::Float64));
+ case uint16_t(Op::F64TeeStoreF32):
+ CHECK_NEXT(emitTeeStoreWithCoercion(ValType::F64, Scalar::Float32));
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK_NEXT(emitConversion(emitReinterpretI64AsF64, ValType::I64, ValType::F64));
+ case uint16_t(Op::F64CopySign):
+ CHECK_NEXT(emitBinary(emitCopysignF64, ValType::F64));
+ case uint16_t(Op::F64Nearest):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::NearbyIntD, ValType::F64));
+ case uint16_t(Op::F64Trunc):
+ CHECK_NEXT(emitUnaryMathBuiltinCall(SymbolicAddress::TruncD, ValType::F64));
+
+ // Comparisons
+ case uint16_t(Op::I32Eq):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_EQ, MCompare::Compare_Int32));
+ case uint16_t(Op::I32Ne):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_NE, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtS):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_LT, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LeS):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_LE, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GtS):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_GT, MCompare::Compare_Int32));
+ case uint16_t(Op::I32GeS):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_GE, MCompare::Compare_Int32));
+ case uint16_t(Op::I32LtU):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_LT, MCompare::Compare_UInt32));
+ case uint16_t(Op::I32LeU):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_LE, MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GtU):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_GT, MCompare::Compare_UInt32));
+ case uint16_t(Op::I32GeU):
+ CHECK_NEXT(emitComparison(emitCompareI32, ValType::I32, JSOP_GE, MCompare::Compare_UInt32));
+ case uint16_t(Op::I64Eq):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_EQ, MCompare::Compare_Int64));
+ case uint16_t(Op::I64Ne):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_NE, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtS):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_LT, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LeS):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_LE, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GtS):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_GT, MCompare::Compare_Int64));
+ case uint16_t(Op::I64GeS):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_GE, MCompare::Compare_Int64));
+ case uint16_t(Op::I64LtU):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_LT, MCompare::Compare_UInt64));
+ case uint16_t(Op::I64LeU):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_LE, MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GtU):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_GT, MCompare::Compare_UInt64));
+ case uint16_t(Op::I64GeU):
+ CHECK_NEXT(emitComparison(emitCompareI64, ValType::I64, JSOP_GE, MCompare::Compare_UInt64));
+ case uint16_t(Op::F32Eq):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_EQ, MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ne):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_NE, MCompare::Compare_Float32));
+ case uint16_t(Op::F32Lt):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_LT, MCompare::Compare_Float32));
+ case uint16_t(Op::F32Le):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_LE, MCompare::Compare_Float32));
+ case uint16_t(Op::F32Gt):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_GT, MCompare::Compare_Float32));
+ case uint16_t(Op::F32Ge):
+ CHECK_NEXT(emitComparison(emitCompareF32, ValType::F32, JSOP_GE, MCompare::Compare_Float32));
+ case uint16_t(Op::F64Eq):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_EQ, MCompare::Compare_Double));
+ case uint16_t(Op::F64Ne):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_NE, MCompare::Compare_Double));
+ case uint16_t(Op::F64Lt):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_LT, MCompare::Compare_Double));
+ case uint16_t(Op::F64Le):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_LE, MCompare::Compare_Double));
+ case uint16_t(Op::F64Gt):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_GT, MCompare::Compare_Double));
+ case uint16_t(Op::F64Ge):
+ CHECK_NEXT(emitComparison(emitCompareF64, ValType::F64, JSOP_GE, MCompare::Compare_Double));
+
+ // SIMD
+#define CASE(TYPE, OP, SIGN) \
+ case uint16_t(Op::TYPE##OP): \
+ MOZ_CRASH("Unimplemented SIMD");
+#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
+#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
+#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
+#define F32x4CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable)
+#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
+#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
+#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
+#define ENUMERATE(TYPE, FORALL, DO) \
+ case uint16_t(Op::TYPE##Constructor): \
+ FORALL(DO)
+
+ ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
+ ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
+ ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
+ ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
+ ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
+ ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
+ ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
+
+#undef CASE
+#undef I8x16CASE
+#undef I16x8CASE
+#undef I32x4CASE
+#undef F32x4CASE
+#undef B8x16CASE
+#undef B16x8CASE
+#undef B32x4CASE
+#undef ENUMERATE
+
+ case uint16_t(Op::I8x16Const):
+ case uint16_t(Op::I16x8Const):
+ case uint16_t(Op::I32x4Const):
+ case uint16_t(Op::F32x4Const):
+ case uint16_t(Op::B8x16Const):
+ case uint16_t(Op::B16x8Const):
+ case uint16_t(Op::B32x4Const):
+ case uint16_t(Op::I32x4shiftRightByScalarU):
+ case uint16_t(Op::I8x16addSaturateU):
+ case uint16_t(Op::I8x16subSaturateU):
+ case uint16_t(Op::I8x16shiftRightByScalarU):
+ case uint16_t(Op::I8x16lessThanU):
+ case uint16_t(Op::I8x16lessThanOrEqualU):
+ case uint16_t(Op::I8x16greaterThanU):
+ case uint16_t(Op::I8x16greaterThanOrEqualU):
+ case uint16_t(Op::I8x16extractLaneU):
+ case uint16_t(Op::I16x8addSaturateU):
+ case uint16_t(Op::I16x8subSaturateU):
+ case uint16_t(Op::I16x8shiftRightByScalarU):
+ case uint16_t(Op::I16x8lessThanU):
+ case uint16_t(Op::I16x8lessThanOrEqualU):
+ case uint16_t(Op::I16x8greaterThanU):
+ case uint16_t(Op::I16x8greaterThanOrEqualU):
+ case uint16_t(Op::I16x8extractLaneU):
+ case uint16_t(Op::I32x4lessThanU):
+ case uint16_t(Op::I32x4lessThanOrEqualU):
+ case uint16_t(Op::I32x4greaterThanU):
+ case uint16_t(Op::I32x4greaterThanOrEqualU):
+ case uint16_t(Op::I32x4fromFloat32x4U):
+ MOZ_CRASH("Unimplemented SIMD");
+
+ // Atomics
+ case uint16_t(Op::I32AtomicsLoad):
+ case uint16_t(Op::I32AtomicsStore):
+ case uint16_t(Op::I32AtomicsBinOp):
+ case uint16_t(Op::I32AtomicsCompareExchange):
+ case uint16_t(Op::I32AtomicsExchange):
+ MOZ_CRASH("Unimplemented Atomics");
+
+ // Memory Related
+ case uint16_t(Op::GrowMemory):
+ CHECK_NEXT(emitGrowMemory());
+ case uint16_t(Op::CurrentMemory):
+ CHECK_NEXT(emitCurrentMemory());
+ }
+
+ MOZ_CRASH("unexpected wasm opcode");
+
+#undef CHECK
+#undef NEXT
+#undef CHECK_NEXT
+#undef emitBinary
+#undef emitUnary
+#undef emitComparison
+#undef emitConversion
+#undef emitConversionOOM
+#undef emitCalloutConversionOOM
+ }
+
+done:
+ return false;
+}
+
+bool
+BaseCompiler::emitFunction()
+{
+ // emitBody() will ensure that there is enough memory reserved in the
+ // vector for infallible allocation to succeed within the compiler, but we
+ // need a little headroom for the initial pushControl(), which pushes a
+ // void value onto the value stack.
+
+ if (!stk_.reserve(8))
+ return false;
+
+ const Sig& sig = func_.sig();
+
+ if (!iter_.readFunctionStart(sig.ret()))
+ return false;
+
+ beginFunction();
+
+ UniquePooledLabel functionEnd(newLabel());
+ if (!pushControl(&functionEnd))
+ return false;
+
+ if (!emitBody())
+ return false;
+
+ if (!iter_.readFunctionEnd())
+ return false;
+
+ if (!endFunction())
+ return false;
+
+ return true;
+}
+
+BaseCompiler::BaseCompiler(const ModuleGeneratorData& mg,
+ Decoder& decoder,
+ const FuncBytes& func,
+ const ValTypeVector& locals,
+ FuncCompileResults& compileResults)
+ : mg_(mg),
+ iter_(decoder, func.lineOrBytecode()),
+ func_(func),
+ lastReadCallSite_(0),
+ alloc_(compileResults.alloc()),
+ locals_(locals),
+ localSize_(0),
+ varLow_(0),
+ varHigh_(0),
+ maxFramePushed_(0),
+ deadCode_(false),
+ prologueTrapOffset_(trapOffset()),
+ compileResults_(compileResults),
+ masm(compileResults_.masm()),
+ availGPR_(GeneralRegisterSet::All()),
+ availFPU_(FloatRegisterSet::All()),
+#ifdef DEBUG
+ scratchRegisterTaken_(false),
+#endif
+ tlsSlot_(0),
+#ifdef JS_CODEGEN_X64
+ specific_rax(RegI64(Register64(rax))),
+ specific_rcx(RegI64(Register64(rcx))),
+ specific_rdx(RegI64(Register64(rdx))),
+#endif
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86)
+ specific_eax(RegI32(eax)),
+ specific_ecx(RegI32(ecx)),
+ specific_edx(RegI32(edx)),
+#endif
+#ifdef JS_CODEGEN_X86
+ singleByteRegs_(GeneralRegisterSet(Registers::SingleByteRegs)),
+ abiReturnRegI64(RegI64(Register64(edx, eax))),
+#endif
+#ifdef JS_CODEGEN_ARM
+ abiReturnRegI64(ReturnReg64),
+#endif
+ joinRegI32(RegI32(ReturnReg)),
+ joinRegI64(RegI64(ReturnReg64)),
+ joinRegF32(RegF32(ReturnFloat32Reg)),
+ joinRegF64(RegF64(ReturnDoubleReg))
+{
+ // jit/RegisterAllocator.h: RegisterAllocator::RegisterAllocator()
+
+#if defined(JS_CODEGEN_X64)
+ availGPR_.take(HeapReg);
+#elif defined(JS_CODEGEN_ARM)
+ availGPR_.take(HeapReg);
+ availGPR_.take(GlobalReg);
+ availGPR_.take(ScratchRegARM);
+#elif defined(JS_CODEGEN_ARM64)
+ availGPR_.take(HeapReg);
+ availGPR_.take(HeapLenReg);
+ availGPR_.take(GlobalReg);
+#elif defined(JS_CODEGEN_X86)
+ availGPR_.take(ScratchRegX86);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ availGPR_.take(HeapReg);
+ availGPR_.take(GlobalReg);
+#endif
+
+ labelPool_.setAllocator(alloc_);
+}
+
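+// init() builds the small argument-type vectors used for builtin callouts
+// (e.g. SigI_ for grow_memory above) and lays out stack slots for incoming
+// arguments, the TLS pointer, and declared locals.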
+bool
+BaseCompiler::init()
+{
+ if (!SigDD_.append(ValType::F64) || !SigDD_.append(ValType::F64))
+ return false;
+ if (!SigD_.append(ValType::F64))
+ return false;
+ if (!SigF_.append(ValType::F32))
+ return false;
+ if (!SigI_.append(ValType::I32))
+ return false;
+ if (!SigI64I64_.append(ValType::I64) || !SigI64I64_.append(ValType::I64))
+ return false;
+
+ const ValTypeVector& args = func_.sig().args();
+
+ // localInfo_ contains an entry for every local in locals_, followed by
+ // entries for special locals. Currently the only special local is the TLS
+ // pointer.
+ tlsSlot_ = locals_.length();
+ if (!localInfo_.resize(locals_.length() + 1))
+ return false;
+
+ localSize_ = 0;
+
+ for (ABIArgIter<const ValTypeVector> i(args); !i.done(); i++) {
+ Local& l = localInfo_[i.index()];
+ switch (i.mirType()) {
+ case MIRType::Int32:
+ if (i->argInRegister())
+ l.init(MIRType::Int32, pushLocal(4));
+ else
+ l.init(MIRType::Int32, -(i->offsetFromArgBase() + sizeof(Frame)));
+ break;
+ case MIRType::Int64:
+ if (i->argInRegister())
+ l.init(MIRType::Int64, pushLocal(8));
+ else
+ l.init(MIRType::Int64, -(i->offsetFromArgBase() + sizeof(Frame)));
+ break;
+ case MIRType::Double:
+ if (i->argInRegister())
+ l.init(MIRType::Double, pushLocal(8));
+ else
+ l.init(MIRType::Double, -(i->offsetFromArgBase() + sizeof(Frame)));
+ break;
+ case MIRType::Float32:
+ if (i->argInRegister())
+ l.init(MIRType::Float32, pushLocal(4));
+ else
+ l.init(MIRType::Float32, -(i->offsetFromArgBase() + sizeof(Frame)));
+ break;
+ default:
+ MOZ_CRASH("Argument type");
+ }
+ }
+
+ // Reserve a stack slot for the TLS pointer outside the varLow..varHigh
+ // range so it isn't zero-filled like the normal locals.
+ localInfo_[tlsSlot_].init(MIRType::Pointer, pushLocal(sizeof(void*)));
+
+ varLow_ = localSize_;
+
+ for (size_t i = args.length(); i < locals_.length(); i++) {
+ Local& l = localInfo_[i];
+ switch (locals_[i]) {
+ case ValType::I32:
+ l.init(MIRType::Int32, pushLocal(4));
+ break;
+ case ValType::F32:
+ l.init(MIRType::Float32, pushLocal(4));
+ break;
+ case ValType::F64:
+ l.init(MIRType::Double, pushLocal(8));
+ break;
+ case ValType::I64:
+ l.init(MIRType::Int64, pushLocal(8));
+ break;
+ default:
+ MOZ_CRASH("Compiler bug: Unexpected local type");
+ }
+ }
+
+ varHigh_ = localSize_;
+
+ localSize_ = AlignBytes(localSize_, 16u);
+
+ addInterruptCheck();
+
+ return true;
+}
+
+void
+BaseCompiler::finish()
+{
+ MOZ_ASSERT(done(), "all bytes must be consumed");
+ MOZ_ASSERT(func_.callSiteLineNums().length() == lastReadCallSite_);
+
+ masm.flushBuffer();
+}
+
+static LiveRegisterSet
+volatileReturnGPR()
+{
+ GeneralRegisterSet rtn;
+ rtn.addAllocatable(ReturnReg);
+ return LiveRegisterSet(RegisterSet::VolatileNot(RegisterSet(rtn, FloatRegisterSet())));
+}
+
+LiveRegisterSet BaseCompiler::VolatileReturnGPR = volatileReturnGPR();
+
+} // wasm
+} // js
+
+bool
+js::wasm::BaselineCanCompile(const FunctionGenerator* fg)
+{
+ // On all platforms we require signals for AsmJS/Wasm.
+ // If we made it this far we must have signals.
+ MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+
+#if defined(JS_CODEGEN_ARM)
+ // Simplifying assumption: require SDIV and UDIV.
+ //
+ // I have no good data on ARM populations allowing me to say that
+ // X% of devices in the market implement SDIV and UDIV. However,
+ // they are definitely implemented on the Cortex-A7 and Cortex-A15
+ // and on all ARMv8 systems.
+ if (!HasIDIV())
+ return false;
+#endif
+
+#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_ARM)
+ if (fg->usesAtomics())
+ return false;
+
+ if (fg->usesSimd())
+ return false;
+
+ return true;
+#else
+ return false;
+#endif
+}
+
+bool
+js::wasm::BaselineCompileFunction(IonCompileTask* task)
+{
+ MOZ_ASSERT(task->mode() == IonCompileTask::CompileMode::Baseline);
+
+ const FuncBytes& func = task->func();
+ FuncCompileResults& results = task->results();
+
+ Decoder d(func.bytes());
+
+ // Build the local types vector.
+
+ ValTypeVector locals;
+ if (!locals.appendAll(func.sig().args()))
+ return false;
+ if (!DecodeLocalEntries(d, task->mg().kind, &locals))
+ return false;
+
+ // The MacroAssembler will sometimes access the jitContext.
+
+ JitContext jitContext(&results.alloc());
+
+ // One-pass baseline compilation.
+
+ BaseCompiler f(task->mg(), d, func, locals, results);
+ if (!f.init())
+ return false;
+
+ if (!f.emitFunction())
+ return false;
+
+ f.finish();
+
+ return true;
+}
+
+#undef INT_DIV_I64_CALLOUT
+#undef I64_TO_FLOAT_CALLOUT
+#undef FLOAT_TO_I64_CALLOUT
diff --git a/js/src/wasm/WasmBaselineCompile.h b/js/src/wasm/WasmBaselineCompile.h
new file mode 100644
index 0000000000..3c29a3551e
--- /dev/null
+++ b/js/src/wasm/WasmBaselineCompile.h
@@ -0,0 +1,48 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef asmjs_wasm_baseline_compile_h
+#define asmjs_wasm_baseline_compile_h
+
+#include "wasm/WasmIonCompile.h"
+
+namespace js {
+namespace wasm {
+
+class FunctionGenerator;
+
+// Return true if BaselineCompileFunction can generate code for the
+// function held in the FunctionGenerator. If false is returned, a
+// different compilation strategy must be chosen.
+//
+// This allows the baseline compiler to have different capabilities on
+// different platforms and defer to the full Ion compiler if
+// capabilities are missing. The FunctionGenerator and other data
+// structures contain information about the capabilities that are
+// required to compile the function.
+bool
+BaselineCanCompile(const FunctionGenerator* fg);
+
+// Generate adequate code quickly.
+bool
+BaselineCompileFunction(IonCompileTask* task);
+
+} // namespace wasm
+} // namespace js
+
+#endif // asmjs_wasm_baseline_compile_h
diff --git a/js/src/wasm/WasmBinaryConstants.h b/js/src/wasm/WasmBinaryConstants.h
new file mode 100644
index 0000000000..fd3bd1264a
--- /dev/null
+++ b/js/src/wasm/WasmBinaryConstants.h
@@ -0,0 +1,449 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_h
+#define wasm_binary_h
+
+#include "builtin/SIMD.h"
+
+namespace js {
+namespace wasm {
+
+static const uint32_t MagicNumber = 0x6d736100; // "\0asm"
+static const uint32_t EncodingVersion = 0x01;
+
+// 0xd is equivalent to 0x1 modulo unreachability validation rules, so to ease
+// the toolchain transition, accept both for a short period of time.
+static const uint32_t PrevEncodingVersion = 0x0d;
+
+enum class SectionId {
+ UserDefined = 0,
+ Type = 1,
+ Import = 2,
+ Function = 3,
+ Table = 4,
+ Memory = 5,
+ Global = 6,
+ Export = 7,
+ Start = 8,
+ Elem = 9,
+ Code = 10,
+ Data = 11
+};
+
+static const char NameSectionName[] = "name";
+
+enum class TypeCode
+{
+ I32 = 0x7f, // SLEB128(-0x01)
+ I64 = 0x7e, // SLEB128(-0x02)
+ F32 = 0x7d, // SLEB128(-0x03)
+ F64 = 0x7c, // SLEB128(-0x04)
+
+ // Only emitted internally for asm.js, likely to get collapsed into I128
+ I8x16 = 0x7b,
+ I16x8 = 0x7a,
+ I32x4 = 0x79,
+ F32x4 = 0x78,
+ B8x16 = 0x77,
+ B16x8 = 0x76,
+ B32x4 = 0x75,
+
+ // A function pointer with any signature
+ AnyFunc = 0x70, // SLEB128(-0x10)
+
+ // Type constructor for function types
+ Func = 0x60, // SLEB128(-0x20)
+
+ // Special code representing the block signature ()->()
+ BlockVoid = 0x40, // SLEB128(-0x40)
+
+ Limit = 0x80
+};
+
+enum class ValType
+{
+ I32 = uint8_t(TypeCode::I32),
+ I64 = uint8_t(TypeCode::I64),
+ F32 = uint8_t(TypeCode::F32),
+ F64 = uint8_t(TypeCode::F64),
+
+ // ------------------------------------------------------------------------
+ // The rest of these types are currently only emitted internally when
+ // compiling asm.js and are rejected by wasm validation.
+
+ I8x16 = uint8_t(TypeCode::I8x16),
+ I16x8 = uint8_t(TypeCode::I16x8),
+ I32x4 = uint8_t(TypeCode::I32x4),
+ F32x4 = uint8_t(TypeCode::F32x4),
+ B8x16 = uint8_t(TypeCode::B8x16),
+ B16x8 = uint8_t(TypeCode::B16x8),
+ B32x4 = uint8_t(TypeCode::B32x4)
+};
+
+typedef Vector<ValType, 8, SystemAllocPolicy> ValTypeVector;
+
+enum class DefinitionKind
+{
+ Function = 0x00,
+ Table = 0x01,
+ Memory = 0x02,
+ Global = 0x03
+};
+
+enum class GlobalTypeImmediate
+{
+ IsMutable = 0x1,
+ AllowedMask = 0x1
+};
+
+enum class MemoryTableFlags
+{
+ Default = 0x0
+};
+
+enum class Op
+{
+ // Control flow operators
+ Unreachable = 0x00,
+ Nop = 0x01,
+ Block = 0x02,
+ Loop = 0x03,
+ If = 0x04,
+ Else = 0x05,
+ End = 0x0b,
+ Br = 0x0c,
+ BrIf = 0x0d,
+ BrTable = 0x0e,
+ Return = 0x0f,
+
+ // Call operators
+ Call = 0x10,
+ CallIndirect = 0x11,
+
+ // Parametric operators
+ Drop = 0x1a,
+ Select = 0x1b,
+
+ // Variable access
+ GetLocal = 0x20,
+ SetLocal = 0x21,
+ TeeLocal = 0x22,
+ GetGlobal = 0x23,
+ SetGlobal = 0x24,
+
+ // Memory-related operators
+ I32Load = 0x28,
+ I64Load = 0x29,
+ F32Load = 0x2a,
+ F64Load = 0x2b,
+ I32Load8S = 0x2c,
+ I32Load8U = 0x2d,
+ I32Load16S = 0x2e,
+ I32Load16U = 0x2f,
+ I64Load8S = 0x30,
+ I64Load8U = 0x31,
+ I64Load16S = 0x32,
+ I64Load16U = 0x33,
+ I64Load32S = 0x34,
+ I64Load32U = 0x35,
+ I32Store = 0x36,
+ I64Store = 0x37,
+ F32Store = 0x38,
+ F64Store = 0x39,
+ I32Store8 = 0x3a,
+ I32Store16 = 0x3b,
+ I64Store8 = 0x3c,
+ I64Store16 = 0x3d,
+ I64Store32 = 0x3e,
+ CurrentMemory = 0x3f,
+ GrowMemory = 0x40,
+
+ // Constants
+ I32Const = 0x41,
+ I64Const = 0x42,
+ F32Const = 0x43,
+ F64Const = 0x44,
+
+ // Comparison operators
+ I32Eqz = 0x45,
+ I32Eq = 0x46,
+ I32Ne = 0x47,
+ I32LtS = 0x48,
+ I32LtU = 0x49,
+ I32GtS = 0x4a,
+ I32GtU = 0x4b,
+ I32LeS = 0x4c,
+ I32LeU = 0x4d,
+ I32GeS = 0x4e,
+ I32GeU = 0x4f,
+ I64Eqz = 0x50,
+ I64Eq = 0x51,
+ I64Ne = 0x52,
+ I64LtS = 0x53,
+ I64LtU = 0x54,
+ I64GtS = 0x55,
+ I64GtU = 0x56,
+ I64LeS = 0x57,
+ I64LeU = 0x58,
+ I64GeS = 0x59,
+ I64GeU = 0x5a,
+ F32Eq = 0x5b,
+ F32Ne = 0x5c,
+ F32Lt = 0x5d,
+ F32Gt = 0x5e,
+ F32Le = 0x5f,
+ F32Ge = 0x60,
+ F64Eq = 0x61,
+ F64Ne = 0x62,
+ F64Lt = 0x63,
+ F64Gt = 0x64,
+ F64Le = 0x65,
+ F64Ge = 0x66,
+
+ // Numeric operators
+ I32Clz = 0x67,
+ I32Ctz = 0x68,
+ I32Popcnt = 0x69,
+ I32Add = 0x6a,
+ I32Sub = 0x6b,
+ I32Mul = 0x6c,
+ I32DivS = 0x6d,
+ I32DivU = 0x6e,
+ I32RemS = 0x6f,
+ I32RemU = 0x70,
+ I32And = 0x71,
+ I32Or = 0x72,
+ I32Xor = 0x73,
+ I32Shl = 0x74,
+ I32ShrS = 0x75,
+ I32ShrU = 0x76,
+ I32Rotl = 0x77,
+ I32Rotr = 0x78,
+ I64Clz = 0x79,
+ I64Ctz = 0x7a,
+ I64Popcnt = 0x7b,
+ I64Add = 0x7c,
+ I64Sub = 0x7d,
+ I64Mul = 0x7e,
+ I64DivS = 0x7f,
+ I64DivU = 0x80,
+ I64RemS = 0x81,
+ I64RemU = 0x82,
+ I64And = 0x83,
+ I64Or = 0x84,
+ I64Xor = 0x85,
+ I64Shl = 0x86,
+ I64ShrS = 0x87,
+ I64ShrU = 0x88,
+ I64Rotl = 0x89,
+ I64Rotr = 0x8a,
+ F32Abs = 0x8b,
+ F32Neg = 0x8c,
+ F32Ceil = 0x8d,
+ F32Floor = 0x8e,
+ F32Trunc = 0x8f,
+ F32Nearest = 0x90,
+ F32Sqrt = 0x91,
+ F32Add = 0x92,
+ F32Sub = 0x93,
+ F32Mul = 0x94,
+ F32Div = 0x95,
+ F32Min = 0x96,
+ F32Max = 0x97,
+ F32CopySign = 0x98,
+ F64Abs = 0x99,
+ F64Neg = 0x9a,
+ F64Ceil = 0x9b,
+ F64Floor = 0x9c,
+ F64Trunc = 0x9d,
+ F64Nearest = 0x9e,
+ F64Sqrt = 0x9f,
+ F64Add = 0xa0,
+ F64Sub = 0xa1,
+ F64Mul = 0xa2,
+ F64Div = 0xa3,
+ F64Min = 0xa4,
+ F64Max = 0xa5,
+ F64CopySign = 0xa6,
+
+ // Conversions
+ I32WrapI64 = 0xa7,
+ I32TruncSF32 = 0xa8,
+ I32TruncUF32 = 0xa9,
+ I32TruncSF64 = 0xaa,
+ I32TruncUF64 = 0xab,
+ I64ExtendSI32 = 0xac,
+ I64ExtendUI32 = 0xad,
+ I64TruncSF32 = 0xae,
+ I64TruncUF32 = 0xaf,
+ I64TruncSF64 = 0xb0,
+ I64TruncUF64 = 0xb1,
+ F32ConvertSI32 = 0xb2,
+ F32ConvertUI32 = 0xb3,
+ F32ConvertSI64 = 0xb4,
+ F32ConvertUI64 = 0xb5,
+ F32DemoteF64 = 0xb6,
+ F64ConvertSI32 = 0xb7,
+ F64ConvertUI32 = 0xb8,
+ F64ConvertSI64 = 0xb9,
+ F64ConvertUI64 = 0xba,
+ F64PromoteF32 = 0xbb,
+
+ // Reinterpretations
+ I32ReinterpretF32 = 0xbc,
+ I64ReinterpretF64 = 0xbd,
+ F32ReinterpretI32 = 0xbe,
+ F64ReinterpretI64 = 0xbf,
+
+ // ------------------------------------------------------------------------
+ // The rest of these operators are currently only emitted internally when
+ // compiling asm.js and are rejected by wasm validation.
+
+ // asm.js-specific operators
+ TeeGlobal = 0xc8,
+ I32Min,
+ I32Max,
+ I32Neg,
+ I32BitNot,
+ I32Abs,
+ F32TeeStoreF64,
+ F64TeeStoreF32,
+ I32TeeStore8,
+ I32TeeStore16,
+ I64TeeStore8,
+ I64TeeStore16,
+ I64TeeStore32,
+ I32TeeStore,
+ I64TeeStore,
+ F32TeeStore,
+ F64TeeStore,
+ F64Mod,
+ F64Sin,
+ F64Cos,
+ F64Tan,
+ F64Asin,
+ F64Acos,
+ F64Atan,
+ F64Exp,
+ F64Log,
+ F64Pow,
+ F64Atan2,
+
+ // asm.js-style call_indirect with the callee evaluated first.
+ OldCallIndirect,
+
+ // Atomics
+ I32AtomicsCompareExchange,
+ I32AtomicsExchange,
+ I32AtomicsLoad,
+ I32AtomicsStore,
+ I32AtomicsBinOp,
+
+ // SIMD
+#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+#define _(OP) SIMD_OPCODE(I8x16, OP)
+ FORALL_INT8X16_ASMJS_OP(_)
+ I8x16Constructor,
+ I8x16Const,
+#undef _
+ // Unsigned I8x16 operations. These are the SIMD.Uint8x16 operations that
+ // behave differently from their SIMD.Int8x16 counterparts.
+ I8x16extractLaneU,
+ I8x16addSaturateU,
+ I8x16subSaturateU,
+ I8x16shiftRightByScalarU,
+ I8x16lessThanU,
+ I8x16lessThanOrEqualU,
+ I8x16greaterThanU,
+ I8x16greaterThanOrEqualU,
+
+#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+#define _(OP) SIMD_OPCODE(I16x8, OP)
+ FORALL_INT16X8_ASMJS_OP(_)
+ I16x8Constructor,
+ I16x8Const,
+#undef _
+ // Unsigned I16x8 operations. These are the SIMD.Uint16x8 operations that
+ // behave differently from their SIMD.Int16x8 counterparts.
+ I16x8extractLaneU,
+ I16x8addSaturateU,
+ I16x8subSaturateU,
+ I16x8shiftRightByScalarU,
+ I16x8lessThanU,
+ I16x8lessThanOrEqualU,
+ I16x8greaterThanU,
+ I16x8greaterThanOrEqualU,
+
+#define SIMD_OPCODE(TYPE, OP) TYPE##OP,
+#define _(OP) SIMD_OPCODE(I32x4, OP)
+ FORALL_INT32X4_ASMJS_OP(_)
+ I32x4Constructor,
+ I32x4Const,
+#undef _
+ // Unsigned I32x4 operations. These are the SIMD.Uint32x4 operations that
+ // behave differently from their SIMD.Int32x4 counterparts.
+ I32x4shiftRightByScalarU,
+ I32x4lessThanU,
+ I32x4lessThanOrEqualU,
+ I32x4greaterThanU,
+ I32x4greaterThanOrEqualU,
+ I32x4fromFloat32x4U,
+#define _(OP) SIMD_OPCODE(F32x4, OP)
+ FORALL_FLOAT32X4_ASMJS_OP(_)
+ F32x4Constructor,
+ F32x4Const,
+#undef _
+
+#define _(OP) SIMD_OPCODE(B8x16, OP)
+ FORALL_BOOL_SIMD_OP(_)
+ B8x16Constructor,
+ B8x16Const,
+#undef _
+#undef OPCODE
+
+#define _(OP) SIMD_OPCODE(B16x8, OP)
+ FORALL_BOOL_SIMD_OP(_)
+ B16x8Constructor,
+ B16x8Const,
+#undef _
+#undef OPCODE
+
+#define _(OP) SIMD_OPCODE(B32x4, OP)
+ FORALL_BOOL_SIMD_OP(_)
+ B32x4Constructor,
+ B32x4Const,
+#undef _
+#undef OPCODE
+
+ Limit
+};
+
+// Telemetry sample values for the JS_AOT_USAGE key, indicating whether asm.js
+// or WebAssembly is used.
+
+enum class Telemetry
+{
+ ASMJS = 0,
+ WASM = 1
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_binary_h
diff --git a/js/src/wasm/WasmBinaryFormat.cpp b/js/src/wasm/WasmBinaryFormat.cpp
new file mode 100644
index 0000000000..b3ee8642a0
--- /dev/null
+++ b/js/src/wasm/WasmBinaryFormat.cpp
@@ -0,0 +1,655 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinaryFormat.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "jsprf.h"
+
+#include "jit/JitOptions.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+
+bool
+wasm::DecodePreamble(Decoder& d)
+{
+ uint32_t u32;
+ if (!d.readFixedU32(&u32) || u32 != MagicNumber)
+ return d.fail("failed to match magic number");
+
+ if (!d.readFixedU32(&u32) || (u32 != EncodingVersion && u32 != PrevEncodingVersion)) {
+ return d.fail("binary version 0x%" PRIx32 " does not match expected version 0x%" PRIx32,
+ u32, EncodingVersion);
+ }
+
+ return true;
+}
+
+static bool
+DecodeValType(Decoder& d, ModuleKind kind, ValType* type)
+{
+ uint8_t unchecked;
+ if (!d.readValType(&unchecked))
+ return false;
+
+ switch (unchecked) {
+ case uint8_t(ValType::I32):
+ case uint8_t(ValType::F32):
+ case uint8_t(ValType::F64):
+ case uint8_t(ValType::I64):
+ *type = ValType(unchecked);
+ return true;
+ case uint8_t(ValType::I8x16):
+ case uint8_t(ValType::I16x8):
+ case uint8_t(ValType::I32x4):
+ case uint8_t(ValType::F32x4):
+ case uint8_t(ValType::B8x16):
+ case uint8_t(ValType::B16x8):
+ case uint8_t(ValType::B32x4):
+ if (kind != ModuleKind::AsmJS)
+ return d.fail("bad type");
+ *type = ValType(unchecked);
+ return true;
+ default:
+ break;
+ }
+ return d.fail("bad type");
+}
+
+bool
+wasm::DecodeTypeSection(Decoder& d, SigWithIdVector* sigs)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Type, &sectionStart, &sectionSize, "type"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numSigs;
+ if (!d.readVarU32(&numSigs))
+ return d.fail("expected number of signatures");
+
+ if (numSigs > MaxSigs)
+ return d.fail("too many signatures");
+
+ if (!sigs->resize(numSigs))
+ return false;
+
+ for (uint32_t sigIndex = 0; sigIndex < numSigs; sigIndex++) {
+ uint32_t form;
+ if (!d.readVarU32(&form) || form != uint32_t(TypeCode::Func))
+ return d.fail("expected function form");
+
+ uint32_t numArgs;
+ if (!d.readVarU32(&numArgs))
+ return d.fail("bad number of function args");
+
+ if (numArgs > MaxArgsPerFunc)
+ return d.fail("too many arguments in signature");
+
+ ValTypeVector args;
+ if (!args.resize(numArgs))
+ return false;
+
+ for (uint32_t i = 0; i < numArgs; i++) {
+ if (!DecodeValType(d, ModuleKind::Wasm, &args[i]))
+ return false;
+ }
+
+ uint32_t numRets;
+ if (!d.readVarU32(&numRets))
+ return d.fail("bad number of function returns");
+
+ if (numRets > 1)
+ return d.fail("too many returns in signature");
+
+ ExprType result = ExprType::Void;
+
+ if (numRets == 1) {
+ ValType type;
+ if (!DecodeValType(d, ModuleKind::Wasm, &type))
+ return false;
+
+ result = ToExprType(type);
+ }
+
+ (*sigs)[sigIndex] = Sig(Move(args), result);
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "type"))
+ return false;
+
+ return true;
+}
+
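+// Names are length-prefixed byte strings; the bytes are copied into a fresh
+// NUL-terminated buffer, with nullptr returned on decode or allocation failure.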
+UniqueChars
+wasm::DecodeName(Decoder& d)
+{
+ uint32_t numBytes;
+ if (!d.readVarU32(&numBytes))
+ return nullptr;
+
+ const uint8_t* bytes;
+ if (!d.readBytes(numBytes, &bytes))
+ return nullptr;
+
+ UniqueChars name(js_pod_malloc<char>(numBytes + 1));
+ if (!name)
+ return nullptr;
+
+ memcpy(name.get(), bytes, numBytes);
+ name[numBytes] = '\0';
+
+ return name;
+}
+
+static bool
+DecodeSignatureIndex(Decoder& d, const SigWithIdVector& sigs, uint32_t* sigIndex)
+{
+ if (!d.readVarU32(sigIndex))
+ return d.fail("expected signature index");
+
+ if (*sigIndex >= sigs.length())
+ return d.fail("signature index out of range");
+
+ return true;
+}
+
+bool
+wasm::DecodeTableLimits(Decoder& d, TableDescVector* tables)
+{
+ uint32_t elementType;
+ if (!d.readVarU32(&elementType))
+ return d.fail("expected table element type");
+
+ if (elementType != uint32_t(TypeCode::AnyFunc))
+ return d.fail("expected 'anyfunc' element type");
+
+ Limits limits;
+ if (!DecodeLimits(d, &limits))
+ return false;
+
+ if (tables->length())
+ return d.fail("already have default table");
+
+ return tables->emplaceBack(TableKind::AnyFunction, limits);
+}
+
+bool
+wasm::GlobalIsJSCompatible(Decoder& d, ValType type, bool isMutable)
+{
+ switch (type) {
+ case ValType::I32:
+ case ValType::F32:
+ case ValType::F64:
+ break;
+ case ValType::I64:
+ if (!jit::JitOptions.wasmTestMode)
+ return d.fail("can't import/export an Int64 global to JS");
+ break;
+ default:
+ return d.fail("unexpected variable type in global import/export");
+ }
+
+ if (isMutable)
+ return d.fail("can't import/export mutable globals in the MVP");
+
+ return true;
+}
+
+static bool
+DecodeImport(Decoder& d, const SigWithIdVector& sigs, Uint32Vector* funcSigIndices,
+ GlobalDescVector* globals, TableDescVector* tables, Maybe<Limits>* memory,
+ ImportVector* imports)
+{
+ UniqueChars moduleName = DecodeName(d);
+ if (!moduleName)
+ return d.fail("expected valid import module name");
+
+ UniqueChars funcName = DecodeName(d);
+ if (!funcName)
+ return d.fail("expected valid import func name");
+
+ uint32_t rawImportKind;
+ if (!d.readVarU32(&rawImportKind))
+ return d.fail("failed to read import kind");
+
+ DefinitionKind importKind = DefinitionKind(rawImportKind);
+
+ switch (importKind) {
+ case DefinitionKind::Function: {
+ uint32_t sigIndex;
+ if (!DecodeSignatureIndex(d, sigs, &sigIndex))
+ return false;
+ if (!funcSigIndices->append(sigIndex))
+ return false;
+ break;
+ }
+ case DefinitionKind::Table: {
+ if (!DecodeTableLimits(d, tables))
+ return false;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ Limits limits;
+ if (!DecodeMemoryLimits(d, !!*memory, &limits))
+ return false;
+ memory->emplace(limits);
+ break;
+ }
+ case DefinitionKind::Global: {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, &type, &isMutable))
+ return false;
+ if (!GlobalIsJSCompatible(d, type, isMutable))
+ return false;
+ if (!globals->append(GlobalDesc(type, isMutable, globals->length())))
+ return false;
+ break;
+ }
+ default:
+ return d.fail("unsupported import kind");
+ }
+
+ return imports->emplaceBack(Move(moduleName), Move(funcName), importKind);
+}
+
+bool
+wasm::DecodeImportSection(Decoder& d, const SigWithIdVector& sigs, Uint32Vector* funcSigIndices,
+ GlobalDescVector* globals, TableDescVector* tables, Maybe<Limits>* memory,
+ ImportVector* imports)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Import, &sectionStart, &sectionSize, "import"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numImports;
+ if (!d.readVarU32(&numImports))
+ return d.fail("failed to read number of imports");
+
+ if (numImports > MaxImports)
+ return d.fail("too many imports");
+
+ for (uint32_t i = 0; i < numImports; i++) {
+ if (!DecodeImport(d, sigs, funcSigIndices, globals, tables, memory, imports))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "import"))
+ return false;
+
+ return true;
+}
+
+bool
+wasm::DecodeFunctionSection(Decoder& d, const SigWithIdVector& sigs, size_t numImportedFunc,
+ Uint32Vector* funcSigIndexes)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Function, &sectionStart, &sectionSize, "function"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs))
+ return d.fail("expected number of function definitions");
+
+ CheckedInt<uint32_t> numFuncs = numImportedFunc;
+ numFuncs += numDefs;
+ if (!numFuncs.isValid() || numFuncs.value() > MaxFuncs)
+ return d.fail("too many functions");
+
+ if (!funcSigIndexes->reserve(numDefs))
+ return false;
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ uint32_t sigIndex;
+ if (!DecodeSignatureIndex(d, sigs, &sigIndex))
+ return false;
+ funcSigIndexes->infallibleAppend(sigIndex);
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "function"))
+ return false;
+
+ return true;
+}
+
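+// Local declarations are run-length encoded as (count, type) pairs. For
+// example, the locals [i32, i32, f64, i32] encode as three entries:
+// (2, i32), (1, f64), (1, i32).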
+bool
+wasm::EncodeLocalEntries(Encoder& e, const ValTypeVector& locals)
+{
+ uint32_t numLocalEntries = 0;
+ ValType prev = ValType(TypeCode::Limit);
+ for (ValType t : locals) {
+ if (t != prev) {
+ numLocalEntries++;
+ prev = t;
+ }
+ }
+
+ if (!e.writeVarU32(numLocalEntries))
+ return false;
+
+ if (numLocalEntries) {
+ prev = locals[0];
+ uint32_t count = 1;
+ for (uint32_t i = 1; i < locals.length(); i++, count++) {
+ if (prev != locals[i]) {
+ if (!e.writeVarU32(count))
+ return false;
+ if (!e.writeValType(prev))
+ return false;
+ prev = locals[i];
+ count = 0;
+ }
+ }
+ if (!e.writeVarU32(count))
+ return false;
+ if (!e.writeValType(prev))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+wasm::DecodeLocalEntries(Decoder& d, ModuleKind kind, ValTypeVector* locals)
+{
+ uint32_t numLocalEntries;
+ if (!d.readVarU32(&numLocalEntries))
+ return d.fail("failed to read number of local entries");
+
+ for (uint32_t i = 0; i < numLocalEntries; i++) {
+ uint32_t count;
+ if (!d.readVarU32(&count))
+ return d.fail("failed to read local entry count");
+
+ if (MaxLocals - locals->length() < count)
+ return d.fail("too many locals");
+
+ ValType type;
+ if (!DecodeValType(d, kind, &type))
+ return false;
+
+ if (!locals->appendN(type, count))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+wasm::DecodeGlobalType(Decoder& d, ValType* type, bool* isMutable)
+{
+ if (!DecodeValType(d, ModuleKind::Wasm, type))
+ return false;
+
+ uint32_t flags;
+ if (!d.readVarU32(&flags))
+ return d.fail("expected global flags");
+
+ if (flags & ~uint32_t(GlobalTypeImmediate::AllowedMask))
+ return d.fail("unexpected bits set in global flags");
+
+ *isMutable = flags & uint32_t(GlobalTypeImmediate::IsMutable);
+ return true;
+}
+
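+// An initializer expression is a single constant (i32/i64/f32/f64.const) or a
+// get_global of an immutable imported global, followed by the `end` opcode,
+// and its type must match the expected type of the thing being initialized.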
+bool
+wasm::DecodeInitializerExpression(Decoder& d, const GlobalDescVector& globals, ValType expected,
+ InitExpr* init)
+{
+ uint16_t op;
+ if (!d.readOp(&op))
+ return d.fail("failed to read initializer type");
+
+ switch (op) {
+ case uint16_t(Op::I32Const): {
+ int32_t i32;
+ if (!d.readVarS32(&i32))
+ return d.fail("failed to read initializer i32 expression");
+ *init = InitExpr(Val(uint32_t(i32)));
+ break;
+ }
+ case uint16_t(Op::I64Const): {
+ int64_t i64;
+ if (!d.readVarS64(&i64))
+ return d.fail("failed to read initializer i64 expression");
+ *init = InitExpr(Val(uint64_t(i64)));
+ break;
+ }
+ case uint16_t(Op::F32Const): {
+ RawF32 f32;
+ if (!d.readFixedF32(&f32))
+ return d.fail("failed to read initializer f32 expression");
+ *init = InitExpr(Val(f32));
+ break;
+ }
+ case uint16_t(Op::F64Const): {
+ RawF64 f64;
+ if (!d.readFixedF64(&f64))
+ return d.fail("failed to read initializer f64 expression");
+ *init = InitExpr(Val(f64));
+ break;
+ }
+ case uint16_t(Op::GetGlobal): {
+ uint32_t i;
+ if (!d.readVarU32(&i))
+ return d.fail("failed to read get_global index in initializer expression");
+ if (i >= globals.length())
+ return d.fail("global index out of range in initializer expression");
+ if (!globals[i].isImport() || globals[i].isMutable())
+ return d.fail("initializer expression must reference a global immutable import");
+ *init = InitExpr(i, globals[i].type());
+ break;
+ }
+ default: {
+ return d.fail("unexpected initializer expression");
+ }
+ }
+
+ if (expected != init->type())
+ return d.fail("type mismatch: initializer type and expected type don't match");
+
+ uint16_t end;
+ if (!d.readOp(&end) || end != uint16_t(Op::End))
+ return d.fail("failed to read end of initializer expression");
+
+ return true;
+}
+
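+// Limits are encoded as a flags byte followed by the initial size; bit 0 of
+// the flags indicates that a maximum size follows, which must be at least as
+// large as the initial size.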
+bool
+wasm::DecodeLimits(Decoder& d, Limits* limits)
+{
+ uint32_t flags;
+ if (!d.readVarU32(&flags))
+ return d.fail("expected flags");
+
+ if (flags & ~uint32_t(0x1))
+ return d.fail("unexpected bits set in flags: %" PRIu32, (flags & ~uint32_t(0x1)));
+
+ if (!d.readVarU32(&limits->initial))
+ return d.fail("expected initial length");
+
+ if (flags & 0x1) {
+ uint32_t maximum;
+ if (!d.readVarU32(&maximum))
+ return d.fail("expected maximum length");
+
+ if (limits->initial > maximum) {
+ return d.fail("memory size minimum must not be greater than maximum; "
+ "maximum length %" PRIu32 " is less than initial length %" PRIu32,
+ maximum, limits->initial);
+ }
+
+ limits->maximum.emplace(maximum);
+ }
+
+ return true;
+}
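+
+// For example, the flag/length sequence (0x1, 1, 2) decodes to limits with an
+// initial size of 1 and a maximum of 2, while (0x0, 1) decodes to an initial
+// size of 1 with no declared maximum.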
+
+bool
+wasm::DecodeDataSection(Decoder& d, bool usesMemory, uint32_t minMemoryByteLength,
+ const GlobalDescVector& globals, DataSegmentVector* segments)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Data, &sectionStart, &sectionSize, "data"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ if (!usesMemory)
+ return d.fail("data section requires a memory section");
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments))
+ return d.fail("failed to read number of data segments");
+
+ if (numSegments > MaxDataSegments)
+ return d.fail("too many data segments");
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t linearMemoryIndex;
+ if (!d.readVarU32(&linearMemoryIndex))
+ return d.fail("expected linear memory index");
+
+ if (linearMemoryIndex != 0)
+ return d.fail("linear memory index must currently be 0");
+
+ DataSegment seg;
+ if (!DecodeInitializerExpression(d, globals, ValType::I32, &seg.offset))
+ return false;
+
+ if (!d.readVarU32(&seg.length))
+ return d.fail("expected segment size");
+
+ seg.bytecodeOffset = d.currentOffset();
+
+ if (!d.readBytes(seg.length))
+ return d.fail("data segment shorter than declared");
+
+ if (!segments->append(seg))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "data"))
+ return false;
+
+ return true;
+}
+
+bool
+wasm::DecodeMemoryLimits(Decoder& d, bool hasMemory, Limits* memory)
+{
+ if (hasMemory)
+ return d.fail("already have default memory");
+
+ if (!DecodeLimits(d, memory))
+ return false;
+
+ CheckedInt<uint32_t> initialBytes = memory->initial;
+ initialBytes *= PageSize;
+ if (!initialBytes.isValid() || initialBytes.value() > uint32_t(INT32_MAX))
+ return d.fail("initial memory size too big");
+
+ memory->initial = initialBytes.value();
+
+ if (memory->maximum) {
+ CheckedInt<uint32_t> maximumBytes = *memory->maximum;
+ maximumBytes *= PageSize;
+ if (!maximumBytes.isValid())
+ return d.fail("maximum memory size too big");
+
+ memory->maximum = Some(maximumBytes.value());
+ }
+
+ return true;
+}
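+
+// For example, with the 64 KiB wasm page size, an initial size of 2 pages is
+// converted to 131072 bytes here; sizes that do not fit in a uint32_t, or that
+// exceed INT32_MAX bytes, are rejected above.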
+
+bool
+wasm::DecodeMemorySection(Decoder& d, bool hasMemory, Limits* memory, bool *present)
+{
+ *present = false;
+
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Memory, &sectionStart, &sectionSize, "memory"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ *present = true;
+
+ uint32_t numMemories;
+ if (!d.readVarU32(&numMemories))
+ return d.fail("failed to read number of memories");
+
+ if (numMemories != 1)
+ return d.fail("the number of memories must be exactly one");
+
+ if (!DecodeMemoryLimits(d, hasMemory, memory))
+ return false;
+
+ if (!d.finishSection(sectionStart, sectionSize, "memory"))
+ return false;
+
+ return true;
+}
+
+bool
+wasm::DecodeUnknownSections(Decoder& d)
+{
+ while (!d.done()) {
+ if (!d.skipUserDefinedSection())
+ return false;
+ }
+
+ return true;
+}
+
+bool
+Decoder::fail(const char* msg, ...)
+{
+ va_list ap;
+ va_start(ap, msg);
+ UniqueChars str(JS_vsmprintf(msg, ap));
+ va_end(ap);
+ if (!str)
+ return false;
+
+ return fail(Move(str));
+}
+
+bool
+Decoder::fail(UniqueChars msg)
+{
+ MOZ_ASSERT(error_);
+ UniqueChars strWithOffset(JS_smprintf("at offset %" PRIuSIZE ": %s", currentOffset(), msg.get()));
+ if (!strWithOffset)
+ return false;
+
+ *error_ = Move(strWithOffset);
+ return false;
+}
diff --git a/js/src/wasm/WasmBinaryFormat.h b/js/src/wasm/WasmBinaryFormat.h
new file mode 100644
index 0000000000..2f95ebafb1
--- /dev/null
+++ b/js/src/wasm/WasmBinaryFormat.h
@@ -0,0 +1,689 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_format_h
+#define wasm_binary_format_h
+
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+// The Encoder class appends bytes to the Bytes object it is given during
+// construction. The client is responsible for the Bytes's lifetime and must
+// keep the Bytes alive as long as the Encoder is used.
+
+class Encoder
+{
+ Bytes& bytes_;
+
+ template <class T>
+ MOZ_MUST_USE bool write(const T& v) {
+ return bytes_.append(reinterpret_cast<const uint8_t*>(&v), sizeof(T));
+ }
+
+ template <typename UInt>
+ MOZ_MUST_USE bool writeVarU(UInt i) {
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ if (i != 0)
+ byte |= 0x80;
+ if (!bytes_.append(byte))
+ return false;
+ } while (i != 0);
+ return true;
+ }
+
+ template <typename SInt>
+ MOZ_MUST_USE bool writeVarS(SInt i) {
+ bool done;
+ do {
+ uint8_t byte = i & 0x7f;
+ i >>= 7;
+ done = ((i == 0) && !(byte & 0x40)) || ((i == -1) && (byte & 0x40));
+ if (!done)
+ byte |= 0x80;
+ if (!bytes_.append(byte))
+ return false;
+ } while (!done);
+ return true;
+ }
+
+ void patchVarU32(size_t offset, uint32_t patchBits, uint32_t assertBits) {
+ do {
+ uint8_t assertByte = assertBits & 0x7f;
+ uint8_t patchByte = patchBits & 0x7f;
+ assertBits >>= 7;
+ patchBits >>= 7;
+ if (assertBits != 0) {
+ assertByte |= 0x80;
+ patchByte |= 0x80;
+ }
+ MOZ_ASSERT(assertByte == bytes_[offset]);
+ bytes_[offset] = patchByte;
+ offset++;
+ } while(assertBits != 0);
+ }
+
+ void patchFixedU7(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(patchBits <= uint8_t(INT8_MAX));
+ patchFixedU8(offset, patchBits, assertBits);
+ }
+
+ void patchFixedU8(size_t offset, uint8_t patchBits, uint8_t assertBits) {
+ MOZ_ASSERT(bytes_[offset] == assertBits);
+ bytes_[offset] = patchBits;
+ }
+
+ uint32_t varU32ByteLength(size_t offset) const {
+ size_t start = offset;
+ while (bytes_[offset] & 0x80)
+ offset++;
+ return offset - start + 1;
+ }
+
+ public:
+ explicit Encoder(Bytes& bytes)
+ : bytes_(bytes)
+ {
+ MOZ_ASSERT(empty());
+ }
+
+ size_t currentOffset() const { return bytes_.length(); }
+ bool empty() const { return currentOffset() == 0; }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ MOZ_MUST_USE bool writeFixedU7(uint8_t i) {
+ MOZ_ASSERT(i <= uint8_t(INT8_MAX));
+ return writeFixedU8(i);
+ }
+ MOZ_MUST_USE bool writeFixedU8(uint8_t i) {
+ return write<uint8_t>(i);
+ }
+ MOZ_MUST_USE bool writeFixedU32(uint32_t i) {
+ return write<uint32_t>(i);
+ }
+ MOZ_MUST_USE bool writeFixedF32(RawF32 f) {
+ return write<uint32_t>(f.bits());
+ }
+ MOZ_MUST_USE bool writeFixedF64(RawF64 d) {
+ return write<uint64_t>(d.bits());
+ }
+ MOZ_MUST_USE bool writeFixedI8x16(const I8x16& i8x16) {
+ return write<I8x16>(i8x16);
+ }
+ MOZ_MUST_USE bool writeFixedI16x8(const I16x8& i16x8) {
+ return write<I16x8>(i16x8);
+ }
+ MOZ_MUST_USE bool writeFixedI32x4(const I32x4& i32x4) {
+ return write<I32x4>(i32x4);
+ }
+ MOZ_MUST_USE bool writeFixedF32x4(const F32x4& f32x4) {
+ return write<F32x4>(f32x4);
+ }
+
+ // Variable-length encodings that all use LEB128.
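+ // For example, writeVarU32(300) emits the two bytes 0xAC 0x02: each byte
+ // holds seven payload bits and the high bit marks a continuation.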
+
+ MOZ_MUST_USE bool writeVarU32(uint32_t i) {
+ return writeVarU<uint32_t>(i);
+ }
+ MOZ_MUST_USE bool writeVarS32(int32_t i) {
+ return writeVarS<int32_t>(i);
+ }
+ MOZ_MUST_USE bool writeVarU64(uint64_t i) {
+ return writeVarU<uint64_t>(i);
+ }
+ MOZ_MUST_USE bool writeVarS64(int64_t i) {
+ return writeVarS<int64_t>(i);
+ }
+ MOZ_MUST_USE bool writeValType(ValType type) {
+ static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ MOZ_ASSERT(size_t(type) < size_t(TypeCode::Limit));
+ return writeFixedU8(uint8_t(type));
+ }
+ MOZ_MUST_USE bool writeBlockType(ExprType type) {
+ static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ MOZ_ASSERT(size_t(type) < size_t(TypeCode::Limit));
+ return writeFixedU8(uint8_t(type));
+ }
+ MOZ_MUST_USE bool writeOp(Op op) {
+ static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
+ MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
+ if (size_t(op) < UINT8_MAX)
+ return writeFixedU8(uint8_t(op));
+ return writeFixedU8(UINT8_MAX) &&
+ writeFixedU8(size_t(op) - UINT8_MAX);
+ }
+
+ // Fixed-length encodings that allow back-patching.
+
+ MOZ_MUST_USE bool writePatchableFixedU7(size_t* offset) {
+ *offset = bytes_.length();
+ return writeFixedU8(UINT8_MAX);
+ }
+ void patchFixedU7(size_t offset, uint8_t patchBits) {
+ return patchFixedU7(offset, patchBits, UINT8_MAX);
+ }
+
+ // Variable-length encodings that allow back-patching.
+
+ MOZ_MUST_USE bool writePatchableVarU32(size_t* offset) {
+ *offset = bytes_.length();
+ return writeVarU32(UINT32_MAX);
+ }
+ void patchVarU32(size_t offset, uint32_t patchBits) {
+ return patchVarU32(offset, patchBits, UINT32_MAX);
+ }
+
+ // Byte ranges start with an LEB128 length followed by an arbitrary sequence
+ // of bytes. When used for strings, bytes are to be interpreted as utf8.
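+ // For example, writeBytes("hi", 2) emits the length byte 0x02 followed by
+ // the bytes 0x68 0x69.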
+
+ MOZ_MUST_USE bool writeBytes(const void* bytes, uint32_t numBytes) {
+ return writeVarU32(numBytes) &&
+ bytes_.append(reinterpret_cast<const uint8_t*>(bytes), numBytes);
+ }
+
+ // A "section" is a contiguous range of bytes that stores its own size so
+ // that it may be trivially skipped without examining the contents. Sections
+ // require backpatching since the size of the section is only known at the
+ // end while the size's varU32 must be stored at the beginning. Immediately
+ // after the section length is the string id of the section.
+
+ MOZ_MUST_USE bool startSection(SectionId id, size_t* offset) {
+ MOZ_ASSERT(id != SectionId::UserDefined); // not supported yet
+
+ return writeVarU32(uint32_t(id)) &&
+ writePatchableVarU32(offset);
+ }
+ void finishSection(size_t offset) {
+ return patchVarU32(offset, bytes_.length() - offset - varU32ByteLength(offset));
+ }
+};
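+
+// A minimal sketch of how a client might drive the Encoder (illustrative only;
+// everything other than the Encoder API itself is up to the caller):
+//
+//   Bytes bytes;
+//   Encoder e(bytes);
+//   size_t lengthOffset;
+//   if (!e.startSection(SectionId::Memory, &lengthOffset))
+//       return false;
+//   if (!e.writeVarU32(1))              // section payload: entry count, etc.
+//       return false;
+//   e.finishSection(lengthOffset);      // back-patch the section length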
+
+// The Decoder class decodes the bytes in the range it is given during
+// construction. The client is responsible for keeping the byte range alive as
+// long as the Decoder is used.
+
+class Decoder
+{
+ const uint8_t* const beg_;
+ const uint8_t* const end_;
+ const uint8_t* cur_;
+ UniqueChars* error_;
+
+ template <class T>
+ MOZ_MUST_USE bool read(T* out) {
+ if (bytesRemain() < sizeof(T))
+ return false;
+ memcpy((void*)out, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return true;
+ }
+
+ template <class T>
+ T uncheckedRead() {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ T ret;
+ memcpy(&ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ return ret;
+ }
+
+ template <class T>
+ void uncheckedRead(T* ret) {
+ MOZ_ASSERT(bytesRemain() >= sizeof(T));
+ memcpy(ret, cur_, sizeof(T));
+ cur_ += sizeof(T);
+ }
+
+ template <typename UInt>
+ MOZ_MUST_USE bool readVarU(UInt* out) {
+ const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt u = 0;
+ uint8_t byte;
+ UInt shift = 0;
+ do {
+ if (!readFixedU8(&byte))
+ return false;
+ if (!(byte & 0x80)) {
+ *out = u | UInt(byte) << shift;
+ return true;
+ }
+ u |= UInt(byte & 0x7F) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ if (!readFixedU8(&byte) || (byte & (unsigned(-1) << remainderBits)))
+ return false;
+ *out = u | (UInt(byte) << numBitsInSevens);
+ return true;
+ }
+
+ template <typename SInt>
+ MOZ_MUST_USE bool readVarS(SInt* out) {
+ const unsigned numBits = sizeof(SInt) * CHAR_BIT;
+ const unsigned remainderBits = numBits % 7;
+ const unsigned numBitsInSevens = numBits - remainderBits;
+ SInt s = 0;
+ uint8_t byte;
+ unsigned shift = 0;
+ do {
+ if (!readFixedU8(&byte))
+ return false;
+ s |= SInt(byte & 0x7f) << shift;
+ shift += 7;
+ if (!(byte & 0x80)) {
+ if (byte & 0x40)
+ s |= SInt(-1) << shift;
+ *out = s;
+ return true;
+ }
+ } while (shift < numBitsInSevens);
+ if (!remainderBits || !readFixedU8(&byte) || (byte & 0x80))
+ return false;
+ uint8_t mask = 0x7f & (uint8_t(-1) << remainderBits);
+ if ((byte & mask) != ((byte & (1 << (remainderBits - 1))) ? mask : 0))
+ return false;
+ *out = s | SInt(byte) << shift;
+ return true;
+ }
+
+ public:
+ Decoder(const uint8_t* begin, const uint8_t* end, UniqueChars* error)
+ : beg_(begin),
+ end_(end),
+ cur_(begin),
+ error_(error)
+ {
+ MOZ_ASSERT(begin <= end);
+ }
+ explicit Decoder(const Bytes& bytes, UniqueChars* error = nullptr)
+ : beg_(bytes.begin()),
+ end_(bytes.end()),
+ cur_(bytes.begin()),
+ error_(error)
+ {}
+
+ bool fail(const char* msg, ...) MOZ_FORMAT_PRINTF(2, 3);
+ bool fail(UniqueChars msg);
+ void clearError() {
+ if (error_)
+ error_->reset();
+ }
+
+ bool done() const {
+ MOZ_ASSERT(cur_ <= end_);
+ return cur_ == end_;
+ }
+
+ size_t bytesRemain() const {
+ MOZ_ASSERT(end_ >= cur_);
+ return size_t(end_ - cur_);
+ }
+ // pos must be a value previously returned from currentPosition.
+ void rollbackPosition(const uint8_t* pos) {
+ cur_ = pos;
+ }
+ const uint8_t* currentPosition() const {
+ return cur_;
+ }
+ size_t currentOffset() const {
+ return cur_ - beg_;
+ }
+ const uint8_t* begin() const {
+ return beg_;
+ }
+
+ // Fixed-size encoding operations simply copy the literal bytes (without
+ // attempting to align).
+
+ MOZ_MUST_USE bool readFixedU8(uint8_t* i) {
+ return read<uint8_t>(i);
+ }
+ MOZ_MUST_USE bool readFixedU32(uint32_t* u) {
+ return read<uint32_t>(u);
+ }
+ MOZ_MUST_USE bool readFixedF32(RawF32* f) {
+ uint32_t u;
+ if (!read<uint32_t>(&u))
+ return false;
+ *f = RawF32::fromBits(u);
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedF64(RawF64* d) {
+ uint64_t u;
+ if (!read<uint64_t>(&u))
+ return false;
+ *d = RawF64::fromBits(u);
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedI8x16(I8x16* i8x16) {
+ return read<I8x16>(i8x16);
+ }
+ MOZ_MUST_USE bool readFixedI16x8(I16x8* i16x8) {
+ return read<I16x8>(i16x8);
+ }
+ MOZ_MUST_USE bool readFixedI32x4(I32x4* i32x4) {
+ return read<I32x4>(i32x4);
+ }
+ MOZ_MUST_USE bool readFixedF32x4(F32x4* f32x4) {
+ return read<F32x4>(f32x4);
+ }
+
+ // Variable-length encodings that all use LEB128.
+
+ MOZ_MUST_USE bool readVarU32(uint32_t* out) {
+ return readVarU<uint32_t>(out);
+ }
+ MOZ_MUST_USE bool readVarS32(int32_t* out) {
+ return readVarS<int32_t>(out);
+ }
+ MOZ_MUST_USE bool readVarU64(uint64_t* out) {
+ return readVarU<uint64_t>(out);
+ }
+ MOZ_MUST_USE bool readVarS64(int64_t* out) {
+ return readVarS<int64_t>(out);
+ }
+ MOZ_MUST_USE bool readValType(uint8_t* type) {
+ static_assert(uint8_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ return readFixedU8(type);
+ }
+ MOZ_MUST_USE bool readBlockType(uint8_t* type) {
+ static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
+ return readFixedU8(type);
+ }
+ MOZ_MUST_USE bool readOp(uint16_t* op) {
+ static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
+ uint8_t u8;
+ if (!readFixedU8(&u8))
+ return false;
+ if (MOZ_LIKELY(u8 != UINT8_MAX)) {
+ *op = u8;
+ return true;
+ }
+ if (!readFixedU8(&u8))
+ return false;
+ *op = uint16_t(u8) + UINT8_MAX;
+ return true;
+ }
+
+ // See writeBytes comment.
+
+ MOZ_MUST_USE bool readBytes(uint32_t numBytes, const uint8_t** bytes = nullptr) {
+ if (bytes)
+ *bytes = cur_;
+ if (bytesRemain() < numBytes)
+ return false;
+ cur_ += numBytes;
+ return true;
+ }
+
+ // See "section" description in Encoder.
+
+ static const uint32_t NotStarted = UINT32_MAX;
+
+ MOZ_MUST_USE bool startSection(SectionId id,
+ uint32_t* startOffset,
+ uint32_t* size,
+ const char* sectionName)
+ {
+ const uint8_t* const before = cur_;
+ const uint8_t* beforeId = before;
+ uint32_t idValue;
+ if (!readVarU32(&idValue))
+ goto backup;
+ while (idValue != uint32_t(id)) {
+ if (idValue != uint32_t(SectionId::UserDefined))
+ goto backup;
+ // Rewind to the section id since skipUserDefinedSection expects it.
+ cur_ = beforeId;
+ if (!skipUserDefinedSection())
+ return false;
+ beforeId = cur_;
+ if (!readVarU32(&idValue))
+ goto backup;
+ }
+ if (!readVarU32(size))
+ goto fail;
+ if (bytesRemain() < *size)
+ goto fail;
+ *startOffset = cur_ - beg_;
+ return true;
+ backup:
+ cur_ = before;
+ *startOffset = NotStarted;
+ return true;
+ fail:
+ return fail("failed to start %s section", sectionName);
+ }
+ MOZ_MUST_USE bool finishSection(uint32_t startOffset, uint32_t size,
+ const char* sectionName)
+ {
+ if (size != (cur_ - beg_) - startOffset)
+ return fail("byte size mismatch in %s section", sectionName);
+ return true;
+ }
+
+ // "User sections" do not cause validation errors unless the error is in
+ // the user-defined section header itself.
+
+ MOZ_MUST_USE bool startUserDefinedSection(const char* expectedId,
+ size_t expectedIdSize,
+ uint32_t* sectionStart,
+ uint32_t* sectionSize)
+ {
+ const uint8_t* const before = cur_;
+ while (true) {
+ if (!startSection(SectionId::UserDefined, sectionStart, sectionSize, "user-defined"))
+ return false;
+ if (*sectionStart == NotStarted) {
+ cur_ = before;
+ return true;
+ }
+ uint32_t idSize;
+ if (!readVarU32(&idSize))
+ goto fail;
+ if (idSize > bytesRemain() || currentOffset() + idSize > *sectionStart + *sectionSize)
+ goto fail;
+ if (expectedId && (expectedIdSize != idSize || !!memcmp(cur_, expectedId, idSize))) {
+ finishUserDefinedSection(*sectionStart, *sectionSize);
+ continue;
+ }
+ cur_ += idSize;
+ return true;
+ }
+ MOZ_CRASH("unreachable");
+ fail:
+ return fail("failed to start user-defined section");
+ }
+ template <size_t IdSizeWith0>
+ MOZ_MUST_USE bool startUserDefinedSection(const char (&id)[IdSizeWith0],
+ uint32_t* sectionStart,
+ uint32_t* sectionSize)
+ {
+ MOZ_ASSERT(id[IdSizeWith0 - 1] == '\0');
+ return startUserDefinedSection(id, IdSizeWith0 - 1, sectionStart, sectionSize);
+ }
+ void finishUserDefinedSection(uint32_t sectionStart, uint32_t sectionSize) {
+ MOZ_ASSERT(cur_ >= beg_);
+ MOZ_ASSERT(cur_ <= end_);
+ cur_ = (beg_ + sectionStart) + sectionSize;
+ MOZ_ASSERT(cur_ <= end_);
+ clearError();
+ }
+ MOZ_MUST_USE bool skipUserDefinedSection() {
+ uint32_t sectionStart, sectionSize;
+ if (!startUserDefinedSection(nullptr, 0, &sectionStart, &sectionSize))
+ return false;
+ if (sectionStart == NotStarted)
+ return fail("expected user-defined section");
+ finishUserDefinedSection(sectionStart, sectionSize);
+ return true;
+ }
+
+ // The infallible "unchecked" decoding functions can be used when we are
+ // sure that the bytes are well-formed (by construction or due to previous
+ // validation).
+
+ uint8_t uncheckedReadFixedU8() {
+ return uncheckedRead<uint8_t>();
+ }
+ uint32_t uncheckedReadFixedU32() {
+ return uncheckedRead<uint32_t>();
+ }
+ RawF32 uncheckedReadFixedF32() {
+ return RawF32::fromBits(uncheckedRead<uint32_t>());
+ }
+ RawF64 uncheckedReadFixedF64() {
+ return RawF64::fromBits(uncheckedRead<uint64_t>());
+ }
+ template <typename UInt>
+ UInt uncheckedReadVarU() {
+ static const unsigned numBits = sizeof(UInt) * CHAR_BIT;
+ static const unsigned remainderBits = numBits % 7;
+ static const unsigned numBitsInSevens = numBits - remainderBits;
+ UInt decoded = 0;
+ uint32_t shift = 0;
+ do {
+ uint8_t byte = *cur_++;
+ if (!(byte & 0x80))
+ return decoded | (UInt(byte) << shift);
+ decoded |= UInt(byte & 0x7f) << shift;
+ shift += 7;
+ } while (shift != numBitsInSevens);
+ uint8_t byte = *cur_++;
+ MOZ_ASSERT(!(byte & 0xf0));
+ return decoded | (UInt(byte) << numBitsInSevens);
+ }
+ uint32_t uncheckedReadVarU32() {
+ return uncheckedReadVarU<uint32_t>();
+ }
+ int32_t uncheckedReadVarS32() {
+ int32_t i32 = 0;
+ MOZ_ALWAYS_TRUE(readVarS32(&i32));
+ return i32;
+ }
+ uint64_t uncheckedReadVarU64() {
+ return uncheckedReadVarU<uint64_t>();
+ }
+ int64_t uncheckedReadVarS64() {
+ int64_t i64 = 0;
+ MOZ_ALWAYS_TRUE(readVarS64(&i64));
+ return i64;
+ }
+ ValType uncheckedReadValType() {
+ return (ValType)uncheckedReadFixedU8();
+ }
+ Op uncheckedReadOp() {
+ static_assert(size_t(Op::Limit) <= 2 * UINT8_MAX, "fits");
+ uint8_t u8 = uncheckedReadFixedU8();
+ return u8 != UINT8_MAX
+ ? Op(u8)
+ : Op(uncheckedReadFixedU8() + UINT8_MAX);
+ }
+ void uncheckedReadFixedI8x16(I8x16* i8x16) {
+ struct T { I8x16 v; };
+ T t = uncheckedRead<T>();
+ memcpy(i8x16, &t, sizeof(t));
+ }
+ void uncheckedReadFixedI16x8(I16x8* i16x8) {
+ struct T { I16x8 v; };
+ T t = uncheckedRead<T>();
+ memcpy(i16x8, &t, sizeof(t));
+ }
+ void uncheckedReadFixedI32x4(I32x4* i32x4) {
+ struct T { I32x4 v; };
+ T t = uncheckedRead<T>();
+ memcpy(i32x4, &t, sizeof(t));
+ }
+ void uncheckedReadFixedF32x4(F32x4* f32x4) {
+ struct T { F32x4 v; };
+ T t = uncheckedRead<T>();
+ memcpy(f32x4, &t, sizeof(t));
+ }
+};
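+
+// A minimal sketch of typical Decoder usage (illustrative only): walk a byte
+// range, reporting failures through the caller-provided error string.
+//
+//   UniqueChars error;
+//   Decoder d(bytes, &error);
+//   uint32_t count;
+//   if (!d.readVarU32(&count))
+//       return d.fail("expected count");
+//   while (!d.done()) {
+//       if (!d.skipUserDefinedSection())
+//           return false;
+//   }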
+
+// Shared encoding/decoding helpers used by both encoders
+// (AsmJS/WasmTextToBinary) and by all the decoders
+// (WasmCompile/WasmIonCompile/WasmBaselineCompile/WasmBinaryToText).
+
+// Misc helpers.
+
+UniqueChars
+DecodeName(Decoder& d);
+
+MOZ_MUST_USE bool
+DecodeTableLimits(Decoder& d, TableDescVector* tables);
+
+MOZ_MUST_USE bool
+GlobalIsJSCompatible(Decoder& d, ValType type, bool isMutable);
+
+MOZ_MUST_USE bool
+EncodeLocalEntries(Encoder& d, const ValTypeVector& locals);
+
+MOZ_MUST_USE bool
+DecodeLocalEntries(Decoder& d, ModuleKind kind, ValTypeVector* locals);
+
+MOZ_MUST_USE bool
+DecodeGlobalType(Decoder& d, ValType* type, bool* isMutable);
+
+MOZ_MUST_USE bool
+DecodeInitializerExpression(Decoder& d, const GlobalDescVector& globals, ValType expected,
+ InitExpr* init);
+
+MOZ_MUST_USE bool
+DecodeLimits(Decoder& d, Limits* limits);
+
+MOZ_MUST_USE bool
+DecodeMemoryLimits(Decoder& d, bool hasMemory, Limits* memory);
+
+// Section macros.
+
+MOZ_MUST_USE bool
+DecodePreamble(Decoder& d);
+
+MOZ_MUST_USE bool
+DecodeTypeSection(Decoder& d, SigWithIdVector* sigs);
+
+MOZ_MUST_USE bool
+DecodeImportSection(Decoder& d, const SigWithIdVector& sigs, Uint32Vector* funcSigIndices,
+ GlobalDescVector* globals, TableDescVector* tables, Maybe<Limits>* memory,
+ ImportVector* imports);
+
+MOZ_MUST_USE bool
+DecodeFunctionSection(Decoder& d, const SigWithIdVector& sigs, size_t numImportedFunc,
+ Uint32Vector* funcSigIndexes);
+
+MOZ_MUST_USE bool
+DecodeUnknownSections(Decoder& d);
+
+MOZ_MUST_USE bool
+DecodeDataSection(Decoder& d, bool usesMemory, uint32_t minMemoryByteLength,
+ const GlobalDescVector& globals, DataSegmentVector* segments);
+
+MOZ_MUST_USE bool
+DecodeMemorySection(Decoder& d, bool hasMemory, Limits* memory, bool* present);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_binary_format_h
diff --git a/js/src/wasm/WasmBinaryIterator.cpp b/js/src/wasm/WasmBinaryIterator.cpp
new file mode 100644
index 0000000000..6bc9b527e4
--- /dev/null
+++ b/js/src/wasm/WasmBinaryIterator.cpp
@@ -0,0 +1,498 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinaryIterator.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+#ifdef DEBUG
+OpKind
+wasm::Classify(Op op)
+{
+ switch (op) {
+ case Op::Block:
+ return OpKind::Block;
+ case Op::Loop:
+ return OpKind::Loop;
+ case Op::Unreachable:
+ return OpKind::Unreachable;
+ case Op::Drop:
+ return OpKind::Drop;
+ case Op::I32Const:
+ return OpKind::I32;
+ case Op::I64Const:
+ return OpKind::I64;
+ case Op::F32Const:
+ return OpKind::F32;
+ case Op::F64Const:
+ return OpKind::F64;
+ case Op::I8x16Const:
+ return OpKind::I8x16;
+ case Op::I16x8Const:
+ return OpKind::I16x8;
+ case Op::I32x4Const:
+ return OpKind::I32x4;
+ case Op::B8x16Const:
+ return OpKind::B8x16;
+ case Op::B16x8Const:
+ return OpKind::B16x8;
+ case Op::B32x4Const:
+ return OpKind::B32x4;
+ case Op::F32x4Const:
+ return OpKind::F32x4;
+ case Op::Br:
+ return OpKind::Br;
+ case Op::BrIf:
+ return OpKind::BrIf;
+ case Op::BrTable:
+ return OpKind::BrTable;
+ case Op::Nop:
+ return OpKind::Nop;
+ case Op::I32Clz:
+ case Op::I32Ctz:
+ case Op::I32Popcnt:
+ case Op::I64Clz:
+ case Op::I64Ctz:
+ case Op::I64Popcnt:
+ case Op::F32Abs:
+ case Op::F32Neg:
+ case Op::F32Ceil:
+ case Op::F32Floor:
+ case Op::F32Trunc:
+ case Op::F32Nearest:
+ case Op::F32Sqrt:
+ case Op::F64Abs:
+ case Op::F64Neg:
+ case Op::F64Ceil:
+ case Op::F64Floor:
+ case Op::F64Trunc:
+ case Op::F64Nearest:
+ case Op::F64Sqrt:
+ case Op::I32BitNot:
+ case Op::I32Abs:
+ case Op::F64Sin:
+ case Op::F64Cos:
+ case Op::F64Tan:
+ case Op::F64Asin:
+ case Op::F64Acos:
+ case Op::F64Atan:
+ case Op::F64Exp:
+ case Op::F64Log:
+ case Op::I32Neg:
+ case Op::I8x16neg:
+ case Op::I8x16not:
+ case Op::I16x8neg:
+ case Op::I16x8not:
+ case Op::I32x4neg:
+ case Op::I32x4not:
+ case Op::F32x4neg:
+ case Op::F32x4sqrt:
+ case Op::F32x4abs:
+ case Op::F32x4reciprocalApproximation:
+ case Op::F32x4reciprocalSqrtApproximation:
+ case Op::B8x16not:
+ case Op::B16x8not:
+ case Op::B32x4not:
+ return OpKind::Unary;
+ case Op::I32Add:
+ case Op::I32Sub:
+ case Op::I32Mul:
+ case Op::I32DivS:
+ case Op::I32DivU:
+ case Op::I32RemS:
+ case Op::I32RemU:
+ case Op::I32And:
+ case Op::I32Or:
+ case Op::I32Xor:
+ case Op::I32Shl:
+ case Op::I32ShrS:
+ case Op::I32ShrU:
+ case Op::I32Rotl:
+ case Op::I32Rotr:
+ case Op::I64Add:
+ case Op::I64Sub:
+ case Op::I64Mul:
+ case Op::I64DivS:
+ case Op::I64DivU:
+ case Op::I64RemS:
+ case Op::I64RemU:
+ case Op::I64And:
+ case Op::I64Or:
+ case Op::I64Xor:
+ case Op::I64Shl:
+ case Op::I64ShrS:
+ case Op::I64ShrU:
+ case Op::I64Rotl:
+ case Op::I64Rotr:
+ case Op::F32Add:
+ case Op::F32Sub:
+ case Op::F32Mul:
+ case Op::F32Div:
+ case Op::F32Min:
+ case Op::F32Max:
+ case Op::F32CopySign:
+ case Op::F64Add:
+ case Op::F64Sub:
+ case Op::F64Mul:
+ case Op::F64Div:
+ case Op::F64Min:
+ case Op::F64Max:
+ case Op::F64CopySign:
+ case Op::I32Min:
+ case Op::I32Max:
+ case Op::F64Mod:
+ case Op::F64Pow:
+ case Op::F64Atan2:
+ case Op::I8x16add:
+ case Op::I8x16sub:
+ case Op::I8x16mul:
+ case Op::I8x16addSaturate:
+ case Op::I8x16subSaturate:
+ case Op::I8x16addSaturateU:
+ case Op::I8x16subSaturateU:
+ case Op::I8x16and:
+ case Op::I8x16or:
+ case Op::I8x16xor:
+ case Op::I16x8add:
+ case Op::I16x8sub:
+ case Op::I16x8mul:
+ case Op::I16x8addSaturate:
+ case Op::I16x8subSaturate:
+ case Op::I16x8addSaturateU:
+ case Op::I16x8subSaturateU:
+ case Op::I16x8and:
+ case Op::I16x8or:
+ case Op::I16x8xor:
+ case Op::I32x4add:
+ case Op::I32x4sub:
+ case Op::I32x4mul:
+ case Op::I32x4and:
+ case Op::I32x4or:
+ case Op::I32x4xor:
+ case Op::F32x4add:
+ case Op::F32x4sub:
+ case Op::F32x4mul:
+ case Op::F32x4div:
+ case Op::F32x4min:
+ case Op::F32x4max:
+ case Op::F32x4minNum:
+ case Op::F32x4maxNum:
+ case Op::B8x16and:
+ case Op::B8x16or:
+ case Op::B8x16xor:
+ case Op::B16x8and:
+ case Op::B16x8or:
+ case Op::B16x8xor:
+ case Op::B32x4and:
+ case Op::B32x4or:
+ case Op::B32x4xor:
+ return OpKind::Binary;
+ case Op::I32Eq:
+ case Op::I32Ne:
+ case Op::I32LtS:
+ case Op::I32LtU:
+ case Op::I32LeS:
+ case Op::I32LeU:
+ case Op::I32GtS:
+ case Op::I32GtU:
+ case Op::I32GeS:
+ case Op::I32GeU:
+ case Op::I64Eq:
+ case Op::I64Ne:
+ case Op::I64LtS:
+ case Op::I64LtU:
+ case Op::I64LeS:
+ case Op::I64LeU:
+ case Op::I64GtS:
+ case Op::I64GtU:
+ case Op::I64GeS:
+ case Op::I64GeU:
+ case Op::F32Eq:
+ case Op::F32Ne:
+ case Op::F32Lt:
+ case Op::F32Le:
+ case Op::F32Gt:
+ case Op::F32Ge:
+ case Op::F64Eq:
+ case Op::F64Ne:
+ case Op::F64Lt:
+ case Op::F64Le:
+ case Op::F64Gt:
+ case Op::F64Ge:
+ return OpKind::Comparison;
+ case Op::I32Eqz:
+ case Op::I32WrapI64:
+ case Op::I32TruncSF32:
+ case Op::I32TruncUF32:
+ case Op::I32ReinterpretF32:
+ case Op::I32TruncSF64:
+ case Op::I32TruncUF64:
+ case Op::I64ExtendSI32:
+ case Op::I64ExtendUI32:
+ case Op::I64TruncSF32:
+ case Op::I64TruncUF32:
+ case Op::I64TruncSF64:
+ case Op::I64TruncUF64:
+ case Op::I64ReinterpretF64:
+ case Op::I64Eqz:
+ case Op::F32ConvertSI32:
+ case Op::F32ConvertUI32:
+ case Op::F32ReinterpretI32:
+ case Op::F32ConvertSI64:
+ case Op::F32ConvertUI64:
+ case Op::F32DemoteF64:
+ case Op::F64ConvertSI32:
+ case Op::F64ConvertUI32:
+ case Op::F64ConvertSI64:
+ case Op::F64ConvertUI64:
+ case Op::F64ReinterpretI64:
+ case Op::F64PromoteF32:
+ case Op::I32x4fromFloat32x4:
+ case Op::I32x4fromFloat32x4U:
+ case Op::F32x4fromInt32x4:
+ case Op::F32x4fromUint32x4:
+ case Op::I32x4fromFloat32x4Bits:
+ case Op::I32x4fromInt8x16Bits:
+ case Op::I32x4fromInt16x8Bits:
+ case Op::I16x8fromInt8x16Bits:
+ case Op::I16x8fromInt32x4Bits:
+ case Op::I16x8fromFloat32x4Bits:
+ case Op::I8x16fromInt16x8Bits:
+ case Op::I8x16fromInt32x4Bits:
+ case Op::I8x16fromFloat32x4Bits:
+ case Op::F32x4fromInt8x16Bits:
+ case Op::F32x4fromInt16x8Bits:
+ case Op::F32x4fromInt32x4Bits:
+ return OpKind::Conversion;
+ case Op::I32Load8S:
+ case Op::I32Load8U:
+ case Op::I32Load16S:
+ case Op::I32Load16U:
+ case Op::I64Load8S:
+ case Op::I64Load8U:
+ case Op::I64Load16S:
+ case Op::I64Load16U:
+ case Op::I64Load32S:
+ case Op::I64Load32U:
+ case Op::I32Load:
+ case Op::I64Load:
+ case Op::F32Load:
+ case Op::F64Load:
+ case Op::I8x16load:
+ case Op::I16x8load:
+ case Op::I32x4load:
+ case Op::I32x4load1:
+ case Op::I32x4load2:
+ case Op::I32x4load3:
+ case Op::F32x4load:
+ case Op::F32x4load1:
+ case Op::F32x4load2:
+ case Op::F32x4load3:
+ return OpKind::Load;
+ case Op::I32Store8:
+ case Op::I32Store16:
+ case Op::I64Store8:
+ case Op::I64Store16:
+ case Op::I64Store32:
+ case Op::I32Store:
+ case Op::I64Store:
+ case Op::F32Store:
+ case Op::F64Store:
+ return OpKind::Store;
+ case Op::I32TeeStore8:
+ case Op::I32TeeStore16:
+ case Op::I64TeeStore8:
+ case Op::I64TeeStore16:
+ case Op::I64TeeStore32:
+ case Op::I32TeeStore:
+ case Op::I64TeeStore:
+ case Op::F32TeeStore:
+ case Op::F64TeeStore:
+ case Op::F32TeeStoreF64:
+ case Op::F64TeeStoreF32:
+ case Op::I8x16store:
+ case Op::I16x8store:
+ case Op::I32x4store:
+ case Op::I32x4store1:
+ case Op::I32x4store2:
+ case Op::I32x4store3:
+ case Op::F32x4store:
+ case Op::F32x4store1:
+ case Op::F32x4store2:
+ case Op::F32x4store3:
+ return OpKind::TeeStore;
+ case Op::Select:
+ return OpKind::Select;
+ case Op::GetLocal:
+ return OpKind::GetLocal;
+ case Op::SetLocal:
+ return OpKind::SetLocal;
+ case Op::TeeLocal:
+ return OpKind::TeeLocal;
+ case Op::GetGlobal:
+ return OpKind::GetGlobal;
+ case Op::SetGlobal:
+ return OpKind::SetGlobal;
+ case Op::TeeGlobal:
+ return OpKind::TeeGlobal;
+ case Op::Call:
+ return OpKind::Call;
+ case Op::CallIndirect:
+ return OpKind::CallIndirect;
+ case Op::OldCallIndirect:
+ return OpKind::OldCallIndirect;
+ case Op::Return:
+ case Op::Limit:
+ // Accept Limit, for use in decoding the end of a function after the body.
+ return OpKind::Return;
+ case Op::If:
+ return OpKind::If;
+ case Op::Else:
+ return OpKind::Else;
+ case Op::End:
+ return OpKind::End;
+ case Op::I32AtomicsLoad:
+ return OpKind::AtomicLoad;
+ case Op::I32AtomicsStore:
+ return OpKind::AtomicStore;
+ case Op::I32AtomicsBinOp:
+ return OpKind::AtomicBinOp;
+ case Op::I32AtomicsCompareExchange:
+ return OpKind::AtomicCompareExchange;
+ case Op::I32AtomicsExchange:
+ return OpKind::AtomicExchange;
+ case Op::I8x16extractLane:
+ case Op::I8x16extractLaneU:
+ case Op::I16x8extractLane:
+ case Op::I16x8extractLaneU:
+ case Op::I32x4extractLane:
+ case Op::F32x4extractLane:
+ case Op::B8x16extractLane:
+ case Op::B16x8extractLane:
+ case Op::B32x4extractLane:
+ return OpKind::ExtractLane;
+ case Op::I8x16replaceLane:
+ case Op::I16x8replaceLane:
+ case Op::I32x4replaceLane:
+ case Op::F32x4replaceLane:
+ case Op::B8x16replaceLane:
+ case Op::B16x8replaceLane:
+ case Op::B32x4replaceLane:
+ return OpKind::ReplaceLane;
+ case Op::I8x16swizzle:
+ case Op::I16x8swizzle:
+ case Op::I32x4swizzle:
+ case Op::F32x4swizzle:
+ return OpKind::Swizzle;
+ case Op::I8x16shuffle:
+ case Op::I16x8shuffle:
+ case Op::I32x4shuffle:
+ case Op::F32x4shuffle:
+ return OpKind::Shuffle;
+ case Op::I16x8check:
+ case Op::I16x8splat:
+ case Op::I32x4check:
+ case Op::I32x4splat:
+ case Op::I8x16check:
+ case Op::I8x16splat:
+ case Op::F32x4check:
+ case Op::F32x4splat:
+ case Op::B16x8check:
+ case Op::B16x8splat:
+ case Op::B32x4check:
+ case Op::B32x4splat:
+ case Op::B8x16check:
+ case Op::B8x16splat:
+ return OpKind::Splat;
+ case Op::I8x16select:
+ case Op::I16x8select:
+ case Op::I32x4select:
+ case Op::F32x4select:
+ return OpKind::SimdSelect;
+ case Op::I8x16Constructor:
+ case Op::I16x8Constructor:
+ case Op::I32x4Constructor:
+ case Op::F32x4Constructor:
+ case Op::B8x16Constructor:
+ case Op::B16x8Constructor:
+ case Op::B32x4Constructor:
+ return OpKind::SimdCtor;
+ case Op::B8x16allTrue:
+ case Op::B8x16anyTrue:
+ case Op::B16x8allTrue:
+ case Op::B16x8anyTrue:
+ case Op::B32x4allTrue:
+ case Op::B32x4anyTrue:
+ return OpKind::SimdBooleanReduction;
+ case Op::I8x16shiftLeftByScalar:
+ case Op::I8x16shiftRightByScalar:
+ case Op::I8x16shiftRightByScalarU:
+ case Op::I16x8shiftLeftByScalar:
+ case Op::I16x8shiftRightByScalar:
+ case Op::I16x8shiftRightByScalarU:
+ case Op::I32x4shiftLeftByScalar:
+ case Op::I32x4shiftRightByScalar:
+ case Op::I32x4shiftRightByScalarU:
+ return OpKind::SimdShiftByScalar;
+ case Op::I8x16equal:
+ case Op::I8x16notEqual:
+ case Op::I8x16greaterThan:
+ case Op::I8x16greaterThanOrEqual:
+ case Op::I8x16lessThan:
+ case Op::I8x16lessThanOrEqual:
+ case Op::I8x16greaterThanU:
+ case Op::I8x16greaterThanOrEqualU:
+ case Op::I8x16lessThanU:
+ case Op::I8x16lessThanOrEqualU:
+ case Op::I16x8equal:
+ case Op::I16x8notEqual:
+ case Op::I16x8greaterThan:
+ case Op::I16x8greaterThanOrEqual:
+ case Op::I16x8lessThan:
+ case Op::I16x8lessThanOrEqual:
+ case Op::I16x8greaterThanU:
+ case Op::I16x8greaterThanOrEqualU:
+ case Op::I16x8lessThanU:
+ case Op::I16x8lessThanOrEqualU:
+ case Op::I32x4equal:
+ case Op::I32x4notEqual:
+ case Op::I32x4greaterThan:
+ case Op::I32x4greaterThanOrEqual:
+ case Op::I32x4lessThan:
+ case Op::I32x4lessThanOrEqual:
+ case Op::I32x4greaterThanU:
+ case Op::I32x4greaterThanOrEqualU:
+ case Op::I32x4lessThanU:
+ case Op::I32x4lessThanOrEqualU:
+ case Op::F32x4equal:
+ case Op::F32x4notEqual:
+ case Op::F32x4greaterThan:
+ case Op::F32x4greaterThanOrEqual:
+ case Op::F32x4lessThan:
+ case Op::F32x4lessThanOrEqual:
+ return OpKind::SimdComparison;
+ case Op::CurrentMemory:
+ return OpKind::CurrentMemory;
+ case Op::GrowMemory:
+ return OpKind::GrowMemory;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
+}
+#endif
diff --git a/js/src/wasm/WasmBinaryIterator.h b/js/src/wasm/WasmBinaryIterator.h
new file mode 100644
index 0000000000..76e0c28750
--- /dev/null
+++ b/js/src/wasm/WasmBinaryIterator.h
@@ -0,0 +1,2246 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_iterator_h
+#define wasm_binary_iterator_h
+
+#include "mozilla/Poison.h"
+
+#include "jsprf.h"
+
+#include "jit/AtomicOp.h"
+#include "wasm/WasmBinaryFormat.h"
+
+namespace js {
+namespace wasm {
+
+// The kind of a control-flow stack item.
+enum class LabelKind : uint8_t {
+ Block,
+ Loop,
+ Then,
+ UnreachableThen, // like Then, but not reachable
+ Else
+};
+
+#ifdef DEBUG
+// Families of opcodes that share a signature and validation logic.
+enum class OpKind {
+ Block,
+ Loop,
+ Unreachable,
+ Drop,
+ I32,
+ I64,
+ F32,
+ F64,
+ I8x16,
+ I16x8,
+ I32x4,
+ F32x4,
+ B8x16,
+ B16x8,
+ B32x4,
+ Br,
+ BrIf,
+ BrTable,
+ Nop,
+ Nullary,
+ Unary,
+ Binary,
+ Comparison,
+ Conversion,
+ Load,
+ Store,
+ TeeStore,
+ CurrentMemory,
+ GrowMemory,
+ Select,
+ GetLocal,
+ SetLocal,
+ TeeLocal,
+ GetGlobal,
+ SetGlobal,
+ TeeGlobal,
+ Call,
+ CallIndirect,
+ OldCallIndirect,
+ Return,
+ If,
+ Else,
+ End,
+ AtomicLoad,
+ AtomicStore,
+ AtomicBinOp,
+ AtomicCompareExchange,
+ AtomicExchange,
+ ExtractLane,
+ ReplaceLane,
+ Swizzle,
+ Shuffle,
+ Splat,
+ SimdSelect,
+ SimdCtor,
+ SimdBooleanReduction,
+ SimdShiftByScalar,
+ SimdComparison,
+};
+
+// Return the OpKind for a given Op. This is used for sanity-checking that
+// API users use the correct read function for a given Op.
+OpKind
+Classify(Op op);
+#endif
+
+// Common fields for linear memory access.
+template <typename Value>
+struct LinearMemoryAddress
+{
+ Value base;
+ uint32_t offset;
+ uint32_t align;
+
+ LinearMemoryAddress()
+ {}
+ LinearMemoryAddress(Value base, uint32_t offset, uint32_t align)
+ : base(base), offset(offset), align(align)
+ {}
+};
+
+template <typename ControlItem>
+class ControlStackEntry
+{
+ LabelKind kind_;
+ bool reachable_;
+ ExprType type_;
+ size_t valueStackStart_;
+ ControlItem controlItem_;
+
+ public:
+ ControlStackEntry(LabelKind kind, ExprType type, bool reachable, size_t valueStackStart)
+ : kind_(kind), reachable_(reachable), type_(type), valueStackStart_(valueStackStart),
+ controlItem_()
+ {
+ MOZ_ASSERT(type != ExprType::Limit);
+ }
+
+ LabelKind kind() const { return kind_; }
+ ExprType type() const { return type_; }
+ bool reachable() const { return reachable_; }
+ size_t valueStackStart() const { return valueStackStart_; }
+ ControlItem& controlItem() { return controlItem_; }
+
+ void setReachable() { reachable_ = true; }
+
+ void switchToElse(bool reachable) {
+ MOZ_ASSERT(kind_ == LabelKind::Then || kind_ == LabelKind::UnreachableThen);
+ reachable_ = reachable;
+ kind_ = LabelKind::Else;
+ controlItem_ = ControlItem();
+ }
+};
+
+// Specialization for when there is no additional data needed.
+template <>
+class ControlStackEntry<Nothing>
+{
+ LabelKind kind_;
+ bool reachable_;
+ ExprType type_;
+ size_t valueStackStart_;
+
+ public:
+ ControlStackEntry(LabelKind kind, ExprType type, bool reachable, size_t valueStackStart)
+ : kind_(kind), reachable_(reachable), type_(type), valueStackStart_(valueStackStart)
+ {
+ MOZ_ASSERT(type != ExprType::Limit);
+ }
+
+ LabelKind kind() const { return kind_; }
+ ExprType type() const { return type_; }
+ bool reachable() const { return reachable_; }
+ size_t valueStackStart() const { return valueStackStart_; }
+ Nothing controlItem() { return Nothing(); }
+
+ void setReachable() { reachable_ = true; }
+
+ void switchToElse(bool reachable) {
+ MOZ_ASSERT(kind_ == LabelKind::Then || kind_ == LabelKind::UnreachableThen);
+ reachable_ = reachable;
+ kind_ = LabelKind::Else;
+ }
+};
+
+template <typename Value>
+class TypeAndValue
+{
+ ValType type_;
+ Value value_;
+
+ public:
+ TypeAndValue() : type_(ValType(TypeCode::Limit)), value_() {}
+ explicit TypeAndValue(ValType type)
+ : type_(type), value_()
+ {}
+ TypeAndValue(ValType type, Value value)
+ : type_(type), value_(value)
+ {}
+ ValType type() const {
+ return type_;
+ }
+ Value value() const {
+ return value_;
+ }
+ void setValue(Value value) {
+ value_ = value;
+ }
+};
+
+// Specialization for when there is no additional data needed.
+template <>
+class TypeAndValue<Nothing>
+{
+ ValType type_;
+
+ public:
+ TypeAndValue() : type_(ValType(TypeCode::Limit)) {}
+ explicit TypeAndValue(ValType type) : type_(type) {}
+
+ TypeAndValue(ValType type, Nothing value)
+ : type_(type)
+ {}
+
+ ValType type() const { return type_; }
+ Nothing value() const { return Nothing(); }
+ void setValue(Nothing value) {}
+};
+
+// A policy class for configuring OpIter. Clients can use this as a
+// base class, and override the behavior as needed.
+struct OpIterPolicy
+{
+ // Should the iterator perform validation, such as type checking and
+ // validity checking?
+ static const bool Validate = false;
+
+ // Should the iterator produce output values?
+ static const bool Output = false;
+
+ // These members allow clients to add additional information to the value
+ // and control stacks, respectively. Using Nothing means that no additional
+ // field is added.
+ typedef Nothing Value;
+ typedef Nothing ControlItem;
+};
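+
+// For example, a validating client could derive its own policy along these
+// lines (an illustrative sketch; the name is arbitrary):
+//
+//   struct ValidatingPolicy : OpIterPolicy
+//   {
+//       static const bool Validate = true;
+//   };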
+
+// An iterator over the bytes of a function body. It performs validation
+// (if Policy::Validate is true) and unpacks the data into a usable form.
+//
+// The MOZ_STACK_CLASS attribute here is because of the use of DebugOnly.
+// There's otherwise nothing inherent in this class which would require
+// it to be used on the stack.
+template <typename Policy>
+class MOZ_STACK_CLASS OpIter : private Policy
+{
+ static const bool Validate = Policy::Validate;
+ static const bool Output = Policy::Output;
+ typedef typename Policy::Value Value;
+ typedef typename Policy::ControlItem ControlItem;
+
+ Decoder& d_;
+ const size_t offsetInModule_;
+
+ Vector<TypeAndValue<Value>, 8, SystemAllocPolicy> valueStack_;
+ Vector<ControlStackEntry<ControlItem>, 8, SystemAllocPolicy> controlStack_;
+ bool reachable_;
+
+ DebugOnly<Op> op_;
+ size_t offsetOfExpr_;
+
+ MOZ_MUST_USE bool readFixedU8(uint8_t* out) {
+ if (Validate)
+ return d_.readFixedU8(out);
+ *out = d_.uncheckedReadFixedU8();
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedU32(uint32_t* out) {
+ if (Validate)
+ return d_.readFixedU32(out);
+ *out = d_.uncheckedReadFixedU32();
+ return true;
+ }
+ MOZ_MUST_USE bool readVarS32(int32_t* out) {
+ if (Validate)
+ return d_.readVarS32(out);
+ *out = d_.uncheckedReadVarS32();
+ return true;
+ }
+ MOZ_MUST_USE bool readVarU32(uint32_t* out) {
+ if (Validate)
+ return d_.readVarU32(out);
+ *out = d_.uncheckedReadVarU32();
+ return true;
+ }
+ MOZ_MUST_USE bool readVarS64(int64_t* out) {
+ if (Validate)
+ return d_.readVarS64(out);
+ *out = d_.uncheckedReadVarS64();
+ return true;
+ }
+ MOZ_MUST_USE bool readVarU64(uint64_t* out) {
+ if (Validate)
+ return d_.readVarU64(out);
+ *out = d_.uncheckedReadVarU64();
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedF32(RawF32* out) {
+ if (Validate)
+ return d_.readFixedF32(out);
+ *out = d_.uncheckedReadFixedF32();
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedF64(RawF64* out) {
+ if (Validate)
+ return d_.readFixedF64(out);
+ *out = d_.uncheckedReadFixedF64();
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedI8x16(I8x16* out) {
+ if (Validate)
+ return d_.readFixedI8x16(out);
+ d_.uncheckedReadFixedI8x16(out);
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedI16x8(I16x8* out) {
+ if (Validate)
+ return d_.readFixedI16x8(out);
+ d_.uncheckedReadFixedI16x8(out);
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedI32x4(I32x4* out) {
+ if (Validate)
+ return d_.readFixedI32x4(out);
+ d_.uncheckedReadFixedI32x4(out);
+ return true;
+ }
+ MOZ_MUST_USE bool readFixedF32x4(F32x4* out) {
+ if (Validate)
+ return d_.readFixedF32x4(out);
+ d_.uncheckedReadFixedF32x4(out);
+ return true;
+ }
+
+ MOZ_MUST_USE bool readAtomicViewType(Scalar::Type* viewType) {
+ uint8_t x;
+ if (!readFixedU8(&x))
+ return fail("unable to read atomic view");
+ if (Validate && x >= Scalar::MaxTypedArrayViewType)
+ return fail("invalid atomic view type");
+ *viewType = Scalar::Type(x);
+ return true;
+ }
+
+ MOZ_MUST_USE bool readAtomicBinOpOp(jit::AtomicOp* op) {
+ uint8_t x;
+ if (!readFixedU8(&x))
+ return fail("unable to read atomic opcode");
+ if (Validate) {
+ switch (x) {
+ case jit::AtomicFetchAddOp:
+ case jit::AtomicFetchSubOp:
+ case jit::AtomicFetchAndOp:
+ case jit::AtomicFetchOrOp:
+ case jit::AtomicFetchXorOp:
+ break;
+ default:
+ return fail("unrecognized atomic binop");
+ }
+ }
+ *op = jit::AtomicOp(x);
+ return true;
+ }
+
+ MOZ_MUST_USE bool readLinearMemoryAddress(uint32_t byteSize, LinearMemoryAddress<Value>* addr);
+ MOZ_MUST_USE bool readBlockType(ExprType* expr);
+
+ MOZ_MUST_USE bool typeMismatch(ExprType actual, ExprType expected) MOZ_COLD;
+ MOZ_MUST_USE bool checkType(ValType actual, ValType expected);
+ MOZ_MUST_USE bool checkType(ExprType actual, ExprType expected);
+
+ MOZ_MUST_USE bool pushControl(LabelKind kind, ExprType type, bool reachable);
+ MOZ_MUST_USE bool mergeControl(LabelKind* kind, ExprType* type, Value* value);
+ MOZ_MUST_USE bool popControl(LabelKind* kind, ExprType* type, Value* value);
+
+ MOZ_MUST_USE bool push(ValType t) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ return valueStack_.emplaceBack(t);
+ }
+ MOZ_MUST_USE bool push(TypeAndValue<Value> tv) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ return valueStack_.append(tv);
+ }
+ void infalliblePush(ValType t) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return;
+ valueStack_.infallibleEmplaceBack(t);
+ }
+ void infalliblePush(TypeAndValue<Value> tv) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return;
+ valueStack_.infallibleAppend(tv);
+ }
+
+ // Test whether reading the top of the value stack is currently valid.
+ MOZ_MUST_USE bool checkTop() {
+ MOZ_ASSERT(reachable_);
+ if (Validate && valueStack_.length() <= controlStack_.back().valueStackStart()) {
+ if (valueStack_.empty())
+ return fail("popping value from empty stack");
+ return fail("popping value from outside block");
+ }
+ return true;
+ }
+
+ // Pop the top of the value stack.
+ MOZ_MUST_USE bool pop(TypeAndValue<Value>* tv) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ if (!checkTop())
+ return false;
+ *tv = valueStack_.popCopy();
+ return true;
+ }
+
+ // Pop the top of the value stack and check that it has the given type.
+ MOZ_MUST_USE bool popWithType(ValType expectedType, Value* value) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ if (!checkTop())
+ return false;
+ TypeAndValue<Value> tv = valueStack_.popCopy();
+ if (!checkType(tv.type(), expectedType))
+ return false;
+ if (Output)
+ *value = tv.value();
+ return true;
+ }
+
+ // Pop the top of the value stack and discard the result.
+ MOZ_MUST_USE bool pop() {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ if (!checkTop())
+ return false;
+ valueStack_.popBack();
+ return true;
+ }
+
+ // Read the top of the value stack (without popping it).
+ MOZ_MUST_USE bool top(TypeAndValue<Value>* tv) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ if (!checkTop())
+ return false;
+ *tv = valueStack_.back();
+ return true;
+ }
+
+ // Read the top of the value stack (without popping it) and check that it
+ // has the given type.
+ MOZ_MUST_USE bool topWithType(ValType expectedType, Value* value) {
+ if (MOZ_UNLIKELY(!reachable_))
+ return true;
+ if (!checkTop())
+ return false;
+ TypeAndValue<Value>& tv = valueStack_.back();
+ if (!checkType(tv.type(), expectedType))
+ return false;
+ if (Output)
+ *value = tv.value();
+ return true;
+ }
+
+ // Read the value stack entry at depth |index|.
+ MOZ_MUST_USE bool peek(uint32_t index, TypeAndValue<Value>* tv) {
+ MOZ_ASSERT(reachable_);
+ if (Validate && valueStack_.length() - controlStack_.back().valueStackStart() < index)
+ return fail("peeking at value from outside block");
+ *tv = valueStack_[valueStack_.length() - index];
+ return true;
+ }
+
+ bool getControl(uint32_t relativeDepth, ControlStackEntry<ControlItem>** controlEntry) {
+ if (Validate && relativeDepth >= controlStack_.length())
+ return fail("branch depth exceeds current nesting level");
+
+ *controlEntry = &controlStack_[controlStack_.length() - 1 - relativeDepth];
+ return true;
+ }
+
+ void enterUnreachableCode() {
+ valueStack_.shrinkTo(controlStack_.back().valueStackStart());
+ reachable_ = false;
+ }
+
+ bool checkBrValue(uint32_t relativeDepth, ExprType* type, Value* value);
+ bool checkBrIfValues(uint32_t relativeDepth, Value* condition, ExprType* type, Value* value);
+
+ public:
+ explicit OpIter(Decoder& decoder, uint32_t offsetInModule = 0)
+ : d_(decoder), offsetInModule_(offsetInModule), reachable_(true),
+ op_(Op::Limit), offsetOfExpr_(0)
+ {}
+
+ // Return the decoding byte offset.
+ uint32_t currentOffset() const { return d_.currentOffset(); }
+
+ // Return the offset within the entire module of the last-read Op.
+ TrapOffset trapOffset() const {
+ return TrapOffset(offsetInModule_ + offsetOfExpr_);
+ }
+
+ // Test whether the iterator has reached the end of the buffer.
+ bool done() const { return d_.done(); }
+
+ // Report a general failure.
+ MOZ_MUST_USE bool fail(const char* msg) MOZ_COLD;
+
+ // Report an unimplemented feature.
+ MOZ_MUST_USE bool notYetImplemented(const char* what) MOZ_COLD;
+
+ // Report an unrecognized opcode.
+ MOZ_MUST_USE bool unrecognizedOpcode(uint32_t expr) MOZ_COLD;
+
+ // Test whether the iterator is currently in "reachable" code.
+ bool inReachableCode() const { return reachable_; }
+
+ // ------------------------------------------------------------------------
+ // Decoding and validation interface.
+
+ MOZ_MUST_USE bool readOp(uint16_t* op);
+ MOZ_MUST_USE bool readFunctionStart(ExprType ret);
+ MOZ_MUST_USE bool readFunctionEnd();
+ MOZ_MUST_USE bool readReturn(Value* value);
+ MOZ_MUST_USE bool readBlock();
+ MOZ_MUST_USE bool readLoop();
+ MOZ_MUST_USE bool readIf(Value* condition);
+ MOZ_MUST_USE bool readElse(ExprType* thenType, Value* thenValue);
+ MOZ_MUST_USE bool readEnd(LabelKind* kind, ExprType* type, Value* value);
+ MOZ_MUST_USE bool readBr(uint32_t* relativeDepth, ExprType* type, Value* value);
+ MOZ_MUST_USE bool readBrIf(uint32_t* relativeDepth, ExprType* type,
+ Value* value, Value* condition);
+ MOZ_MUST_USE bool readBrTable(uint32_t* tableLength, ExprType* type,
+ Value* value, Value* index);
+ MOZ_MUST_USE bool readBrTableEntry(ExprType* type, Value* value, uint32_t* depth);
+ MOZ_MUST_USE bool readBrTableDefault(ExprType* type, Value* value, uint32_t* depth);
+ MOZ_MUST_USE bool readUnreachable();
+ MOZ_MUST_USE bool readDrop();
+ MOZ_MUST_USE bool readUnary(ValType operandType, Value* input);
+ MOZ_MUST_USE bool readConversion(ValType operandType, ValType resultType, Value* input);
+ MOZ_MUST_USE bool readBinary(ValType operandType, Value* lhs, Value* rhs);
+ MOZ_MUST_USE bool readComparison(ValType operandType, Value* lhs, Value* rhs);
+ MOZ_MUST_USE bool readLoad(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr);
+ MOZ_MUST_USE bool readStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr, Value* value);
+ MOZ_MUST_USE bool readTeeStore(ValType resultType, uint32_t byteSize,
+ LinearMemoryAddress<Value>* addr, Value* value);
+ MOZ_MUST_USE bool readNop();
+ MOZ_MUST_USE bool readCurrentMemory();
+ MOZ_MUST_USE bool readGrowMemory(Value* input);
+ MOZ_MUST_USE bool readSelect(ValType* type,
+ Value* trueValue, Value* falseValue, Value* condition);
+ MOZ_MUST_USE bool readGetLocal(const ValTypeVector& locals, uint32_t* id);
+ MOZ_MUST_USE bool readSetLocal(const ValTypeVector& locals, uint32_t* id, Value* value);
+ MOZ_MUST_USE bool readTeeLocal(const ValTypeVector& locals, uint32_t* id, Value* value);
+ MOZ_MUST_USE bool readGetGlobal(const GlobalDescVector& globals, uint32_t* id);
+ MOZ_MUST_USE bool readSetGlobal(const GlobalDescVector& globals, uint32_t* id, Value* value);
+ MOZ_MUST_USE bool readTeeGlobal(const GlobalDescVector& globals, uint32_t* id, Value* value);
+ MOZ_MUST_USE bool readI32Const(int32_t* i32);
+ MOZ_MUST_USE bool readI64Const(int64_t* i64);
+ MOZ_MUST_USE bool readF32Const(RawF32* f32);
+ MOZ_MUST_USE bool readF64Const(RawF64* f64);
+ MOZ_MUST_USE bool readI8x16Const(I8x16* i8x16);
+ MOZ_MUST_USE bool readI16x8Const(I16x8* i16x8);
+ MOZ_MUST_USE bool readI32x4Const(I32x4* i32x4);
+ MOZ_MUST_USE bool readF32x4Const(F32x4* f32x4);
+ MOZ_MUST_USE bool readB8x16Const(I8x16* i8x16);
+ MOZ_MUST_USE bool readB16x8Const(I16x8* i16x8);
+ MOZ_MUST_USE bool readB32x4Const(I32x4* i32x4);
+ MOZ_MUST_USE bool readCall(uint32_t* calleeIndex);
+ MOZ_MUST_USE bool readCallIndirect(uint32_t* sigIndex, Value* callee);
+ MOZ_MUST_USE bool readOldCallIndirect(uint32_t* sigIndex);
+ MOZ_MUST_USE bool readCallArg(ValType type, uint32_t numArgs, uint32_t argIndex, Value* arg);
+ MOZ_MUST_USE bool readCallArgsEnd(uint32_t numArgs);
+ MOZ_MUST_USE bool readOldCallIndirectCallee(Value* callee);
+ MOZ_MUST_USE bool readCallReturn(ExprType ret);
+ MOZ_MUST_USE bool readAtomicLoad(LinearMemoryAddress<Value>* addr,
+ Scalar::Type* viewType);
+ MOZ_MUST_USE bool readAtomicStore(LinearMemoryAddress<Value>* addr,
+ Scalar::Type* viewType,
+ Value* value);
+ MOZ_MUST_USE bool readAtomicBinOp(LinearMemoryAddress<Value>* addr,
+ Scalar::Type* viewType,
+ jit::AtomicOp* op,
+ Value* value);
+ MOZ_MUST_USE bool readAtomicCompareExchange(LinearMemoryAddress<Value>* addr,
+ Scalar::Type* viewType,
+ Value* oldValue,
+ Value* newValue);
+ MOZ_MUST_USE bool readAtomicExchange(LinearMemoryAddress<Value>* addr,
+ Scalar::Type* viewType,
+ Value* newValue);
+ MOZ_MUST_USE bool readSimdComparison(ValType simdType, Value* lhs,
+ Value* rhs);
+ MOZ_MUST_USE bool readSimdShiftByScalar(ValType simdType, Value* lhs,
+ Value* rhs);
+ MOZ_MUST_USE bool readSimdBooleanReduction(ValType simdType, Value* input);
+ MOZ_MUST_USE bool readExtractLane(ValType simdType, uint8_t* lane,
+ Value* vector);
+ MOZ_MUST_USE bool readReplaceLane(ValType simdType, uint8_t* lane,
+ Value* vector, Value* scalar);
+ MOZ_MUST_USE bool readSplat(ValType simdType, Value* scalar);
+ MOZ_MUST_USE bool readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector);
+ MOZ_MUST_USE bool readShuffle(ValType simdType, uint8_t (* lanes)[16],
+ Value* lhs, Value* rhs);
+ MOZ_MUST_USE bool readSimdSelect(ValType simdType, Value* trueValue,
+ Value* falseValue,
+ Value* condition);
+ MOZ_MUST_USE bool readSimdCtor();
+ MOZ_MUST_USE bool readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t argIndex,
+ Value* arg);
+ MOZ_MUST_USE bool readSimdCtorArgsEnd(uint32_t numElements);
+ MOZ_MUST_USE bool readSimdCtorReturn(ValType simdType);
+
+ // At a location where readOp is allowed, peek at the next opcode
+ // without consuming it or updating any internal state.
+ // Never fails: returns uint16_t(Op::Limit) if it can't read.
+ uint16_t peekOp();
+
+ // ------------------------------------------------------------------------
+ // Stack management.
+
+ // Set the result value of the current top-of-value-stack expression.
+ void setResult(Value value) {
+ if (MOZ_LIKELY(reachable_))
+ valueStack_.back().setValue(value);
+ }
+
+ // Return the result value of the current top-of-value-stack expression.
+ Value getResult() {
+ MOZ_ASSERT(reachable_);
+ return valueStack_.back().value();
+ }
+
+ // Return a reference to the top of the control stack.
+ ControlItem& controlItem() {
+ return controlStack_.back().controlItem();
+ }
+
+ // Return the signature of the top of the control stack.
+ ExprType controlType() {
+ return controlStack_.back().type();
+ }
+
+ // Test whether the control-stack is empty, meaning we've consumed the final
+ // end of the function body.
+ bool controlStackEmpty() const {
+ return controlStack_.empty();
+ }
+};
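+
+// A minimal sketch of how a compiler front end might drive OpIter (illustrative
+// only; MyPolicy, decoder, sig, and the op dispatch come from the client):
+//
+//   OpIter<MyPolicy> iter(decoder);
+//   if (!iter.readFunctionStart(sig.ret()))
+//       return false;
+//   while (!iter.controlStackEmpty()) {
+//       uint16_t op;
+//       if (!iter.readOp(&op))
+//           return false;
+//       // ...dispatch on op to the matching read* method above...
+//   }
+//   return iter.readFunctionEnd();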
+
+template <typename Policy>
+bool
+OpIter<Policy>::typeMismatch(ExprType actual, ExprType expected)
+{
+ MOZ_ASSERT(Validate);
+ MOZ_ASSERT(reachable_);
+
+ UniqueChars error(JS_smprintf("type mismatch: expression has type %s but expected %s",
+ ToCString(actual), ToCString(expected)));
+ if (!error)
+ return false;
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::checkType(ValType actual, ValType expected)
+{
+ return checkType(ToExprType(actual), ToExprType(expected));
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::checkType(ExprType actual, ExprType expected)
+{
+ MOZ_ASSERT(reachable_);
+
+ if (!Validate) {
+ MOZ_ASSERT(actual == expected, "type mismatch");
+ return true;
+ }
+
+ if (MOZ_LIKELY(actual == expected))
+ return true;
+
+ return typeMismatch(actual, expected);
+}
+
+template <typename Policy>
+bool
+OpIter<Policy>::notYetImplemented(const char* what)
+{
+ UniqueChars error(JS_smprintf("not yet implemented: %s", what));
+ if (!error)
+ return false;
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+bool
+OpIter<Policy>::unrecognizedOpcode(uint32_t expr)
+{
+ UniqueChars error(JS_smprintf("unrecognized opcode: %x", expr));
+ if (!error)
+ return false;
+
+ return fail(error.get());
+}
+
+template <typename Policy>
+bool
+OpIter<Policy>::fail(const char* msg)
+{
+ return d_.fail("%s", msg);
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::pushControl(LabelKind kind, ExprType type, bool reachable)
+{
+ return controlStack_.emplaceBack(kind, type, reachable, valueStack_.length());
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::mergeControl(LabelKind* kind, ExprType* type, Value* value)
+{
+ MOZ_ASSERT(!controlStack_.empty());
+
+ ControlStackEntry<ControlItem>& controlItem = controlStack_.back();
+ *kind = controlItem.kind();
+
+ if (reachable_) {
+ // Unlike branching, exiting a scope via fallthrough does not implicitly
+ // pop excess items on the stack.
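+ // Any extra values beyond the block's single optional result are
+ // therefore a validation error, reported just below.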
+ size_t valueStackStart = controlItem.valueStackStart();
+ size_t valueStackLength = valueStack_.length();
+ MOZ_ASSERT(valueStackLength >= valueStackStart);
+ if (valueStackLength == valueStackStart) {
+ *type = ExprType::Void;
+ if (!checkType(ExprType::Void, controlItem.type()))
+ return false;
+ } else {
+ *type = controlItem.type();
+ if (Validate && valueStackLength - valueStackStart > (IsVoid(*type) ? 0u : 1u))
+ return fail("unused values not explicitly dropped by end of block");
+ if (!topWithType(NonVoidToValType(*type), value))
+ return false;
+ }
+ } else {
+ if (*kind != LabelKind::Loop && controlItem.reachable()) {
+ // There was no fallthrough path, but there was some other reachable
+ // branch to the end.
+ reachable_ = true;
+ *type = controlItem.type();
+ if (!IsVoid(*type)) {
+ if (!push(NonVoidToValType(*type)))
+ return false;
+ }
+ } else {
+ // No fallthrough and no branch to the end either; we remain
+ // unreachable.
+ *type = ExprType::Void;
+ }
+ if (Output)
+ *value = Value();
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::popControl(LabelKind* kind, ExprType* type, Value* value)
+{
+ if (!mergeControl(kind, type, value))
+ return false;
+
+ if (*kind == LabelKind::Then) {
+ // A reachable If without an Else. Forbid a result value.
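+ // (If the condition were false, the missing else arm would push nothing,
+ // so a non-void result type could not be satisfied.)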
+ if (reachable_) {
+ if (Validate && !IsVoid(*type))
+ return fail("if without else with a result value");
+ }
+ reachable_ = true;
+ }
+
+ controlStack_.popBack();
+
+ if (!reachable_ && !controlStack_.empty())
+ valueStack_.shrinkTo(controlStack_.back().valueStackStart());
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBlockType(ExprType* type)
+{
+ uint8_t unchecked;
+ if (!d_.readBlockType(&unchecked))
+ return fail("unable to read block signature");
+
+ if (Validate) {
+ switch (unchecked) {
+ case uint8_t(ExprType::Void):
+ case uint8_t(ExprType::I32):
+ case uint8_t(ExprType::I64):
+ case uint8_t(ExprType::F32):
+ case uint8_t(ExprType::F64):
+ case uint8_t(ExprType::I8x16):
+ case uint8_t(ExprType::I16x8):
+ case uint8_t(ExprType::I32x4):
+ case uint8_t(ExprType::F32x4):
+ case uint8_t(ExprType::B8x16):
+ case uint8_t(ExprType::B16x8):
+ case uint8_t(ExprType::B32x4):
+ break;
+ default:
+ return fail("invalid inline block type");
+ }
+ }
+
+ *type = ExprType(unchecked);
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readOp(uint16_t* op)
+{
+ offsetOfExpr_ = d_.currentOffset();
+
+ if (Validate) {
+ if (MOZ_UNLIKELY(!d_.readOp(op)))
+ return fail("unable to read opcode");
+ } else {
+ *op = uint16_t(d_.uncheckedReadOp());
+ }
+
+ op_ = Op(*op); // debug-only
+
+ return true;
+}
+
+template <typename Policy>
+inline uint16_t
+OpIter<Policy>::peekOp()
+{
+ const uint8_t* pos = d_.currentPosition();
+ uint16_t op;
+
+ if (Validate) {
+ if (MOZ_UNLIKELY(!d_.readOp(&op)))
+ op = uint16_t(Op::Limit);
+ } else {
+ op = uint16_t(d_.uncheckedReadOp());
+ }
+
+ d_.rollbackPosition(pos);
+
+ return op;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readFunctionStart(ExprType ret)
+{
+ MOZ_ASSERT(valueStack_.empty());
+ MOZ_ASSERT(controlStack_.empty());
+ MOZ_ASSERT(Op(op_) == Op::Limit);
+ MOZ_ASSERT(reachable_);
+
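+ // The function body is modeled as an outermost block whose type is the
+ // return type; 'return' targets this entry (see readReturn).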
+ return pushControl(LabelKind::Block, ret, false);
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readFunctionEnd()
+{
+ if (Validate) {
+ if (!controlStack_.empty())
+ return fail("unbalanced function body control flow");
+ } else {
+ MOZ_ASSERT(controlStack_.empty());
+ }
+
+ op_ = Op::Limit;
+ valueStack_.clear();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readReturn(Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Return);
+
+ if (MOZ_LIKELY(reachable_)) {
+ ControlStackEntry<ControlItem>& controlItem = controlStack_[0];
+ MOZ_ASSERT(controlItem.kind() == LabelKind::Block);
+
+ controlItem.setReachable();
+
+ if (!IsVoid(controlItem.type())) {
+ if (!popWithType(NonVoidToValType(controlItem.type()), value))
+ return false;
+ }
+ }
+
+ enterUnreachableCode();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBlock()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Block);
+
+ ExprType type = ExprType::Limit;
+ if (!readBlockType(&type))
+ return false;
+
+ return pushControl(LabelKind::Block, type, false);
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readLoop()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Loop);
+
+ ExprType type = ExprType::Limit;
+ if (!readBlockType(&type))
+ return false;
+
+ return pushControl(LabelKind::Loop, type, reachable_);
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readIf(Value* condition)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::If);
+
+ ExprType type = ExprType::Limit;
+ if (!readBlockType(&type))
+ return false;
+
+ if (MOZ_LIKELY(reachable_)) {
+ if (!popWithType(ValType::I32, condition))
+ return false;
+
+ return pushControl(LabelKind::Then, type, false);
+ }
+
+ return pushControl(LabelKind::UnreachableThen, type, false);
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readElse(ExprType* thenType, Value* thenValue)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Else);
+
+ // Finish up the then arm.
+ ExprType type = ExprType::Limit;
+ LabelKind kind;
+ if (!mergeControl(&kind, &type, thenValue))
+ return false;
+
+ if (Output)
+ *thenType = type;
+
+ // Pop the old then value from the stack.
+ if (!IsVoid(type))
+ valueStack_.popBack();
+
+ if (Validate && kind != LabelKind::Then && kind != LabelKind::UnreachableThen)
+ return fail("else can only be used within an if");
+
+ // Switch to the else arm.
+ controlStack_.back().switchToElse(reachable_);
+
+ reachable_ = kind != LabelKind::UnreachableThen;
+
+ MOZ_ASSERT(valueStack_.length() == controlStack_.back().valueStackStart());
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readEnd(LabelKind* kind, ExprType* type, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::End);
+
+ LabelKind validateKind = static_cast<LabelKind>(-1);
+ ExprType validateType = ExprType::Limit;
+ if (!popControl(&validateKind, &validateType, value))
+ return false;
+
+ if (Output) {
+ *kind = validateKind;
+ *type = validateType;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::checkBrValue(uint32_t relativeDepth, ExprType* type, Value* value)
+{
+ if (MOZ_LIKELY(reachable_)) {
+ ControlStackEntry<ControlItem>* controlItem = nullptr;
+ if (!getControl(relativeDepth, &controlItem))
+ return false;
+
+ if (controlItem->kind() != LabelKind::Loop) {
+ controlItem->setReachable();
+
+ ExprType expectedType = controlItem->type();
+ if (Output)
+ *type = expectedType;
+
+ if (!IsVoid(expectedType))
+ return topWithType(NonVoidToValType(expectedType), value);
+ }
+ }
+
+ if (Output) {
+ *type = ExprType::Void;
+ *value = Value();
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBr(uint32_t* relativeDepth, ExprType* type, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Br);
+
+ uint32_t validateRelativeDepth;
+ if (!readVarU32(&validateRelativeDepth))
+ return fail("unable to read br depth");
+
+ if (!checkBrValue(validateRelativeDepth, type, value))
+ return false;
+
+ if (Output)
+ *relativeDepth = validateRelativeDepth;
+
+ enterUnreachableCode();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::checkBrIfValues(uint32_t relativeDepth, Value* condition,
+ ExprType* type, Value* value)
+{
+ if (MOZ_LIKELY(reachable_)) {
+ if (!popWithType(ValType::I32, condition))
+ return false;
+
+ ControlStackEntry<ControlItem>* controlItem = nullptr;
+ if (!getControl(relativeDepth, &controlItem))
+ return false;
+
+ if (controlItem->kind() != LabelKind::Loop) {
+ controlItem->setReachable();
+
+ ExprType expectedType = controlItem->type();
+ if (Output)
+ *type = expectedType;
+
+ if (!IsVoid(expectedType))
+ return topWithType(NonVoidToValType(expectedType), value);
+ }
+ }
+
+ if (Output) {
+ *type = ExprType::Void;
+ *value = Value();
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBrIf(uint32_t* relativeDepth, ExprType* type, Value* value, Value* condition)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::BrIf);
+
+ uint32_t validateRelativeDepth;
+ if (!readVarU32(&validateRelativeDepth))
+ return fail("unable to read br_if depth");
+
+ if (!checkBrIfValues(validateRelativeDepth, condition, type, value))
+ return false;
+
+ if (Output)
+ *relativeDepth = validateRelativeDepth;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBrTable(uint32_t* tableLength, ExprType* type,
+ Value* value, Value* index)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::BrTable);
+
+ if (!readVarU32(tableLength))
+ return fail("unable to read br_table table length");
+
+ if (MOZ_LIKELY(reachable_)) {
+ if (!popWithType(ValType::I32, index))
+ return false;
+ }
+
+ // Set *type to indicate that we don't know the type yet.
+ *type = ExprType::Limit;
+ if (Output)
+ *value = Value();
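+ // The table entries and the default depth follow; the caller reads them
+ // via readBrTableEntry and readBrTableDefault, which refine *type once
+ // the first label is seen.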
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBrTableEntry(ExprType* type, Value* value, uint32_t* depth)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::BrTable);
+
+ if (!readVarU32(depth))
+ return false;
+
+ ExprType knownType = *type;
+
+ if (MOZ_LIKELY(reachable_)) {
+ ControlStackEntry<ControlItem>* controlItem = nullptr;
+ if (!getControl(*depth, &controlItem))
+ return false;
+
+ if (controlItem->kind() != LabelKind::Loop) {
+ controlItem->setReachable();
+
+ // If we've already seen one label, we know the type and can check
+ // that the type for the current label matches it.
+ if (knownType != ExprType::Limit)
+ return checkType(knownType, controlItem->type());
+
+ // This is the first label; record the type and the value now.
+ ExprType expectedType = controlItem->type();
+ if (!IsVoid(expectedType)) {
+ *type = expectedType;
+ return popWithType(NonVoidToValType(expectedType), value);
+ }
+ }
+
+ if (knownType != ExprType::Limit && knownType != ExprType::Void)
+ return typeMismatch(knownType, ExprType::Void);
+ }
+
+ *type = ExprType::Void;
+ if (Output)
+ *value = Value();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBrTableDefault(ExprType* type, Value* value, uint32_t* depth)
+{
+ if (!readBrTableEntry(type, value, depth))
+ return false;
+
+ MOZ_ASSERT(!reachable_ || *type != ExprType::Limit);
+
+ enterUnreachableCode();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readUnreachable()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Unreachable);
+
+ enterUnreachableCode();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readDrop()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Drop);
+
+ if (!pop())
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readUnary(ValType operandType, Value* input)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Unary);
+
+ if (!popWithType(operandType, input))
+ return false;
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readConversion(ValType operandType, ValType resultType, Value* input)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Conversion);
+
+ if (!popWithType(operandType, input))
+ return false;
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readBinary(ValType operandType, Value* lhs, Value* rhs)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Binary);
+
+ if (!popWithType(operandType, rhs))
+ return false;
+
+ if (!popWithType(operandType, lhs))
+ return false;
+
+ infalliblePush(operandType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readComparison(ValType operandType, Value* lhs, Value* rhs)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Comparison);
+
+ if (!popWithType(operandType, rhs))
+ return false;
+
+ if (!popWithType(operandType, lhs))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readLinearMemoryAddress(uint32_t byteSize, LinearMemoryAddress<Value>* addr)
+{
+ uint8_t alignLog2;
+ if (!readFixedU8(&alignLog2))
+ return fail("unable to read load alignment");
+
+ uint32_t unusedOffset;
+ if (!readVarU32(Output ? &addr->offset : &unusedOffset))
+ return fail("unable to read load offset");
+
+ if (Validate && (alignLog2 >= 32 || (uint32_t(1) << alignLog2) > byteSize))
+ return fail("greater than natural alignment");
+
+ Value unused;
+ if (!popWithType(ValType::I32, Output ? &addr->base : &unused))
+ return false;
+
+ if (Output)
+ addr->align = uint32_t(1) << alignLog2;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readLoad(ValType resultType, uint32_t byteSize, LinearMemoryAddress<Value>* addr)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Load);
+
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ infalliblePush(resultType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readStore(ValType resultType, uint32_t byteSize, LinearMemoryAddress<Value>* addr,
+ Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Store);
+
+ if (!popWithType(resultType, value))
+ return false;
+
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readTeeStore(ValType resultType, uint32_t byteSize, LinearMemoryAddress<Value>* addr,
+ Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeStore);
+
+ if (!popWithType(resultType, value))
+ return false;
+
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ infalliblePush(TypeAndValue<Value>(resultType, Output ? *value : Value()));
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readNop()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Nop);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCurrentMemory()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::CurrentMemory);
+
+ uint32_t flags;
+ if (!readVarU32(&flags))
+ return false;
+
+ if (Validate && flags != uint32_t(MemoryTableFlags::Default))
+ return fail("unexpected flags");
+
+ if (!push(ValType::I32))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readGrowMemory(Value* input)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::GrowMemory);
+
+ uint32_t flags;
+ if (!readVarU32(&flags))
+ return false;
+
+ if (Validate && flags != uint32_t(MemoryTableFlags::Default))
+ return fail("unexpected flags");
+
+ if (!popWithType(ValType::I32, input))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSelect(ValType* type, Value* trueValue, Value* falseValue, Value* condition)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Select);
+
+ if (!popWithType(ValType::I32, condition))
+ return false;
+
+ TypeAndValue<Value> false_;
+ if (!pop(&false_))
+ return false;
+
+ TypeAndValue<Value> true_;
+ if (!pop(&true_))
+ return false;
+
+ ValType resultType = true_.type();
+ if (Validate && resultType != false_.type())
+ return fail("select operand types must match");
+
+ infalliblePush(resultType);
+
+ if (Output) {
+ *type = resultType;
+ *trueValue = true_.value();
+ *falseValue = false_.value();
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readGetLocal(const ValTypeVector& locals, uint32_t* id)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::GetLocal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= locals.length())
+ return fail("get_local index out of range");
+
+ if (!push(locals[validateId]))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSetLocal(const ValTypeVector& locals, uint32_t* id, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SetLocal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= locals.length())
+ return fail("set_local index out of range");
+
+ if (!popWithType(locals[validateId], value))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readTeeLocal(const ValTypeVector& locals, uint32_t* id, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeLocal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= locals.length())
+ return fail("set_local index out of range");
+
+ if (!topWithType(locals[validateId], value))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readGetGlobal(const GlobalDescVector& globals, uint32_t* id)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::GetGlobal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= globals.length())
+ return fail("get_global index out of range");
+
+ if (!push(globals[validateId].type()))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSetGlobal(const GlobalDescVector& globals, uint32_t* id, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SetGlobal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= globals.length())
+ return fail("set_global index out of range");
+
+ if (Validate && !globals[validateId].isMutable())
+ return fail("can't write an immutable global");
+
+ if (!popWithType(globals[validateId].type(), value))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readTeeGlobal(const GlobalDescVector& globals, uint32_t* id, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::TeeGlobal);
+
+ uint32_t validateId;
+ if (!readVarU32(&validateId))
+ return false;
+
+ if (Validate && validateId >= globals.length())
+ return fail("set_global index out of range");
+
+ if (Validate && !globals[validateId].isMutable())
+ return fail("can't write an immutable global");
+
+ if (!topWithType(globals[validateId].type(), value))
+ return false;
+
+ if (Output)
+ *id = validateId;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readI32Const(int32_t* i32)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::I32);
+
+ int32_t unused;
+ if (!readVarS32(Output ? i32 : &unused))
+ return false;
+
+ if (!push(ValType::I32))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readI64Const(int64_t* i64)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::I64);
+
+ int64_t unused;
+ if (!readVarS64(Output ? i64 : &unused))
+ return false;
+
+ if (!push(ValType::I64))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readF32Const(RawF32* f32)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::F32);
+
+ RawF32 unused;
+ if (!readFixedF32(Output ? f32 : &unused))
+ return false;
+
+ if (!push(ValType::F32))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readF64Const(RawF64* f64)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::F64);
+
+ RawF64 unused;
+ if (!readFixedF64(Output ? f64 : &unused))
+ return false;
+
+ if (!push(ValType::F64))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readI8x16Const(I8x16* i8x16)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::I8x16);
+
+ I8x16 unused;
+ if (!readFixedI8x16(Output ? i8x16 : &unused))
+ return false;
+
+ if (!push(ValType::I8x16))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readI16x8Const(I16x8* i16x8)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::I16x8);
+
+ I16x8 unused;
+ if (!readFixedI16x8(Output ? i16x8 : &unused))
+ return false;
+
+ if (!push(ValType::I16x8))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readI32x4Const(I32x4* i32x4)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::I32x4);
+
+ I32x4 unused;
+ if (!readFixedI32x4(Output ? i32x4 : &unused))
+ return false;
+
+ if (!push(ValType::I32x4))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readF32x4Const(F32x4* f32x4)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::F32x4);
+
+ F32x4 unused;
+ if (!readFixedF32x4(Output ? f32x4 : &unused))
+ return false;
+
+ if (!push(ValType::F32x4))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readB8x16Const(I8x16* i8x16)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::B8x16);
+
+ I8x16 unused;
+ if (!readFixedI8x16(Output ? i8x16 : &unused))
+ return false;
+
+ if (!push(ValType::B8x16))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readB16x8Const(I16x8* i16x8)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::B16x8);
+
+ I16x8 unused;
+ if (!readFixedI16x8(Output ? i16x8 : &unused))
+ return false;
+
+ if (!push(ValType::B16x8))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readB32x4Const(I32x4* i32x4)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::B32x4);
+
+ I32x4 unused;
+ if (!readFixedI32x4(Output ? i32x4 : &unused))
+ return false;
+
+ if (!push(ValType::B32x4))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCall(uint32_t* calleeIndex)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Call);
+
+ if (!readVarU32(calleeIndex))
+ return fail("unable to read call function index");
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCallIndirect(uint32_t* sigIndex, Value* callee)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::CallIndirect);
+
+ if (!readVarU32(sigIndex))
+ return fail("unable to read call_indirect signature index");
+
+ uint32_t flags;
+ if (!readVarU32(&flags))
+ return false;
+
+ if (Validate && flags != uint32_t(MemoryTableFlags::Default))
+ return fail("unexpected flags");
+
+ if (reachable_) {
+ if (!popWithType(ValType::I32, callee))
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readOldCallIndirect(uint32_t* sigIndex)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallIndirect);
+
+ if (!readVarU32(sigIndex))
+ return fail("unable to read call_indirect signature index");
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCallArg(ValType type, uint32_t numArgs, uint32_t argIndex, Value* arg)
+{
+ MOZ_ASSERT(reachable_);
+
+ TypeAndValue<Value> tv;
+
+ if (!peek(numArgs - argIndex, &tv))
+ return false;
+ if (!checkType(tv.type(), type))
+ return false;
+
+ if (Output)
+ *arg = tv.value();
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCallArgsEnd(uint32_t numArgs)
+{
+ MOZ_ASSERT(reachable_);
+ MOZ_ASSERT(numArgs <= valueStack_.length());
+
+ valueStack_.shrinkBy(numArgs);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readOldCallIndirectCallee(Value* callee)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::OldCallIndirect);
+ MOZ_ASSERT(reachable_);
+
+ if (!popWithType(ValType::I32, callee))
+ return false;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readCallReturn(ExprType ret)
+{
+ MOZ_ASSERT(reachable_);
+
+ if (!IsVoid(ret)) {
+ if (!push(NonVoidToValType(ret)))
+ return false;
+ }
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readAtomicLoad(LinearMemoryAddress<Value>* addr, Scalar::Type* viewType)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicLoad);
+
+ Scalar::Type validateViewType;
+ if (!readAtomicViewType(&validateViewType))
+ return false;
+
+ uint32_t byteSize = Scalar::byteSize(validateViewType);
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ if (Output)
+ *viewType = validateViewType;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readAtomicStore(LinearMemoryAddress<Value>* addr, Scalar::Type* viewType,
+ Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicStore);
+
+ Scalar::Type validateViewType;
+ if (!readAtomicViewType(&validateViewType))
+ return false;
+
+ uint32_t byteSize = Scalar::byteSize(validateViewType);
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ if (!popWithType(ValType::I32, value))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ if (Output)
+ *viewType = validateViewType;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readAtomicBinOp(LinearMemoryAddress<Value>* addr, Scalar::Type* viewType,
+ jit::AtomicOp* op, Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicBinOp);
+
+ Scalar::Type validateViewType;
+ if (!readAtomicViewType(&validateViewType))
+ return false;
+
+ if (!readAtomicBinOpOp(op))
+ return false;
+
+ uint32_t byteSize = Scalar::byteSize(validateViewType);
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ if (!popWithType(ValType::I32, value))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ if (Output)
+ *viewType = validateViewType;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readAtomicCompareExchange(LinearMemoryAddress<Value>* addr, Scalar::Type* viewType,
+ Value* oldValue, Value* newValue)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicCompareExchange);
+
+ Scalar::Type validateViewType;
+ if (!readAtomicViewType(&validateViewType))
+ return false;
+
+ uint32_t byteSize = Scalar::byteSize(validateViewType);
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ if (!popWithType(ValType::I32, newValue))
+ return false;
+
+ if (!popWithType(ValType::I32, oldValue))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ if (Output)
+ *viewType = validateViewType;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readAtomicExchange(LinearMemoryAddress<Value>* addr, Scalar::Type* viewType,
+ Value* value)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::AtomicExchange);
+
+ Scalar::Type validateViewType;
+ if (!readAtomicViewType(&validateViewType))
+ return false;
+
+ uint32_t byteSize = Scalar::byteSize(validateViewType);
+ if (!readLinearMemoryAddress(byteSize, addr))
+ return false;
+
+ if (!popWithType(ValType::I32, value))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ if (Output)
+ *viewType = validateViewType;
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdComparison(ValType simdType, Value* lhs, Value* rhs)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdComparison);
+
+ if (!popWithType(simdType, rhs))
+ return false;
+
+ if (!popWithType(simdType, lhs))
+ return false;
+
+ infalliblePush(SimdBoolType(simdType));
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdShiftByScalar(ValType simdType, Value* lhs, Value* rhs)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdShiftByScalar);
+
+ if (!popWithType(ValType::I32, rhs))
+ return false;
+
+ if (!popWithType(simdType, lhs))
+ return false;
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdBooleanReduction(ValType simdType, Value* input)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdBooleanReduction);
+
+ if (!popWithType(simdType, input))
+ return false;
+
+ infalliblePush(ValType::I32);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readExtractLane(ValType simdType, uint8_t* lane, Value* vector)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::ExtractLane);
+
+ uint32_t laneBits;
+ if (!readVarU32(&laneBits))
+ return false;
+
+ if (Validate && laneBits >= NumSimdElements(simdType))
+ return fail("simd lane out of bounds for simd type");
+
+ if (!popWithType(simdType, vector))
+ return false;
+
+ infalliblePush(SimdElementType(simdType));
+
+ if (Output)
+ *lane = uint8_t(laneBits);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readReplaceLane(ValType simdType, uint8_t* lane, Value* vector, Value* scalar)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::ReplaceLane);
+
+ uint32_t laneBits;
+ if (!readVarU32(&laneBits))
+ return false;
+
+ if (Validate && laneBits >= NumSimdElements(simdType))
+ return fail("simd lane out of bounds for simd type");
+
+ if (!popWithType(SimdElementType(simdType), scalar))
+ return false;
+
+ if (!popWithType(simdType, vector))
+ return false;
+
+ infalliblePush(simdType);
+
+ if (Output)
+ *lane = uint8_t(laneBits);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSplat(ValType simdType, Value* scalar)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Splat);
+
+ if (!popWithType(SimdElementType(simdType), scalar))
+ return false;
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSwizzle(ValType simdType, uint8_t (* lanes)[16], Value* vector)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Swizzle);
+
+ uint32_t numSimdLanes = NumSimdElements(simdType);
+ MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
+ for (uint32_t i = 0; i < numSimdLanes; ++i) {
+ uint8_t validateLane;
+ if (!readFixedU8(Output ? &(*lanes)[i] : &validateLane))
+ return fail("unable to read swizzle lane");
+ if (Validate && (Output ? (*lanes)[i] : validateLane) >= numSimdLanes)
+ return fail("swizzle index out of bounds");
+ }
+
+ if (!popWithType(simdType, vector))
+ return false;
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readShuffle(ValType simdType, uint8_t (* lanes)[16], Value* lhs, Value* rhs)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::Shuffle);
+
+ uint32_t numSimdLanes = NumSimdElements(simdType);
+ MOZ_ASSERT(numSimdLanes <= mozilla::ArrayLength(*lanes));
+ for (uint32_t i = 0; i < numSimdLanes; ++i) {
+ uint8_t validateLane;
+ if (!readFixedU8(Output ? &(*lanes)[i] : &validateLane))
+ return fail("unable to read shuffle lane");
+ if (Validate && (Output ? (*lanes)[i] : validateLane) >= numSimdLanes * 2)
+ return fail("shuffle index out of bounds");
+ }
+
+ if (!popWithType(simdType, rhs))
+ return false;
+
+ if (!popWithType(simdType, lhs))
+ return false;
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdSelect(ValType simdType, Value* trueValue, Value* falseValue,
+ Value* condition)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdSelect);
+
+ if (!popWithType(simdType, falseValue))
+ return false;
+ if (!popWithType(simdType, trueValue))
+ return false;
+ if (!popWithType(SimdBoolType(simdType), condition))
+ return false;
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdCtor()
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdCtorArg(ValType elementType, uint32_t numElements, uint32_t index,
+ Value* arg)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
+ MOZ_ASSERT(numElements > 0);
+
+ TypeAndValue<Value> tv;
+
+ if (!peek(numElements - index, &tv))
+ return false;
+ if (!checkType(tv.type(), elementType))
+ return false;
+
+ *arg = tv.value();
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdCtorArgsEnd(uint32_t numElements)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
+ MOZ_ASSERT(numElements <= valueStack_.length());
+
+ valueStack_.shrinkBy(numElements);
+
+ return true;
+}
+
+template <typename Policy>
+inline bool
+OpIter<Policy>::readSimdCtorReturn(ValType simdType)
+{
+ MOZ_ASSERT(Classify(op_) == OpKind::SimdCtor);
+
+ infalliblePush(simdType);
+
+ return true;
+}
+
+} // namespace wasm
+} // namespace js
+
+namespace mozilla {
+
+// Specialize IsPod for the Nothing specializations.
+template<> struct IsPod<js::wasm::TypeAndValue<Nothing>> : TrueType {};
+template<> struct IsPod<js::wasm::ControlStackEntry<Nothing>> : TrueType {};
+
+} // namespace mozilla
+
+#endif // wasm_iterator_h
diff --git a/js/src/wasm/WasmBinaryToAST.cpp b/js/src/wasm/WasmBinaryToAST.cpp
new file mode 100644
index 0000000000..3582a11762
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToAST.cpp
@@ -0,0 +1,2067 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinaryToAST.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Sprintf.h"
+
+#include "jscntxt.h"
+
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmBinaryIterator.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::FloorLog2;
+
+enum AstDecodeTerminationKind
+{
+ Unknown,
+ End,
+ Else
+};
+
+struct AstDecodeStackItem
+{
+ AstExpr* expr;
+ AstDecodeTerminationKind terminationKind;
+ ExprType type;
+
+ explicit AstDecodeStackItem()
+ : expr(nullptr),
+ terminationKind(AstDecodeTerminationKind::Unknown),
+ type(ExprType::Limit)
+ {}
+ explicit AstDecodeStackItem(AstDecodeTerminationKind terminationKind, ExprType type)
+ : expr(nullptr),
+ terminationKind(terminationKind),
+ type(type)
+ {}
+ explicit AstDecodeStackItem(AstExpr* expr)
+ : expr(expr),
+ terminationKind(AstDecodeTerminationKind::Unknown),
+ type(ExprType::Limit)
+ {}
+};
+
+// We don't define a Value type in the policy because OpIter doesn't push void
+// values, which we do need here because we're building an AST; instead we
+// maintain our own expression stack.
+struct AstDecodePolicy : OpIterPolicy
+{
+ // Enable validation because we can be called from wasmBinaryToText on bytes
+ // which are not necessarily valid, and we shouldn't run the decoder in
+ // non-validating mode on invalid code.
+ static const bool Validate = true;
+
+ static const bool Output = true;
+};
+
+typedef OpIter<AstDecodePolicy> AstDecodeOpIter;
+
+class AstDecodeContext
+{
+ public:
+ typedef AstVector<uint32_t> AstIndexVector;
+ typedef AstVector<AstDecodeStackItem> AstDecodeStack;
+ typedef AstVector<uint32_t> DepthStack;
+
+ JSContext* cx;
+ LifoAlloc& lifo;
+ Decoder& d;
+ bool generateNames;
+
+ private:
+ AstModule& module_;
+ AstIndexVector funcDefSigs_;
+ AstDecodeOpIter* iter_;
+ AstDecodeStack exprs_;
+ DepthStack depths_;
+ const ValTypeVector* locals_;
+ GlobalDescVector globals_;
+ AstNameVector blockLabels_;
+ uint32_t currentLabelIndex_;
+ ExprType retType_;
+
+ public:
+ AstDecodeContext(JSContext* cx, LifoAlloc& lifo, Decoder& d, AstModule& module,
+ bool generateNames)
+ : cx(cx),
+ lifo(lifo),
+ d(d),
+ generateNames(generateNames),
+ module_(module),
+ funcDefSigs_(lifo),
+ iter_(nullptr),
+ exprs_(lifo),
+ depths_(lifo),
+ locals_(nullptr),
+ blockLabels_(lifo),
+ currentLabelIndex_(0),
+ retType_(ExprType::Limit)
+ {}
+
+ AstModule& module() { return module_; }
+ AstIndexVector& funcDefSigs() { return funcDefSigs_; }
+ AstDecodeOpIter& iter() { return *iter_; }
+ AstDecodeStack& exprs() { return exprs_; }
+ DepthStack& depths() { return depths_; }
+
+ AstNameVector& blockLabels() { return blockLabels_; }
+
+ ExprType retType() const { return retType_; }
+ const ValTypeVector& locals() const { return *locals_; }
+
+ bool addGlobalDesc(ValType type, bool isMutable, bool isImport) {
+ if (isImport)
+ return globals_.append(GlobalDesc(type, isMutable, globals_.length()));
+ // No need to have the precise init expr value; we just need the right
+ // type.
+ Val dummy;
+ switch (type) {
+ case ValType::I32: dummy = Val(uint32_t(0)); break;
+ case ValType::I64: dummy = Val(uint64_t(0)); break;
+ case ValType::F32: dummy = Val(RawF32(0.f)); break;
+ case ValType::F64: dummy = Val(RawF64(0.0)); break;
+ default: return false;
+ }
+ return globals_.append(GlobalDesc(InitExpr(dummy), isMutable));
+ }
+ const GlobalDescVector& globalDescs() const { return globals_; }
+
+ void popBack() { return exprs().popBack(); }
+ AstDecodeStackItem popCopy() { return exprs().popCopy(); }
+ AstDecodeStackItem& top() { return exprs().back(); }
+ MOZ_MUST_USE bool push(AstDecodeStackItem item) { return exprs().append(item); }
+
+ bool needFirst() {
+ for (size_t i = depths().back(); i < exprs().length(); ++i) {
+ if (!exprs()[i].expr->isVoid())
+ return true;
+ }
+ return false;
+ }
+
+ AstExpr* handleVoidExpr(AstExpr* voidNode)
+ {
+ MOZ_ASSERT(voidNode->isVoid());
+
+ // To attach a node that "returns void" to the middle of an AST, wrap it
+ // in a first node next to the node it should accompany.
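+ // For example, a drop or store that follows a value-producing expression
+ // is grouped with it as (first <value-expr> <void-expr>), preserving the
+ // original evaluation order in the regenerated text.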
+ if (needFirst()) {
+ AstExpr* prev = popCopy().expr;
+
+ // If the previous node is already a First, reuse it.
+ if (prev->kind() == AstExprKind::First) {
+ if (!prev->as<AstFirst>().exprs().append(voidNode))
+ return nullptr;
+ return prev;
+ }
+
+ AstExprVector exprs(lifo);
+ if (!exprs.append(prev))
+ return nullptr;
+ if (!exprs.append(voidNode))
+ return nullptr;
+
+ return new(lifo) AstFirst(Move(exprs));
+ }
+
+ return voidNode;
+ }
+
+ void startFunction(AstDecodeOpIter* iter, const ValTypeVector* locals, ExprType retType)
+ {
+ iter_ = iter;
+ locals_ = locals;
+ currentLabelIndex_ = 0;
+ retType_ = retType;
+ }
+ void endFunction()
+ {
+ iter_ = nullptr;
+ locals_ = nullptr;
+ retType_ = ExprType::Limit;
+ MOZ_ASSERT(blockLabels_.length() == 0);
+ }
+ uint32_t nextLabelIndex()
+ {
+ return currentLabelIndex_++;
+ }
+};
+
+static bool
+GenerateName(AstDecodeContext& c, const AstName& prefix, uint32_t index, AstName* name)
+{
+ if (!c.generateNames) {
+ *name = AstName();
+ return true;
+ }
+
+ AstVector<char16_t> result(c.lifo);
+ if (!result.append(u'$'))
+ return false;
+ if (!result.append(prefix.begin(), prefix.length()))
+ return false;
+
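+ // Append one '0' placeholder per decimal digit of the index (at least
+ // one, so index 0 renders as "0"), then overwrite the placeholders
+ // right-to-left with the actual digits below.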
+ uint32_t tmp = index;
+ do {
+ if (!result.append(u'0'))
+ return false;
+ tmp /= 10;
+ } while (tmp);
+
+ if (index) {
+ char16_t* p = result.end();
+ for (tmp = index; tmp; tmp /= 10)
+ *(--p) = u'0' + (tmp % 10);
+ }
+
+ size_t length = result.length();
+ char16_t* begin = result.extractOrCopyRawBuffer();
+ if (!begin)
+ return false;
+
+ *name = AstName(begin, length);
+ return true;
+}
+
+static bool
+GenerateRef(AstDecodeContext& c, const AstName& prefix, uint32_t index, AstRef* ref)
+{
+ MOZ_ASSERT(index != AstNoIndex);
+
+ if (!c.generateNames) {
+ *ref = AstRef(index);
+ return true;
+ }
+
+ AstName name;
+ if (!GenerateName(c, prefix, index, &name))
+ return false;
+ MOZ_ASSERT(!name.empty());
+
+ *ref = AstRef(name);
+ ref->setIndex(index);
+ return true;
+}
+
+static bool
+AstDecodeCallArgs(AstDecodeContext& c, const AstSig& sig, AstExprVector* funcArgs)
+{
+ MOZ_ASSERT(c.iter().inReachableCode());
+
+ const AstValTypeVector& args = sig.args();
+ uint32_t numArgs = args.length();
+
+ if (!funcArgs->resize(numArgs))
+ return false;
+
+ for (size_t i = 0; i < numArgs; ++i) {
+ ValType argType = args[i];
+ AstDecodeStackItem item;
+ if (!c.iter().readCallArg(argType, numArgs, i, nullptr))
+ return false;
+ (*funcArgs)[i] = c.exprs()[c.exprs().length() - numArgs + i].expr;
+ }
+ c.exprs().shrinkBy(numArgs);
+
+ return c.iter().readCallArgsEnd(numArgs);
+}
+
+static bool
+AstDecodeCallReturn(AstDecodeContext& c, const AstSig& sig)
+{
+ return c.iter().readCallReturn(sig.ret());
+}
+
+static bool
+AstDecodeExpr(AstDecodeContext& c);
+
+static bool
+AstDecodeDrop(AstDecodeContext& c)
+{
+ if (!c.iter().readDrop())
+ return false;
+
+ AstDecodeStackItem value = c.popCopy();
+
+ AstExpr* tmp = new(c.lifo) AstDrop(*value.expr);
+ if (!tmp)
+ return false;
+
+ tmp = c.handleVoidExpr(tmp);
+ if (!tmp)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(tmp)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeCall(AstDecodeContext& c)
+{
+ uint32_t funcIndex;
+ if (!c.iter().readCall(&funcIndex))
+ return false;
+
+ if (!c.iter().inReachableCode())
+ return true;
+
+ uint32_t sigIndex;
+ AstRef funcRef;
+ if (funcIndex < c.module().numFuncImports()) {
+ AstImport* import = c.module().imports()[funcIndex];
+ sigIndex = import->funcSig().index();
+ funcRef = AstRef(import->name());
+ } else {
+ uint32_t funcDefIndex = funcIndex - c.module().numFuncImports();
+ if (funcDefIndex >= c.funcDefSigs().length())
+ return c.iter().fail("callee index out of range");
+
+ sigIndex = c.funcDefSigs()[funcDefIndex];
+
+ if (!GenerateRef(c, AstName(u"func"), funcIndex, &funcRef))
+ return false;
+ }
+
+ const AstSig* sig = c.module().sigs()[sigIndex];
+
+ AstExprVector args(c.lifo);
+ if (!AstDecodeCallArgs(c, *sig, &args))
+ return false;
+
+ if (!AstDecodeCallReturn(c, *sig))
+ return false;
+
+ AstCall* call = new(c.lifo) AstCall(Op::Call, sig->ret(), funcRef, Move(args));
+ if (!call)
+ return false;
+
+ AstExpr* result = call;
+ if (IsVoid(sig->ret()))
+ result = c.handleVoidExpr(call);
+
+ if (!c.push(AstDecodeStackItem(result)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeCallIndirect(AstDecodeContext& c)
+{
+ uint32_t sigIndex;
+ if (!c.iter().readCallIndirect(&sigIndex, nullptr))
+ return false;
+
+ if (!c.iter().inReachableCode())
+ return true;
+
+ if (sigIndex >= c.module().sigs().length())
+ return c.iter().fail("signature index out of range");
+
+ AstDecodeStackItem index = c.popCopy();
+
+ AstRef sigRef;
+ if (!GenerateRef(c, AstName(u"type"), sigIndex, &sigRef))
+ return false;
+
+ const AstSig* sig = c.module().sigs()[sigIndex];
+ AstExprVector args(c.lifo);
+ if (!AstDecodeCallArgs(c, *sig, &args))
+ return false;
+
+ if (!AstDecodeCallReturn(c, *sig))
+ return false;
+
+ AstCallIndirect* call = new(c.lifo) AstCallIndirect(sigRef, sig->ret(),
+ Move(args), index.expr);
+ if (!call)
+ return false;
+
+ AstExpr* result = call;
+ if (IsVoid(sig->ret()))
+ result = c.handleVoidExpr(call);
+
+ if (!c.push(AstDecodeStackItem(result)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeGetBlockRef(AstDecodeContext& c, uint32_t depth, AstRef* ref)
+{
+ if (!c.generateNames || depth >= c.blockLabels().length()) {
+ // This also covers branches to the function body label, which has no
+ // entry in blockLabels.
+ *ref = AstRef(depth);
+ return true;
+ }
+
+ uint32_t index = c.blockLabels().length() - depth - 1;
+ if (c.blockLabels()[index].empty()) {
+ if (!GenerateName(c, AstName(u"label"), c.nextLabelIndex(), &c.blockLabels()[index]))
+ return false;
+ }
+ *ref = AstRef(c.blockLabels()[index]);
+ ref->setIndex(depth);
+ return true;
+}
+
+static bool
+AstDecodeBrTable(AstDecodeContext& c)
+{
+ uint32_t tableLength;
+ ExprType type;
+ if (!c.iter().readBrTable(&tableLength, &type, nullptr, nullptr))
+ return false;
+
+ AstRefVector table(c.lifo);
+ if (!table.resize(tableLength))
+ return false;
+
+ uint32_t depth;
+ for (size_t i = 0, e = tableLength; i < e; ++i) {
+ if (!c.iter().readBrTableEntry(&type, nullptr, &depth))
+ return false;
+ if (!AstDecodeGetBlockRef(c, depth, &table[i]))
+ return false;
+ }
+
+ // Read the default label.
+ if (!c.iter().readBrTableDefault(&type, nullptr, &depth))
+ return false;
+
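+ // The branch index was pushed last, so it comes off the stack first; the
+ // optional branch value sits beneath it.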
+ AstDecodeStackItem index = c.popCopy();
+ AstDecodeStackItem value;
+ if (!IsVoid(type))
+ value = c.popCopy();
+
+ AstRef def;
+ if (!AstDecodeGetBlockRef(c, depth, &def))
+ return false;
+
+ AstBranchTable* branchTable = new(c.lifo) AstBranchTable(*index.expr,
+ def, Move(table), value.expr);
+ if (!branchTable)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(branchTable)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeBlock(AstDecodeContext& c, Op op)
+{
+ MOZ_ASSERT(op == Op::Block || op == Op::Loop);
+
+ if (!c.blockLabels().append(AstName()))
+ return false;
+
+ if (op == Op::Loop) {
+ if (!c.iter().readLoop())
+ return false;
+ } else {
+ if (!c.iter().readBlock())
+ return false;
+ }
+
+ if (!c.depths().append(c.exprs().length()))
+ return false;
+
+ ExprType type;
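+ // Decode the body until AstDecodeEnd pushes a sentinel item with a null
+ // expr; the sentinel carries the block's result type.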
+ while (true) {
+ if (!AstDecodeExpr(c))
+ return false;
+
+ const AstDecodeStackItem& item = c.top();
+ if (!item.expr) { // Op::End was found
+ type = item.type;
+ c.popBack();
+ break;
+ }
+ }
+
+ AstExprVector exprs(c.lifo);
+ for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end();
+ i != e; ++i) {
+ if (!exprs.append(i->expr))
+ return false;
+ }
+ c.exprs().shrinkTo(c.depths().popCopy());
+
+ AstName name = c.blockLabels().popCopy();
+ AstBlock* block = new(c.lifo) AstBlock(op, type, name, Move(exprs));
+ if (!block)
+ return false;
+
+ AstExpr* result = block;
+ if (IsVoid(type))
+ result = c.handleVoidExpr(block);
+
+ if (!c.push(AstDecodeStackItem(result)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeIf(AstDecodeContext& c)
+{
+ if (!c.iter().readIf(nullptr))
+ return false;
+
+ AstDecodeStackItem cond = c.popCopy();
+
+ bool hasElse = false;
+
+ if (!c.depths().append(c.exprs().length()))
+ return false;
+
+ if (!c.blockLabels().append(AstName()))
+ return false;
+
+ ExprType type;
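+ // Decode the then arm until the sentinel; its termination kind tells us
+ // whether an else arm follows.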
+ while (true) {
+ if (!AstDecodeExpr(c))
+ return false;
+
+ const AstDecodeStackItem& item = c.top();
+ if (!item.expr) { // Op::End was found
+ hasElse = item.terminationKind == AstDecodeTerminationKind::Else;
+ type = item.type;
+ c.popBack();
+ break;
+ }
+ }
+
+ AstExprVector thenExprs(c.lifo);
+ for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end();
+ i != e; ++i) {
+ if (!thenExprs.append(i->expr))
+ return false;
+ }
+ c.exprs().shrinkTo(c.depths().back());
+
+ AstExprVector elseExprs(c.lifo);
+ if (hasElse) {
+ while (true) {
+ if (!AstDecodeExpr(c))
+ return false;
+
+ const AstDecodeStackItem& item = c.top();
+ if (!item.expr) { // Op::End was found
+ c.popBack();
+ break;
+ }
+ }
+
+ for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end();
+ i != e; ++i) {
+ if (!elseExprs.append(i->expr))
+ return false;
+ }
+ c.exprs().shrinkTo(c.depths().back());
+ }
+
+ c.depths().popBack();
+
+ AstName name = c.blockLabels().popCopy();
+
+ AstIf* if_ = new(c.lifo) AstIf(type, cond.expr, name, Move(thenExprs), Move(elseExprs));
+ if (!if_)
+ return false;
+
+ AstExpr* result = if_;
+ if (IsVoid(type))
+ result = c.handleVoidExpr(if_);
+
+ if (!c.push(AstDecodeStackItem(result)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeEnd(AstDecodeContext& c)
+{
+ LabelKind kind;
+ ExprType type;
+ if (!c.iter().readEnd(&kind, &type, nullptr))
+ return false;
+
+ if (!c.push(AstDecodeStackItem(AstDecodeTerminationKind::End, type)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeElse(AstDecodeContext& c)
+{
+ ExprType type;
+
+ if (!c.iter().readElse(&type, nullptr))
+ return false;
+
+ if (!c.push(AstDecodeStackItem(AstDecodeTerminationKind::Else, type)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeNop(AstDecodeContext& c)
+{
+ if (!c.iter().readNop())
+ return false;
+
+ AstExpr* tmp = new(c.lifo) AstNop();
+ if (!tmp)
+ return false;
+
+ tmp = c.handleVoidExpr(tmp);
+ if (!tmp)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(tmp)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeUnary(AstDecodeContext& c, ValType type, Op op)
+{
+ if (!c.iter().readUnary(type, nullptr))
+ return false;
+
+ AstDecodeStackItem operand = c.popCopy();
+
+ AstUnaryOperator* unary = new(c.lifo) AstUnaryOperator(op, operand.expr);
+ if (!unary)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(unary)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeBinary(AstDecodeContext& c, ValType type, Op op)
+{
+ if (!c.iter().readBinary(type, nullptr, nullptr))
+ return false;
+
+ AstDecodeStackItem rhs = c.popCopy();
+ AstDecodeStackItem lhs = c.popCopy();
+
+ AstBinaryOperator* binary = new(c.lifo) AstBinaryOperator(op, lhs.expr, rhs.expr);
+ if (!binary)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(binary)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeSelect(AstDecodeContext& c)
+{
+ ValType type;
+ if (!c.iter().readSelect(&type, nullptr, nullptr, nullptr))
+ return false;
+
+ AstDecodeStackItem selectFalse = c.popCopy();
+ AstDecodeStackItem selectTrue = c.popCopy();
+ AstDecodeStackItem cond = c.popCopy();
+
+ AstTernaryOperator* ternary = new(c.lifo) AstTernaryOperator(Op::Select, cond.expr, selectTrue.expr, selectFalse.expr);
+ if (!ternary)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(ternary)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeComparison(AstDecodeContext& c, ValType type, Op op)
+{
+ if (!c.iter().readComparison(type, nullptr, nullptr))
+ return false;
+
+ AstDecodeStackItem rhs = c.popCopy();
+ AstDecodeStackItem lhs = c.popCopy();
+
+ AstComparisonOperator* comparison = new(c.lifo) AstComparisonOperator(op, lhs.expr, rhs.expr);
+ if (!comparison)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(comparison)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeConversion(AstDecodeContext& c, ValType fromType, ValType toType, Op op)
+{
+ if (!c.iter().readConversion(fromType, toType, nullptr))
+ return false;
+
+ AstDecodeStackItem operand = c.popCopy();
+
+ AstConversionOperator* conversion = new(c.lifo) AstConversionOperator(op, operand.expr);
+ if (!conversion)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(conversion)))
+ return false;
+
+ return true;
+}
+
+static AstLoadStoreAddress
+AstDecodeLoadStoreAddress(const LinearMemoryAddress<Nothing>& addr, const AstDecodeStackItem& item)
+{
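+ // OpIter reports the alignment in bytes; convert it back to the encoded
+ // (log2) flags form kept by the AST.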
+ uint32_t flags = FloorLog2(addr.align);
+ return AstLoadStoreAddress(item.expr, flags, addr.offset);
+}
+
+static bool
+AstDecodeLoad(AstDecodeContext& c, ValType type, uint32_t byteSize, Op op)
+{
+ LinearMemoryAddress<Nothing> addr;
+ if (!c.iter().readLoad(type, byteSize, &addr))
+ return false;
+
+ AstDecodeStackItem item = c.popCopy();
+
+ AstLoad* load = new(c.lifo) AstLoad(op, AstDecodeLoadStoreAddress(addr, item));
+ if (!load)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(load)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeStore(AstDecodeContext& c, ValType type, uint32_t byteSize, Op op)
+{
+ LinearMemoryAddress<Nothing> addr;
+ if (!c.iter().readStore(type, byteSize, &addr, nullptr))
+ return false;
+
+ AstDecodeStackItem value = c.popCopy();
+ AstDecodeStackItem item = c.popCopy();
+
+ AstStore* store = new(c.lifo) AstStore(op, AstDecodeLoadStoreAddress(addr, item), value.expr);
+ if (!store)
+ return false;
+
+ AstExpr* wrapped = c.handleVoidExpr(store);
+ if (!wrapped)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(wrapped)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeCurrentMemory(AstDecodeContext& c)
+{
+ if (!c.iter().readCurrentMemory())
+ return false;
+
+ AstCurrentMemory* gm = new(c.lifo) AstCurrentMemory();
+ if (!gm)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(gm)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeGrowMemory(AstDecodeContext& c)
+{
+ if (!c.iter().readGrowMemory(nullptr))
+ return false;
+
+ AstDecodeStackItem operand = c.popCopy();
+
+ AstGrowMemory* gm = new(c.lifo) AstGrowMemory(operand.expr);
+ if (!gm)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(gm)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeBranch(AstDecodeContext& c, Op op)
+{
+ MOZ_ASSERT(op == Op::Br || op == Op::BrIf);
+
+ uint32_t depth;
+ ExprType type;
+ AstDecodeStackItem value;
+ AstDecodeStackItem cond;
+ if (op == Op::Br) {
+ if (!c.iter().readBr(&depth, &type, nullptr))
+ return false;
+ if (!IsVoid(type))
+ value = c.popCopy();
+ } else {
+ if (!c.iter().readBrIf(&depth, &type, nullptr, nullptr))
+ return false;
+ if (!IsVoid(type))
+ value = c.popCopy();
+ cond = c.popCopy();
+ }
+
+ AstRef depthRef;
+ if (!AstDecodeGetBlockRef(c, depth, &depthRef))
+ return false;
+
+ if (op == Op::Br || !value.expr)
+ type = ExprType::Void;
+ AstBranch* branch = new(c.lifo) AstBranch(op, type, cond.expr, depthRef, value.expr);
+ if (!branch)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(branch)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeGetLocal(AstDecodeContext& c)
+{
+ uint32_t getLocalId;
+ if (!c.iter().readGetLocal(c.locals(), &getLocalId))
+ return false;
+
+ AstRef localRef;
+ if (!GenerateRef(c, AstName(u"var"), getLocalId, &localRef))
+ return false;
+
+ AstGetLocal* getLocal = new(c.lifo) AstGetLocal(localRef);
+ if (!getLocal)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(getLocal)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeSetLocal(AstDecodeContext& c)
+{
+ uint32_t setLocalId;
+ if (!c.iter().readSetLocal(c.locals(), &setLocalId, nullptr))
+ return false;
+
+ AstDecodeStackItem setLocalValue = c.popCopy();
+
+ AstRef localRef;
+ if (!GenerateRef(c, AstName(u"var"), setLocalId, &localRef))
+ return false;
+
+ AstSetLocal* setLocal = new(c.lifo) AstSetLocal(localRef, *setLocalValue.expr);
+ if (!setLocal)
+ return false;
+
+ AstExpr* expr = c.handleVoidExpr(setLocal);
+ if (!expr)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(expr)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeTeeLocal(AstDecodeContext& c)
+{
+ uint32_t teeLocalId;
+ if (!c.iter().readTeeLocal(c.locals(), &teeLocalId, nullptr))
+ return false;
+
+ AstDecodeStackItem teeLocalValue = c.popCopy();
+
+ AstRef localRef;
+ if (!GenerateRef(c, AstName(u"var"), teeLocalId, &localRef))
+ return false;
+
+ AstTeeLocal* teeLocal = new(c.lifo) AstTeeLocal(localRef, *teeLocalValue.expr);
+ if (!teeLocal)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(teeLocal)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeGetGlobal(AstDecodeContext& c)
+{
+ uint32_t globalId;
+ if (!c.iter().readGetGlobal(c.globalDescs(), &globalId))
+ return false;
+
+ AstRef globalRef;
+ if (!GenerateRef(c, AstName(u"global"), globalId, &globalRef))
+ return false;
+
+ auto* getGlobal = new(c.lifo) AstGetGlobal(globalRef);
+ if (!getGlobal)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(getGlobal)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeSetGlobal(AstDecodeContext& c)
+{
+ uint32_t globalId;
+ if (!c.iter().readSetGlobal(c.globalDescs(), &globalId, nullptr))
+ return false;
+
+ AstDecodeStackItem value = c.popCopy();
+
+ AstRef globalRef;
+ if (!GenerateRef(c, AstName(u"global"), globalId, &globalRef))
+ return false;
+
+ auto* setGlobal = new(c.lifo) AstSetGlobal(globalRef, *value.expr);
+ if (!setGlobal)
+ return false;
+
+ AstExpr* expr = c.handleVoidExpr(setGlobal);
+ if (!expr)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(expr)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeReturn(AstDecodeContext& c)
+{
+ if (!c.iter().readReturn(nullptr))
+ return false;
+
+ AstDecodeStackItem result;
+ if (!IsVoid(c.retType()))
+ result = c.popCopy();
+
+ AstReturn* ret = new(c.lifo) AstReturn(result.expr);
+ if (!ret)
+ return false;
+
+ if (!c.push(AstDecodeStackItem(ret)))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeExpr(AstDecodeContext& c)
+{
+ uint32_t exprOffset = c.iter().currentOffset();
+ uint16_t op;
+ if (!c.iter().readOp(&op))
+ return false;
+
+ AstExpr* tmp;
+ switch (op) {
+ case uint16_t(Op::Nop):
+ if (!AstDecodeNop(c))
+ return false;
+ break;
+ case uint16_t(Op::Drop):
+ if (!AstDecodeDrop(c))
+ return false;
+ break;
+ case uint16_t(Op::Call):
+ if (!AstDecodeCall(c))
+ return false;
+ break;
+ case uint16_t(Op::CallIndirect):
+ if (!AstDecodeCallIndirect(c))
+ return false;
+ break;
+ case uint16_t(Op::I32Const):
+ int32_t i32;
+ if (!c.iter().readI32Const(&i32))
+ return false;
+ tmp = new(c.lifo) AstConst(Val((uint32_t)i32));
+ if (!tmp || !c.push(AstDecodeStackItem(tmp)))
+ return false;
+ break;
+ case uint16_t(Op::I64Const):
+ int64_t i64;
+ if (!c.iter().readI64Const(&i64))
+ return false;
+ tmp = new(c.lifo) AstConst(Val((uint64_t)i64));
+ if (!tmp || !c.push(AstDecodeStackItem(tmp)))
+ return false;
+ break;
+ case uint16_t(Op::F32Const): {
+ RawF32 f32;
+ if (!c.iter().readF32Const(&f32))
+ return false;
+ tmp = new(c.lifo) AstConst(Val(f32));
+ if (!tmp || !c.push(AstDecodeStackItem(tmp)))
+ return false;
+ break;
+ }
+ case uint16_t(Op::F64Const): {
+ RawF64 f64;
+ if (!c.iter().readF64Const(&f64))
+ return false;
+ tmp = new(c.lifo) AstConst(Val(f64));
+ if (!tmp || !c.push(AstDecodeStackItem(tmp)))
+ return false;
+ break;
+ }
+ case uint16_t(Op::GetLocal):
+ if (!AstDecodeGetLocal(c))
+ return false;
+ break;
+ case uint16_t(Op::SetLocal):
+ if (!AstDecodeSetLocal(c))
+ return false;
+ break;
+ case uint16_t(Op::TeeLocal):
+ if (!AstDecodeTeeLocal(c))
+ return false;
+ break;
+ case uint16_t(Op::Select):
+ if (!AstDecodeSelect(c))
+ return false;
+ break;
+ case uint16_t(Op::Block):
+ case uint16_t(Op::Loop):
+ if (!AstDecodeBlock(c, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::If):
+ if (!AstDecodeIf(c))
+ return false;
+ break;
+ case uint16_t(Op::Else):
+ if (!AstDecodeElse(c))
+ return false;
+ break;
+ case uint16_t(Op::End):
+ if (!AstDecodeEnd(c))
+ return false;
+ break;
+ case uint16_t(Op::I32Clz):
+ case uint16_t(Op::I32Ctz):
+ case uint16_t(Op::I32Popcnt):
+ if (!AstDecodeUnary(c, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Clz):
+ case uint16_t(Op::I64Ctz):
+ case uint16_t(Op::I64Popcnt):
+ if (!AstDecodeUnary(c, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32Abs):
+ case uint16_t(Op::F32Neg):
+ case uint16_t(Op::F32Ceil):
+ case uint16_t(Op::F32Floor):
+ case uint16_t(Op::F32Sqrt):
+ case uint16_t(Op::F32Trunc):
+ case uint16_t(Op::F32Nearest):
+ if (!AstDecodeUnary(c, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64Abs):
+ case uint16_t(Op::F64Neg):
+ case uint16_t(Op::F64Ceil):
+ case uint16_t(Op::F64Floor):
+ case uint16_t(Op::F64Sqrt):
+ case uint16_t(Op::F64Trunc):
+ case uint16_t(Op::F64Nearest):
+ if (!AstDecodeUnary(c, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Add):
+ case uint16_t(Op::I32Sub):
+ case uint16_t(Op::I32Mul):
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ case uint16_t(Op::I32And):
+ case uint16_t(Op::I32Or):
+ case uint16_t(Op::I32Xor):
+ case uint16_t(Op::I32Shl):
+ case uint16_t(Op::I32ShrS):
+ case uint16_t(Op::I32ShrU):
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ if (!AstDecodeBinary(c, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Add):
+ case uint16_t(Op::I64Sub):
+ case uint16_t(Op::I64Mul):
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ case uint16_t(Op::I64And):
+ case uint16_t(Op::I64Or):
+ case uint16_t(Op::I64Xor):
+ case uint16_t(Op::I64Shl):
+ case uint16_t(Op::I64ShrS):
+ case uint16_t(Op::I64ShrU):
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ if (!AstDecodeBinary(c, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32Add):
+ case uint16_t(Op::F32Sub):
+ case uint16_t(Op::F32Mul):
+ case uint16_t(Op::F32Div):
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ case uint16_t(Op::F32CopySign):
+ if (!AstDecodeBinary(c, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64Add):
+ case uint16_t(Op::F64Sub):
+ case uint16_t(Op::F64Mul):
+ case uint16_t(Op::F64Div):
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ case uint16_t(Op::F64CopySign):
+ if (!AstDecodeBinary(c, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Eq):
+ case uint16_t(Op::I32Ne):
+ case uint16_t(Op::I32LtS):
+ case uint16_t(Op::I32LtU):
+ case uint16_t(Op::I32LeS):
+ case uint16_t(Op::I32LeU):
+ case uint16_t(Op::I32GtS):
+ case uint16_t(Op::I32GtU):
+ case uint16_t(Op::I32GeS):
+ case uint16_t(Op::I32GeU):
+ if (!AstDecodeComparison(c, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Eq):
+ case uint16_t(Op::I64Ne):
+ case uint16_t(Op::I64LtS):
+ case uint16_t(Op::I64LtU):
+ case uint16_t(Op::I64LeS):
+ case uint16_t(Op::I64LeU):
+ case uint16_t(Op::I64GtS):
+ case uint16_t(Op::I64GtU):
+ case uint16_t(Op::I64GeS):
+ case uint16_t(Op::I64GeU):
+ if (!AstDecodeComparison(c, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32Eq):
+ case uint16_t(Op::F32Ne):
+ case uint16_t(Op::F32Lt):
+ case uint16_t(Op::F32Le):
+ case uint16_t(Op::F32Gt):
+ case uint16_t(Op::F32Ge):
+ if (!AstDecodeComparison(c, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64Eq):
+ case uint16_t(Op::F64Ne):
+ case uint16_t(Op::F64Lt):
+ case uint16_t(Op::F64Le):
+ case uint16_t(Op::F64Gt):
+ case uint16_t(Op::F64Ge):
+ if (!AstDecodeComparison(c, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Eqz):
+ if (!AstDecodeConversion(c, ValType::I32, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Eqz):
+ case uint16_t(Op::I32WrapI64):
+ if (!AstDecodeConversion(c, ValType::I64, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32TruncSF32):
+ case uint16_t(Op::I32TruncUF32):
+ case uint16_t(Op::I32ReinterpretF32):
+ if (!AstDecodeConversion(c, ValType::F32, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32TruncSF64):
+ case uint16_t(Op::I32TruncUF64):
+ if (!AstDecodeConversion(c, ValType::F64, ValType::I32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64ExtendSI32):
+ case uint16_t(Op::I64ExtendUI32):
+ if (!AstDecodeConversion(c, ValType::I32, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64TruncSF32):
+ case uint16_t(Op::I64TruncUF32):
+ if (!AstDecodeConversion(c, ValType::F32, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64TruncSF64):
+ case uint16_t(Op::I64TruncUF64):
+ case uint16_t(Op::I64ReinterpretF64):
+ if (!AstDecodeConversion(c, ValType::F64, ValType::I64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32ConvertSI32):
+ case uint16_t(Op::F32ConvertUI32):
+ case uint16_t(Op::F32ReinterpretI32):
+ if (!AstDecodeConversion(c, ValType::I32, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32ConvertSI64):
+ case uint16_t(Op::F32ConvertUI64):
+ if (!AstDecodeConversion(c, ValType::I64, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32DemoteF64):
+ if (!AstDecodeConversion(c, ValType::F64, ValType::F32, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64ConvertSI32):
+ case uint16_t(Op::F64ConvertUI32):
+ if (!AstDecodeConversion(c, ValType::I32, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64ConvertSI64):
+ case uint16_t(Op::F64ConvertUI64):
+ case uint16_t(Op::F64ReinterpretI64):
+ if (!AstDecodeConversion(c, ValType::I64, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64PromoteF32):
+ if (!AstDecodeConversion(c, ValType::F32, ValType::F64, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Load8S):
+ case uint16_t(Op::I32Load8U):
+ if (!AstDecodeLoad(c, ValType::I32, 1, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Load16S):
+ case uint16_t(Op::I32Load16U):
+ if (!AstDecodeLoad(c, ValType::I32, 2, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Load):
+ if (!AstDecodeLoad(c, ValType::I32, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Load8S):
+ case uint16_t(Op::I64Load8U):
+ if (!AstDecodeLoad(c, ValType::I64, 1, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Load16S):
+ case uint16_t(Op::I64Load16U):
+ if (!AstDecodeLoad(c, ValType::I64, 2, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Load32S):
+ case uint16_t(Op::I64Load32U):
+ if (!AstDecodeLoad(c, ValType::I64, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Load):
+ if (!AstDecodeLoad(c, ValType::I64, 8, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32Load):
+ if (!AstDecodeLoad(c, ValType::F32, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64Load):
+ if (!AstDecodeLoad(c, ValType::F64, 8, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Store8):
+ if (!AstDecodeStore(c, ValType::I32, 1, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Store16):
+ if (!AstDecodeStore(c, ValType::I32, 2, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I32Store):
+ if (!AstDecodeStore(c, ValType::I32, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Store8):
+ if (!AstDecodeStore(c, ValType::I64, 1, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Store16):
+ if (!AstDecodeStore(c, ValType::I64, 2, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Store32):
+ if (!AstDecodeStore(c, ValType::I64, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::I64Store):
+ if (!AstDecodeStore(c, ValType::I64, 8, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F32Store):
+ if (!AstDecodeStore(c, ValType::F32, 4, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::F64Store):
+ if (!AstDecodeStore(c, ValType::F64, 8, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::CurrentMemory):
+ if (!AstDecodeCurrentMemory(c))
+ return false;
+ break;
+ case uint16_t(Op::GrowMemory):
+ if (!AstDecodeGrowMemory(c))
+ return false;
+ break;
+ case uint16_t(Op::SetGlobal):
+ if (!AstDecodeSetGlobal(c))
+ return false;
+ break;
+ case uint16_t(Op::GetGlobal):
+ if (!AstDecodeGetGlobal(c))
+ return false;
+ break;
+ case uint16_t(Op::Br):
+ case uint16_t(Op::BrIf):
+ if (!AstDecodeBranch(c, Op(op)))
+ return false;
+ break;
+ case uint16_t(Op::BrTable):
+ if (!AstDecodeBrTable(c))
+ return false;
+ break;
+ case uint16_t(Op::Return):
+ if (!AstDecodeReturn(c))
+ return false;
+ break;
+ case uint16_t(Op::Unreachable):
+ if (!c.iter().readUnreachable())
+ return false;
+ tmp = new(c.lifo) AstUnreachable();
+ if (!tmp)
+ return false;
+ if (!c.push(AstDecodeStackItem(tmp)))
+ return false;
+ break;
+ default:
+ return c.iter().unrecognizedOpcode(op);
+ }
+
+ AstExpr* lastExpr = c.top().expr;
+ if (lastExpr)
+ lastExpr->setOffset(exprOffset);
+ return true;
+}
+
+/*****************************************************************************/
+// wasm decoding and generation
+
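+// Decode the type section and append a generated-name AstSig to the module for each signature.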
+static bool
+AstDecodeTypeSection(AstDecodeContext& c, SigWithIdVector* sigs)
+{
+ if (!DecodeTypeSection(c.d, sigs))
+ return false;
+
+ for (size_t sigIndex = 0; sigIndex < sigs->length(); sigIndex++) {
+ const Sig& sig = (*sigs)[sigIndex];
+
+ AstValTypeVector args(c.lifo);
+ if (!args.appendAll(sig.args()))
+ return false;
+
+ AstSig sigNoName(Move(args), sig.ret());
+ AstName sigName;
+ if (!GenerateName(c, AstName(u"type"), sigIndex, &sigName))
+ return false;
+
+ AstSig* astSig = new(c.lifo) AstSig(sigName, Move(sigNoName));
+ if (!astSig || !c.module().append(astSig))
+ return false;
+ }
+
+ return true;
+}
+
+static AstName
+ToAstName(AstDecodeContext& c, const UniqueChars& name)
+{
+ size_t len = strlen(name.get());
+ char16_t* buffer = static_cast<char16_t *>(c.lifo.alloc(len * sizeof(char16_t)));
+ if (!buffer)
+ return AstName();
+
+ for (size_t i = 0; i < len; i++)
+ buffer[i] = name.get()[i];
+
+ return AstName(buffer, len);
+}
+
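+// Decode the import section, creating an AstImport for each imported function, global,
+// table, and memory.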
+static bool
+AstDecodeImportSection(AstDecodeContext& c, const SigWithIdVector& sigs)
+{
+ Uint32Vector funcSigIndices;
+ GlobalDescVector globals;
+ TableDescVector tables;
+ Maybe<Limits> memory;
+ ImportVector imports;
+ if (!DecodeImportSection(c.d, sigs, &funcSigIndices, &globals, &tables, &memory, &imports))
+ return false;
+
+ size_t lastFunc = 0;
+ size_t lastGlobal = 0;
+ size_t lastTable = 0;
+ size_t lastMemory = 0;
+
+ for (size_t importIndex = 0; importIndex < imports.length(); importIndex++) {
+ const Import& import = imports[importIndex];
+
+ AstName moduleName = ToAstName(c, import.module);
+ AstName fieldName = ToAstName(c, import.field);
+
+ AstImport* ast = nullptr;
+ switch (import.kind) {
+ case DefinitionKind::Function: {
+ AstName importName;
+ if (!GenerateName(c, AstName(u"import"), lastFunc, &importName))
+ return false;
+
+ AstRef sigRef;
+ if (!GenerateRef(c, AstName(u"type"), funcSigIndices[lastFunc], &sigRef))
+ return false;
+
+ ast = new(c.lifo) AstImport(importName, moduleName, fieldName, sigRef);
+ lastFunc++;
+ break;
+ }
+ case DefinitionKind::Global: {
+ AstName importName;
+ if (!GenerateName(c, AstName(u"global"), lastGlobal, &importName))
+ return false;
+
+ const GlobalDesc& global = globals[lastGlobal];
+ ValType type = global.type();
+ bool isMutable = global.isMutable();
+
+ if (!c.addGlobalDesc(type, isMutable, /* import */ true))
+ return false;
+
+ ast = new(c.lifo) AstImport(importName, moduleName, fieldName,
+ AstGlobal(importName, type, isMutable));
+ lastGlobal++;
+ break;
+ }
+ case DefinitionKind::Table: {
+ AstName importName;
+ if (!GenerateName(c, AstName(u"table"), lastTable, &importName))
+ return false;
+
+ ast = new(c.lifo) AstImport(importName, moduleName, fieldName, DefinitionKind::Table,
+ tables[lastTable].limits);
+ lastTable++;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ AstName importName;
+ if (!GenerateName(c, AstName(u"memory"), lastMemory, &importName))
+ return false;
+
+ ast = new(c.lifo) AstImport(importName, moduleName, fieldName, DefinitionKind::Memory,
+ *memory);
+ lastMemory++;
+ break;
+ }
+ }
+
+ if (!ast || !c.module().append(ast))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+AstDecodeFunctionSection(AstDecodeContext& c, const SigWithIdVector& sigs)
+{
+ Uint32Vector funcSigIndexes;
+ if (!DecodeFunctionSection(c.d, sigs, c.module().numFuncImports(), &funcSigIndexes))
+ return false;
+
+ return c.funcDefSigs().appendAll(funcSigIndexes);
+}
+
+static bool
+AstDecodeTableSection(AstDecodeContext& c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Table, &sectionStart, &sectionSize, "table"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numTables;
+ if (!c.d.readVarU32(&numTables))
+ return c.d.fail("failed to read number of tables");
+
+ if (numTables != 1)
+ return c.d.fail("the number of tables must be exactly one");
+
+ uint32_t typeConstructorValue;
+ if (!c.d.readVarU32(&typeConstructorValue))
+ return c.d.fail("expected type constructor kind");
+
+ if (typeConstructorValue != uint32_t(TypeCode::AnyFunc))
+ return c.d.fail("unknown type constructor kind");
+
+ Limits table;
+ if (!DecodeLimits(c.d, &table))
+ return false;
+
+ if (table.initial > MaxTableElems)
+ return c.d.fail("too many table elements");
+
+ if (c.module().hasTable())
+ return c.d.fail("already have a table");
+
+ AstName name;
+ if (!GenerateName(c, AstName(u"table"), c.module().tables().length(), &name))
+ return false;
+
+ if (!c.module().addTable(name, table))
+ return false;
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "table"))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeName(AstDecodeContext& c, AstName* name)
+{
+ uint32_t length;
+ if (!c.d.readVarU32(&length))
+ return false;
+
+ const uint8_t* bytes;
+ if (!c.d.readBytes(length, &bytes))
+ return false;
+
+ char16_t* buffer = static_cast<char16_t *>(c.lifo.alloc(length * sizeof(char16_t)));
+ if (!buffer)
+ return false;
+
+ for (size_t i = 0; i < length; i++)
+ buffer[i] = bytes[i];
+
+ *name = AstName(buffer, length);
+ return true;
+}
+
+static bool
+AstDecodeMemorySection(AstDecodeContext& c)
+{
+ bool present;
+ Limits memory;
+ if (!DecodeMemorySection(c.d, c.module().hasMemory(), &memory, &present))
+ return false;
+
+ if (present) {
+ AstName name;
+ if (!GenerateName(c, AstName(u"memory"), c.module().memories().length(), &name))
+ return false;
+ if (!c.module().addMemory(name, memory))
+ return false;
+ }
+
+ return true;
+}
+
+static AstExpr*
+ToAstExpr(AstDecodeContext& c, const InitExpr& initExpr)
+{
+ switch (initExpr.kind()) {
+ case InitExpr::Kind::Constant: {
+ return new(c.lifo) AstConst(Val(initExpr.val()));
+ }
+ case InitExpr::Kind::GetGlobal: {
+ AstRef globalRef;
+ if (!GenerateRef(c, AstName(u"global"), initExpr.globalIndex(), &globalRef))
+ return nullptr;
+ return new(c.lifo) AstGetGlobal(globalRef);
+ }
+ }
+ return nullptr;
+}
+
+static bool
+AstDecodeInitializerExpression(AstDecodeContext& c, ValType type, AstExpr** init)
+{
+ InitExpr initExpr;
+ if (!DecodeInitializerExpression(c.d, c.globalDescs(), type, &initExpr))
+ return false;
+
+ *init = ToAstExpr(c, initExpr);
+ return !!*init;
+}
+
+static bool
+AstDecodeGlobal(AstDecodeContext& c, uint32_t i, AstGlobal* global)
+{
+ AstName name;
+ if (!GenerateName(c, AstName(u"global"), i, &name))
+ return false;
+
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(c.d, &type, &isMutable))
+ return false;
+
+ AstExpr* init;
+ if (!AstDecodeInitializerExpression(c, type, &init))
+ return false;
+
+ if (!c.addGlobalDesc(type, isMutable, /* import */ false))
+ return false;
+
+ *global = AstGlobal(name, type, isMutable, Some(init));
+ return true;
+}
+
+static bool
+AstDecodeGlobalSection(AstDecodeContext& c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Global, &sectionStart, &sectionSize, "global"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numGlobals;
+ if (!c.d.readVarU32(&numGlobals))
+ return c.d.fail("expected number of globals");
+
+ uint32_t numImported = c.globalDescs().length();
+
+ for (uint32_t i = 0; i < numGlobals; i++) {
+ auto* global = new(c.lifo) AstGlobal;
+ if (!AstDecodeGlobal(c, i + numImported, global))
+ return false;
+ if (!c.module().append(global))
+ return false;
+ }
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "global"))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeExport(AstDecodeContext& c, AstExport** export_)
+{
+ AstName fieldName;
+ if (!AstDecodeName(c, &fieldName))
+ return c.d.fail("expected export name");
+
+ uint32_t kindValue;
+ if (!c.d.readVarU32(&kindValue))
+ return c.d.fail("expected export kind");
+
+ uint32_t index;
+ if (!c.d.readVarU32(&index))
+ return c.d.fail("expected export internal index");
+
+ *export_ = new(c.lifo) AstExport(fieldName, DefinitionKind(kindValue), AstRef(index));
+ if (!*export_)
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeExportSection(AstDecodeContext& c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Export, &sectionStart, &sectionSize, "export"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numExports;
+ if (!c.d.readVarU32(&numExports))
+ return c.d.fail("failed to read number of exports");
+
+ if (numExports > MaxExports)
+ return c.d.fail("too many exports");
+
+ for (uint32_t i = 0; i < numExports; i++) {
+ AstExport* export_ = nullptr;
+ if (!AstDecodeExport(c, &export_))
+ return false;
+ if (!c.module().append(export_))
+ return false;
+ }
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "export"))
+ return false;
+
+ return true;
+}
+
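+// Decode a single function body: read its local declarations, then decode operators
+// until the closing Op::End (or the end of the body) is reached, producing an AstFunc.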
+static bool
+AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcDefIndex, AstFunc** func)
+{
+ uint32_t offset = c.d.currentOffset();
+ uint32_t bodySize;
+ if (!c.d.readVarU32(&bodySize))
+ return c.d.fail("expected number of function body bytes");
+
+ if (c.d.bytesRemain() < bodySize)
+ return c.d.fail("function body length too big");
+
+ const uint8_t* bodyBegin = c.d.currentPosition();
+ const uint8_t* bodyEnd = bodyBegin + bodySize;
+
+ AstDecodeOpIter iter(c.d);
+
+ uint32_t sigIndex = c.funcDefSigs()[funcDefIndex];
+ const AstSig* sig = c.module().sigs()[sigIndex];
+
+ AstValTypeVector vars(c.lifo);
+ AstNameVector localsNames(c.lifo);
+ AstExprVector body(c.lifo);
+
+ ValTypeVector locals;
+ if (!locals.appendAll(sig->args()))
+ return false;
+
+ if (!DecodeLocalEntries(c.d, ModuleKind::Wasm, &locals))
+ return c.d.fail("failed decoding local entries");
+
+ c.startFunction(&iter, &locals, sig->ret());
+
+ AstName funcName;
+ if (!GenerateName(c, AstName(u"func"), c.module().numFuncImports() + funcDefIndex, &funcName))
+ return false;
+
+ uint32_t numParams = sig->args().length();
+ uint32_t numLocals = locals.length();
+ for (uint32_t i = numParams; i < numLocals; i++) {
+ if (!vars.append(locals[i]))
+ return false;
+ }
+ for (uint32_t i = 0; i < numLocals; i++) {
+ AstName varName;
+ if (!GenerateName(c, AstName(u"var"), i, &varName))
+ return false;
+ if (!localsNames.append(varName))
+ return false;
+ }
+
+ if (!c.iter().readFunctionStart(sig->ret()))
+ return false;
+
+ if (!c.depths().append(c.exprs().length()))
+ return false;
+
+ while (c.d.currentPosition() < bodyEnd) {
+ if (!AstDecodeExpr(c))
+ return false;
+
+ const AstDecodeStackItem& item = c.top();
+ if (!item.expr) { // Op::End was found
+ c.popBack();
+ break;
+ }
+ }
+
+ for (auto i = c.exprs().begin() + c.depths().back(), e = c.exprs().end();
+ i != e; ++i) {
+ if (!body.append(i->expr))
+ return false;
+ }
+ c.exprs().shrinkTo(c.depths().popCopy());
+
+ if (!c.iter().readFunctionEnd())
+ return false;
+
+ c.endFunction();
+
+ if (c.d.currentPosition() != bodyEnd)
+ return c.d.fail("function body length mismatch");
+
+ AstRef sigRef;
+ if (!GenerateRef(c, AstName(u"type"), sigIndex, &sigRef))
+ return false;
+
+ *func = new(c.lifo) AstFunc(funcName, sigRef, Move(vars), Move(localsNames), Move(body));
+ if (!*func)
+ return false;
+ (*func)->setOffset(offset);
+
+ return true;
+}
+
+static bool
+AstDecodeCodeSection(AstDecodeContext &c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Code, &sectionStart, &sectionSize, "code"))
+ return false;
+
+ if (sectionStart == Decoder::NotStarted) {
+ if (c.funcDefSigs().length() != 0)
+ return c.d.fail("expected function bodies");
+
+ return true;
+ }
+
+ uint32_t numFuncBodies;
+ if (!c.d.readVarU32(&numFuncBodies))
+ return c.d.fail("expected function body count");
+
+ if (numFuncBodies != c.funcDefSigs().length())
+ return c.d.fail("function body count does not match function signature count");
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncBodies; funcDefIndex++) {
+ AstFunc* func;
+ if (!AstDecodeFunctionBody(c, funcDefIndex, &func))
+ return false;
+ if (!c.module().append(func))
+ return false;
+ }
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "code"))
+ return false;
+
+ return true;
+}
+
+// Number of bytes to display in a single fragment of a data section (per line).
+static const size_t WRAP_DATA_BYTES = 30;
+
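+// Decode the data section, splitting each segment's payload into text fragments of at
+// most WRAP_DATA_BYTES characters.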
+static bool
+AstDecodeDataSection(AstDecodeContext &c)
+{
+ DataSegmentVector segments;
+ bool hasMemory = c.module().hasMemory();
+
+ MOZ_ASSERT(c.module().memories().length() <= 1, "at most one memory in MVP");
+ uint32_t memByteLength = hasMemory ? c.module().memories()[0].limits.initial : 0;
+
+ if (!DecodeDataSection(c.d, hasMemory, memByteLength, c.globalDescs(), &segments))
+ return false;
+
+ for (DataSegment& s : segments) {
+ const uint8_t* src = c.d.begin() + s.bytecodeOffset;
+ char16_t* buffer = static_cast<char16_t*>(c.lifo.alloc(s.length * sizeof(char16_t)));
+ if (!buffer)
+ return false;
+
+ for (size_t i = 0; i < s.length; i++)
+ buffer[i] = src[i];
+
+ AstExpr* offset = ToAstExpr(c, s.offset);
+ if (!offset)
+ return false;
+
+ AstNameVector fragments(c.lifo);
+ for (size_t start = 0; start < s.length; start += WRAP_DATA_BYTES) {
+ AstName name(buffer + start, Min(WRAP_DATA_BYTES, s.length - start));
+ if (!fragments.append(name))
+ return false;
+ }
+
+ AstDataSegment* segment = new(c.lifo) AstDataSegment(offset, Move(fragments));
+ if (!segment || !c.module().append(segment))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+AstDecodeElemSection(AstDecodeContext &c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Elem, &sectionStart, &sectionSize, "elem"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numElems;
+ if (!c.d.readVarU32(&numElems))
+ return c.d.fail("failed to read number of table elements");
+
+ for (uint32_t i = 0; i < numElems; i++) {
+ uint32_t tableIndex;
+ if (!c.d.readVarU32(&tableIndex))
+ return c.d.fail("expected table index for element");
+
+ if (tableIndex != 0)
+ return c.d.fail("non-zero table index for element");
+
+ AstExpr* offset;
+ if (!AstDecodeInitializerExpression(c, ValType::I32, &offset))
+ return false;
+
+ uint32_t count;
+ if (!c.d.readVarU32(&count))
+ return c.d.fail("expected element count");
+
+ AstRefVector elems(c.lifo);
+ if (!elems.resize(count))
+ return false;
+
+ for (uint32_t i = 0; i < count; i++) {
+ uint32_t index;
+ if (!c.d.readVarU32(&index))
+ return c.d.fail("expected element index");
+
+ elems[i] = AstRef(index);
+ }
+
+ AstElemSegment* segment = new(c.lifo) AstElemSegment(offset, Move(elems));
+ if (!segment || !c.module().append(segment))
+ return false;
+ }
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "elem"))
+ return false;
+
+ return true;
+}
+
+static bool
+AstDecodeStartSection(AstDecodeContext &c)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!c.d.startSection(SectionId::Start, &sectionStart, &sectionSize, "start"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t funcIndex;
+ if (!c.d.readVarU32(&funcIndex))
+ return c.d.fail("failed to read start func index");
+
+ AstRef funcRef;
+ if (!GenerateRef(c, AstName(u"func"), funcIndex, &funcRef))
+ return false;
+
+ c.module().setStartFunc(AstStartFunc(funcRef));
+
+ if (!c.d.finishSection(sectionStart, sectionSize, "start"))
+ return false;
+
+ return true;
+}
+
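+// Top-level entry point: decode a complete wasm binary into an AstModule, reporting a
+// compile error (or OOM when no error message is available) on failure.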
+bool
+wasm::BinaryToAst(JSContext* cx, const uint8_t* bytes, uint32_t length,
+ LifoAlloc& lifo, AstModule** module)
+{
+ AstModule* result = new(lifo) AstModule(lifo);
+ if (!result->init())
+ return false;
+
+ UniqueChars error;
+ Decoder d(bytes, bytes + length, &error);
+ AstDecodeContext c(cx, lifo, d, *result, true);
+
+ SigWithIdVector sigs;
+ if (!DecodePreamble(d) ||
+ !AstDecodeTypeSection(c, &sigs) ||
+ !AstDecodeImportSection(c, sigs) ||
+ !AstDecodeFunctionSection(c, sigs) ||
+ !AstDecodeTableSection(c) ||
+ !AstDecodeMemorySection(c) ||
+ !AstDecodeGlobalSection(c) ||
+ !AstDecodeExportSection(c) ||
+ !AstDecodeStartSection(c) ||
+ !AstDecodeElemSection(c) ||
+ !AstDecodeCodeSection(c) ||
+ !AstDecodeDataSection(c) ||
+ !DecodeUnknownSections(c.d))
+ {
+ if (error) {
+ JS_ReportErrorNumberASCII(c.cx, GetErrorMessage, nullptr, JSMSG_WASM_COMPILE_ERROR,
+ error.get());
+ return false;
+ }
+ ReportOutOfMemory(c.cx);
+ return false;
+ }
+
+ MOZ_ASSERT(!error, "unreported error in decoding");
+
+ *module = result;
+ return true;
+}
diff --git a/js/src/wasm/WasmBinaryToAST.h b/js/src/wasm/WasmBinaryToAST.h
new file mode 100644
index 0000000000..320862dbbc
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToAST.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasmbinarytoast_h
+#define wasmbinarytoast_h
+
+#include "ds/LifoAlloc.h"
+
+#include "wasm/WasmAST.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+bool
+BinaryToAst(JSContext* cx, const uint8_t* bytes, uint32_t length, LifoAlloc& lifo,
+ AstModule** module);
+
+} // end wasm namespace
+} // end js namespace
+
+#endif // wasmbinarytoast_h
diff --git a/js/src/wasm/WasmBinaryToExperimentalText.cpp b/js/src/wasm/WasmBinaryToExperimentalText.cpp
new file mode 100644
index 0000000000..29f79981b6
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToExperimentalText.cpp
@@ -0,0 +1,1922 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinaryToExperimentalText.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "jsnum.h"
+#include "jsprf.h"
+
+#include "vm/ArrayBufferObject.h"
+#include "vm/StringBuffer.h"
+#include "wasm/WasmAST.h"
+#include "wasm/WasmBinaryToAST.h"
+#include "wasm/WasmTextUtils.h"
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::IsInfinite;
+using mozilla::IsNaN;
+using mozilla::IsNegativeZero;
+
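+// Operator precedences used when rendering expressions; higher values bind more
+// tightly. Parentheses are emitted when the surrounding context's precedence exceeds
+// that of the operator being printed, or unconditionally when reduceParens is disabled.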
+enum PrintOperatorPrecedence
+{
+ ExpressionPrecedence = 0,
+ AssignmentPrecedence = 1,
+ StoreOperatorPrecedence = 1,
+ BitwiseOrPrecedence = 4,
+ BitwiseXorPrecedence = 5,
+ BitwiseAndPrecedence = 6,
+ EqualityPrecedence = 7,
+ ComparisonPrecedence = 8,
+ BitwiseShiftPrecedence = 9,
+ AdditionPrecedence = 10,
+ MultiplicationPrecedence = 11,
+ NegatePrecedence = 12,
+ EqzPrecedence = 12,
+ OperatorPrecedence = 15,
+ LoadOperatorPrecedence = 15,
+ CallPrecedence = 15,
+ GroupPrecedence = 16,
+};
+
+struct WasmPrintContext
+{
+ JSContext* cx;
+ AstModule* module;
+ WasmPrintBuffer& buffer;
+ const ExperimentalTextFormatting& f;
+ GeneratedSourceMap* maybeSourceMap;
+ uint32_t indent;
+
+ uint32_t currentFuncIndex;
+ PrintOperatorPrecedence currentPrecedence;
+
+ WasmPrintContext(JSContext* cx, AstModule* module, WasmPrintBuffer& buffer,
+ const ExperimentalTextFormatting& f, GeneratedSourceMap* wasmSourceMap_)
+ : cx(cx),
+ module(module),
+ buffer(buffer),
+ f(f),
+ maybeSourceMap(wasmSourceMap_),
+ indent(0),
+ currentFuncIndex(0),
+ currentPrecedence(PrintOperatorPrecedence::ExpressionPrecedence)
+ {}
+
+ StringBuffer& sb() { return buffer.stringBuffer(); }
+};
+
+/*****************************************************************************/
+// utilities
+
+static bool
+IsDropValueExpr(AstExpr& expr)
+{
+ // Based on the AST information, determine whether the expression does not return a value.
+ // TODO: infer the presence of a return value for the remaining kinds of expressions from
+ // the function return type.
+ switch (expr.kind()) {
+ case AstExprKind::Branch:
+ return !expr.as<AstBranch>().maybeValue();
+ case AstExprKind::BranchTable:
+ return !expr.as<AstBranchTable>().maybeValue();
+ case AstExprKind::If:
+ return !expr.as<AstIf>().hasElse();
+ case AstExprKind::Nop:
+ case AstExprKind::Drop:
+ case AstExprKind::Unreachable:
+ case AstExprKind::Return:
+ case AstExprKind::SetLocal:
+ case AstExprKind::Store:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+PrintIndent(WasmPrintContext& c)
+{
+ for (uint32_t i = 0; i < c.indent; i++) {
+ if (!c.buffer.append(" "))
+ return false;
+ }
+ return true;
+}
+
+static bool
+PrintInt32(WasmPrintContext& c, int32_t num, bool printSign = false)
+{
+ // A negative sign is printed automatically; print '+' for non-negative values when requested.
+ if (printSign && num >= 0) {
+ if (!c.buffer.append("+"))
+ return false;
+ }
+ return NumberValueToStringBuffer(c.cx, Int32Value(num), c.buffer.stringBuffer());
+}
+
+static bool
+PrintInt64(WasmPrintContext& c, int64_t num)
+{
+ if (num < 0 && !c.buffer.append("-"))
+ return false;
+ if (!num)
+ return c.buffer.append("0");
+
+ uint64_t abs = mozilla::Abs(num);
+ uint64_t n = abs;
+ uint64_t pow = 1;
+ while (n) {
+ pow *= 10;
+ n /= 10;
+ }
+ pow /= 10;
+
+ n = abs;
+ while (pow) {
+ if (!c.buffer.append((char16_t)(u'0' + n / pow)))
+ return false;
+ n -= (n / pow) * pow;
+ pow /= 10;
+ }
+
+ return true;
+}
+
+static bool
+PrintDouble(WasmPrintContext& c, RawF64 num)
+{
+ double d = num.fp();
+ if (IsNegativeZero(d))
+ return c.buffer.append("-0.0");
+ if (IsNaN(d))
+ return RenderNaN(c.sb(), num);
+ if (IsInfinite(d)) {
+ if (d > 0)
+ return c.buffer.append("infinity");
+ return c.buffer.append("-infinity");
+ }
+
+ uint32_t startLength = c.buffer.length();
+ if (!NumberValueToStringBuffer(c.cx, DoubleValue(d), c.buffer.stringBuffer()))
+ return false;
+ MOZ_ASSERT(startLength < c.buffer.length());
+
+ // Check whether the number needs to end with '.0'.
+ for (uint32_t i = c.buffer.length() - 1; i >= startLength; i--) {
+ char16_t ch = c.buffer.getChar(i);
+ if (ch == '.' || ch == 'e')
+ return true;
+ }
+ return c.buffer.append(".0");
+}
+
+static bool
+PrintFloat32(WasmPrintContext& c, RawF32 num)
+{
+ float f = num.fp();
+ if (IsNaN(f))
+ return RenderNaN(c.sb(), num) && c.buffer.append(".f");
+ return PrintDouble(c, RawF64(double(f))) &&
+ c.buffer.append("f");
+}
+
+static bool
+PrintEscapedString(WasmPrintContext& c, const AstName& s)
+{
+ size_t length = s.length();
+ const char16_t* p = s.begin();
+ for (size_t i = 0; i < length; i++) {
+ char16_t byte = p[i];
+ switch (byte) {
+ case '\n':
+ if (!c.buffer.append("\\n"))
+ return false;
+ break;
+ case '\r':
+ if (!c.buffer.append("\\0d"))
+ return false;
+ break;
+ case '\t':
+ if (!c.buffer.append("\\t"))
+ return false;
+ break;
+ case '\f':
+ if (!c.buffer.append("\\0c"))
+ return false;
+ break;
+ case '\b':
+ if (!c.buffer.append("\\08"))
+ return false;
+ break;
+ case '\\':
+ if (!c.buffer.append("\\\\"))
+ return false;
+ break;
+ case '"' :
+ if (!c.buffer.append("\\\""))
+ return false;
+ break;
+ case '\'':
+ if (!c.buffer.append("\\'"))
+ return false;
+ break;
+ default:
+ if (byte >= 32 && byte < 127) {
+ if (!c.buffer.append((char)byte))
+ return false;
+ } else {
+ char digit1 = byte / 16, digit2 = byte % 16;
+ if (!c.buffer.append("\\"))
+ return false;
+ if (!c.buffer.append((char)(digit1 < 10 ? digit1 + '0' : digit1 - 10 + 'a')))
+ return false;
+ if (!c.buffer.append((char)(digit2 < 10 ? digit2 + '0' : digit2 - 10 + 'a')))
+ return false;
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+static bool
+PrintExprType(WasmPrintContext& c, ExprType type)
+{
+ switch (type) {
+ case ExprType::Void: return true; // ignoring void
+ case ExprType::I32: return c.buffer.append("i32");
+ case ExprType::I64: return c.buffer.append("i64");
+ case ExprType::F32: return c.buffer.append("f32");
+ case ExprType::F64: return c.buffer.append("f64");
+ default:;
+ }
+
+ MOZ_CRASH("bad type");
+}
+
+static bool
+PrintValType(WasmPrintContext& c, ValType type)
+{
+ return PrintExprType(c, ToExprType(type));
+}
+
+static bool
+PrintName(WasmPrintContext& c, const AstName& name)
+{
+ return c.buffer.append(name.begin(), name.end());
+}
+
+static bool
+PrintRef(WasmPrintContext& c, const AstRef& ref)
+{
+ if (ref.name().empty())
+ return PrintInt32(c, ref.index());
+
+ return PrintName(c, ref.name());
+}
+
+static bool
+PrintExpr(WasmPrintContext& c, AstExpr& expr);
+
+static bool
+PrintBlockLevelExpr(WasmPrintContext& c, AstExpr& expr, bool isLast)
+{
+ if (!PrintIndent(c))
+ return false;
+ if (!PrintExpr(c, expr))
+ return false;
+ if (!isLast || IsDropValueExpr(expr)) {
+ if (!c.buffer.append(';'))
+ return false;
+ }
+ return c.buffer.append('\n');
+}
+
+/*****************************************************************************/
+// binary format parsing and rendering
+
+static bool
+PrintNop(WasmPrintContext& c)
+{
+ return c.buffer.append("nop");
+}
+
+static bool
+PrintDrop(WasmPrintContext& c, AstDrop& drop)
+{
+ if (!PrintExpr(c, drop.value()))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintUnreachable(WasmPrintContext& c, AstUnreachable& unreachable)
+{
+ return c.buffer.append("unreachable");
+}
+
+static bool
+PrintCallArgs(WasmPrintContext& c, const AstExprVector& args)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+ c.currentPrecedence = ExpressionPrecedence;
+
+ if (!c.buffer.append("("))
+ return false;
+ for (uint32_t i = 0; i < args.length(); i++) {
+ if (!PrintExpr(c, *args[i]))
+ return false;
+ if (i + 1 == args.length())
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+ if (!c.buffer.append(")"))
+ return false;
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
+
+static bool
+PrintCall(WasmPrintContext& c, AstCall& call)
+{
+ if (call.op() == Op::Call) {
+ if (!c.buffer.append("call "))
+ return false;
+ } else {
+ return false;
+ }
+
+ if (!PrintRef(c, call.func()))
+ return false;
+
+ if (!c.buffer.append(" "))
+ return false;
+
+ if (!PrintCallArgs(c, call.args()))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintCallIndirect(WasmPrintContext& c, AstCallIndirect& call)
+{
+ if (!c.buffer.append("call_indirect "))
+ return false;
+ if (!PrintRef(c, call.sig()))
+ return false;
+
+ if (!c.buffer.append(' '))
+ return false;
+
+ if (!PrintCallArgs(c, call.args()))
+ return false;
+
+ if (!c.buffer.append(" ["))
+ return false;
+
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+ c.currentPrecedence = ExpressionPrecedence;
+
+ if (!PrintExpr(c, *call.index()))
+ return false;
+
+ c.currentPrecedence = lastPrecedence;
+
+ if (!c.buffer.append(']'))
+ return false;
+ return true;
+}
+
+static bool
+PrintConst(WasmPrintContext& c, AstConst& cst)
+{
+ switch (ToExprType(cst.val().type())) {
+ case ExprType::I32:
+ if (!PrintInt32(c, (uint32_t)cst.val().i32()))
+ return false;
+ break;
+ case ExprType::I64:
+ if (!PrintInt64(c, cst.val().i64()))
+ return false;
+ if (!c.buffer.append("i64"))
+ return false;
+ break;
+ case ExprType::F32:
+ if (!PrintFloat32(c, cst.val().f32()))
+ return false;
+ break;
+ case ExprType::F64:
+ if (!PrintDouble(c, cst.val().f64()))
+ return false;
+ break;
+ default:
+ return false;
+ }
+ return true;
+}
+
+static bool
+PrintGetLocal(WasmPrintContext& c, AstGetLocal& gl)
+{
+ if (!PrintRef(c, gl.local()))
+ return false;
+ return true;
+}
+
+static bool
+PrintSetLocal(WasmPrintContext& c, AstSetLocal& sl)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ if (!PrintRef(c, sl.local()))
+ return false;
+ if (!c.buffer.append(" = "))
+ return false;
+
+ c.currentPrecedence = AssignmentPrecedence;
+
+ if (!PrintExpr(c, sl.value()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
+
+static bool
+PrintTeeLocal(WasmPrintContext& c, AstTeeLocal& sl)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ if (!PrintRef(c, sl.local()))
+ return false;
+ if (!c.buffer.append(" = "))
+ return false;
+
+ c.currentPrecedence = AssignmentPrecedence;
+
+ if (!PrintExpr(c, sl.value()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
+
+static bool
+PrintGetGlobal(WasmPrintContext& c, AstGetGlobal& gg)
+{
+ return PrintRef(c, gg.global());
+}
+
+static bool
+PrintSetGlobal(WasmPrintContext& c, AstSetGlobal& sg)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ if (!PrintRef(c, sg.global()))
+ return false;
+ if (!c.buffer.append(" = "))
+ return false;
+
+ c.currentPrecedence = AssignmentPrecedence;
+
+ if (!PrintExpr(c, sg.value()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > AssignmentPrecedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
+
+static bool
+PrintExprList(WasmPrintContext& c, const AstExprVector& exprs, uint32_t startFrom = 0)
+{
+ for (uint32_t i = startFrom; i < exprs.length(); i++) {
+ if (!PrintBlockLevelExpr(c, *exprs[i], i + 1 == exprs.length()))
+ return false;
+ }
+ return true;
+}
+
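+// Print a chain of directly nested blocks as one flattened group: each inner block's
+// expressions are printed first, followed by its label (or the placeholder $label),
+// since branches target the end of a block.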
+static bool
+PrintGroupedBlock(WasmPrintContext& c, AstBlock& block)
+{
+ uint32_t skip = 0;
+ if (block.exprs().length() > 0 &&
+ block.exprs()[0]->kind() == AstExprKind::Block) {
+ if (!PrintGroupedBlock(c, *static_cast<AstBlock*>(block.exprs()[0])))
+ return false;
+ skip = 1;
+ }
+ c.indent++;
+ if (!PrintExprList(c, block.exprs(), skip))
+ return false;
+ c.indent--;
+ if (!PrintIndent(c))
+ return false;
+
+ // If no br/br_if/br_table refers to this block, use a non-existent label.
+ if (block.name().empty())
+ return c.buffer.append("$label:\n");
+
+ if (!PrintName(c, block.name()))
+ return false;
+ if (!c.buffer.append(":\n"))
+ return false;
+ return true;
+}
+
+static bool
+PrintBlockName(WasmPrintContext& c, const AstName& name)
+{
+ if (name.empty())
+ return true;
+
+ if (!PrintIndent(c))
+ return false;
+ if (!PrintName(c, name))
+ return false;
+ return c.buffer.append(":\n");
+}
+
+static bool
+PrintBlock(WasmPrintContext& c, AstBlock& block)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+ if (block.op() == Op::Block) {
+ if (!c.buffer.append("{\n"))
+ return false;
+ } else if (block.op() == Op::Loop) {
+ if (!c.buffer.append("loop"))
+ return false;
+ if (!block.name().empty()) {
+ if (!c.buffer.append(" "))
+ return false;
+ if (!PrintName(c, block.name()))
+ return false;
+ }
+ if (!c.buffer.append(" {\n"))
+ return false;
+ } else
+ return false;
+
+ c.currentPrecedence = ExpressionPrecedence;
+
+ uint32_t skip = 0;
+ if (c.f.groupBlocks && block.op() == Op::Block &&
+ block.exprs().length() > 0 && block.exprs()[0]->kind() == AstExprKind::Block)
+ {
+ AstBlock* innerBlock = static_cast<AstBlock*>(block.exprs()[0]);
+ if (innerBlock->op() == Op::Block) {
+ if (!PrintGroupedBlock(c, *innerBlock))
+ return false;
+ skip = 1;
+ if (block.exprs().length() == 1 && block.name().empty()) {
+ // Special case to resolve an ambiguity when parsing the optional end-of-block label.
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("$exit$:\n"))
+ return false;
+ }
+ }
+ }
+
+ c.indent++;
+ if (!PrintExprList(c, block.exprs(), skip))
+ return false;
+ c.indent--;
+ c.currentPrecedence = lastPrecedence;
+
+ if (block.op() != Op::Loop) {
+ if (!PrintBlockName(c, block.name()))
+ return false;
+ }
+
+ if (!PrintIndent(c))
+ return false;
+
+ return c.buffer.append("}");
+}
+
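+// Print a unary operator either as a prefix ASCII operator (e.g. -x for f32.neg) when
+// the formatting options allow it, or as an explicit opcode call such as i32.clz(x).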
+static bool
+PrintUnaryOperator(WasmPrintContext& c, AstUnaryOperator& unary)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ const char* opStr;
+ const char* prefixStr = nullptr;
+ PrintOperatorPrecedence precedence = OperatorPrecedence;
+ switch (unary.op()) {
+ case Op::I32Clz: opStr = "i32.clz"; break;
+ case Op::I32Ctz: opStr = "i32.ctz"; break;
+ case Op::I32Popcnt: opStr = "i32.popcnt"; break;
+ case Op::I64Clz: opStr = "i64.clz"; break;
+ case Op::I64Ctz: opStr = "i64.ctz"; break;
+ case Op::I64Popcnt: opStr = "i64.popcnt"; break;
+ case Op::F32Abs: opStr = "f32.abs"; break;
+ case Op::F32Neg: opStr = "f32.neg"; prefixStr = "-"; precedence = NegatePrecedence; break;
+ case Op::F32Ceil: opStr = "f32.ceil"; break;
+ case Op::F32Floor: opStr = "f32.floor"; break;
+ case Op::F32Sqrt: opStr = "f32.sqrt"; break;
+ case Op::F32Trunc: opStr = "f32.trunc"; break;
+ case Op::F32Nearest: opStr = "f32.nearest"; break;
+ case Op::F64Abs: opStr = "f64.abs"; break;
+ case Op::F64Neg: opStr = "f64.neg"; prefixStr = "-"; precedence = NegatePrecedence; break;
+ case Op::F64Ceil: opStr = "f64.ceil"; break;
+ case Op::F64Floor: opStr = "f64.floor"; break;
+ case Op::F64Sqrt: opStr = "f64.sqrt"; break;
+ default: return false;
+ }
+
+ if (c.f.allowAsciiOperators && prefixStr) {
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ c.currentPrecedence = precedence;
+ if (!c.buffer.append(prefixStr, strlen(prefixStr)))
+ return false;
+ if (!PrintExpr(c, *unary.operand()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ } else {
+ if (!c.buffer.append(opStr, strlen(opStr)))
+ return false;
+ if (!c.buffer.append("("))
+ return false;
+
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!PrintExpr(c, *unary.operand()))
+ return false;
+
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
+
+static bool
+PrintBinaryOperator(WasmPrintContext& c, AstBinaryOperator& binary)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ const char* opStr;
+ const char* infixStr = nullptr;
+ PrintOperatorPrecedence precedence;
+ switch (binary.op()) {
+ case Op::I32Add: opStr = "i32.add"; infixStr = "+"; precedence = AdditionPrecedence; break;
+ case Op::I32Sub: opStr = "i32.sub"; infixStr = "-"; precedence = AdditionPrecedence; break;
+ case Op::I32Mul: opStr = "i32.mul"; infixStr = "*"; precedence = MultiplicationPrecedence; break;
+ case Op::I32DivS: opStr = "i32.div_s"; infixStr = "/s"; precedence = MultiplicationPrecedence; break;
+ case Op::I32DivU: opStr = "i32.div_u"; infixStr = "/u"; precedence = MultiplicationPrecedence; break;
+ case Op::I32RemS: opStr = "i32.rem_s"; infixStr = "%s"; precedence = MultiplicationPrecedence; break;
+ case Op::I32RemU: opStr = "i32.rem_u"; infixStr = "%u"; precedence = MultiplicationPrecedence; break;
+ case Op::I32And: opStr = "i32.and"; infixStr = "&"; precedence = BitwiseAndPrecedence; break;
+ case Op::I32Or: opStr = "i32.or"; infixStr = "|"; precedence = BitwiseOrPrecedence; break;
+ case Op::I32Xor: opStr = "i32.xor"; infixStr = "^"; precedence = BitwiseXorPrecedence; break;
+ case Op::I32Shl: opStr = "i32.shl"; infixStr = "<<"; precedence = BitwiseShiftPrecedence; break;
+ case Op::I32ShrS: opStr = "i32.shr_s"; infixStr = ">>s"; precedence = BitwiseShiftPrecedence; break;
+ case Op::I32ShrU: opStr = "i32.shr_u"; infixStr = ">>u"; precedence = BitwiseShiftPrecedence; break;
+ case Op::I64Add: opStr = "i64.add"; infixStr = "+"; precedence = AdditionPrecedence; break;
+ case Op::I64Sub: opStr = "i64.sub"; infixStr = "-"; precedence = AdditionPrecedence; break;
+ case Op::I64Mul: opStr = "i64.mul"; infixStr = "*"; precedence = MultiplicationPrecedence; break;
+ case Op::I64DivS: opStr = "i64.div_s"; infixStr = "/s"; precedence = MultiplicationPrecedence; break;
+ case Op::I64DivU: opStr = "i64.div_u"; infixStr = "/u"; precedence = MultiplicationPrecedence; break;
+ case Op::I64RemS: opStr = "i64.rem_s"; infixStr = "%s"; precedence = MultiplicationPrecedence; break;
+ case Op::I64RemU: opStr = "i64.rem_u"; infixStr = "%u"; precedence = MultiplicationPrecedence; break;
+ case Op::I64And: opStr = "i64.and"; infixStr = "&"; precedence = BitwiseAndPrecedence; break;
+ case Op::I64Or: opStr = "i64.or"; infixStr = "|"; precedence = BitwiseOrPrecedence; break;
+ case Op::I64Xor: opStr = "i64.xor"; infixStr = "^"; precedence = BitwiseXorPrecedence; break;
+ case Op::I64Shl: opStr = "i64.shl"; infixStr = "<<"; precedence = BitwiseShiftPrecedence; break;
+ case Op::I64ShrS: opStr = "i64.shr_s"; infixStr = ">>s"; precedence = BitwiseShiftPrecedence; break;
+ case Op::I64ShrU: opStr = "i64.shr_u"; infixStr = ">>u"; precedence = BitwiseShiftPrecedence; break;
+ case Op::F32Add: opStr = "f32.add"; infixStr = "+"; precedence = AdditionPrecedence; break;
+ case Op::F32Sub: opStr = "f32.sub"; infixStr = "-"; precedence = AdditionPrecedence; break;
+ case Op::F32Mul: opStr = "f32.mul"; infixStr = "*"; precedence = MultiplicationPrecedence; break;
+ case Op::F32Div: opStr = "f32.div"; infixStr = "/"; precedence = MultiplicationPrecedence; break;
+ case Op::F32Min: opStr = "f32.min"; precedence = OperatorPrecedence; break;
+ case Op::F32Max: opStr = "f32.max"; precedence = OperatorPrecedence; break;
+ case Op::F32CopySign: opStr = "f32.copysign"; precedence = OperatorPrecedence; break;
+ case Op::F64Add: opStr = "f64.add"; infixStr = "+"; precedence = AdditionPrecedence; break;
+ case Op::F64Sub: opStr = "f64.sub"; infixStr = "-"; precedence = AdditionPrecedence; break;
+ case Op::F64Mul: opStr = "f64.mul"; infixStr = "*"; precedence = MultiplicationPrecedence; break;
+ case Op::F64Div: opStr = "f64.div"; infixStr = "/"; precedence = MultiplicationPrecedence; break;
+ case Op::F64Min: opStr = "f64.min"; precedence = OperatorPrecedence; break;
+ case Op::F64Max: opStr = "f64.max"; precedence = OperatorPrecedence; break;
+ case Op::F64CopySign: opStr = "f64.copysign"; precedence = OperatorPrecedence; break;
+ default: return false;
+ }
+
+ if (c.f.allowAsciiOperators && infixStr) {
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ c.currentPrecedence = precedence;
+ if (!PrintExpr(c, *binary.lhs()))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ if (!c.buffer.append(infixStr, strlen(infixStr)))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ // Use precedence + 1 so a right operand of equal precedence keeps its parentheses, e.g. A / (B / C).
+ c.currentPrecedence = (PrintOperatorPrecedence)(precedence + 1);
+
+ if (!PrintExpr(c, *binary.rhs()))
+ return false;
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ } else {
+ if (!c.buffer.append(opStr, strlen(opStr)))
+ return false;
+ if (!c.buffer.append("("))
+ return false;
+
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!PrintExpr(c, *binary.lhs()))
+ return false;
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintExpr(c, *binary.rhs()))
+ return false;
+
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
+
+static bool
+PrintTernaryOperator(WasmPrintContext& c, AstTernaryOperator& ternary)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ const char* opStr;
+ switch (ternary.op()) {
+ case Op::Select: opStr = "select"; break;
+ default: return false;
+ }
+
+ if (!c.buffer.append(opStr, strlen(opStr)))
+ return false;
+ if (!c.buffer.append("("))
+ return false;
+
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!PrintExpr(c, *ternary.op0()))
+ return false;
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintExpr(c, *ternary.op1()))
+ return false;
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintExpr(c, *ternary.op2()))
+ return false;
+
+ if (!c.buffer.append(")"))
+ return false;
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
+
+static bool
+PrintComparisonOperator(WasmPrintContext& c, AstComparisonOperator& comp)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ const char* opStr;
+ const char* infixStr = nullptr;
+ PrintOperatorPrecedence precedence;
+ switch (comp.op()) {
+ case Op::I32Eq: opStr = "i32.eq"; infixStr = "=="; precedence = EqualityPrecedence; break;
+ case Op::I32Ne: opStr = "i32.ne"; infixStr = "!="; precedence = EqualityPrecedence; break;
+ case Op::I32LtS: opStr = "i32.lt_s"; infixStr = "<s"; precedence = ComparisonPrecedence; break;
+ case Op::I32LtU: opStr = "i32.lt_u"; infixStr = "<u"; precedence = ComparisonPrecedence; break;
+ case Op::I32LeS: opStr = "i32.le_s"; infixStr = "<=s"; precedence = ComparisonPrecedence; break;
+ case Op::I32LeU: opStr = "i32.le_u"; infixStr = "<=u"; precedence = ComparisonPrecedence; break;
+ case Op::I32GtS: opStr = "i32.gt_s"; infixStr = ">s"; precedence = ComparisonPrecedence; break;
+ case Op::I32GtU: opStr = "i32.gt_u"; infixStr = ">u"; precedence = ComparisonPrecedence; break;
+ case Op::I32GeS: opStr = "i32.ge_s"; infixStr = ">=s"; precedence = ComparisonPrecedence; break;
+ case Op::I32GeU: opStr = "i32.ge_u"; infixStr = ">=u"; precedence = ComparisonPrecedence; break;
+ case Op::I64Eq: opStr = "i64.eq"; infixStr = "=="; precedence = EqualityPrecedence; break;
+ case Op::I64Ne: opStr = "i64.ne"; infixStr = "!="; precedence = EqualityPrecedence; break;
+ case Op::I64LtS: opStr = "i64.lt_s"; infixStr = "<s"; precedence = ComparisonPrecedence; break;
+ case Op::I64LtU: opStr = "i64.lt_u"; infixStr = "<u"; precedence = ComparisonPrecedence; break;
+ case Op::I64LeS: opStr = "i64.le_s"; infixStr = "<=s"; precedence = ComparisonPrecedence; break;
+ case Op::I64LeU: opStr = "i64.le_u"; infixStr = "<=u"; precedence = ComparisonPrecedence; break;
+ case Op::I64GtS: opStr = "i64.gt_s"; infixStr = ">s"; precedence = ComparisonPrecedence; break;
+ case Op::I64GtU: opStr = "i64.gt_u"; infixStr = ">u"; precedence = ComparisonPrecedence; break;
+ case Op::I64GeS: opStr = "i64.ge_s"; infixStr = ">=s"; precedence = ComparisonPrecedence; break;
+ case Op::I64GeU: opStr = "i64.ge_u"; infixStr = ">=u"; precedence = ComparisonPrecedence; break;
+ case Op::F32Eq: opStr = "f32.eq"; infixStr = "=="; precedence = EqualityPrecedence; break;
+ case Op::F32Ne: opStr = "f32.ne"; infixStr = "!="; precedence = EqualityPrecedence; break;
+ case Op::F32Lt: opStr = "f32.lt"; infixStr = "<"; precedence = ComparisonPrecedence; break;
+ case Op::F32Le: opStr = "f32.le"; infixStr = "<="; precedence = ComparisonPrecedence; break;
+ case Op::F32Gt: opStr = "f32.gt"; infixStr = ">"; precedence = ComparisonPrecedence; break;
+ case Op::F32Ge: opStr = "f32.ge"; infixStr = ">="; precedence = ComparisonPrecedence; break;
+ case Op::F64Eq: opStr = "f64.eq"; infixStr = "=="; precedence = EqualityPrecedence; break;
+ case Op::F64Ne: opStr = "f64.ne"; infixStr = "!="; precedence = EqualityPrecedence; break;
+ case Op::F64Lt: opStr = "f64.lt"; infixStr = "<"; precedence = ComparisonPrecedence; break;
+ case Op::F64Le: opStr = "f64.le"; infixStr = "<="; precedence = ComparisonPrecedence; break;
+ case Op::F64Gt: opStr = "f64.gt"; infixStr = ">"; precedence = ComparisonPrecedence; break;
+ case Op::F64Ge: opStr = "f64.ge"; infixStr = ">="; precedence = ComparisonPrecedence; break;
+ default: return false;
+ }
+
+ if (c.f.allowAsciiOperators && infixStr) {
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+ c.currentPrecedence = precedence;
+ if (!PrintExpr(c, *comp.lhs()))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ if (!c.buffer.append(infixStr, strlen(infixStr)))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ // Use precedence + 1 so a right operand of equal precedence keeps its parentheses, e.g. A == (B == C).
+ c.currentPrecedence = (PrintOperatorPrecedence)(precedence + 1);
+ if (!PrintExpr(c, *comp.rhs()))
+ return false;
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ } else {
+ if (!c.buffer.append(opStr, strlen(opStr)))
+ return false;
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!c.buffer.append("("))
+ return false;
+ if (!PrintExpr(c, *comp.lhs()))
+ return false;
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintExpr(c, *comp.rhs()))
+ return false;
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
+
+static bool
+PrintConversionOperator(WasmPrintContext& c, AstConversionOperator& conv)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ const char* opStr;
+ const char* prefixStr = nullptr;
+ PrintOperatorPrecedence precedence = ExpressionPrecedence;
+ switch (conv.op()) {
+ case Op::I32Eqz: opStr = "i32.eqz"; prefixStr = "!"; precedence = EqzPrecedence; break;
+ case Op::I32WrapI64: opStr = "i32.wrap/i64"; break;
+ case Op::I32TruncSF32: opStr = "i32.trunc_s/f32"; break;
+ case Op::I32TruncUF32: opStr = "i32.trunc_u/f32"; break;
+ case Op::I32ReinterpretF32: opStr = "i32.reinterpret/f32"; break;
+ case Op::I32TruncSF64: opStr = "i32.trunc_s/f64"; break;
+ case Op::I32TruncUF64: opStr = "i32.trunc_u/f64"; break;
+ case Op::I64Eqz: opStr = "i64.eqz"; prefixStr = "!"; precedence = EqzPrecedence; break;
+ case Op::I64ExtendSI32: opStr = "i64.extend_s/i32"; break;
+ case Op::I64ExtendUI32: opStr = "i64.extend_u/i32"; break;
+ case Op::I64TruncSF32: opStr = "i64.trunc_s/f32"; break;
+ case Op::I64TruncUF32: opStr = "i64.trunc_u/f32"; break;
+ case Op::I64TruncSF64: opStr = "i64.trunc_s/f64"; break;
+ case Op::I64TruncUF64: opStr = "i64.trunc_u/f64"; break;
+ case Op::I64ReinterpretF64: opStr = "i64.reinterpret/f64"; break;
+ case Op::F32ConvertSI32: opStr = "f32.convert_s/i32"; break;
+ case Op::F32ConvertUI32: opStr = "f32.convert_u/i32"; break;
+ case Op::F32ReinterpretI32: opStr = "f32.reinterpret/i32"; break;
+ case Op::F32ConvertSI64: opStr = "f32.convert_s/i64"; break;
+ case Op::F32ConvertUI64: opStr = "f32.convert_u/i64"; break;
+ case Op::F32DemoteF64: opStr = "f32.demote/f64"; break;
+ case Op::F64ConvertSI32: opStr = "f64.convert_s/i32"; break;
+ case Op::F64ConvertUI32: opStr = "f64.convert_u/i32"; break;
+ case Op::F64ConvertSI64: opStr = "f64.convert_s/i64"; break;
+ case Op::F64ConvertUI64: opStr = "f64.convert_u/i64"; break;
+ case Op::F64ReinterpretI64: opStr = "f64.reinterpret/i64"; break;
+ case Op::F64PromoteF32: opStr = "f64.promote/f32"; break;
+ default: return false;
+ }
+
+ if (c.f.allowAsciiOperators && prefixStr) {
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ c.currentPrecedence = precedence;
+ if (!c.buffer.append(prefixStr, strlen(prefixStr)))
+ return false;
+ if (!PrintExpr(c, *conv.operand()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > precedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ } else {
+ if (!c.buffer.append(opStr, strlen(opStr)))
+ return false;
+ if (!c.buffer.append("("))
+ return false;
+
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!PrintExpr(c, *conv.operand()))
+ return false;
+
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
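+// For illustration of the printer above (operand name is a placeholder): i32.eqz of x
+// prints as "!x" when allowAsciiOperators is set, while a conversion with no prefix
+// form, such as i32.wrap/i64, always prints as "i32.wrap/i64(x)".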
+
+static bool
+PrintIf(WasmPrintContext& c, AstIf& if_)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ c.currentPrecedence = ExpressionPrecedence;
+ if (!c.buffer.append("if ("))
+ return false;
+ if (!PrintExpr(c, if_.cond()))
+ return false;
+
+ if (!c.buffer.append(") {\n"))
+ return false;
+
+ c.indent++;
+ if (!PrintExprList(c, if_.thenExprs()))
+ return false;
+ c.indent--;
+
+ if (!PrintBlockName(c, if_.name()))
+ return false;
+
+ if (if_.hasElse()) {
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("} else {\n"))
+ return false;
+
+ c.indent++;
+ if (!PrintExprList(c, if_.elseExprs()))
+ return false;
+ c.indent--;
+ if (!PrintBlockName(c, if_.name()))
+ return false;
+ }
+
+ if (!PrintIndent(c))
+ return false;
+
+ c.currentPrecedence = lastPrecedence;
+
+ return c.buffer.append("}");
+}
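+// A rough sketch of the shape emitted above (condition and body text depend on the
+// nested expressions):
+//   if (cond) {
+//     ...then exprs...
+//   } else {
+//     ...else exprs...
+//   }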
+
+static bool
+PrintLoadStoreAddress(WasmPrintContext& c, const AstLoadStoreAddress& lsa, uint32_t defaultAlignLog2)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ c.currentPrecedence = ExpressionPrecedence;
+
+ if (!c.buffer.append("["))
+ return false;
+ if (!PrintExpr(c, lsa.base()))
+ return false;
+
+ if (lsa.offset() != 0) {
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintInt32(c, lsa.offset(), true))
+ return false;
+ }
+ if (!c.buffer.append("]"))
+ return false;
+
+ uint32_t alignLog2 = lsa.flags();
+ if (defaultAlignLog2 != alignLog2) {
+ if (!c.buffer.append(", align="))
+ return false;
+ if (!PrintInt32(c, 1 << alignLog2))
+ return false;
+ }
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
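+// For illustration (p is a placeholder base expression): with no offset and the natural
+// alignment the address prints as "[p]"; a non-zero offset is appended after a comma and
+// a non-natural alignment as ", align=N", giving roughly "[p, 8], align=1".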
+
+static bool
+PrintLoad(WasmPrintContext& c, AstLoad& load)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ c.currentPrecedence = LoadOperatorPrecedence;
+ if (!c.f.reduceParens || lastPrecedence > LoadOperatorPrecedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ uint32_t defaultAlignLog2;
+ switch (load.op()) {
+ case Op::I32Load8S:
+ if (!c.buffer.append("i32:8s"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Load8S:
+ if (!c.buffer.append("i64:8s"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Load8U:
+ if (!c.buffer.append("i32:8u"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Load8U:
+ if (!c.buffer.append("i64:8u"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Load16S:
+ if (!c.buffer.append("i32:16s"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load16S:
+ if (!c.buffer.append("i64:16s"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I32Load16U:
+ if (!c.buffer.append("i32:16u"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load16U:
+ if (!c.buffer.append("i64:16u"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load32S:
+ if (!c.buffer.append("i64:32s"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Load32U:
+ if (!c.buffer.append("i64:32u"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I32Load:
+ if (!c.buffer.append("i32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Load:
+ if (!c.buffer.append("i64"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ case Op::F32Load:
+ if (!c.buffer.append("f32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::F64Load:
+ if (!c.buffer.append("f64"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ default:
+ return false;
+ }
+
+ if (!PrintLoadStoreAddress(c, load.address(), defaultAlignLog2))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > LoadOperatorPrecedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ c.currentPrecedence = lastPrecedence;
+
+ return true;
+}
+
+static bool
+PrintStore(WasmPrintContext& c, AstStore& store)
+{
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+
+ c.currentPrecedence = StoreOperatorPrecedence;
+ if (!c.f.reduceParens || lastPrecedence > StoreOperatorPrecedence) {
+ if (!c.buffer.append("("))
+ return false;
+ }
+
+ uint32_t defaultAlignLog2;
+ switch (store.op()) {
+ case Op::I32Store8:
+ if (!c.buffer.append("i32:8"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Store8:
+ if (!c.buffer.append("i64:8"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Store16:
+ if (!c.buffer.append("i32:16"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Store16:
+ if (!c.buffer.append("i64:16"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Store32:
+ if (!c.buffer.append("i64:32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I32Store:
+ if (!c.buffer.append("i32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Store:
+ if (!c.buffer.append("i64"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ case Op::F32Store:
+ if (!c.buffer.append("f32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::F64Store:
+ if (!c.buffer.append("f64"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ default:
+ return false;
+ }
+
+ if (!PrintLoadStoreAddress(c, store.address(), defaultAlignLog2))
+ return false;
+
+ if (!c.buffer.append(" = "))
+ return false;
+
+ if (!PrintExpr(c, store.value()))
+ return false;
+
+ if (!c.f.reduceParens || lastPrecedence > StoreOperatorPrecedence) {
+ if (!c.buffer.append(")"))
+ return false;
+ }
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
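+// For illustration (p and v are placeholders): an i32.load8_u of p at offset 4 prints as
+// "i32:8u[p, 4]" and an i64.store32 of v into p prints as "i64:32[p] = v", each wrapped
+// in parentheses unless reduceParens can omit them.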
+
+static bool
+PrintBranch(WasmPrintContext& c, AstBranch& branch)
+{
+ Op op = branch.op();
+ MOZ_ASSERT(op == Op::BrIf || op == Op::Br);
+
+ if (op == Op::BrIf ? !c.buffer.append("br_if ") : !c.buffer.append("br "))
+ return false;
+
+ if (op == Op::BrIf || branch.maybeValue()) {
+ if (!c.buffer.append('('))
+ return false;
+ }
+
+ if (op == Op::BrIf) {
+ if (!PrintExpr(c, branch.cond()))
+ return false;
+ }
+
+    if (branch.maybeValue()) {
+        // Only br_if printed a condition above, so only then is a separator needed.
+        if (op == Op::BrIf && !c.buffer.append(", "))
+            return false;
+
+ if (!PrintExpr(c, *(branch.maybeValue())))
+ return false;
+ }
+
+ if (op == Op::BrIf || branch.maybeValue()) {
+ if (!c.buffer.append(") "))
+ return false;
+ }
+
+ if (!PrintRef(c, branch.target()))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintBrTable(WasmPrintContext& c, AstBranchTable& table)
+{
+ if (!c.buffer.append("br_table "))
+ return false;
+
+ if (!c.buffer.append('('))
+ return false;
+
+ // Index
+ if (!PrintExpr(c, table.index()))
+ return false;
+
+ if (table.maybeValue()) {
+ if (!c.buffer.append(", "))
+ return false;
+
+ if (!PrintExpr(c, *(table.maybeValue())))
+ return false;
+ }
+
+ if (!c.buffer.append(") "))
+ return false;
+
+ uint32_t tableLength = table.table().length();
+ if (tableLength > 0) {
+ if (!c.buffer.append("["))
+ return false;
+ for (uint32_t i = 0; i < tableLength; i++) {
+ if (!PrintRef(c, table.table()[i]))
+ return false;
+ if (i + 1 == tableLength)
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+ if (!c.buffer.append("], "))
+ return false;
+ }
+
+ if (!PrintRef(c, table.def()))
+ return false;
+
+ return true;
+}
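+// For illustration (labels are placeholders): a two-entry table with index i prints as
+// "br_table (i) [$a, $b], $default"; when a value is present it becomes
+// "br_table (i, v) [$a, $b], $default".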
+
+static bool
+PrintReturn(WasmPrintContext& c, AstReturn& ret)
+{
+ if (!c.buffer.append("return"))
+ return false;
+
+ if (ret.maybeExpr()) {
+ if (!c.buffer.append(" "))
+ return false;
+ if (!PrintExpr(c, *(ret.maybeExpr())))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+PrintFirst(WasmPrintContext& c, AstFirst& first)
+{
+ if (!c.buffer.append("first("))
+ return false;
+
+ for (uint32_t i = 0; i < first.exprs().length(); i++) {
+ if (!PrintExpr(c, *first.exprs()[i]))
+ return false;
+ if (i + 1 == first.exprs().length())
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+
+ if (!c.buffer.append(")"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintCurrentMemory(WasmPrintContext& c, AstCurrentMemory& cm)
+{
+ return c.buffer.append("current_memory");
+}
+
+static bool
+PrintGrowMemory(WasmPrintContext& c, AstGrowMemory& gm)
+{
+ if (!c.buffer.append("grow_memory("))
+ return false;
+
+ PrintOperatorPrecedence lastPrecedence = c.currentPrecedence;
+ c.currentPrecedence = ExpressionPrecedence;
+
+ if (!PrintExpr(c, *gm.operand()))
+ return false;
+
+ if (!c.buffer.append(")"))
+ return false;
+
+ c.currentPrecedence = lastPrecedence;
+ return true;
+}
+
+static bool
+PrintExpr(WasmPrintContext& c, AstExpr& expr)
+{
+ if (c.maybeSourceMap) {
+ uint32_t lineno = c.buffer.lineno();
+ uint32_t column = c.buffer.column();
+ if (!c.maybeSourceMap->exprlocs().emplaceBack(lineno, column, expr.offset()))
+ return false;
+ }
+
+ switch (expr.kind()) {
+ case AstExprKind::Nop:
+ return PrintNop(c);
+ case AstExprKind::Drop:
+ return PrintDrop(c, expr.as<AstDrop>());
+ case AstExprKind::Unreachable:
+ return PrintUnreachable(c, expr.as<AstUnreachable>());
+ case AstExprKind::Call:
+ return PrintCall(c, expr.as<AstCall>());
+ case AstExprKind::CallIndirect:
+ return PrintCallIndirect(c, expr.as<AstCallIndirect>());
+ case AstExprKind::Const:
+ return PrintConst(c, expr.as<AstConst>());
+ case AstExprKind::GetLocal:
+ return PrintGetLocal(c, expr.as<AstGetLocal>());
+ case AstExprKind::SetLocal:
+ return PrintSetLocal(c, expr.as<AstSetLocal>());
+ case AstExprKind::TeeLocal:
+ return PrintTeeLocal(c, expr.as<AstTeeLocal>());
+ case AstExprKind::GetGlobal:
+ return PrintGetGlobal(c, expr.as<AstGetGlobal>());
+ case AstExprKind::SetGlobal:
+ return PrintSetGlobal(c, expr.as<AstSetGlobal>());
+ case AstExprKind::Block:
+ return PrintBlock(c, expr.as<AstBlock>());
+ case AstExprKind::If:
+ return PrintIf(c, expr.as<AstIf>());
+ case AstExprKind::UnaryOperator:
+ return PrintUnaryOperator(c, expr.as<AstUnaryOperator>());
+ case AstExprKind::BinaryOperator:
+ return PrintBinaryOperator(c, expr.as<AstBinaryOperator>());
+ case AstExprKind::TernaryOperator:
+ return PrintTernaryOperator(c, expr.as<AstTernaryOperator>());
+ case AstExprKind::ComparisonOperator:
+ return PrintComparisonOperator(c, expr.as<AstComparisonOperator>());
+ case AstExprKind::ConversionOperator:
+ return PrintConversionOperator(c, expr.as<AstConversionOperator>());
+ case AstExprKind::Load:
+ return PrintLoad(c, expr.as<AstLoad>());
+ case AstExprKind::Store:
+ return PrintStore(c, expr.as<AstStore>());
+ case AstExprKind::Branch:
+ return PrintBranch(c, expr.as<AstBranch>());
+ case AstExprKind::BranchTable:
+ return PrintBrTable(c, expr.as<AstBranchTable>());
+ case AstExprKind::Return:
+ return PrintReturn(c, expr.as<AstReturn>());
+ case AstExprKind::First:
+ return PrintFirst(c, expr.as<AstFirst>());
+ case AstExprKind::CurrentMemory:
+ return PrintCurrentMemory(c, expr.as<AstCurrentMemory>());
+ case AstExprKind::GrowMemory:
+ return PrintGrowMemory(c, expr.as<AstGrowMemory>());
+ case AstExprKind::Pop:
+ return true;
+ }
+
+ MOZ_CRASH("Bad AstExprKind");
+}
+
+static bool
+PrintSignature(WasmPrintContext& c, const AstSig& sig, const AstNameVector* maybeLocals = nullptr)
+{
+ uint32_t paramsNum = sig.args().length();
+
+ if (!c.buffer.append("("))
+ return false;
+ if (maybeLocals) {
+ for (uint32_t i = 0; i < paramsNum; i++) {
+ const AstName& name = (*maybeLocals)[i];
+ if (!name.empty()) {
+ if (!PrintName(c, name))
+ return false;
+ if (!c.buffer.append(": "))
+ return false;
+ }
+ ValType arg = sig.args()[i];
+ if (!PrintValType(c, arg))
+ return false;
+ if (i + 1 == paramsNum)
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+ } else if (paramsNum > 0) {
+ for (uint32_t i = 0; i < paramsNum; i++) {
+ ValType arg = sig.args()[i];
+ if (!PrintValType(c, arg))
+ return false;
+ if (i + 1 == paramsNum)
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+ }
+ if (!c.buffer.append(") : ("))
+ return false;
+ if (sig.ret() != ExprType::Void) {
+ if (!PrintExprType(c, sig.ret()))
+ return false;
+ }
+ if (!c.buffer.append(")"))
+ return false;
+ return true;
+}
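+// For illustration: a signature taking (i32, f64) and returning i32, printed with the
+// parameter names x and y, comes out as "(x: i32, y: f64) : (i32)"; a nullary void
+// signature comes out as "() : ()".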
+
+static bool
+PrintTypeSection(WasmPrintContext& c, const AstModule::SigVector& sigs)
+{
+ uint32_t numSigs = sigs.length();
+ if (!numSigs)
+ return true;
+
+ for (uint32_t sigIndex = 0; sigIndex < numSigs; sigIndex++) {
+ const AstSig* sig = sigs[sigIndex];
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("type "))
+ return false;
+ if (!sig->name().empty()) {
+ if (!PrintName(c, sig->name()))
+ return false;
+ if (!c.buffer.append(" of "))
+ return false;
+ }
+ if (!c.buffer.append("function "))
+ return false;
+ if (!PrintSignature(c, *sig))
+ return false;
+ if (!c.buffer.append(";\n"))
+ return false;
+ }
+
+ if (!c.buffer.append("\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintTableSection(WasmPrintContext& c, const AstModule& module)
+{
+ if (module.elemSegments().empty())
+ return true;
+
+ const AstElemSegment& segment = *module.elemSegments()[0];
+
+ if (!c.buffer.append("table ["))
+ return false;
+
+ for (uint32_t i = 0; i < segment.elems().length(); i++) {
+ const AstRef& elem = segment.elems()[i];
+ uint32_t index = elem.index();
+ AstName name = index < module.funcImportNames().length()
+ ? module.funcImportNames()[index]
+ : module.funcs()[index - module.funcImportNames().length()]->name();
+ if (name.empty()) {
+ if (!PrintInt32(c, index))
+ return false;
+ } else {
+ if (!PrintName(c, name))
+ return false;
+ }
+ if (i + 1 == segment.elems().length())
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+
+ if (!c.buffer.append("];\n\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintImport(WasmPrintContext& c, AstImport& import, const AstModule::SigVector& sigs)
+{
+ const AstSig* sig = sigs[import.funcSig().index()];
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("import "))
+ return false;
+ if (!c.buffer.append("\""))
+ return false;
+
+ const AstName& fieldName = import.field();
+ if (!PrintEscapedString(c, fieldName))
+ return false;
+
+ if (!c.buffer.append("\" as "))
+ return false;
+
+ if (!PrintName(c, import.name()))
+ return false;
+
+ if (!c.buffer.append(" from \""))
+ return false;
+
+ const AstName& moduleName = import.module();
+ if (!PrintEscapedString(c, moduleName))
+ return false;
+
+ if (!c.buffer.append("\" typeof function "))
+ return false;
+
+ if (!PrintSignature(c, *sig))
+ return false;
+ if (!c.buffer.append(";\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintImportSection(WasmPrintContext& c, const AstModule::ImportVector& imports, const AstModule::SigVector& sigs)
+{
+ uint32_t numImports = imports.length();
+
+ for (uint32_t i = 0; i < numImports; i++) {
+ if (!PrintImport(c, *imports[i], sigs))
+ return false;
+ }
+
+ if (numImports) {
+ if (!c.buffer.append("\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+PrintExport(WasmPrintContext& c, AstExport& export_,
+ const AstModule::NameVector& funcImportNames,
+ const AstModule::FuncVector& funcs)
+{
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("export "))
+ return false;
+ if (export_.kind() == DefinitionKind::Memory) {
+ if (!c.buffer.append("memory"))
+ return false;
+ } else {
+ uint32_t index = export_.ref().index();
+ AstName name = index < funcImportNames.length()
+ ? funcImportNames[index]
+ : funcs[index - funcImportNames.length()]->name();
+ if (name.empty()) {
+ if (!PrintInt32(c, index))
+ return false;
+ } else {
+ if (!PrintName(c, name))
+ return false;
+ }
+ }
+ if (!c.buffer.append(" as \""))
+ return false;
+ if (!PrintEscapedString(c, export_.name()))
+ return false;
+ if (!c.buffer.append("\";\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintExportSection(WasmPrintContext& c, const AstModule::ExportVector& exports,
+ const AstModule::NameVector& funcImportNames,
+ const AstModule::FuncVector& funcs)
+{
+ uint32_t numExports = exports.length();
+ for (uint32_t i = 0; i < numExports; i++) {
+ if (!PrintExport(c, *exports[i], funcImportNames, funcs))
+ return false;
+ }
+ if (numExports) {
+ if (!c.buffer.append("\n"))
+ return false;
+ }
+ return true;
+}
+
+static bool
+PrintFunctionBody(WasmPrintContext& c, AstFunc& func, const AstModule::SigVector& sigs)
+{
+ const AstSig* sig = sigs[func.sig().index()];
+ c.indent++;
+
+ size_t startExprIndex = c.maybeSourceMap ? c.maybeSourceMap->exprlocs().length() : 0;
+ uint32_t startLineno = c.buffer.lineno();
+
+ uint32_t argsNum = sig->args().length();
+ uint32_t localsNum = func.vars().length();
+ if (localsNum > 0) {
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("var "))
+ return false;
+ for (uint32_t i = 0; i < localsNum; i++) {
+ const AstName& name = func.locals()[argsNum + i];
+ if (!name.empty()) {
+ if (!PrintName(c, name))
+ return false;
+ if (!c.buffer.append(": "))
+ return false;
+ }
+ ValType local = func.vars()[i];
+ if (!PrintValType(c, local))
+ return false;
+ if (i + 1 == localsNum)
+ break;
+ if (!c.buffer.append(", "))
+ return false;
+ }
+ if (!c.buffer.append(";\n"))
+ return false;
+ }
+
+ uint32_t exprsNum = func.body().length();
+ for (uint32_t i = 0; i < exprsNum; i++) {
+ if (!PrintBlockLevelExpr(c, *func.body()[i], i + 1 == exprsNum))
+ return false;
+ }
+
+ c.indent--;
+
+ size_t endExprIndex = c.maybeSourceMap ? c.maybeSourceMap->exprlocs().length() : 0;
+ uint32_t endLineno = c.buffer.lineno();
+
+ if (c.maybeSourceMap) {
+ if (!c.maybeSourceMap->functionlocs().emplaceBack(startExprIndex, endExprIndex, startLineno, endLineno))
+ return false;
+ }
+ return true;
+}
+
+static bool
+PrintCodeSection(WasmPrintContext& c, const AstModule::FuncVector& funcs, const AstModule::SigVector& sigs)
+{
+ uint32_t numFuncBodies = funcs.length();
+ for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
+ AstFunc* func = funcs[funcIndex];
+ uint32_t sigIndex = func->sig().index();
+ AstSig* sig = sigs[sigIndex];
+
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("function "))
+ return false;
+ if (!func->name().empty()) {
+ if (!PrintName(c, func->name()))
+ return false;
+ }
+
+ if (!PrintSignature(c, *sig, &(func->locals())))
+ return false;
+ if (!c.buffer.append(" {\n"))
+ return false;
+
+ c.currentFuncIndex = funcIndex;
+
+ if (!PrintFunctionBody(c, *func, sigs))
+ return false;
+
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("}\n\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+PrintDataSection(WasmPrintContext& c, const AstModule& module)
+{
+ if (!module.hasMemory())
+ return true;
+
+ MOZ_ASSERT(module.memories().length() == 1, "NYI: several memories");
+
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("memory "))
+ return false;
+
+ const Limits& memory = module.memories()[0].limits;
+ MOZ_ASSERT(memory.initial % PageSize == 0);
+ if (!PrintInt32(c, memory.initial / PageSize))
+ return false;
+
+ if (memory.maximum) {
+ MOZ_ASSERT(*memory.maximum % PageSize == 0);
+ if (!c.buffer.append(", "))
+ return false;
+ if (!PrintInt32(c, *memory.maximum / PageSize))
+ return false;
+ }
+
+ c.indent++;
+
+ uint32_t numSegments = module.dataSegments().length();
+ if (!numSegments) {
+ if (!c.buffer.append(" {}\n\n"))
+ return false;
+ return true;
+ }
+ if (!c.buffer.append(" {\n"))
+ return false;
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ const AstDataSegment* segment = module.dataSegments()[i];
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("segment "))
+ return false;
+ if (!PrintInt32(c, segment->offset()->as<AstConst>().val().i32()))
+ return false;
+ if (!c.buffer.append("\n"))
+ return false;
+
+ c.indent++;
+ for (const AstName& fragment : segment->fragments()) {
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append("\""))
+ return false;
+ if (!PrintEscapedString(c, fragment))
+ return false;
+ if (!c.buffer.append("\"\n"))
+ return false;
+ }
+ c.indent--;
+
+ if (!PrintIndent(c))
+ return false;
+ if (!c.buffer.append(";\n"))
+ return false;
+ }
+
+ c.indent--;
+ if (!c.buffer.append("}\n\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+PrintModule(WasmPrintContext& c, AstModule& module)
+{
+ if (!PrintTypeSection(c, module.sigs()))
+ return false;
+
+ if (!PrintImportSection(c, module.imports(), module.sigs()))
+ return false;
+
+ if (!PrintTableSection(c, module))
+ return false;
+
+ if (!PrintExportSection(c, module.exports(), module.funcImportNames(), module.funcs()))
+ return false;
+
+ if (!PrintCodeSection(c, module.funcs(), module.sigs()))
+ return false;
+
+ if (!PrintDataSection(c, module))
+ return false;
+
+ return true;
+}
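+// A rough sketch of the overall output, in the section order emitted above (names and
+// counts are placeholders; empty sections are simply skipped):
+//   type $t of function (i32) : (i32);
+//   import "fld" as $f from "mod" typeof function () : ();
+//   table [$f0, $f1];
+//   export $main as "main";
+//   function $main() : () { ... }
+//   memory 1 { ... }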
+
+/*****************************************************************************/
+// Top-level functions
+
+bool
+wasm::BinaryToExperimentalText(JSContext* cx, const uint8_t* bytes, size_t length,
+ StringBuffer& buffer, const ExperimentalTextFormatting& formatting,
+ GeneratedSourceMap* sourceMap)
+{
+
+ LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE);
+
+ AstModule* module;
+ if (!BinaryToAst(cx, bytes, length, lifo, &module))
+ return false;
+
+ WasmPrintBuffer buf(buffer);
+ WasmPrintContext c(cx, module, buf, formatting, sourceMap);
+
+ if (!PrintModule(c, *module)) {
+ if (!cx->isExceptionPending())
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
diff --git a/js/src/wasm/WasmBinaryToExperimentalText.h b/js/src/wasm/WasmBinaryToExperimentalText.h
new file mode 100644
index 0000000000..f08dc041c4
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToExperimentalText.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_to_experimental_text_h
+#define wasm_binary_to_experimental_text_h
+
+#include "NamespaceImports.h"
+
+#include "gc/Rooting.h"
+#include "js/Class.h"
+#include "wasm/WasmGeneratedSourceMap.h"
+
+namespace js {
+
+class StringBuffer;
+
+namespace wasm {
+
+struct ExperimentalTextFormatting
+{
+ bool allowAsciiOperators:1;
+ bool reduceParens:1;
+ bool groupBlocks:1;
+
+ ExperimentalTextFormatting()
+ : allowAsciiOperators(true),
+ reduceParens(true),
+ groupBlocks(true)
+ {}
+};
+
+// Translate the given binary representation of a wasm module into the module's
+// experimental textual representation, optionally recording a generated source map.
+
+MOZ_MUST_USE bool
+BinaryToExperimentalText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuffer& buffer,
+ const ExperimentalTextFormatting& formatting = ExperimentalTextFormatting(),
+ GeneratedSourceMap* sourceMap = nullptr);
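+
+// A minimal call sketch (assuming a JSContext* cx, a StringBuffer buffer, and the raw
+// module bytes/length are already in hand; error handling elided):
+//
+//   ExperimentalTextFormatting formatting;  // defaults as initialized above
+//   if (!BinaryToExperimentalText(cx, bytes, length, buffer, formatting))
+//       return false;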
+
+} // namespace wasm
+
+} // namespace js
+
+#endif // wasm_binary_to_experimental_text_h
diff --git a/js/src/wasm/WasmBinaryToText.cpp b/js/src/wasm/WasmBinaryToText.cpp
new file mode 100644
index 0000000000..50b9d8358b
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToText.cpp
@@ -0,0 +1,1744 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmBinaryToText.h"
+
+#include "jsnum.h"
+#include "jsprf.h"
+
+#include "vm/ArrayBufferObject.h"
+#include "vm/StringBuffer.h"
+#include "wasm/WasmAST.h"
+#include "wasm/WasmBinaryToAST.h"
+#include "wasm/WasmTextUtils.h"
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::IsInfinite;
+using mozilla::IsNaN;
+using mozilla::IsNegativeZero;
+
+struct WasmRenderContext
+{
+ JSContext* cx;
+ AstModule* module;
+ WasmPrintBuffer& buffer;
+ GeneratedSourceMap* maybeSourceMap;
+ uint32_t indent;
+
+ uint32_t currentFuncIndex;
+
+ WasmRenderContext(JSContext* cx, AstModule* module, WasmPrintBuffer& buffer, GeneratedSourceMap* sourceMap)
+ : cx(cx), module(module), buffer(buffer), maybeSourceMap(sourceMap), indent(0), currentFuncIndex(0)
+ {}
+
+ StringBuffer& sb() { return buffer.stringBuffer(); }
+};
+
+/*****************************************************************************/
+// utilities
+
+// Return true on purpose, so that we have a useful error message to provide to
+// the user.
+static bool
+Fail(WasmRenderContext& c, const char* msg)
+{
+ c.buffer.stringBuffer().clear();
+
+ return c.buffer.append("There was a problem when rendering the wasm text format: ") &&
+ c.buffer.append(msg, strlen(msg)) &&
+           c.buffer.append("\nYou should consider filing a bug on Bugzilla in the "
+                           "Core::JavaScript Engine::JIT component at "
+ "https://bugzilla.mozilla.org/enter_bug.cgi.");
+}
+
+static bool
+RenderIndent(WasmRenderContext& c)
+{
+ for (uint32_t i = 0; i < c.indent; i++) {
+ if (!c.buffer.append(" "))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderInt32(WasmRenderContext& c, int32_t num)
+{
+ return NumberValueToStringBuffer(c.cx, Int32Value(num), c.sb());
+}
+
+static bool
+RenderInt64(WasmRenderContext& c, int64_t num)
+{
+ if (num < 0 && !c.buffer.append("-"))
+ return false;
+ if (!num)
+ return c.buffer.append("0");
+ return RenderInBase<10>(c.sb(), mozilla::Abs(num));
+}
+
+static bool
+RenderDouble(WasmRenderContext& c, RawF64 num)
+{
+ double d = num.fp();
+ if (IsNaN(d))
+ return RenderNaN(c.sb(), num);
+ if (IsNegativeZero(d))
+ return c.buffer.append("-0");
+ if (IsInfinite(d)) {
+ if (d > 0)
+ return c.buffer.append("infinity");
+ return c.buffer.append("-infinity");
+ }
+ return NumberValueToStringBuffer(c.cx, DoubleValue(d), c.sb());
+}
+
+static bool
+RenderFloat32(WasmRenderContext& c, RawF32 num)
+{
+ float f = num.fp();
+ if (IsNaN(f))
+ return RenderNaN(c.sb(), num);
+ return RenderDouble(c, RawF64(double(f)));
+}
+
+static bool
+RenderEscapedString(WasmRenderContext& c, const AstName& s)
+{
+ size_t length = s.length();
+ const char16_t* p = s.begin();
+ for (size_t i = 0; i < length; i++) {
+ char16_t byte = p[i];
+ switch (byte) {
+ case '\n':
+ if (!c.buffer.append("\\n"))
+ return false;
+ break;
+ case '\r':
+ if (!c.buffer.append("\\0d"))
+ return false;
+ break;
+ case '\t':
+ if (!c.buffer.append("\\t"))
+ return false;
+ break;
+ case '\f':
+ if (!c.buffer.append("\\0c"))
+ return false;
+ break;
+ case '\b':
+ if (!c.buffer.append("\\08"))
+ return false;
+ break;
+ case '\\':
+ if (!c.buffer.append("\\\\"))
+ return false;
+ break;
+ case '"' :
+ if (!c.buffer.append("\\\""))
+ return false;
+ break;
+ case '\'':
+ if (!c.buffer.append("\\'"))
+ return false;
+ break;
+ default:
+ if (byte >= 32 && byte < 127) {
+ if (!c.buffer.append((char)byte))
+ return false;
+ } else {
+ char digit1 = byte / 16, digit2 = byte % 16;
+ if (!c.buffer.append("\\"))
+ return false;
+ if (!c.buffer.append((char)(digit1 < 10 ? digit1 + '0' : digit1 + 'a' - 10)))
+ return false;
+ if (!c.buffer.append((char)(digit2 < 10 ? digit2 + '0' : digit2 + 'a' - 10)))
+ return false;
+ }
+ break;
+ }
+ }
+ return true;
+}
+
+static bool
+RenderExprType(WasmRenderContext& c, ExprType type)
+{
+ switch (type) {
+ case ExprType::Void: return true; // ignoring void
+ case ExprType::I32: return c.buffer.append("i32");
+ case ExprType::I64: return c.buffer.append("i64");
+ case ExprType::F32: return c.buffer.append("f32");
+ case ExprType::F64: return c.buffer.append("f64");
+ default:;
+ }
+
+ MOZ_CRASH("bad type");
+}
+
+static bool
+RenderValType(WasmRenderContext& c, ValType type)
+{
+ return RenderExprType(c, ToExprType(type));
+}
+
+static bool
+RenderName(WasmRenderContext& c, const AstName& name)
+{
+ return c.buffer.append(name.begin(), name.end());
+}
+
+static bool
+RenderRef(WasmRenderContext& c, const AstRef& ref)
+{
+ if (ref.name().empty())
+ return RenderInt32(c, ref.index());
+
+ return RenderName(c, ref.name());
+}
+
+static bool
+RenderBlockNameAndSignature(WasmRenderContext& c, const AstName& name, ExprType type)
+{
+ if (!name.empty()) {
+ if (!c.buffer.append(' '))
+ return false;
+
+ if (!RenderName(c, name))
+ return false;
+ }
+
+ if (!IsVoid(type)) {
+ if (!c.buffer.append(' '))
+ return false;
+
+ if (!RenderExprType(c, type))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderExpr(WasmRenderContext& c, AstExpr& expr, bool newLine = true);
+
+#define MAP_AST_EXPR(c, expr) \
+ if (c.maybeSourceMap) { \
+ uint32_t lineno = c.buffer.lineno(); \
+ uint32_t column = c.buffer.column(); \
+ if (!c.maybeSourceMap->exprlocs().emplaceBack(lineno, column, expr.offset())) \
+ return false; \
+ }
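+// For example, MAP_AST_EXPR(c, load) records the current output line and column together
+// with load.offset(), so the rendered text can later be mapped back to the corresponding
+// position in the module's bytecode via the generated source map.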
+
+/*****************************************************************************/
+// binary format parsing and rendering
+
+static bool
+RenderNop(WasmRenderContext& c, AstNop& nop)
+{
+ if (!RenderIndent(c))
+ return false;
+ MAP_AST_EXPR(c, nop);
+ return c.buffer.append("nop");
+}
+
+static bool
+RenderDrop(WasmRenderContext& c, AstDrop& drop)
+{
+ if (!RenderExpr(c, drop.value()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+ MAP_AST_EXPR(c, drop);
+ return c.buffer.append("drop");
+}
+
+static bool
+RenderUnreachable(WasmRenderContext& c, AstUnreachable& unreachable)
+{
+ if (!RenderIndent(c))
+ return false;
+ MAP_AST_EXPR(c, unreachable);
+ return c.buffer.append("unreachable");
+}
+
+static bool
+RenderCallArgs(WasmRenderContext& c, const AstExprVector& args)
+{
+ for (uint32_t i = 0; i < args.length(); i++) {
+ if (!RenderExpr(c, *args[i]))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderCall(WasmRenderContext& c, AstCall& call)
+{
+ if (!RenderCallArgs(c, call.args()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, call);
+ if (call.op() == Op::Call) {
+ if (!c.buffer.append("call "))
+ return false;
+ } else {
+ return Fail(c, "unexpected operator");
+ }
+
+ return RenderRef(c, call.func());
+}
+
+static bool
+RenderCallIndirect(WasmRenderContext& c, AstCallIndirect& call)
+{
+ if (!RenderCallArgs(c, call.args()))
+ return false;
+
+ if (!RenderExpr(c, *call.index()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, call);
+ if (!c.buffer.append("call_indirect "))
+ return false;
+ return RenderRef(c, call.sig());
+}
+
+static bool
+RenderConst(WasmRenderContext& c, AstConst& cst)
+{
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, cst);
+ if (!RenderValType(c, cst.val().type()))
+ return false;
+ if (!c.buffer.append(".const "))
+ return false;
+
+ switch (ToExprType(cst.val().type())) {
+ case ExprType::I32:
+ return RenderInt32(c, (int32_t)cst.val().i32());
+ case ExprType::I64:
+ return RenderInt64(c, (int64_t)cst.val().i64());
+ case ExprType::F32:
+ return RenderFloat32(c, cst.val().f32());
+ case ExprType::F64:
+ return RenderDouble(c, cst.val().f64());
+ default:
+ break;
+ }
+
+ return false;
+}
+
+static bool
+RenderGetLocal(WasmRenderContext& c, AstGetLocal& gl)
+{
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, gl);
+ if (!c.buffer.append("get_local "))
+ return false;
+ return RenderRef(c, gl.local());
+}
+
+static bool
+RenderSetLocal(WasmRenderContext& c, AstSetLocal& sl)
+{
+ if (!RenderExpr(c, sl.value()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, sl);
+ if (!c.buffer.append("set_local "))
+ return false;
+ return RenderRef(c, sl.local());
+}
+
+static bool
+RenderTeeLocal(WasmRenderContext& c, AstTeeLocal& tl)
+{
+ if (!RenderExpr(c, tl.value()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, tl);
+ if (!c.buffer.append("tee_local "))
+ return false;
+ return RenderRef(c, tl.local());
+}
+
+static bool
+RenderGetGlobal(WasmRenderContext& c, AstGetGlobal& gg)
+{
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, gg);
+ if (!c.buffer.append("get_global "))
+ return false;
+ return RenderRef(c, gg.global());
+}
+
+static bool
+RenderSetGlobal(WasmRenderContext& c, AstSetGlobal& sg)
+{
+ if (!RenderExpr(c, sg.value()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, sg);
+ if (!c.buffer.append("set_global "))
+ return false;
+ return RenderRef(c, sg.global());
+}
+
+static bool
+RenderExprList(WasmRenderContext& c, const AstExprVector& exprs)
+{
+ for (uint32_t i = 0; i < exprs.length(); i++) {
+ if (!RenderExpr(c, *exprs[i]))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderBlock(WasmRenderContext& c, AstBlock& block)
+{
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, block);
+ if (block.op() == Op::Block) {
+ if (!c.buffer.append("block"))
+ return false;
+ } else if (block.op() == Op::Loop) {
+ if (!c.buffer.append("loop"))
+ return false;
+ } else {
+ return Fail(c, "unexpected block kind");
+ }
+
+ if (!RenderBlockNameAndSignature(c, block.name(), block.type()))
+ return false;
+
+ if (!c.buffer.append('\n'))
+ return false;
+
+ c.indent++;
+ if (!RenderExprList(c, block.exprs()))
+ return false;
+ c.indent--;
+
+ if (!RenderIndent(c))
+ return false;
+
+ return c.buffer.append("end");
+}
+
+static bool
+RenderFirst(WasmRenderContext& c, AstFirst& first)
+{
+ return RenderExprList(c, first.exprs());
+}
+
+static bool
+RenderCurrentMemory(WasmRenderContext& c, AstCurrentMemory& cm)
+{
+ if (!RenderIndent(c))
+ return false;
+
+ return c.buffer.append("current_memory\n");
+}
+
+static bool
+RenderGrowMemory(WasmRenderContext& c, AstGrowMemory& gm)
+{
+ if (!RenderExpr(c, *gm.operand()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, gm);
+ return c.buffer.append("grow_memory\n");
+}
+
+static bool
+RenderUnaryOperator(WasmRenderContext& c, AstUnaryOperator& unary)
+{
+ if (!RenderExpr(c, *unary.operand()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, unary);
+ const char* opStr;
+ switch (unary.op()) {
+ case Op::I32Eqz: opStr = "i32.eqz"; break;
+ case Op::I32Clz: opStr = "i32.clz"; break;
+ case Op::I32Ctz: opStr = "i32.ctz"; break;
+ case Op::I32Popcnt: opStr = "i32.popcnt"; break;
+ case Op::I64Clz: opStr = "i64.clz"; break;
+ case Op::I64Ctz: opStr = "i64.ctz"; break;
+ case Op::I64Popcnt: opStr = "i64.popcnt"; break;
+ case Op::F32Abs: opStr = "f32.abs"; break;
+ case Op::F32Neg: opStr = "f32.neg"; break;
+ case Op::F32Ceil: opStr = "f32.ceil"; break;
+ case Op::F32Floor: opStr = "f32.floor"; break;
+ case Op::F32Sqrt: opStr = "f32.sqrt"; break;
+ case Op::F32Trunc: opStr = "f32.trunc"; break;
+ case Op::F32Nearest: opStr = "f32.nearest"; break;
+ case Op::F64Abs: opStr = "f64.abs"; break;
+ case Op::F64Neg: opStr = "f64.neg"; break;
+ case Op::F64Ceil: opStr = "f64.ceil"; break;
+ case Op::F64Floor: opStr = "f64.floor"; break;
+ case Op::F64Nearest: opStr = "f64.nearest"; break;
+ case Op::F64Sqrt: opStr = "f64.sqrt"; break;
+ case Op::F64Trunc: opStr = "f64.trunc"; break;
+ default: return Fail(c, "unexpected unary operator");
+ }
+
+ return c.buffer.append(opStr, strlen(opStr));
+}
+
+static bool
+RenderBinaryOperator(WasmRenderContext& c, AstBinaryOperator& binary)
+{
+ if (!RenderExpr(c, *binary.lhs()))
+ return false;
+ if (!RenderExpr(c, *binary.rhs()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, binary);
+ const char* opStr;
+ switch (binary.op()) {
+ case Op::I32Add: opStr = "i32.add"; break;
+ case Op::I32Sub: opStr = "i32.sub"; break;
+ case Op::I32Mul: opStr = "i32.mul"; break;
+ case Op::I32DivS: opStr = "i32.div_s"; break;
+ case Op::I32DivU: opStr = "i32.div_u"; break;
+ case Op::I32RemS: opStr = "i32.rem_s"; break;
+ case Op::I32RemU: opStr = "i32.rem_u"; break;
+ case Op::I32And: opStr = "i32.and"; break;
+ case Op::I32Or: opStr = "i32.or"; break;
+ case Op::I32Xor: opStr = "i32.xor"; break;
+ case Op::I32Shl: opStr = "i32.shl"; break;
+ case Op::I32ShrS: opStr = "i32.shr_s"; break;
+ case Op::I32ShrU: opStr = "i32.shr_u"; break;
+ case Op::I32Rotl: opStr = "i32.rotl"; break;
+ case Op::I32Rotr: opStr = "i32.rotr"; break;
+ case Op::I64Add: opStr = "i64.add"; break;
+ case Op::I64Sub: opStr = "i64.sub"; break;
+ case Op::I64Mul: opStr = "i64.mul"; break;
+ case Op::I64DivS: opStr = "i64.div_s"; break;
+ case Op::I64DivU: opStr = "i64.div_u"; break;
+ case Op::I64RemS: opStr = "i64.rem_s"; break;
+ case Op::I64RemU: opStr = "i64.rem_u"; break;
+ case Op::I64And: opStr = "i64.and"; break;
+ case Op::I64Or: opStr = "i64.or"; break;
+ case Op::I64Xor: opStr = "i64.xor"; break;
+ case Op::I64Shl: opStr = "i64.shl"; break;
+ case Op::I64ShrS: opStr = "i64.shr_s"; break;
+ case Op::I64ShrU: opStr = "i64.shr_u"; break;
+ case Op::I64Rotl: opStr = "i64.rotl"; break;
+ case Op::I64Rotr: opStr = "i64.rotr"; break;
+ case Op::F32Add: opStr = "f32.add"; break;
+ case Op::F32Sub: opStr = "f32.sub"; break;
+ case Op::F32Mul: opStr = "f32.mul"; break;
+ case Op::F32Div: opStr = "f32.div"; break;
+ case Op::F32Min: opStr = "f32.min"; break;
+ case Op::F32Max: opStr = "f32.max"; break;
+ case Op::F32CopySign: opStr = "f32.copysign"; break;
+ case Op::F64Add: opStr = "f64.add"; break;
+ case Op::F64Sub: opStr = "f64.sub"; break;
+ case Op::F64Mul: opStr = "f64.mul"; break;
+ case Op::F64Div: opStr = "f64.div"; break;
+ case Op::F64Min: opStr = "f64.min"; break;
+ case Op::F64Max: opStr = "f64.max"; break;
+ case Op::F64CopySign: opStr = "f64.copysign"; break;
+ default: return Fail(c, "unexpected binary operator");
+ }
+
+ return c.buffer.append(opStr, strlen(opStr));
+}
+
+static bool
+RenderTernaryOperator(WasmRenderContext& c, AstTernaryOperator& ternary)
+{
+ if (!RenderExpr(c, *ternary.op0()))
+ return false;
+ if (!RenderExpr(c, *ternary.op1()))
+ return false;
+ if (!RenderExpr(c, *ternary.op2()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, ternary);
+ const char* opStr;
+ switch (ternary.op()) {
+ case Op::Select: opStr = "select"; break;
+ default: return Fail(c, "unexpected ternary operator");
+ }
+
+ return c.buffer.append(opStr, strlen(opStr));
+}
+
+static bool
+RenderComparisonOperator(WasmRenderContext& c, AstComparisonOperator& comp)
+{
+ if (!RenderExpr(c, *comp.lhs()))
+ return false;
+ if (!RenderExpr(c, *comp.rhs()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, comp);
+ const char* opStr;
+ switch (comp.op()) {
+ case Op::I32Eq: opStr = "i32.eq"; break;
+ case Op::I32Ne: opStr = "i32.ne"; break;
+ case Op::I32LtS: opStr = "i32.lt_s"; break;
+ case Op::I32LtU: opStr = "i32.lt_u"; break;
+ case Op::I32LeS: opStr = "i32.le_s"; break;
+ case Op::I32LeU: opStr = "i32.le_u"; break;
+ case Op::I32GtS: opStr = "i32.gt_s"; break;
+ case Op::I32GtU: opStr = "i32.gt_u"; break;
+ case Op::I32GeS: opStr = "i32.ge_s"; break;
+ case Op::I32GeU: opStr = "i32.ge_u"; break;
+ case Op::I64Eq: opStr = "i64.eq"; break;
+ case Op::I64Ne: opStr = "i64.ne"; break;
+ case Op::I64LtS: opStr = "i64.lt_s"; break;
+ case Op::I64LtU: opStr = "i64.lt_u"; break;
+ case Op::I64LeS: opStr = "i64.le_s"; break;
+ case Op::I64LeU: opStr = "i64.le_u"; break;
+ case Op::I64GtS: opStr = "i64.gt_s"; break;
+ case Op::I64GtU: opStr = "i64.gt_u"; break;
+ case Op::I64GeS: opStr = "i64.ge_s"; break;
+ case Op::I64GeU: opStr = "i64.ge_u"; break;
+ case Op::F32Eq: opStr = "f32.eq"; break;
+ case Op::F32Ne: opStr = "f32.ne"; break;
+ case Op::F32Lt: opStr = "f32.lt"; break;
+ case Op::F32Le: opStr = "f32.le"; break;
+ case Op::F32Gt: opStr = "f32.gt"; break;
+ case Op::F32Ge: opStr = "f32.ge"; break;
+ case Op::F64Eq: opStr = "f64.eq"; break;
+ case Op::F64Ne: opStr = "f64.ne"; break;
+ case Op::F64Lt: opStr = "f64.lt"; break;
+ case Op::F64Le: opStr = "f64.le"; break;
+ case Op::F64Gt: opStr = "f64.gt"; break;
+ case Op::F64Ge: opStr = "f64.ge"; break;
+ default: return Fail(c, "unexpected comparison operator");
+ }
+
+ return c.buffer.append(opStr, strlen(opStr));
+}
+
+static bool
+RenderConversionOperator(WasmRenderContext& c, AstConversionOperator& conv)
+{
+ if (!RenderExpr(c, *conv.operand()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, conv);
+ const char* opStr;
+ switch (conv.op()) {
+ case Op::I32WrapI64: opStr = "i32.wrap/i64"; break;
+ case Op::I32TruncSF32: opStr = "i32.trunc_s/f32"; break;
+ case Op::I32TruncUF32: opStr = "i32.trunc_u/f32"; break;
+ case Op::I32ReinterpretF32: opStr = "i32.reinterpret/f32"; break;
+ case Op::I32TruncSF64: opStr = "i32.trunc_s/f64"; break;
+ case Op::I32TruncUF64: opStr = "i32.trunc_u/f64"; break;
+ case Op::I64ExtendSI32: opStr = "i64.extend_s/i32"; break;
+ case Op::I64ExtendUI32: opStr = "i64.extend_u/i32"; break;
+ case Op::I64TruncSF32: opStr = "i64.trunc_s/f32"; break;
+ case Op::I64TruncUF32: opStr = "i64.trunc_u/f32"; break;
+ case Op::I64TruncSF64: opStr = "i64.trunc_s/f64"; break;
+ case Op::I64TruncUF64: opStr = "i64.trunc_u/f64"; break;
+ case Op::I64ReinterpretF64: opStr = "i64.reinterpret/f64"; break;
+ case Op::F32ConvertSI32: opStr = "f32.convert_s/i32"; break;
+ case Op::F32ConvertUI32: opStr = "f32.convert_u/i32"; break;
+ case Op::F32ReinterpretI32: opStr = "f32.reinterpret/i32"; break;
+ case Op::F32ConvertSI64: opStr = "f32.convert_s/i64"; break;
+ case Op::F32ConvertUI64: opStr = "f32.convert_u/i64"; break;
+ case Op::F32DemoteF64: opStr = "f32.demote/f64"; break;
+ case Op::F64ConvertSI32: opStr = "f64.convert_s/i32"; break;
+ case Op::F64ConvertUI32: opStr = "f64.convert_u/i32"; break;
+ case Op::F64ConvertSI64: opStr = "f64.convert_s/i64"; break;
+ case Op::F64ConvertUI64: opStr = "f64.convert_u/i64"; break;
+ case Op::F64ReinterpretI64: opStr = "f64.reinterpret/i64"; break;
+ case Op::F64PromoteF32: opStr = "f64.promote/f32"; break;
+ case Op::I32Eqz: opStr = "i32.eqz"; break;
+ case Op::I64Eqz: opStr = "i64.eqz"; break;
+ default: return Fail(c, "unexpected conversion operator");
+ }
+ return c.buffer.append(opStr, strlen(opStr));
+}
+
+static bool
+RenderIf(WasmRenderContext& c, AstIf& if_)
+{
+ if (!RenderExpr(c, if_.cond()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, if_);
+ if (!c.buffer.append("if"))
+ return false;
+ if (!RenderBlockNameAndSignature(c, if_.name(), if_.type()))
+ return false;
+ if (!c.buffer.append('\n'))
+ return false;
+
+ c.indent++;
+ if (!RenderExprList(c, if_.thenExprs()))
+ return false;
+ c.indent--;
+
+ if (if_.hasElse()) {
+ if (!RenderIndent(c))
+ return false;
+
+ if (!c.buffer.append("else\n"))
+ return false;
+
+ c.indent++;
+ if (!RenderExprList(c, if_.elseExprs()))
+ return false;
+ c.indent--;
+ }
+
+ if (!RenderIndent(c))
+ return false;
+
+ return c.buffer.append("end");
+}
+
+static bool
+RenderLoadStoreBase(WasmRenderContext& c, const AstLoadStoreAddress& lsa)
+{
+ return RenderExpr(c, lsa.base());
+}
+
+static bool
+RenderLoadStoreAddress(WasmRenderContext& c, const AstLoadStoreAddress& lsa, uint32_t defaultAlignLog2)
+{
+ if (lsa.offset() != 0) {
+ if (!c.buffer.append(" offset="))
+ return false;
+ if (!RenderInt32(c, lsa.offset()))
+ return false;
+ }
+
+ uint32_t alignLog2 = lsa.flags();
+ if (defaultAlignLog2 != alignLog2) {
+ if (!c.buffer.append(" align="))
+ return false;
+ if (!RenderInt32(c, 1 << alignLog2))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderLoad(WasmRenderContext& c, AstLoad& load)
+{
+ if (!RenderLoadStoreBase(c, load.address()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, load);
+ uint32_t defaultAlignLog2;
+ switch (load.op()) {
+ case Op::I32Load8S:
+ if (!c.buffer.append("i32.load8_s"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Load8S:
+ if (!c.buffer.append("i64.load8_s"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Load8U:
+ if (!c.buffer.append("i32.load8_u"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Load8U:
+ if (!c.buffer.append("i64.load8_u"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Load16S:
+ if (!c.buffer.append("i32.load16_s"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load16S:
+ if (!c.buffer.append("i64.load16_s"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I32Load16U:
+ if (!c.buffer.append("i32.load16_u"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load16U:
+ if (!c.buffer.append("i64.load16_u"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Load32S:
+ if (!c.buffer.append("i64.load32_s"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Load32U:
+ if (!c.buffer.append("i64.load32_u"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I32Load:
+ if (!c.buffer.append("i32.load"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Load:
+ if (!c.buffer.append("i64.load"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ case Op::F32Load:
+ if (!c.buffer.append("f32.load"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::F64Load:
+ if (!c.buffer.append("f64.load"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ default:
+ return Fail(c, "unexpected load operator");
+ }
+
+ return RenderLoadStoreAddress(c, load.address(), defaultAlignLog2);
+}
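+// For illustration: an i64.load32_s with offset 8 and the natural (4-byte) alignment
+// renders as "i64.load32_s offset=8" on its own line, after its base-address expression,
+// which is rendered first.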
+
+static bool
+RenderStore(WasmRenderContext& c, AstStore& store)
+{
+ if (!RenderLoadStoreBase(c, store.address()))
+ return false;
+
+ if (!RenderExpr(c, store.value()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, store);
+ uint32_t defaultAlignLog2;
+ switch (store.op()) {
+ case Op::I32Store8:
+ if (!c.buffer.append("i32.store8"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I64Store8:
+ if (!c.buffer.append("i64.store8"))
+ return false;
+ defaultAlignLog2 = 0;
+ break;
+ case Op::I32Store16:
+ if (!c.buffer.append("i32.store16"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Store16:
+ if (!c.buffer.append("i64.store16"))
+ return false;
+ defaultAlignLog2 = 1;
+ break;
+ case Op::I64Store32:
+ if (!c.buffer.append("i64.store32"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I32Store:
+ if (!c.buffer.append("i32.store"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::I64Store:
+ if (!c.buffer.append("i64.store"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ case Op::F32Store:
+ if (!c.buffer.append("f32.store"))
+ return false;
+ defaultAlignLog2 = 2;
+ break;
+ case Op::F64Store:
+ if (!c.buffer.append("f64.store"))
+ return false;
+ defaultAlignLog2 = 3;
+ break;
+ default:
+ return Fail(c, "unexpected store operator");
+ }
+
+ return RenderLoadStoreAddress(c, store.address(), defaultAlignLog2);
+}
+
+static bool
+RenderBranch(WasmRenderContext& c, AstBranch& branch)
+{
+ Op op = branch.op();
+ MOZ_ASSERT(op == Op::BrIf || op == Op::Br);
+
+ if (op == Op::BrIf) {
+ if (!RenderExpr(c, branch.cond()))
+ return false;
+ }
+
+ if (branch.maybeValue()) {
+ if (!RenderExpr(c, *(branch.maybeValue())))
+ return false;
+ }
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, branch);
+ if (op == Op::BrIf ? !c.buffer.append("br_if ") : !c.buffer.append("br "))
+ return false;
+
+ return RenderRef(c, branch.target());
+}
+
+static bool
+RenderBrTable(WasmRenderContext& c, AstBranchTable& table)
+{
+ if (table.maybeValue()) {
+ if (!RenderExpr(c, *(table.maybeValue())))
+ return false;
+ }
+
+ // Index
+ if (!RenderExpr(c, table.index()))
+ return false;
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, table);
+ if (!c.buffer.append("br_table "))
+ return false;
+
+ uint32_t tableLength = table.table().length();
+ for (uint32_t i = 0; i < tableLength; i++) {
+ if (!RenderRef(c, table.table()[i]))
+ return false;
+
+ if (!c.buffer.append(" "))
+ return false;
+ }
+
+ return RenderRef(c, table.def());
+}
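+// For illustration (labels are placeholders): after the optional value and the index
+// expression have been rendered, a two-entry table renders as "br_table $a $b $default",
+// with the default label last.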
+
+static bool
+RenderReturn(WasmRenderContext& c, AstReturn& ret)
+{
+ if (ret.maybeExpr()) {
+ if (!RenderExpr(c, *(ret.maybeExpr())))
+ return false;
+ }
+
+ if (!RenderIndent(c))
+ return false;
+
+ MAP_AST_EXPR(c, ret);
+ return c.buffer.append("return");
+}
+
+static bool
+RenderExpr(WasmRenderContext& c, AstExpr& expr, bool newLine /* = true */)
+{
+ switch (expr.kind()) {
+ case AstExprKind::Drop:
+ if (!RenderDrop(c, expr.as<AstDrop>()))
+ return false;
+ break;
+ case AstExprKind::Nop:
+ if (!RenderNop(c, expr.as<AstNop>()))
+ return false;
+ break;
+ case AstExprKind::Unreachable:
+ if (!RenderUnreachable(c, expr.as<AstUnreachable>()))
+ return false;
+ break;
+ case AstExprKind::Call:
+ if (!RenderCall(c, expr.as<AstCall>()))
+ return false;
+ break;
+ case AstExprKind::CallIndirect:
+ if (!RenderCallIndirect(c, expr.as<AstCallIndirect>()))
+ return false;
+ break;
+ case AstExprKind::Const:
+ if (!RenderConst(c, expr.as<AstConst>()))
+ return false;
+ break;
+ case AstExprKind::GetLocal:
+ if (!RenderGetLocal(c, expr.as<AstGetLocal>()))
+ return false;
+ break;
+ case AstExprKind::SetLocal:
+ if (!RenderSetLocal(c, expr.as<AstSetLocal>()))
+ return false;
+ break;
+ case AstExprKind::GetGlobal:
+ if (!RenderGetGlobal(c, expr.as<AstGetGlobal>()))
+ return false;
+ break;
+ case AstExprKind::SetGlobal:
+ if (!RenderSetGlobal(c, expr.as<AstSetGlobal>()))
+ return false;
+ break;
+ case AstExprKind::TeeLocal:
+ if (!RenderTeeLocal(c, expr.as<AstTeeLocal>()))
+ return false;
+ break;
+ case AstExprKind::Block:
+ if (!RenderBlock(c, expr.as<AstBlock>()))
+ return false;
+ break;
+ case AstExprKind::If:
+ if (!RenderIf(c, expr.as<AstIf>()))
+ return false;
+ break;
+ case AstExprKind::UnaryOperator:
+ if (!RenderUnaryOperator(c, expr.as<AstUnaryOperator>()))
+ return false;
+ break;
+ case AstExprKind::BinaryOperator:
+ if (!RenderBinaryOperator(c, expr.as<AstBinaryOperator>()))
+ return false;
+ break;
+ case AstExprKind::TernaryOperator:
+ if (!RenderTernaryOperator(c, expr.as<AstTernaryOperator>()))
+ return false;
+ break;
+ case AstExprKind::ComparisonOperator:
+ if (!RenderComparisonOperator(c, expr.as<AstComparisonOperator>()))
+ return false;
+ break;
+ case AstExprKind::ConversionOperator:
+ if (!RenderConversionOperator(c, expr.as<AstConversionOperator>()))
+ return false;
+ break;
+ case AstExprKind::Load:
+ if (!RenderLoad(c, expr.as<AstLoad>()))
+ return false;
+ break;
+ case AstExprKind::Store:
+ if (!RenderStore(c, expr.as<AstStore>()))
+ return false;
+ break;
+ case AstExprKind::Branch:
+ if (!RenderBranch(c, expr.as<AstBranch>()))
+ return false;
+ break;
+ case AstExprKind::BranchTable:
+ if (!RenderBrTable(c, expr.as<AstBranchTable>()))
+ return false;
+ break;
+ case AstExprKind::Return:
+ if (!RenderReturn(c, expr.as<AstReturn>()))
+ return false;
+ break;
+ case AstExprKind::First:
+ newLine = false;
+ if (!RenderFirst(c, expr.as<AstFirst>()))
+ return false;
+ break;
+ case AstExprKind::CurrentMemory:
+ if (!RenderCurrentMemory(c, expr.as<AstCurrentMemory>()))
+ return false;
+ break;
+ case AstExprKind::GrowMemory:
+ if (!RenderGrowMemory(c, expr.as<AstGrowMemory>()))
+ return false;
+ break;
+ default:
+ MOZ_CRASH("Bad AstExprKind");
+ }
+
+ return !newLine || c.buffer.append("\n");
+}
+
+static bool
+RenderSignature(WasmRenderContext& c, const AstSig& sig, const AstNameVector* maybeLocals = nullptr)
+{
+ uint32_t paramsNum = sig.args().length();
+
+ if (maybeLocals) {
+ for (uint32_t i = 0; i < paramsNum; i++) {
+ if (!c.buffer.append(" (param "))
+ return false;
+ const AstName& name = (*maybeLocals)[i];
+ if (!name.empty()) {
+ if (!RenderName(c, name))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ }
+ ValType arg = sig.args()[i];
+ if (!RenderValType(c, arg))
+ return false;
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ } else if (paramsNum > 0) {
+ if (!c.buffer.append(" (param"))
+ return false;
+ for (uint32_t i = 0; i < paramsNum; i++) {
+ if (!c.buffer.append(" "))
+ return false;
+ ValType arg = sig.args()[i];
+ if (!RenderValType(c, arg))
+ return false;
+ }
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ if (sig.ret() != ExprType::Void) {
+ if (!c.buffer.append(" (result "))
+ return false;
+ if (!RenderExprType(c, sig.ret()))
+ return false;
+ if (!c.buffer.append(")"))
+ return false;
+ }
+ return true;
+}
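+// For illustration: with named locals this renders as " (param $x i32) (param $y f64)",
+// without them as " (param i32 f64)", and a non-void result adds " (result i32)".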
+
+static bool
+RenderTypeSection(WasmRenderContext& c, const AstModule::SigVector& sigs)
+{
+ uint32_t numSigs = sigs.length();
+ if (!numSigs)
+ return true;
+
+ for (uint32_t sigIndex = 0; sigIndex < numSigs; sigIndex++) {
+ const AstSig* sig = sigs[sigIndex];
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(type"))
+ return false;
+ if (!sig->name().empty()) {
+ if (!c.buffer.append(" "))
+ return false;
+ if (!RenderName(c, sig->name()))
+ return false;
+ }
+ if (!c.buffer.append(" (func"))
+ return false;
+ if (!RenderSignature(c, *sig))
+ return false;
+ if (!c.buffer.append("))\n"))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderLimits(WasmRenderContext& c, const Limits& limits)
+{
+ if (!RenderInt32(c, limits.initial))
+ return false;
+ if (limits.maximum) {
+ if (!c.buffer.append(" "))
+ return false;
+ if (!RenderInt32(c, *limits.maximum))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderResizableTable(WasmRenderContext& c, const Limits& table)
+{
+ if (!c.buffer.append("(table "))
+ return false;
+ if (!RenderLimits(c, table))
+ return false;
+ return c.buffer.append(" anyfunc)");
+}
+
+static bool
+RenderTableSection(WasmRenderContext& c, const AstModule& module)
+{
+ if (!module.hasTable())
+ return true;
+ for (const AstResizable& table : module.tables()) {
+ if (table.imported)
+ continue;
+ if (!RenderIndent(c))
+ return false;
+ if (!RenderResizableTable(c, table.limits))
+ return false;
+ if (!c.buffer.append("\n"))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderInlineExpr(WasmRenderContext& c, AstExpr& expr)
+{
+ if (!c.buffer.append("("))
+ return false;
+
+ uint32_t prevIndent = c.indent;
+ c.indent = 0;
+ if (!RenderExpr(c, expr, /* newLine */ false))
+ return false;
+ c.indent = prevIndent;
+
+ return c.buffer.append(")");
+}
+
+static bool
+RenderElemSection(WasmRenderContext& c, const AstModule& module)
+{
+ for (const AstElemSegment* segment : module.elemSegments()) {
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(elem "))
+ return false;
+ if (!RenderInlineExpr(c, *segment->offset()))
+ return false;
+
+ for (const AstRef& elem : segment->elems()) {
+ if (!c.buffer.append(" "))
+ return false;
+
+ uint32_t index = elem.index();
+ AstName name = index < module.funcImportNames().length()
+ ? module.funcImportNames()[index]
+ : module.funcs()[index - module.funcImportNames().length()]->name();
+
+ if (name.empty()) {
+ if (!RenderInt32(c, index))
+ return false;
+ } else {
+ if (!RenderName(c, name))
+ return false;
+ }
+ }
+
+ if (!c.buffer.append(")\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderGlobal(WasmRenderContext& c, const AstGlobal& glob, bool inImport = false)
+{
+ if (!c.buffer.append("(global "))
+ return false;
+
+ if (!inImport) {
+ if (!RenderName(c, glob.name()))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ }
+
+ if (glob.isMutable()) {
+ if (!c.buffer.append("(mut "))
+ return false;
+ if (!RenderValType(c, glob.type()))
+ return false;
+ if (!c.buffer.append(")"))
+ return false;
+ } else {
+ if (!RenderValType(c, glob.type()))
+ return false;
+ }
+
+ if (glob.hasInit()) {
+ if (!c.buffer.append(" "))
+ return false;
+ if (!RenderInlineExpr(c, glob.init()))
+ return false;
+ }
+
+ if (!c.buffer.append(")"))
+ return false;
+
+ return inImport || c.buffer.append("\n");
+}
+
+static bool
+RenderGlobalSection(WasmRenderContext& c, const AstModule& module)
+{
+ if (module.globals().empty())
+ return true;
+
+ for (const AstGlobal* global : module.globals()) {
+ if (!RenderIndent(c))
+ return false;
+ if (!RenderGlobal(c, *global))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderResizableMemory(WasmRenderContext& c, Limits memory)
+{
+ if (!c.buffer.append("(memory "))
+ return false;
+
+ MOZ_ASSERT(memory.initial % PageSize == 0);
+ memory.initial /= PageSize;
+
+ if (memory.maximum) {
+ MOZ_ASSERT(*memory.maximum % PageSize == 0);
+ *memory.maximum /= PageSize;
+ }
+
+ if (!RenderLimits(c, memory))
+ return false;
+
+ return c.buffer.append(")");
+}
+
+static bool
+RenderImport(WasmRenderContext& c, AstImport& import, const AstModule& module)
+{
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(import "))
+ return false;
+ if (!RenderName(c, import.name()))
+ return false;
+ if (!c.buffer.append(" \""))
+ return false;
+
+ const AstName& moduleName = import.module();
+ if (!RenderEscapedString(c, moduleName))
+ return false;
+
+ if (!c.buffer.append("\" \""))
+ return false;
+
+ const AstName& fieldName = import.field();
+ if (!RenderEscapedString(c, fieldName))
+ return false;
+
+ if (!c.buffer.append("\" "))
+ return false;
+
+ switch (import.kind()) {
+ case DefinitionKind::Function: {
+ const AstSig* sig = module.sigs()[import.funcSig().index()];
+ if (!RenderSignature(c, *sig))
+ return false;
+ break;
+ }
+ case DefinitionKind::Table: {
+ if (!RenderResizableTable(c, import.limits()))
+ return false;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!RenderResizableMemory(c, import.limits()))
+ return false;
+ break;
+ }
+ case DefinitionKind::Global: {
+ const AstGlobal& glob = import.global();
+ if (!RenderGlobal(c, glob, /* inImport */ true))
+ return false;
+ break;
+ }
+ }
+
+ return c.buffer.append(")\n");
+}
+
+static bool
+RenderImportSection(WasmRenderContext& c, const AstModule& module)
+{
+ for (AstImport* import : module.imports()) {
+ if (!RenderImport(c, *import, module))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderExport(WasmRenderContext& c, AstExport& export_,
+ const AstModule::NameVector& funcImportNames,
+ const AstModule::FuncVector& funcs)
+{
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(export \""))
+ return false;
+ if (!RenderEscapedString(c, export_.name()))
+ return false;
+ if (!c.buffer.append("\" "))
+ return false;
+
+ switch (export_.kind()) {
+ case DefinitionKind::Function: {
+ uint32_t index = export_.ref().index();
+ AstName name = index < funcImportNames.length()
+ ? funcImportNames[index]
+ : funcs[index - funcImportNames.length()]->name();
+ if (name.empty()) {
+ if (!RenderInt32(c, index))
+ return false;
+ } else {
+ if (!RenderName(c, name))
+ return false;
+ }
+ break;
+ }
+ case DefinitionKind::Table: {
+ if (!c.buffer.append("table"))
+ return false;
+ break;
+ }
+ case DefinitionKind::Memory: {
+ if (!c.buffer.append("memory"))
+ return false;
+ break;
+ }
+ case DefinitionKind::Global: {
+ if (!c.buffer.append("global"))
+ return false;
+ if (!RenderRef(c, export_.ref()))
+ return false;
+ break;
+ }
+ }
+
+ return c.buffer.append(")\n");
+}
+
+static bool
+RenderExportSection(WasmRenderContext& c, const AstModule::ExportVector& exports,
+ const AstModule::NameVector& funcImportNames,
+ const AstModule::FuncVector& funcs)
+{
+ uint32_t numExports = exports.length();
+ for (uint32_t i = 0; i < numExports; i++) {
+ if (!RenderExport(c, *exports[i], funcImportNames, funcs))
+ return false;
+ }
+ return true;
+}
+
+static bool
+RenderFunctionBody(WasmRenderContext& c, AstFunc& func, const AstModule::SigVector& sigs)
+{
+ const AstSig* sig = sigs[func.sig().index()];
+
+ size_t startExprIndex = c.maybeSourceMap ? c.maybeSourceMap->exprlocs().length() : 0;
+ uint32_t startLineno = c.buffer.lineno();
+
+ uint32_t argsNum = sig->args().length();
+ uint32_t localsNum = func.vars().length();
+ if (localsNum > 0) {
+ if (!RenderIndent(c))
+ return false;
+ for (uint32_t i = 0; i < localsNum; i++) {
+ if (!c.buffer.append("(local "))
+ return false;
+ const AstName& name = func.locals()[argsNum + i];
+ if (!name.empty()) {
+ if (!RenderName(c, name))
+ return false;
+ if (!c.buffer.append(" "))
+ return false;
+ }
+ ValType local = func.vars()[i];
+ if (!RenderValType(c, local))
+ return false;
+ if (!c.buffer.append(") "))
+ return false;
+ }
+ if (!c.buffer.append("\n"))
+ return false;
+ }
+
+ uint32_t exprsNum = func.body().length();
+ for (uint32_t i = 0; i < exprsNum; i++) {
+ if (!RenderExpr(c, *func.body()[i]))
+ return false;
+ }
+
+ size_t endExprIndex = c.maybeSourceMap ? c.maybeSourceMap->exprlocs().length() : 0;
+ uint32_t endLineno = c.buffer.lineno();
+
+ if (c.maybeSourceMap) {
+ if (!c.maybeSourceMap->functionlocs().emplaceBack(startExprIndex, endExprIndex,
+ startLineno, endLineno))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderCodeSection(WasmRenderContext& c, const AstModule::FuncVector& funcs,
+ const AstModule::SigVector& sigs)
+{
+ uint32_t numFuncBodies = funcs.length();
+ for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
+ AstFunc* func = funcs[funcIndex];
+ uint32_t sigIndex = func->sig().index();
+ AstSig* sig = sigs[sigIndex];
+
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(func "))
+ return false;
+ if (!func->name().empty()) {
+ if (!RenderName(c, func->name()))
+ return false;
+ }
+
+ if (!RenderSignature(c, *sig, &(func->locals())))
+ return false;
+ if (!c.buffer.append("\n"))
+ return false;
+
+ c.currentFuncIndex = funcIndex;
+
+ c.indent++;
+ if (!RenderFunctionBody(c, *func, sigs))
+ return false;
+ c.indent--;
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append(")\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderMemorySection(WasmRenderContext& c, const AstModule& module)
+{
+ if (!module.hasMemory())
+ return true;
+
+ for (const AstResizable& memory : module.memories()) {
+ if (memory.imported)
+ continue;
+ if (!RenderIndent(c))
+ return false;
+ if (!RenderResizableMemory(c, memory.limits))
+ return false;
+ if (!c.buffer.append("\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderDataSection(WasmRenderContext& c, const AstModule& module)
+{
+ uint32_t numSegments = module.dataSegments().length();
+ if (!numSegments)
+ return true;
+
+ for (const AstDataSegment* seg : module.dataSegments()) {
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(data "))
+ return false;
+ if (!RenderInlineExpr(c, *seg->offset()))
+ return false;
+ if (!c.buffer.append("\n"))
+ return false;
+
+ c.indent++;
+ for (const AstName& fragment : seg->fragments()) {
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("\""))
+ return false;
+ if (!RenderEscapedString(c, fragment))
+ return false;
+ if (!c.buffer.append("\"\n"))
+ return false;
+ }
+ c.indent--;
+
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append(")\n"))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+RenderStartSection(WasmRenderContext& c, AstModule& module)
+{
+ if (!module.hasStartFunc())
+ return true;
+
+ if (!RenderIndent(c))
+ return false;
+ if (!c.buffer.append("(start "))
+ return false;
+ if (!RenderRef(c, module.startFunc().func()))
+ return false;
+ if (!c.buffer.append(")\n"))
+ return false;
+
+ return true;
+}
+
+static bool
+RenderModule(WasmRenderContext& c, AstModule& module)
+{
+ if (!c.buffer.append("(module\n"))
+ return false;
+
+ c.indent++;
+
+ if (!RenderTypeSection(c, module.sigs()))
+ return false;
+
+ if (!RenderImportSection(c, module))
+ return false;
+
+ if (!RenderTableSection(c, module))
+ return false;
+
+ if (!RenderMemorySection(c, module))
+ return false;
+
+ if (!RenderGlobalSection(c, module))
+ return false;
+
+ if (!RenderExportSection(c, module.exports(), module.funcImportNames(), module.funcs()))
+ return false;
+
+ if (!RenderStartSection(c, module))
+ return false;
+
+ if (!RenderElemSection(c, module))
+ return false;
+
+ if (!RenderCodeSection(c, module.funcs(), module.sigs()))
+ return false;
+
+ if (!RenderDataSection(c, module))
+ return false;
+
+ c.indent--;
+
+ if (!c.buffer.append(")"))
+ return false;
+
+ return true;
+}
+
+#undef MAP_AST_EXPR
+
+/*****************************************************************************/
+// Top-level functions
+
+bool
+wasm::BinaryToText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuffer& buffer, GeneratedSourceMap* sourceMap)
+{
+ LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE);
+
+ AstModule* module;
+ if (!BinaryToAst(cx, bytes, length, lifo, &module))
+ return false;
+
+ WasmPrintBuffer buf(buffer);
+ WasmRenderContext c(cx, module, buf, sourceMap);
+
+ if (!RenderModule(c, *module)) {
+ if (!cx->isExceptionPending())
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
diff --git a/js/src/wasm/WasmBinaryToText.h b/js/src/wasm/WasmBinaryToText.h
new file mode 100644
index 0000000000..7126e0c3f0
--- /dev/null
+++ b/js/src/wasm/WasmBinaryToText.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_binary_to_text_h
+#define wasm_binary_to_text_h
+
+#include "NamespaceImports.h"
+
+#include "gc/Rooting.h"
+#include "js/Class.h"
+#include "wasm/WasmGeneratedSourceMap.h"
+
+namespace js {
+
+class StringBuffer;
+
+namespace wasm {
+
+// Translate the given binary representation of a wasm module into the module's textual
+// representation.
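+//
+// Illustrative use (a sketch, not part of this patch; assumes `bytes` and
+// `length` describe a wasm binary and `cx` is the current JSContext):
+//
+//   StringBuffer buffer(cx);
+//   if (!wasm::BinaryToText(cx, bytes, length, buffer))
+//       return false;   // error already reported on cx
+//   JSString* text = buffer.finishString();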
+
+MOZ_MUST_USE bool
+BinaryToText(JSContext* cx, const uint8_t* bytes, size_t length, StringBuffer& buffer,
+ GeneratedSourceMap* sourceMap = nullptr);
+
+} // namespace wasm
+
+} // namespace js
+
+#endif // wasm_binary_to_text_h
diff --git a/js/src/wasm/WasmCode.cpp b/js/src/wasm/WasmCode.cpp
new file mode 100644
index 0000000000..bec987764c
--- /dev/null
+++ b/js/src/wasm/WasmCode.cpp
@@ -0,0 +1,835 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCode.h"
+
+#include "mozilla/Atomics.h"
+#include "mozilla/BinarySearch.h"
+#include "mozilla/EnumeratedRange.h"
+
+#include "jsprf.h"
+
+#include "jit/ExecutableAllocator.h"
+#include "jit/MacroAssembler.h"
+#ifdef JS_ION_PERF
+# include "jit/PerfSpewer.h"
+#endif
+#include "vm/StringBuffer.h"
+#ifdef MOZ_VTUNE
+# include "vtune/VTuneWrapper.h"
+#endif
+#include "wasm/WasmBinaryToText.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmSerialize.h"
+
+#include "jit/MacroAssembler-inl.h"
+#include "vm/ArrayBufferObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::Atomic;
+using mozilla::BinarySearch;
+using mozilla::MakeEnumeratedRange;
+using JS::GenericNaN;
+
+// Limit the number of concurrent wasm code allocations per process. Note that
+// on Linux, the real maximum is ~32k, as each module requires 2 maps (RW/RX),
+// and the kernel's default max_map_count is ~65k.
+//
+// Note: this can be removed once writable/non-executable global data stops
+// being stored in the code segment.
+static Atomic<uint32_t> wasmCodeAllocations(0);
+static const uint32_t MaxWasmCodeAllocations = 16384;
+
+static uint8_t*
+AllocateCodeSegment(JSContext* cx, uint32_t totalLength)
+{
+ if (wasmCodeAllocations >= MaxWasmCodeAllocations)
+ return nullptr;
+
+    // totalLength is a multiple of the system's page size, but not necessarily
+    // a multiple of ExecutableCodePageSize.
+ totalLength = JS_ROUNDUP(totalLength, ExecutableCodePageSize);
+
+ void* p = AllocateExecutableMemory(totalLength, ProtectionSetting::Writable);
+
+ // If the allocation failed and the embedding gives us a last-ditch attempt
+ // to purge all memory (which, in gecko, does a purging GC/CC/GC), do that
+ // then retry the allocation.
+ if (!p) {
+ JSRuntime* rt = cx->runtime();
+ if (rt->largeAllocationFailureCallback) {
+ rt->largeAllocationFailureCallback(rt->largeAllocationFailureCallbackData);
+ p = AllocateExecutableMemory(totalLength, ProtectionSetting::Writable);
+ }
+ }
+
+ if (!p) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ wasmCodeAllocations++;
+ return (uint8_t*)p;
+}
+
+static void
+StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
+{
+ for (LinkData::InternalLink link : linkData.internalLinks) {
+ uint8_t* patchAt = cs.base() + link.patchAtOffset;
+ void* target = cs.base() + link.targetOffset;
+ if (link.isRawPointerPatch())
+ *(void**)(patchAt) = target;
+ else
+ Assembler::PatchInstructionImmediate(patchAt, PatchedImmPtr(target));
+ }
+
+ for (auto imm : MakeEnumeratedRange(SymbolicAddress::Limit)) {
+ const Uint32Vector& offsets = linkData.symbolicLinks[imm];
+ for (size_t i = 0; i < offsets.length(); i++) {
+ uint8_t* patchAt = cs.base() + offsets[i];
+ void* target = AddressOf(imm, cx);
+ Assembler::PatchDataWithValueCheck(CodeLocationLabel(patchAt),
+ PatchedImmPtr(target),
+ PatchedImmPtr((void*)-1));
+ }
+ }
+
+ // These constants are logically part of the code:
+
+ *(double*)(cs.globalData() + NaN64GlobalDataOffset) = GenericNaN();
+ *(float*)(cs.globalData() + NaN32GlobalDataOffset) = GenericNaN();
+}
+
+static void
+SpecializeToMemory(uint8_t* prevMemoryBase, CodeSegment& cs, const Metadata& metadata,
+ ArrayBufferObjectMaybeShared& buffer)
+{
+#ifdef WASM_HUGE_MEMORY
+ MOZ_RELEASE_ASSERT(metadata.boundsChecks.empty());
+#else
+ uint32_t limit = buffer.wasmBoundsCheckLimit();
+ MOZ_RELEASE_ASSERT(IsValidBoundsCheckImmediate(limit));
+
+ for (const BoundsCheck& check : metadata.boundsChecks)
+ MacroAssembler::wasmPatchBoundsCheck(check.patchAt(cs.base()), limit);
+#endif
+
+#if defined(JS_CODEGEN_X86)
+ uint8_t* memoryBase = buffer.dataPointerEither().unwrap(/* code patching */);
+ if (prevMemoryBase != memoryBase) {
+ for (MemoryPatch patch : metadata.memoryPatches) {
+ void* patchAt = cs.base() + patch.offset;
+
+ uint8_t* prevImm = (uint8_t*)X86Encoding::GetPointer(patchAt);
+ MOZ_ASSERT(prevImm >= prevMemoryBase);
+
+ uint32_t offset = prevImm - prevMemoryBase;
+ MOZ_ASSERT(offset <= INT32_MAX);
+
+ X86Encoding::SetPointer(patchAt, memoryBase + offset);
+ }
+ }
+#else
+ MOZ_RELEASE_ASSERT(metadata.memoryPatches.empty());
+#endif
+}
+
+static bool
+SendCodeRangesToProfiler(JSContext* cx, CodeSegment& cs, const Bytes& bytecode,
+ const Metadata& metadata)
+{
+ bool enabled = false;
+#ifdef JS_ION_PERF
+ enabled |= PerfFuncEnabled();
+#endif
+#ifdef MOZ_VTUNE
+ enabled |= IsVTuneProfilingActive();
+#endif
+ if (!enabled)
+ return true;
+
+ for (const CodeRange& codeRange : metadata.codeRanges) {
+ if (!codeRange.isFunction())
+ continue;
+
+ uintptr_t start = uintptr_t(cs.base() + codeRange.begin());
+ uintptr_t end = uintptr_t(cs.base() + codeRange.end());
+ uintptr_t size = end - start;
+
+ TwoByteName name(cx);
+ if (!metadata.getFuncName(cx, &bytecode, codeRange.funcIndex(), &name))
+ return false;
+
+ UniqueChars chars(
+ (char*)JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, name.begin(), name.length()).get());
+ if (!chars)
+ return false;
+
+ // Avoid "unused" warnings
+ (void)start;
+ (void)size;
+
+#ifdef JS_ION_PERF
+ if (PerfFuncEnabled()) {
+ const char* file = metadata.filename.get();
+ unsigned line = codeRange.funcLineOrBytecode();
+ unsigned column = 0;
+ writePerfSpewerAsmJSFunctionMap(start, size, file, line, column, chars.get());
+ }
+#endif
+#ifdef MOZ_VTUNE
+ if (IsVTuneProfilingActive()) {
+ unsigned method_id = iJIT_GetNewMethodID();
+ if (method_id == 0)
+ return true;
+ iJIT_Method_Load method;
+ method.method_id = method_id;
+ method.method_name = chars.get();
+ method.method_load_address = (void*)start;
+ method.method_size = size;
+ method.line_number_size = 0;
+ method.line_number_table = nullptr;
+ method.class_id = 0;
+ method.class_file_name = nullptr;
+ method.source_file_name = nullptr;
+ iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, (void*)&method);
+ }
+#endif
+ }
+
+ return true;
+}
+
+/* static */ UniqueCodeSegment
+CodeSegment::create(JSContext* cx,
+ const Bytes& bytecode,
+ const LinkData& linkData,
+ const Metadata& metadata,
+ HandleWasmMemoryObject memory)
+{
+ MOZ_ASSERT(bytecode.length() % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(linkData.globalDataLength % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(linkData.functionCodeLength < bytecode.length());
+
+ auto cs = cx->make_unique<CodeSegment>();
+ if (!cs)
+ return nullptr;
+
+ cs->bytes_ = AllocateCodeSegment(cx, bytecode.length() + linkData.globalDataLength);
+ if (!cs->bytes_)
+ return nullptr;
+
+ uint8_t* codeBase = cs->base();
+
+ cs->functionCodeLength_ = linkData.functionCodeLength;
+ cs->codeLength_ = bytecode.length();
+ cs->globalDataLength_ = linkData.globalDataLength;
+ cs->interruptCode_ = codeBase + linkData.interruptOffset;
+ cs->outOfBoundsCode_ = codeBase + linkData.outOfBoundsOffset;
+ cs->unalignedAccessCode_ = codeBase + linkData.unalignedAccessOffset;
+
+ {
+ JitContext jcx(CompileRuntime::get(cx->compartment()->runtimeFromAnyThread()));
+ AutoFlushICache afc("CodeSegment::create");
+ AutoFlushICache::setRange(uintptr_t(codeBase), cs->codeLength());
+
+ memcpy(codeBase, bytecode.begin(), bytecode.length());
+ StaticallyLink(*cs, linkData, cx);
+ if (memory)
+ SpecializeToMemory(nullptr, *cs, metadata, memory->buffer());
+ }
+
+ if (!ExecutableAllocator::makeExecutable(codeBase, cs->codeLength())) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ if (!SendCodeRangesToProfiler(cx, *cs, bytecode, metadata))
+ return nullptr;
+
+ return cs;
+}
+
+CodeSegment::~CodeSegment()
+{
+ if (!bytes_)
+ return;
+
+ MOZ_ASSERT(wasmCodeAllocations > 0);
+ wasmCodeAllocations--;
+
+ MOZ_ASSERT(totalLength() > 0);
+
+ // Match AllocateCodeSegment.
+ uint32_t size = JS_ROUNDUP(totalLength(), ExecutableCodePageSize);
+ DeallocateExecutableMemory(bytes_, size);
+}
+
+void
+CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer)
+{
+ AutoWritableJitCode awjc(base(), codeLength());
+ AutoFlushICache afc("CodeSegment::onMovingGrow");
+ AutoFlushICache::setRange(uintptr_t(base()), codeLength());
+
+ SpecializeToMemory(prevMemoryBase, *this, metadata, buffer);
+}
+
+size_t
+FuncExport::serializedSize() const
+{
+ return sig_.serializedSize() +
+ sizeof(pod);
+}
+
+uint8_t*
+FuncExport::serialize(uint8_t* cursor) const
+{
+ cursor = sig_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t*
+FuncExport::deserialize(const uint8_t* cursor)
+{
+ (cursor = sig_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t
+FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return sig_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+FuncImport::serializedSize() const
+{
+ return sig_.serializedSize() +
+ sizeof(pod);
+}
+
+uint8_t*
+FuncImport::serialize(uint8_t* cursor) const
+{
+ cursor = sig_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t*
+FuncImport::deserialize(const uint8_t* cursor)
+{
+ (cursor = sig_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t
+FuncImport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return sig_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+CodeRange::CodeRange(Kind kind, Offsets offsets)
+ : begin_(offsets.begin),
+ profilingReturn_(0),
+ end_(offsets.end),
+ funcIndex_(0),
+ funcLineOrBytecode_(0),
+ funcBeginToTableEntry_(0),
+ funcBeginToTableProfilingJump_(0),
+ funcBeginToNonProfilingEntry_(0),
+ funcProfilingJumpToProfilingReturn_(0),
+ funcProfilingEpilogueToProfilingReturn_(0),
+ kind_(kind)
+{
+ MOZ_ASSERT(begin_ <= end_);
+ MOZ_ASSERT(kind_ == Entry || kind_ == Inline || kind_ == FarJumpIsland);
+}
+
+CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
+ : begin_(offsets.begin),
+ profilingReturn_(offsets.profilingReturn),
+ end_(offsets.end),
+ funcIndex_(0),
+ funcLineOrBytecode_(0),
+ funcBeginToTableEntry_(0),
+ funcBeginToTableProfilingJump_(0),
+ funcBeginToNonProfilingEntry_(0),
+ funcProfilingJumpToProfilingReturn_(0),
+ funcProfilingEpilogueToProfilingReturn_(0),
+ kind_(kind)
+{
+ MOZ_ASSERT(begin_ < profilingReturn_);
+ MOZ_ASSERT(profilingReturn_ < end_);
+ MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit || kind_ == TrapExit);
+}
+
+CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
+ : begin_(offsets.begin),
+ profilingReturn_(offsets.profilingReturn),
+ end_(offsets.end),
+ funcIndex_(funcIndex),
+ funcLineOrBytecode_(funcLineOrBytecode),
+ funcBeginToTableEntry_(offsets.tableEntry - begin_),
+ funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
+ funcBeginToNonProfilingEntry_(offsets.nonProfilingEntry - begin_),
+ funcProfilingJumpToProfilingReturn_(profilingReturn_ - offsets.profilingJump),
+ funcProfilingEpilogueToProfilingReturn_(profilingReturn_ - offsets.profilingEpilogue),
+ kind_(Function)
+{
+ MOZ_ASSERT(begin_ < profilingReturn_);
+ MOZ_ASSERT(profilingReturn_ < end_);
+ MOZ_ASSERT(funcBeginToTableEntry_ == offsets.tableEntry - begin_);
+ MOZ_ASSERT(funcBeginToTableProfilingJump_ == offsets.tableProfilingJump - begin_);
+ MOZ_ASSERT(funcBeginToNonProfilingEntry_ == offsets.nonProfilingEntry - begin_);
+ MOZ_ASSERT(funcProfilingJumpToProfilingReturn_ == profilingReturn_ - offsets.profilingJump);
+ MOZ_ASSERT(funcProfilingEpilogueToProfilingReturn_ == profilingReturn_ - offsets.profilingEpilogue);
+}
+
+static size_t
+StringLengthWithNullChar(const char* chars)
+{
+ return chars ? strlen(chars) + 1 : 0;
+}
+
+size_t
+CacheableChars::serializedSize() const
+{
+ return sizeof(uint32_t) + StringLengthWithNullChar(get());
+}
+
+uint8_t*
+CacheableChars::serialize(uint8_t* cursor) const
+{
+ uint32_t lengthWithNullChar = StringLengthWithNullChar(get());
+ cursor = WriteScalar<uint32_t>(cursor, lengthWithNullChar);
+ cursor = WriteBytes(cursor, get(), lengthWithNullChar);
+ return cursor;
+}
+
+const uint8_t*
+CacheableChars::deserialize(const uint8_t* cursor)
+{
+ uint32_t lengthWithNullChar;
+ cursor = ReadBytes(cursor, &lengthWithNullChar, sizeof(uint32_t));
+
+ if (lengthWithNullChar) {
+ reset(js_pod_malloc<char>(lengthWithNullChar));
+ if (!get())
+ return nullptr;
+
+ cursor = ReadBytes(cursor, get(), lengthWithNullChar);
+ } else {
+ MOZ_ASSERT(!get());
+ }
+
+ return cursor;
+}
+
+size_t
+CacheableChars::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return mallocSizeOf(get());
+}
+
+size_t
+Metadata::serializedSize() const
+{
+ return sizeof(pod()) +
+ SerializedVectorSize(funcImports) +
+ SerializedVectorSize(funcExports) +
+ SerializedVectorSize(sigIds) +
+ SerializedPodVectorSize(globals) +
+ SerializedPodVectorSize(tables) +
+ SerializedPodVectorSize(memoryAccesses) +
+ SerializedPodVectorSize(memoryPatches) +
+ SerializedPodVectorSize(boundsChecks) +
+ SerializedPodVectorSize(codeRanges) +
+ SerializedPodVectorSize(callSites) +
+ SerializedPodVectorSize(callThunks) +
+ SerializedPodVectorSize(funcNames) +
+ filename.serializedSize();
+}
+
+uint8_t*
+Metadata::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+ cursor = SerializeVector(cursor, funcImports);
+ cursor = SerializeVector(cursor, funcExports);
+ cursor = SerializeVector(cursor, sigIds);
+ cursor = SerializePodVector(cursor, globals);
+ cursor = SerializePodVector(cursor, tables);
+ cursor = SerializePodVector(cursor, memoryAccesses);
+ cursor = SerializePodVector(cursor, memoryPatches);
+ cursor = SerializePodVector(cursor, boundsChecks);
+ cursor = SerializePodVector(cursor, codeRanges);
+ cursor = SerializePodVector(cursor, callSites);
+ cursor = SerializePodVector(cursor, callThunks);
+ cursor = SerializePodVector(cursor, funcNames);
+ cursor = filename.serialize(cursor);
+ return cursor;
+}
+
+/* static */ const uint8_t*
+Metadata::deserialize(const uint8_t* cursor)
+{
+ (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+ (cursor = DeserializeVector(cursor, &funcImports)) &&
+ (cursor = DeserializeVector(cursor, &funcExports)) &&
+ (cursor = DeserializeVector(cursor, &sigIds)) &&
+ (cursor = DeserializePodVector(cursor, &globals)) &&
+ (cursor = DeserializePodVector(cursor, &tables)) &&
+ (cursor = DeserializePodVector(cursor, &memoryAccesses)) &&
+ (cursor = DeserializePodVector(cursor, &memoryPatches)) &&
+ (cursor = DeserializePodVector(cursor, &boundsChecks)) &&
+ (cursor = DeserializePodVector(cursor, &codeRanges)) &&
+ (cursor = DeserializePodVector(cursor, &callSites)) &&
+ (cursor = DeserializePodVector(cursor, &callThunks)) &&
+ (cursor = DeserializePodVector(cursor, &funcNames)) &&
+ (cursor = filename.deserialize(cursor));
+ return cursor;
+}
+
+size_t
+Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
+ SizeOfVectorExcludingThis(funcExports, mallocSizeOf) +
+ SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
+ globals.sizeOfExcludingThis(mallocSizeOf) +
+ tables.sizeOfExcludingThis(mallocSizeOf) +
+ memoryAccesses.sizeOfExcludingThis(mallocSizeOf) +
+ memoryPatches.sizeOfExcludingThis(mallocSizeOf) +
+ boundsChecks.sizeOfExcludingThis(mallocSizeOf) +
+ codeRanges.sizeOfExcludingThis(mallocSizeOf) +
+ callSites.sizeOfExcludingThis(mallocSizeOf) +
+ callThunks.sizeOfExcludingThis(mallocSizeOf) +
+ funcNames.sizeOfExcludingThis(mallocSizeOf) +
+ filename.sizeOfExcludingThis(mallocSizeOf);
+}
+
+struct ProjectFuncIndex
+{
+ const FuncExportVector& funcExports;
+
+ explicit ProjectFuncIndex(const FuncExportVector& funcExports)
+ : funcExports(funcExports)
+ {}
+ uint32_t operator[](size_t index) const {
+ return funcExports[index].funcIndex();
+ }
+};
+
+const FuncExport&
+Metadata::lookupFuncExport(uint32_t funcIndex) const
+{
+ size_t match;
+ if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), funcIndex, &match))
+ MOZ_CRASH("missing function export");
+
+ return funcExports[match];
+}
+
+bool
+Metadata::getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
+ TwoByteName* name) const
+{
+ if (funcIndex < funcNames.length()) {
+ MOZ_ASSERT(maybeBytecode, "NameInBytecode requires preserved bytecode");
+
+ const NameInBytecode& n = funcNames[funcIndex];
+ MOZ_ASSERT(n.offset + n.length < maybeBytecode->length());
+
+ if (n.length == 0)
+ goto invalid;
+
+ UTF8Chars utf8((const char*)maybeBytecode->begin() + n.offset, n.length);
+
+ // This code could be optimized by having JS::UTF8CharsToNewTwoByteCharsZ
+ // return a Vector directly.
+ size_t twoByteLength;
+ UniqueTwoByteChars chars(JS::UTF8CharsToNewTwoByteCharsZ(cx, utf8, &twoByteLength).get());
+ if (!chars)
+ goto invalid;
+
+ if (!name->growByUninitialized(twoByteLength))
+ return false;
+
+ PodCopy(name->begin(), chars.get(), twoByteLength);
+ return true;
+ }
+
+ invalid:
+
+ // For names that are out of range or invalid, synthesize a name.
+
+ UniqueChars chars(JS_smprintf("wasm-function[%u]", funcIndex));
+ if (!chars) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!name->growByUninitialized(strlen(chars.get())))
+ return false;
+
+ CopyAndInflateChars(name->begin(), chars.get(), name->length());
+ return true;
+}
+
+Code::Code(UniqueCodeSegment segment,
+ const Metadata& metadata,
+ const ShareableBytes* maybeBytecode)
+ : segment_(Move(segment)),
+ metadata_(&metadata),
+ maybeBytecode_(maybeBytecode),
+ profilingEnabled_(false)
+{}
+
+struct CallSiteRetAddrOffset
+{
+ const CallSiteVector& callSites;
+ explicit CallSiteRetAddrOffset(const CallSiteVector& callSites) : callSites(callSites) {}
+ uint32_t operator[](size_t index) const {
+ return callSites[index].returnAddressOffset();
+ }
+};
+
+const CallSite*
+Code::lookupCallSite(void* returnAddress) const
+{
+ uint32_t target = ((uint8_t*)returnAddress) - segment_->base();
+ size_t lowerBound = 0;
+ size_t upperBound = metadata_->callSites.length();
+
+ size_t match;
+ if (!BinarySearch(CallSiteRetAddrOffset(metadata_->callSites), lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &metadata_->callSites[match];
+}
+
+const CodeRange*
+Code::lookupRange(void* pc) const
+{
+ CodeRange::PC target((uint8_t*)pc - segment_->base());
+ size_t lowerBound = 0;
+ size_t upperBound = metadata_->codeRanges.length();
+
+ size_t match;
+ if (!BinarySearch(metadata_->codeRanges, lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &metadata_->codeRanges[match];
+}
+
+struct MemoryAccessOffset
+{
+ const MemoryAccessVector& accesses;
+ explicit MemoryAccessOffset(const MemoryAccessVector& accesses) : accesses(accesses) {}
+ uintptr_t operator[](size_t index) const {
+ return accesses[index].insnOffset();
+ }
+};
+
+const MemoryAccess*
+Code::lookupMemoryAccess(void* pc) const
+{
+ MOZ_ASSERT(segment_->containsFunctionPC(pc));
+
+ uint32_t target = ((uint8_t*)pc) - segment_->base();
+ size_t lowerBound = 0;
+ size_t upperBound = metadata_->memoryAccesses.length();
+
+ size_t match;
+ if (!BinarySearch(MemoryAccessOffset(metadata_->memoryAccesses), lowerBound, upperBound, target, &match))
+ return nullptr;
+
+ return &metadata_->memoryAccesses[match];
+}
+
+bool
+Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const
+{
+ const Bytes* maybeBytecode = maybeBytecode_ ? &maybeBytecode_.get()->bytes : nullptr;
+ return metadata_->getFuncName(cx, maybeBytecode, funcIndex, name);
+}
+
+JSAtom*
+Code::getFuncAtom(JSContext* cx, uint32_t funcIndex) const
+{
+ TwoByteName name(cx);
+ if (!getFuncName(cx, funcIndex, &name))
+ return nullptr;
+
+ return AtomizeChars(cx, name.begin(), name.length());
+}
+
+const char experimentalWarning[] =
+ ".--. .--. ____ .-'''-. ,---. ,---.\n"
+ "| |_ | | .' __ `. / _ \\| \\ / |\n"
+ "| _( )_ | |/ ' \\ \\ (`' )/`--'| , \\/ , |\n"
+ "|(_ o _) | ||___| / |(_ o _). | |\\_ /| |\n"
+ "| (_,_) \\ | | _.-` | (_,_). '. | _( )_/ | |\n"
+ "| |/ \\| |.' _ |.---. \\ :| (_ o _) | |\n"
+ "| ' /\\ ` || _( )_ |\\ `-' || (_,_) | |\n"
+ "| / \\ |\\ (_ o _) / \\ / | | | |\n"
+ "`---' `---` '.(_,_).' `-...-' '--' '--'\n"
+ "WebAssembly text support and debugging is not supported in this version. You can download\n"
+ "and use the following versions which have experimental debugger support:\n"
+ "- Firefox Developer Edition: https://www.mozilla.org/en-US/firefox/developer/\n"
+ "- Firefox Nightly: https://www.mozilla.org/en-US/firefox/nightly"
+ ;
+
+const size_t experimentalWarningLinesCount = 13;
+
+struct LineComparator
+{
+ const uint32_t lineno;
+ explicit LineComparator(uint32_t lineno) : lineno(lineno) {}
+
+ int operator()(const ExprLoc& loc) const {
+ return lineno == loc.lineno ? 0 : lineno < loc.lineno ? -1 : 1;
+ }
+};
+
+JSString*
+Code::createText(JSContext* cx)
+{
+ StringBuffer buffer(cx);
+ if (!buffer.append(experimentalWarning))
+ return nullptr;
+ return buffer.finishString();
+}
+
+bool
+Code::getLineOffsets(size_t lineno, Vector<uint32_t>& offsets) const
+{
+ // TODO Ensure text was generated?
+ if (!maybeSourceMap_)
+ return false;
+
+ if (lineno < experimentalWarningLinesCount)
+ return true;
+
+ lineno -= experimentalWarningLinesCount;
+
+ ExprLocVector& exprlocs = maybeSourceMap_->exprlocs();
+
+    // Binary search for the expression with the specified line number, then
+    // rewind to the first expression if more than one lands on the same line.
+ size_t match;
+ if (!BinarySearchIf(exprlocs, 0, exprlocs.length(), LineComparator(lineno), &match))
+ return true;
+
+ while (match > 0 && exprlocs[match - 1].lineno == lineno)
+ match--;
+
+ // Return all expression offsets that were printed on the specified line.
+ for (size_t i = match; i < exprlocs.length() && exprlocs[i].lineno == lineno; i++) {
+ if (!offsets.append(exprlocs[i].offset))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+Code::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
+{
+ if (profilingEnabled_ == newProfilingEnabled)
+ return true;
+
+ // When enabled, generate profiling labels for every name in funcNames_
+ // that is the name of some Function CodeRange. This involves malloc() so
+    // do it now since, once we start sampling, we'll be in a signal-handling
+ // context where we cannot malloc.
+ if (newProfilingEnabled) {
+ for (const CodeRange& codeRange : metadata_->codeRanges) {
+ if (!codeRange.isFunction())
+ continue;
+
+ TwoByteName name(cx);
+ if (!getFuncName(cx, codeRange.funcIndex(), &name))
+ return false;
+ if (!name.append('\0'))
+ return false;
+
+ TwoByteChars chars(name.begin(), name.length());
+ UniqueChars utf8Name(JS::CharsToNewUTF8CharsZ(nullptr, chars).c_str());
+ UniqueChars label(JS_smprintf("%s (%s:%u)",
+ utf8Name.get(),
+ metadata_->filename.get(),
+ codeRange.funcLineOrBytecode()));
+ if (!label) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (codeRange.funcIndex() >= funcLabels_.length()) {
+ if (!funcLabels_.resize(codeRange.funcIndex() + 1))
+ return false;
+ }
+ funcLabels_[codeRange.funcIndex()] = Move(label);
+ }
+ } else {
+ funcLabels_.clear();
+ }
+
+ // Only mutate the code after the fallible operations are complete to avoid
+ // the need to rollback.
+ profilingEnabled_ = newProfilingEnabled;
+
+ {
+ AutoWritableJitCode awjc(cx->runtime(), segment_->base(), segment_->codeLength());
+ AutoFlushICache afc("Code::ensureProfilingState");
+ AutoFlushICache::setRange(uintptr_t(segment_->base()), segment_->codeLength());
+
+ for (const CallSite& callSite : metadata_->callSites)
+ ToggleProfiling(*this, callSite, newProfilingEnabled);
+ for (const CallThunk& callThunk : metadata_->callThunks)
+ ToggleProfiling(*this, callThunk, newProfilingEnabled);
+ for (const CodeRange& codeRange : metadata_->codeRanges)
+ ToggleProfiling(*this, codeRange, newProfilingEnabled);
+ }
+
+ return true;
+}
+
+void
+Code::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ size_t* code,
+ size_t* data) const
+{
+ *code += segment_->codeLength();
+ *data += mallocSizeOf(this) +
+ segment_->globalDataLength() +
+ metadata_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata);
+
+ if (maybeBytecode_)
+ *data += maybeBytecode_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenBytes);
+}
diff --git a/js/src/wasm/WasmCode.h b/js/src/wasm/WasmCode.h
new file mode 100644
index 0000000000..db14ace40d
--- /dev/null
+++ b/js/src/wasm/WasmCode.h
@@ -0,0 +1,554 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_code_h
+#define wasm_code_h
+
+#include "wasm/WasmGeneratedSourceMap.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+struct AsmJSMetadata;
+
+namespace wasm {
+
+struct LinkData;
+struct Metadata;
+
+// A wasm CodeSegment owns the allocated executable code for a wasm module.
+// This allocation also currently includes the global data segment, which allows
+// RIP-relative access to global data on some architectures, but this will
+// change in the future to give global data its own allocation.
+
+class CodeSegment;
+typedef UniquePtr<CodeSegment> UniqueCodeSegment;
+
+class CodeSegment
+{
+ // bytes_ points to a single allocation with two contiguous ranges:
+ // executable machine code in the range [0, codeLength) and global data in
+ // the range [codeLength, codeLength + globalDataLength). The range
+ // [0, functionCodeLength) is the subrange of [0, codeLength) which contains
+ // function code.
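+    //
+    // Rough layout of the allocation (illustrative only):
+    //
+    //   [ function code | stub code ) [ global data )
+    //   0   functionCodeLength_    codeLength_    codeLength_ + globalDataLength_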
+ uint8_t* bytes_;
+ uint32_t functionCodeLength_;
+ uint32_t codeLength_;
+ uint32_t globalDataLength_;
+
+ // These are pointers into code for stubs used for asynchronous
+ // signal-handler control-flow transfer.
+ uint8_t* interruptCode_;
+ uint8_t* outOfBoundsCode_;
+ uint8_t* unalignedAccessCode_;
+
+ // The profiling mode may be changed dynamically.
+ bool profilingEnabled_;
+
+ CodeSegment() { PodZero(this); }
+ template <class> friend struct js::MallocProvider;
+
+ CodeSegment(const CodeSegment&) = delete;
+ CodeSegment(CodeSegment&&) = delete;
+ void operator=(const CodeSegment&) = delete;
+ void operator=(CodeSegment&&) = delete;
+
+ public:
+ static UniqueCodeSegment create(JSContext* cx,
+ const Bytes& code,
+ const LinkData& linkData,
+ const Metadata& metadata,
+ HandleWasmMemoryObject memory);
+ ~CodeSegment();
+
+ uint8_t* base() const { return bytes_; }
+ uint8_t* globalData() const { return bytes_ + codeLength_; }
+ uint32_t codeLength() const { return codeLength_; }
+ uint32_t globalDataLength() const { return globalDataLength_; }
+ uint32_t totalLength() const { return codeLength_ + globalDataLength_; }
+
+ uint8_t* interruptCode() const { return interruptCode_; }
+ uint8_t* outOfBoundsCode() const { return outOfBoundsCode_; }
+ uint8_t* unalignedAccessCode() const { return unalignedAccessCode_; }
+
+    // The range [0, functionCodeLength) is a subrange of [0, codeLength) that
+ // contains only function body code, not the stub code. This distinction is
+ // used by the async interrupt handler to only interrupt when the pc is in
+ // function code which, in turn, simplifies reasoning about how stubs
+ // enter/exit.
+
+ bool containsFunctionPC(const void* pc) const {
+ return pc >= base() && pc < (base() + functionCodeLength_);
+ }
+ bool containsCodePC(const void* pc) const {
+ return pc >= base() && pc < (base() + codeLength_);
+ }
+
+ // onMovingGrow must be called if the memory passed to 'create' performs a
+ // moving grow operation.
+
+ void onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer);
+};
+
+// ShareableBytes is a ref-counted vector of bytes which are incrementally built
+// during compilation and then immutably shared.
+
+struct ShareableBytes : ShareableBase<ShareableBytes>
+{
+ // Vector is 'final', so instead make Vector a member and add boilerplate.
+ Bytes bytes;
+ size_t sizeOfExcludingThis(MallocSizeOf m) const { return bytes.sizeOfExcludingThis(m); }
+ const uint8_t* begin() const { return bytes.begin(); }
+ const uint8_t* end() const { return bytes.end(); }
+ size_t length() const { return bytes.length(); }
+ bool append(const uint8_t *p, uint32_t ct) { return bytes.append(p, ct); }
+};
+
+typedef RefPtr<ShareableBytes> MutableBytes;
+typedef RefPtr<const ShareableBytes> SharedBytes;
+
+// A FuncExport represents a single function definition inside a wasm Module
+// that has been exported one or more times. It describes an internal entry
+// point that can be called, via function definition index, by
+// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
+// function definition index, the FuncExportVector is stored sorted by that
+// index.
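+//
+// For example (sketch; see Metadata::lookupFuncExport in WasmCode.cpp):
+//
+//   const FuncExport& fe = metadata.lookupFuncExport(funcIndex);
+//   MOZ_ASSERT(fe.funcIndex() == funcIndex);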
+
+class FuncExport
+{
+ Sig sig_;
+ MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
+ uint32_t funcIndex_;
+ uint32_t codeRangeIndex_;
+ uint32_t entryOffset_;
+ } pod;
+
+ public:
+ FuncExport() = default;
+ explicit FuncExport(Sig&& sig,
+ uint32_t funcIndex,
+ uint32_t codeRangeIndex)
+ : sig_(Move(sig))
+ {
+ pod.funcIndex_ = funcIndex;
+ pod.codeRangeIndex_ = codeRangeIndex;
+ pod.entryOffset_ = UINT32_MAX;
+ }
+ void initEntryOffset(uint32_t entryOffset) {
+ MOZ_ASSERT(pod.entryOffset_ == UINT32_MAX);
+ pod.entryOffset_ = entryOffset;
+ }
+
+ const Sig& sig() const {
+ return sig_;
+ }
+ uint32_t funcIndex() const {
+ return pod.funcIndex_;
+ }
+ uint32_t codeRangeIndex() const {
+ return pod.codeRangeIndex_;
+ }
+ uint32_t entryOffset() const {
+ MOZ_ASSERT(pod.entryOffset_ != UINT32_MAX);
+ return pod.entryOffset_;
+ }
+
+ WASM_DECLARE_SERIALIZABLE(FuncExport)
+};
+
+typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
+
+// A FuncImport contains the runtime metadata needed to implement a call to an
+// imported function. Each function import has two call stubs: an optimized path
+// into JIT code and a slow path into the generic C++ js::Invoke. The offsets of
+// these stubs are stored so that function-import callsites can be dynamically
+// patched at runtime.
+
+class FuncImport
+{
+ Sig sig_;
+ struct CacheablePod {
+ uint32_t tlsDataOffset_;
+ uint32_t interpExitCodeOffset_;
+ uint32_t jitExitCodeOffset_;
+ } pod;
+
+ public:
+ FuncImport() {
+ memset(&pod, 0, sizeof(CacheablePod));
+ }
+
+ FuncImport(Sig&& sig, uint32_t tlsDataOffset)
+ : sig_(Move(sig))
+ {
+ pod.tlsDataOffset_ = tlsDataOffset;
+ pod.interpExitCodeOffset_ = 0;
+ pod.jitExitCodeOffset_ = 0;
+ }
+
+ void initInterpExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.interpExitCodeOffset_);
+ pod.interpExitCodeOffset_ = off;
+ }
+ void initJitExitOffset(uint32_t off) {
+ MOZ_ASSERT(!pod.jitExitCodeOffset_);
+ pod.jitExitCodeOffset_ = off;
+ }
+
+ const Sig& sig() const {
+ return sig_;
+ }
+ uint32_t tlsDataOffset() const {
+ return pod.tlsDataOffset_;
+ }
+ uint32_t interpExitCodeOffset() const {
+ return pod.interpExitCodeOffset_;
+ }
+ uint32_t jitExitCodeOffset() const {
+ return pod.jitExitCodeOffset_;
+ }
+
+ WASM_DECLARE_SERIALIZABLE(FuncImport)
+};
+
+typedef Vector<FuncImport, 0, SystemAllocPolicy> FuncImportVector;
+
+// A CodeRange describes a single contiguous range of code within a wasm
+// module's code segment. A CodeRange describes what the code does and, for
+// function bodies, the name and source coordinates of the function.
+
+class CodeRange
+{
+ public:
+ enum Kind {
+ Function, // function definition
+ Entry, // calls into wasm from C++
+ ImportJitExit, // fast-path calling from wasm into JIT code
+ ImportInterpExit, // slow-path calling from wasm into C++ interp
+ TrapExit, // calls C++ to report and jumps to throw stub
+ FarJumpIsland, // inserted to connect otherwise out-of-range insns
+ Inline // stub that is jumped-to, not called, and thus
+ // replaces/loses preceding innermost frame
+ };
+
+ private:
+ // All fields are treated as cacheable POD:
+ uint32_t begin_;
+ uint32_t profilingReturn_;
+ uint32_t end_;
+ uint32_t funcIndex_;
+ uint32_t funcLineOrBytecode_;
+ uint8_t funcBeginToTableEntry_;
+ uint8_t funcBeginToTableProfilingJump_;
+ uint8_t funcBeginToNonProfilingEntry_;
+ uint8_t funcProfilingJumpToProfilingReturn_;
+ uint8_t funcProfilingEpilogueToProfilingReturn_;
+ Kind kind_ : 8;
+
+ public:
+ CodeRange() = default;
+ CodeRange(Kind kind, Offsets offsets);
+ CodeRange(Kind kind, ProfilingOffsets offsets);
+ CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
+
+ // All CodeRanges have a begin and end.
+
+ uint32_t begin() const {
+ return begin_;
+ }
+ uint32_t end() const {
+ return end_;
+ }
+
+ // Other fields are only available for certain CodeRange::Kinds.
+
+ Kind kind() const {
+ return kind_;
+ }
+
+ bool isFunction() const {
+ return kind() == Function;
+ }
+ bool isImportExit() const {
+ return kind() == ImportJitExit || kind() == ImportInterpExit;
+ }
+ bool isTrapExit() const {
+ return kind() == TrapExit;
+ }
+ bool isInline() const {
+ return kind() == Inline;
+ }
+
+ // Every CodeRange except entry and inline stubs has a profiling return
+ // which is used for asynchronous profiling to determine the frame pointer.
+
+ uint32_t profilingReturn() const {
+ MOZ_ASSERT(isFunction() || isImportExit() || isTrapExit());
+ return profilingReturn_;
+ }
+
+ // Functions have offsets which allow patching to selectively execute
+ // profiling prologues/epilogues.
+
+ uint32_t funcProfilingEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin();
+ }
+ uint32_t funcTableEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + funcBeginToTableEntry_;
+ }
+ uint32_t funcTableProfilingJump() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + funcBeginToTableProfilingJump_;
+ }
+ uint32_t funcNonProfilingEntry() const {
+ MOZ_ASSERT(isFunction());
+ return begin_ + funcBeginToNonProfilingEntry_;
+ }
+ uint32_t funcProfilingJump() const {
+ MOZ_ASSERT(isFunction());
+ return profilingReturn_ - funcProfilingJumpToProfilingReturn_;
+ }
+ uint32_t funcProfilingEpilogue() const {
+ MOZ_ASSERT(isFunction());
+ return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
+ }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(isFunction());
+ return funcIndex_;
+ }
+ uint32_t funcLineOrBytecode() const {
+ MOZ_ASSERT(isFunction());
+ return funcLineOrBytecode_;
+ }
+
+ // A sorted array of CodeRanges can be looked up via BinarySearch and PC.
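+    // For example (sketch; mirrors Code::lookupRange in WasmCode.cpp):
+    //
+    //   size_t match;
+    //   if (BinarySearch(metadata.codeRanges, 0, metadata.codeRanges.length(),
+    //                    CodeRange::PC(pcOffset), &match))
+    //       found = &metadata.codeRanges[match];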
+
+ struct PC {
+ size_t offset;
+ explicit PC(size_t offset) : offset(offset) {}
+ bool operator==(const CodeRange& rhs) const {
+ return offset >= rhs.begin() && offset < rhs.end();
+ }
+ bool operator<(const CodeRange& rhs) const {
+ return offset < rhs.begin();
+ }
+ };
+};
+
+WASM_DECLARE_POD_VECTOR(CodeRange, CodeRangeVector)
+
+// A CallThunk describes the offset and target of thunks so that they may be
+// patched at runtime when profiling is toggled. Thunks are emitted to connect
+// callsites that are too far away from callees to fit in a single call
+// instruction's relative offset.
+
+struct CallThunk
+{
+ uint32_t offset;
+ union {
+ uint32_t funcIndex;
+ uint32_t codeRangeIndex;
+ } u;
+
+ CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
+ CallThunk() = default;
+};
+
+WASM_DECLARE_POD_VECTOR(CallThunk, CallThunkVector)
+
+// A wasm module can either use no memory, an unshared memory (ArrayBuffer), or
+// shared memory (SharedArrayBuffer).
+
+enum class MemoryUsage
+{
+ None = false,
+ Unshared = 1,
+ Shared = 2
+};
+
+static inline bool
+UsesMemory(MemoryUsage memoryUsage)
+{
+ return bool(memoryUsage);
+}
+
+// NameInBytecode represents a name that is embedded in the wasm bytecode.
+// The presence of NameInBytecode implies that bytecode has been kept.
+
+struct NameInBytecode
+{
+ uint32_t offset;
+ uint32_t length;
+
+ NameInBytecode() = default;
+ NameInBytecode(uint32_t offset, uint32_t length) : offset(offset), length(length) {}
+};
+
+typedef Vector<NameInBytecode, 0, SystemAllocPolicy> NameInBytecodeVector;
+typedef Vector<char16_t, 64> TwoByteName;
+
+// Metadata holds all the data that is needed to describe compiled wasm code
+// at runtime (as opposed to data that is only used to statically link or
+// instantiate a module).
+//
+// Metadata is built incrementally by ModuleGenerator and then shared immutably
+// between modules.
+
+struct MetadataCacheablePod
+{
+ ModuleKind kind;
+ MemoryUsage memoryUsage;
+ uint32_t minMemoryLength;
+ Maybe<uint32_t> maxMemoryLength;
+ Maybe<uint32_t> startFuncIndex;
+
+ explicit MetadataCacheablePod(ModuleKind kind)
+ : kind(kind),
+ memoryUsage(MemoryUsage::None),
+ minMemoryLength(0)
+ {}
+};
+
+struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
+{
+ explicit Metadata(ModuleKind kind = ModuleKind::Wasm) : MetadataCacheablePod(kind) {}
+ virtual ~Metadata() {}
+
+ MetadataCacheablePod& pod() { return *this; }
+ const MetadataCacheablePod& pod() const { return *this; }
+
+ FuncImportVector funcImports;
+ FuncExportVector funcExports;
+ SigWithIdVector sigIds;
+ GlobalDescVector globals;
+ TableDescVector tables;
+ MemoryAccessVector memoryAccesses;
+ MemoryPatchVector memoryPatches;
+ BoundsCheckVector boundsChecks;
+ CodeRangeVector codeRanges;
+ CallSiteVector callSites;
+ CallThunkVector callThunks;
+ NameInBytecodeVector funcNames;
+ CacheableChars filename;
+
+ bool usesMemory() const { return UsesMemory(memoryUsage); }
+ bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
+
+ const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
+
+ // AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
+ // encapsulated within AsmJS.cpp, but the additional virtual functions allow
+ // asm.js to override wasm behavior in the handful of cases that can't be
+ // easily encapsulated by AsmJS.cpp.
+
+ bool isAsmJS() const {
+ return kind == ModuleKind::AsmJS;
+ }
+ const AsmJSMetadata& asAsmJS() const {
+ MOZ_ASSERT(isAsmJS());
+ return *(const AsmJSMetadata*)this;
+ }
+ virtual bool mutedErrors() const {
+ return false;
+ }
+ virtual const char16_t* displayURL() const {
+ return nullptr;
+ }
+ virtual ScriptSource* maybeScriptSource() const {
+ return nullptr;
+ }
+ virtual bool getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
+ TwoByteName* name) const;
+
+ WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata);
+};
+
+typedef RefPtr<Metadata> MutableMetadata;
+typedef RefPtr<const Metadata> SharedMetadata;
+
+// Code objects own executable code and the metadata that describes it. At the
+// moment, Code objects are owned uniquely by instances since CodeSegments are
+// not shareable. However, once this restriction is removed, a single Code
+// object will be shared between a module and all its instances.
+
+class Code
+{
+ const UniqueCodeSegment segment_;
+ const SharedMetadata metadata_;
+ const SharedBytes maybeBytecode_;
+ UniqueGeneratedSourceMap maybeSourceMap_;
+ CacheableCharsVector funcLabels_;
+ bool profilingEnabled_;
+
+ public:
+ Code(UniqueCodeSegment segment,
+ const Metadata& metadata,
+ const ShareableBytes* maybeBytecode);
+
+ CodeSegment& segment() { return *segment_; }
+ const CodeSegment& segment() const { return *segment_; }
+ const Metadata& metadata() const { return *metadata_; }
+
+ // Frame iterator support:
+
+ const CallSite* lookupCallSite(void* returnAddress) const;
+ const CodeRange* lookupRange(void* pc) const;
+ const MemoryAccess* lookupMemoryAccess(void* pc) const;
+
+ // Return the name associated with a given function index, or generate one
+ // if none was given by the module.
+
+ bool getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const;
+ JSAtom* getFuncAtom(JSContext* cx, uint32_t funcIndex) const;
+
+ // If the source bytecode was saved when this Code was constructed, this
+ // method will render the binary as text. Otherwise, a diagnostic string
+ // will be returned.
+
+ JSString* createText(JSContext* cx);
+ bool getLineOffsets(size_t lineno, Vector<uint32_t>& offsets) const;
+
+ // Each Code has a profiling mode that is updated to match the runtime's
+ // profiling mode when there are no other activations of the code live on
+ // the stack. Once in profiling mode, ProfilingFrameIterator can be used to
+ // asynchronously walk the stack. Otherwise, the ProfilingFrameIterator will
+ // skip any activations of this code.
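+    //
+    // For example (sketch; wasm::Compartment::ensureProfilingState in
+    // WasmCompartment.cpp drives this per instance):
+    //
+    //   if (!code.ensureProfilingState(cx, cx->spsProfiler.enabled()))
+    //       return false;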
+
+ MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
+ bool profilingEnabled() const { return profilingEnabled_; }
+ const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ size_t* code,
+ size_t* data) const;
+
+ WASM_DECLARE_SERIALIZABLE(Code);
+};
+
+typedef UniquePtr<Code> UniqueCode;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_code_h
diff --git a/js/src/wasm/WasmCompartment.cpp b/js/src/wasm/WasmCompartment.cpp
new file mode 100644
index 0000000000..46b2b23b23
--- /dev/null
+++ b/js/src/wasm/WasmCompartment.cpp
@@ -0,0 +1,180 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCompartment.h"
+
+#include "jscompartment.h"
+
+#include "wasm/WasmInstance.h"
+
+#include "vm/Debugger-inl.h"
+
+using namespace js;
+using namespace wasm;
+
+Compartment::Compartment(Zone* zone)
+ : mutatingInstances_(false),
+ activationCount_(0),
+ profilingEnabled_(false)
+{}
+
+Compartment::~Compartment()
+{
+ MOZ_ASSERT(activationCount_ == 0);
+ MOZ_ASSERT(instances_.empty());
+ MOZ_ASSERT(!mutatingInstances_);
+}
+
+struct InstanceComparator
+{
+ const Instance& target;
+ explicit InstanceComparator(const Instance& target) : target(target) {}
+
+ int operator()(const Instance* instance) const {
+ if (instance == &target)
+ return 0;
+ MOZ_ASSERT(!target.codeSegment().containsCodePC(instance->codeBase()));
+ MOZ_ASSERT(!instance->codeSegment().containsCodePC(target.codeBase()));
+ return target.codeBase() < instance->codeBase() ? -1 : 1;
+ }
+};
+
+void
+Compartment::trace(JSTracer* trc)
+{
+ // A WasmInstanceObject that was initially reachable when called can become
+ // unreachable while executing on the stack. Since wasm does not otherwise
+ // scan the stack during GC to identify live instances, we mark all instance
+ // objects live if there is any running wasm in the compartment.
+ if (activationCount_) {
+ for (Instance* i : instances_)
+ i->trace(trc);
+ }
+}
+
+bool
+Compartment::registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj)
+{
+ Instance& instance = instanceObj->instance();
+ MOZ_ASSERT(this == &instance.compartment()->wasm);
+
+ if (!instance.ensureProfilingState(cx, profilingEnabled_))
+ return false;
+
+ size_t index;
+ if (BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
+ MOZ_CRASH("duplicate registration");
+
+ {
+ AutoMutateInstances guard(*this);
+ if (!instances_.insert(instances_.begin() + index, &instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+
+ Debugger::onNewWasmInstance(cx, instanceObj);
+ return true;
+}
+
+void
+Compartment::unregisterInstance(Instance& instance)
+{
+ size_t index;
+ if (!BinarySearchIf(instances_, 0, instances_.length(), InstanceComparator(instance), &index))
+ return;
+
+ AutoMutateInstances guard(*this);
+ instances_.erase(instances_.begin() + index);
+}
+
+struct PCComparator
+{
+ const void* pc;
+ explicit PCComparator(const void* pc) : pc(pc) {}
+
+ int operator()(const Instance* instance) const {
+ if (instance->codeSegment().containsCodePC(pc))
+ return 0;
+ return pc < instance->codeBase() ? -1 : 1;
+ }
+};
+
+Code*
+Compartment::lookupCode(const void* pc) const
+{
+ Instance* instance = lookupInstanceDeprecated(pc);
+ return instance ? &instance->code() : nullptr;
+}
+
+Instance*
+Compartment::lookupInstanceDeprecated(const void* pc) const
+{
+ // lookupInstanceDeprecated can be called asynchronously from the interrupt
+ // signal handler. In that case, the signal handler is just asking whether
+ // the pc is in wasm code. If instances_ is being mutated then we can't be
+ // executing wasm code so returning nullptr is fine.
+ if (mutatingInstances_)
+ return nullptr;
+
+ size_t index;
+ if (!BinarySearchIf(instances_, 0, instances_.length(), PCComparator(pc), &index))
+ return nullptr;
+
+ return instances_[index];
+}
+
+bool
+Compartment::ensureProfilingState(JSContext* cx)
+{
+ bool newProfilingEnabled = cx->spsProfiler.enabled();
+ if (profilingEnabled_ == newProfilingEnabled)
+ return true;
+
+ // Since one Instance can call another Instance in the same compartment
+ // directly without calling through Instance::callExport(), when profiling
+ // is enabled, enable it for the entire compartment at once. It is only safe
+ // to enable profiling when the wasm is not on the stack, so delay enabling
+ // profiling until there are no live WasmActivations in this compartment.
+
+ if (activationCount_ > 0)
+ return true;
+
+ for (Instance* instance : instances_) {
+ if (!instance->ensureProfilingState(cx, newProfilingEnabled))
+ return false;
+ }
+
+ profilingEnabled_ = newProfilingEnabled;
+ return true;
+}
+
+bool
+Compartment::profilingEnabled() const
+{
+ // Profiling can asynchronously interrupt the mutation of the instances_
+ // vector which is used by lookupCode() during stack-walking. To handle
+ // this rare case, disable profiling during mutation.
+ return profilingEnabled_ && !mutatingInstances_;
+}
+
+void
+Compartment::addSizeOfExcludingThis(MallocSizeOf mallocSizeOf, size_t* compartmentTables)
+{
+ *compartmentTables += instances_.sizeOfExcludingThis(mallocSizeOf);
+}
diff --git a/js/src/wasm/WasmCompartment.h b/js/src/wasm/WasmCompartment.h
new file mode 100644
index 0000000000..dcdd75d0c3
--- /dev/null
+++ b/js/src/wasm/WasmCompartment.h
@@ -0,0 +1,107 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_compartment_h
+#define wasm_compartment_h
+
+#include "wasm/WasmJS.h"
+
+namespace js {
+
+class WasmActivation;
+
+namespace wasm {
+
+class Code;
+typedef Vector<Instance*, 0, SystemAllocPolicy> InstanceVector;
+
+// wasm::Compartment lives in JSCompartment and contains the wasm-related
+// per-compartment state. wasm::Compartment tracks every live instance in the
+// compartment and must be notified, via registerInstance(), of any new
+// WasmInstanceObject.
+
+class Compartment
+{
+ InstanceVector instances_;
+ volatile bool mutatingInstances_;
+ size_t activationCount_;
+ bool profilingEnabled_;
+
+ friend class js::WasmActivation;
+
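+    // RAII guard that flags instances_ as being mutated so that readers which
+    // can run asynchronously (e.g. the interrupt signal handler) back off.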
+ struct AutoMutateInstances {
+        Compartment& c;
+ explicit AutoMutateInstances(Compartment& c) : c(c) {
+ MOZ_ASSERT(!c.mutatingInstances_);
+ c.mutatingInstances_ = true;
+ }
+ ~AutoMutateInstances() {
+ MOZ_ASSERT(c.mutatingInstances_);
+ c.mutatingInstances_ = false;
+ }
+ };
+
+ public:
+ explicit Compartment(Zone* zone);
+ ~Compartment();
+ void trace(JSTracer* trc);
+
+ // Before a WasmInstanceObject can be considered fully constructed and
+ // valid, it must be registered with the Compartment. If this method fails,
+ // an error has been reported and the instance object must be abandoned.
+ // After a successful registration, an Instance must call
+ // unregisterInstance() before being destroyed.
+
+ bool registerInstance(JSContext* cx, HandleWasmInstanceObject instanceObj);
+ void unregisterInstance(Instance& instance);
+
+ // Return a vector of all live instances in the compartment. The lifetime of
+ // these Instances is determined by their owning WasmInstanceObject.
+ // Note that accessing instances()[i]->object() triggers a read barrier
+ // since instances() is effectively a weak list.
+
+ const InstanceVector& instances() const { return instances_; }
+
+    // This method returns the wasm::Code containing the given pc, if any
+ // exists in the compartment.
+
+ Code* lookupCode(const void* pc) const;
+
+    // Currently, there is one Code per Instance, so it is also possible to
+    // look up an Instance given a pc. However, the goal is to share one Code
+    // between multiple Instances, at which point this method will be removed.
+
+ Instance* lookupInstanceDeprecated(const void* pc) const;
+
+ // To ensure profiling is enabled (so that wasm frames are not lost in
+ // profiling callstacks), ensureProfilingState must be called before calling
+ // the first wasm function in a compartment.
+
+ bool ensureProfilingState(JSContext* cx);
+ bool profilingEnabled() const;
+
+ // about:memory reporting
+
+ void addSizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf, size_t* compartmentTables);
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_compartment_h
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
new file mode 100644
index 0000000000..890a480c57
--- /dev/null
+++ b/js/src/wasm/WasmCompile.cpp
@@ -0,0 +1,967 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmCompile.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "jsprf.h"
+
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmBinaryIterator.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmSignalHandlers.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::IsNaN;
+
+namespace {
+
+struct ValidatingPolicy : OpIterPolicy
+{
+ // Validation is what we're all about here.
+ static const bool Validate = true;
+};
+
+typedef OpIter<ValidatingPolicy> ValidatingOpIter;
+
+class FunctionDecoder
+{
+ const ModuleGenerator& mg_;
+ const ValTypeVector& locals_;
+ ValidatingOpIter iter_;
+
+ public:
+ FunctionDecoder(const ModuleGenerator& mg, const ValTypeVector& locals, Decoder& d)
+ : mg_(mg), locals_(locals), iter_(d)
+ {}
+ const ModuleGenerator& mg() const { return mg_; }
+ ValidatingOpIter& iter() { return iter_; }
+ const ValTypeVector& locals() const { return locals_; }
+
+ bool checkHasMemory() {
+ if (!mg().usesMemory())
+ return iter().fail("can't touch memory without memory");
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+static bool
+DecodeCallArgs(FunctionDecoder& f, const Sig& sig)
+{
+ const ValTypeVector& args = sig.args();
+ uint32_t numArgs = args.length();
+ for (size_t i = 0; i < numArgs; ++i) {
+ ValType argType = args[i];
+ if (!f.iter().readCallArg(argType, numArgs, i, nullptr))
+ return false;
+ }
+
+ return f.iter().readCallArgsEnd(numArgs);
+}
+
+static bool
+DecodeCallReturn(FunctionDecoder& f, const Sig& sig)
+{
+ return f.iter().readCallReturn(sig.ret());
+}
+
+static bool
+DecodeCall(FunctionDecoder& f)
+{
+ uint32_t funcIndex;
+ if (!f.iter().readCall(&funcIndex))
+ return false;
+
+ if (funcIndex >= f.mg().numFuncs())
+ return f.iter().fail("callee index out of range");
+
+ if (!f.iter().inReachableCode())
+ return true;
+
+ const Sig* sig = &f.mg().funcSig(funcIndex);
+
+ return DecodeCallArgs(f, *sig) &&
+ DecodeCallReturn(f, *sig);
+}
+
+static bool
+DecodeCallIndirect(FunctionDecoder& f)
+{
+ if (!f.mg().numTables())
+ return f.iter().fail("can't call_indirect without a table");
+
+ uint32_t sigIndex;
+ if (!f.iter().readCallIndirect(&sigIndex, nullptr))
+ return false;
+
+ if (sigIndex >= f.mg().numSigs())
+ return f.iter().fail("signature index out of range");
+
+ if (!f.iter().inReachableCode())
+ return true;
+
+ const Sig& sig = f.mg().sig(sigIndex);
+ if (!DecodeCallArgs(f, sig))
+ return false;
+
+ return DecodeCallReturn(f, sig);
+}
+
+static bool
+DecodeBrTable(FunctionDecoder& f)
+{
+ uint32_t tableLength;
+ ExprType type = ExprType::Limit;
+ if (!f.iter().readBrTable(&tableLength, &type, nullptr, nullptr))
+ return false;
+
+ uint32_t depth;
+ for (size_t i = 0, e = tableLength; i < e; ++i) {
+ if (!f.iter().readBrTableEntry(&type, nullptr, &depth))
+ return false;
+ }
+
+ // Read the default label.
+ return f.iter().readBrTableDefault(&type, nullptr, &depth);
+}
+
+static bool
+DecodeFunctionBodyExprs(FunctionDecoder& f)
+{
+#define CHECK(c) if (!(c)) return false; break
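+// Note that CHECK both returns on validation failure and breaks out of the
+// switch below, so runs of adjacent opcode cases share a single read call.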
+
+ while (true) {
+ uint16_t op;
+ if (!f.iter().readOp(&op))
+ return false;
+
+ switch (op) {
+ case uint16_t(Op::End):
+ if (!f.iter().readEnd(nullptr, nullptr, nullptr))
+ return false;
+ if (f.iter().controlStackEmpty())
+ return true;
+ break;
+ case uint16_t(Op::Nop):
+ CHECK(f.iter().readNop());
+ case uint16_t(Op::Drop):
+ CHECK(f.iter().readDrop());
+ case uint16_t(Op::Call):
+ CHECK(DecodeCall(f));
+ case uint16_t(Op::CallIndirect):
+ CHECK(DecodeCallIndirect(f));
+ case uint16_t(Op::I32Const):
+ CHECK(f.iter().readI32Const(nullptr));
+ case uint16_t(Op::I64Const):
+ CHECK(f.iter().readI64Const(nullptr));
+ case uint16_t(Op::F32Const):
+ CHECK(f.iter().readF32Const(nullptr));
+ case uint16_t(Op::F64Const):
+ CHECK(f.iter().readF64Const(nullptr));
+ case uint16_t(Op::GetLocal):
+ CHECK(f.iter().readGetLocal(f.locals(), nullptr));
+ case uint16_t(Op::SetLocal):
+ CHECK(f.iter().readSetLocal(f.locals(), nullptr, nullptr));
+ case uint16_t(Op::TeeLocal):
+ CHECK(f.iter().readTeeLocal(f.locals(), nullptr, nullptr));
+ case uint16_t(Op::GetGlobal):
+ CHECK(f.iter().readGetGlobal(f.mg().globals(), nullptr));
+ case uint16_t(Op::SetGlobal):
+ CHECK(f.iter().readSetGlobal(f.mg().globals(), nullptr, nullptr));
+ case uint16_t(Op::Select):
+ CHECK(f.iter().readSelect(nullptr, nullptr, nullptr, nullptr));
+ case uint16_t(Op::Block):
+ CHECK(f.iter().readBlock());
+ case uint16_t(Op::Loop):
+ CHECK(f.iter().readLoop());
+ case uint16_t(Op::If):
+ CHECK(f.iter().readIf(nullptr));
+ case uint16_t(Op::Else):
+ CHECK(f.iter().readElse(nullptr, nullptr));
+ case uint16_t(Op::I32Clz):
+ case uint16_t(Op::I32Ctz):
+ case uint16_t(Op::I32Popcnt):
+ CHECK(f.iter().readUnary(ValType::I32, nullptr));
+ case uint16_t(Op::I64Clz):
+ case uint16_t(Op::I64Ctz):
+ case uint16_t(Op::I64Popcnt):
+ CHECK(f.iter().readUnary(ValType::I64, nullptr));
+ case uint16_t(Op::F32Abs):
+ case uint16_t(Op::F32Neg):
+ case uint16_t(Op::F32Ceil):
+ case uint16_t(Op::F32Floor):
+ case uint16_t(Op::F32Sqrt):
+ case uint16_t(Op::F32Trunc):
+ case uint16_t(Op::F32Nearest):
+ CHECK(f.iter().readUnary(ValType::F32, nullptr));
+ case uint16_t(Op::F64Abs):
+ case uint16_t(Op::F64Neg):
+ case uint16_t(Op::F64Ceil):
+ case uint16_t(Op::F64Floor):
+ case uint16_t(Op::F64Sqrt):
+ case uint16_t(Op::F64Trunc):
+ case uint16_t(Op::F64Nearest):
+ CHECK(f.iter().readUnary(ValType::F64, nullptr));
+ case uint16_t(Op::I32Add):
+ case uint16_t(Op::I32Sub):
+ case uint16_t(Op::I32Mul):
+ case uint16_t(Op::I32DivS):
+ case uint16_t(Op::I32DivU):
+ case uint16_t(Op::I32RemS):
+ case uint16_t(Op::I32RemU):
+ case uint16_t(Op::I32And):
+ case uint16_t(Op::I32Or):
+ case uint16_t(Op::I32Xor):
+ case uint16_t(Op::I32Shl):
+ case uint16_t(Op::I32ShrS):
+ case uint16_t(Op::I32ShrU):
+ case uint16_t(Op::I32Rotl):
+ case uint16_t(Op::I32Rotr):
+ CHECK(f.iter().readBinary(ValType::I32, nullptr, nullptr));
+ case uint16_t(Op::I64Add):
+ case uint16_t(Op::I64Sub):
+ case uint16_t(Op::I64Mul):
+ case uint16_t(Op::I64DivS):
+ case uint16_t(Op::I64DivU):
+ case uint16_t(Op::I64RemS):
+ case uint16_t(Op::I64RemU):
+ case uint16_t(Op::I64And):
+ case uint16_t(Op::I64Or):
+ case uint16_t(Op::I64Xor):
+ case uint16_t(Op::I64Shl):
+ case uint16_t(Op::I64ShrS):
+ case uint16_t(Op::I64ShrU):
+ case uint16_t(Op::I64Rotl):
+ case uint16_t(Op::I64Rotr):
+ CHECK(f.iter().readBinary(ValType::I64, nullptr, nullptr));
+ case uint16_t(Op::F32Add):
+ case uint16_t(Op::F32Sub):
+ case uint16_t(Op::F32Mul):
+ case uint16_t(Op::F32Div):
+ case uint16_t(Op::F32Min):
+ case uint16_t(Op::F32Max):
+ case uint16_t(Op::F32CopySign):
+ CHECK(f.iter().readBinary(ValType::F32, nullptr, nullptr));
+ case uint16_t(Op::F64Add):
+ case uint16_t(Op::F64Sub):
+ case uint16_t(Op::F64Mul):
+ case uint16_t(Op::F64Div):
+ case uint16_t(Op::F64Min):
+ case uint16_t(Op::F64Max):
+ case uint16_t(Op::F64CopySign):
+ CHECK(f.iter().readBinary(ValType::F64, nullptr, nullptr));
+ case uint16_t(Op::I32Eq):
+ case uint16_t(Op::I32Ne):
+ case uint16_t(Op::I32LtS):
+ case uint16_t(Op::I32LtU):
+ case uint16_t(Op::I32LeS):
+ case uint16_t(Op::I32LeU):
+ case uint16_t(Op::I32GtS):
+ case uint16_t(Op::I32GtU):
+ case uint16_t(Op::I32GeS):
+ case uint16_t(Op::I32GeU):
+ CHECK(f.iter().readComparison(ValType::I32, nullptr, nullptr));
+ case uint16_t(Op::I64Eq):
+ case uint16_t(Op::I64Ne):
+ case uint16_t(Op::I64LtS):
+ case uint16_t(Op::I64LtU):
+ case uint16_t(Op::I64LeS):
+ case uint16_t(Op::I64LeU):
+ case uint16_t(Op::I64GtS):
+ case uint16_t(Op::I64GtU):
+ case uint16_t(Op::I64GeS):
+ case uint16_t(Op::I64GeU):
+ CHECK(f.iter().readComparison(ValType::I64, nullptr, nullptr));
+ case uint16_t(Op::F32Eq):
+ case uint16_t(Op::F32Ne):
+ case uint16_t(Op::F32Lt):
+ case uint16_t(Op::F32Le):
+ case uint16_t(Op::F32Gt):
+ case uint16_t(Op::F32Ge):
+ CHECK(f.iter().readComparison(ValType::F32, nullptr, nullptr));
+ case uint16_t(Op::F64Eq):
+ case uint16_t(Op::F64Ne):
+ case uint16_t(Op::F64Lt):
+ case uint16_t(Op::F64Le):
+ case uint16_t(Op::F64Gt):
+ case uint16_t(Op::F64Ge):
+ CHECK(f.iter().readComparison(ValType::F64, nullptr, nullptr));
+ case uint16_t(Op::I32Eqz):
+ CHECK(f.iter().readConversion(ValType::I32, ValType::I32, nullptr));
+ case uint16_t(Op::I64Eqz):
+ case uint16_t(Op::I32WrapI64):
+ CHECK(f.iter().readConversion(ValType::I64, ValType::I32, nullptr));
+ case uint16_t(Op::I32TruncSF32):
+ case uint16_t(Op::I32TruncUF32):
+ case uint16_t(Op::I32ReinterpretF32):
+ CHECK(f.iter().readConversion(ValType::F32, ValType::I32, nullptr));
+ case uint16_t(Op::I32TruncSF64):
+ case uint16_t(Op::I32TruncUF64):
+ CHECK(f.iter().readConversion(ValType::F64, ValType::I32, nullptr));
+ case uint16_t(Op::I64ExtendSI32):
+ case uint16_t(Op::I64ExtendUI32):
+ CHECK(f.iter().readConversion(ValType::I32, ValType::I64, nullptr));
+ case uint16_t(Op::I64TruncSF32):
+ case uint16_t(Op::I64TruncUF32):
+ CHECK(f.iter().readConversion(ValType::F32, ValType::I64, nullptr));
+ case uint16_t(Op::I64TruncSF64):
+ case uint16_t(Op::I64TruncUF64):
+ case uint16_t(Op::I64ReinterpretF64):
+ CHECK(f.iter().readConversion(ValType::F64, ValType::I64, nullptr));
+ case uint16_t(Op::F32ConvertSI32):
+ case uint16_t(Op::F32ConvertUI32):
+ case uint16_t(Op::F32ReinterpretI32):
+ CHECK(f.iter().readConversion(ValType::I32, ValType::F32, nullptr));
+ case uint16_t(Op::F32ConvertSI64):
+ case uint16_t(Op::F32ConvertUI64):
+ CHECK(f.iter().readConversion(ValType::I64, ValType::F32, nullptr));
+ case uint16_t(Op::F32DemoteF64):
+ CHECK(f.iter().readConversion(ValType::F64, ValType::F32, nullptr));
+ case uint16_t(Op::F64ConvertSI32):
+ case uint16_t(Op::F64ConvertUI32):
+ CHECK(f.iter().readConversion(ValType::I32, ValType::F64, nullptr));
+ case uint16_t(Op::F64ConvertSI64):
+ case uint16_t(Op::F64ConvertUI64):
+ case uint16_t(Op::F64ReinterpretI64):
+ CHECK(f.iter().readConversion(ValType::I64, ValType::F64, nullptr));
+ case uint16_t(Op::F64PromoteF32):
+ CHECK(f.iter().readConversion(ValType::F32, ValType::F64, nullptr));
+ case uint16_t(Op::I32Load8S):
+ case uint16_t(Op::I32Load8U):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I32, 1, nullptr));
+ case uint16_t(Op::I32Load16S):
+ case uint16_t(Op::I32Load16U):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I32, 2, nullptr));
+ case uint16_t(Op::I32Load):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I32, 4, nullptr));
+ case uint16_t(Op::I64Load8S):
+ case uint16_t(Op::I64Load8U):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I64, 1, nullptr));
+ case uint16_t(Op::I64Load16S):
+ case uint16_t(Op::I64Load16U):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I64, 2, nullptr));
+ case uint16_t(Op::I64Load32S):
+ case uint16_t(Op::I64Load32U):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I64, 4, nullptr));
+ case uint16_t(Op::I64Load):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::I64, 8, nullptr));
+ case uint16_t(Op::F32Load):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::F32, 4, nullptr));
+ case uint16_t(Op::F64Load):
+ CHECK(f.checkHasMemory() && f.iter().readLoad(ValType::F64, 8, nullptr));
+ case uint16_t(Op::I32Store8):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I32, 1, nullptr, nullptr));
+ case uint16_t(Op::I32Store16):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I32, 2, nullptr, nullptr));
+ case uint16_t(Op::I32Store):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I32, 4, nullptr, nullptr));
+ case uint16_t(Op::I64Store8):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I64, 1, nullptr, nullptr));
+ case uint16_t(Op::I64Store16):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I64, 2, nullptr, nullptr));
+ case uint16_t(Op::I64Store32):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I64, 4, nullptr, nullptr));
+ case uint16_t(Op::I64Store):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::I64, 8, nullptr, nullptr));
+ case uint16_t(Op::F32Store):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::F32, 4, nullptr, nullptr));
+ case uint16_t(Op::F64Store):
+ CHECK(f.checkHasMemory() && f.iter().readStore(ValType::F64, 8, nullptr, nullptr));
+ case uint16_t(Op::GrowMemory):
+ CHECK(f.checkHasMemory() && f.iter().readGrowMemory(nullptr));
+ case uint16_t(Op::CurrentMemory):
+ CHECK(f.checkHasMemory() && f.iter().readCurrentMemory());
+ case uint16_t(Op::Br):
+ CHECK(f.iter().readBr(nullptr, nullptr, nullptr));
+ case uint16_t(Op::BrIf):
+ CHECK(f.iter().readBrIf(nullptr, nullptr, nullptr, nullptr));
+ case uint16_t(Op::BrTable):
+ CHECK(DecodeBrTable(f));
+ case uint16_t(Op::Return):
+ CHECK(f.iter().readReturn(nullptr));
+ case uint16_t(Op::Unreachable):
+ CHECK(f.iter().readUnreachable());
+ default:
+ return f.iter().unrecognizedOpcode(op);
+ }
+ }
+
+ MOZ_CRASH("unreachable");
+
+#undef CHECK
+}
+
+static bool
+DecodeImportSection(Decoder& d, ModuleGeneratorData* init, ImportVector* imports)
+{
+ Maybe<Limits> memory;
+ Uint32Vector funcSigIndices;
+ if (!DecodeImportSection(d, init->sigs, &funcSigIndices, &init->globals, &init->tables, &memory,
+ imports))
+ return false;
+
+ for (uint32_t sigIndex : funcSigIndices) {
+ if (!init->funcSigs.append(&init->sigs[sigIndex]))
+ return false;
+ }
+
+ // The global data offsets will be filled in by ModuleGenerator::init.
+ if (!init->funcImportGlobalDataOffsets.resize(init->funcSigs.length()))
+ return false;
+
+ if (memory) {
+ init->memoryUsage = MemoryUsage::Unshared;
+ init->minMemoryLength = memory->initial;
+ init->maxMemoryLength = memory->maximum;
+ }
+
+ return true;
+}
+
+static bool
+DecodeFunctionSection(Decoder& d, ModuleGeneratorData* init)
+{
+ Uint32Vector funcSigIndexes;
+ if (!DecodeFunctionSection(d, init->sigs, init->funcSigs.length(), &funcSigIndexes))
+ return false;
+
+ if (!init->funcSigs.reserve(init->funcSigs.length() + funcSigIndexes.length()))
+ return false;
+
+ for (uint32_t sigIndex : funcSigIndexes)
+ init->funcSigs.infallibleAppend(&init->sigs[sigIndex]);
+
+ return true;
+}
+
+static bool
+DecodeTableSection(Decoder& d, ModuleGeneratorData* init)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Table, &sectionStart, &sectionSize, "table"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numTables;
+ if (!d.readVarU32(&numTables))
+ return d.fail("failed to read number of tables");
+
+ if (numTables != 1)
+ return d.fail("the number of tables must be exactly one");
+
+ if (!DecodeTableLimits(d, &init->tables))
+ return false;
+
+ if (!d.finishSection(sectionStart, sectionSize, "table"))
+ return false;
+
+ return true;
+}
+
+static bool
+DecodeMemorySection(Decoder& d, ModuleGeneratorData* init)
+{
+ bool present;
+ Limits memory;
+ if (!DecodeMemorySection(d, UsesMemory(init->memoryUsage), &memory, &present))
+ return false;
+
+ if (present) {
+ init->memoryUsage = MemoryUsage::Unshared;
+ init->minMemoryLength = memory.initial;
+ init->maxMemoryLength = memory.maximum;
+ }
+
+ return true;
+}
+
+static bool
+DecodeGlobalSection(Decoder& d, ModuleGeneratorData* init)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Global, &sectionStart, &sectionSize, "global"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numDefs;
+ if (!d.readVarU32(&numDefs))
+ return d.fail("expected number of globals");
+
+ CheckedInt<uint32_t> numGlobals = init->globals.length();
+ numGlobals += numDefs;
+ if (!numGlobals.isValid() || numGlobals.value() > MaxGlobals)
+ return d.fail("too many globals");
+
+ for (uint32_t i = 0; i < numDefs; i++) {
+ ValType type;
+ bool isMutable;
+ if (!DecodeGlobalType(d, &type, &isMutable))
+ return false;
+
+ InitExpr initializer;
+ if (!DecodeInitializerExpression(d, init->globals, type, &initializer))
+ return false;
+
+ if (!init->globals.append(GlobalDesc(initializer, isMutable)))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "global"))
+ return false;
+
+ return true;
+}
+
+typedef HashSet<const char*, CStringHasher, SystemAllocPolicy> CStringSet;
+
+static UniqueChars
+DecodeExportName(Decoder& d, CStringSet* dupSet)
+{
+ UniqueChars exportName = DecodeName(d);
+ if (!exportName) {
+ d.fail("expected valid export name");
+ return nullptr;
+ }
+
+ CStringSet::AddPtr p = dupSet->lookupForAdd(exportName.get());
+ if (p) {
+ d.fail("duplicate export");
+ return nullptr;
+ }
+
+ if (!dupSet->add(p, exportName.get()))
+ return nullptr;
+
+ return Move(exportName);
+}
+
+static bool
+DecodeExport(Decoder& d, ModuleGenerator& mg, CStringSet* dupSet)
+{
+ UniqueChars fieldName = DecodeExportName(d, dupSet);
+ if (!fieldName)
+ return false;
+
+ uint32_t exportKind;
+ if (!d.readVarU32(&exportKind))
+ return d.fail("failed to read export kind");
+
+ switch (DefinitionKind(exportKind)) {
+ case DefinitionKind::Function: {
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex))
+ return d.fail("expected export internal index");
+
+ if (funcIndex >= mg.numFuncs())
+ return d.fail("exported function index out of bounds");
+
+ return mg.addFuncExport(Move(fieldName), funcIndex);
+ }
+ case DefinitionKind::Table: {
+ uint32_t tableIndex;
+ if (!d.readVarU32(&tableIndex))
+ return d.fail("expected table index");
+
+ if (tableIndex >= mg.tables().length())
+ return d.fail("exported table index out of bounds");
+
+ return mg.addTableExport(Move(fieldName));
+ }
+ case DefinitionKind::Memory: {
+ uint32_t memoryIndex;
+ if (!d.readVarU32(&memoryIndex))
+ return d.fail("expected memory index");
+
+ if (memoryIndex > 0 || !mg.usesMemory())
+ return d.fail("exported memory index out of bounds");
+
+ return mg.addMemoryExport(Move(fieldName));
+ }
+ case DefinitionKind::Global: {
+ uint32_t globalIndex;
+ if (!d.readVarU32(&globalIndex))
+ return d.fail("expected global index");
+
+ if (globalIndex >= mg.globals().length())
+ return d.fail("exported global index out of bounds");
+
+ const GlobalDesc& global = mg.globals()[globalIndex];
+ if (!GlobalIsJSCompatible(d, global.type(), global.isMutable()))
+ return false;
+
+ return mg.addGlobalExport(Move(fieldName), globalIndex);
+ }
+ default:
+ return d.fail("unexpected export kind");
+ }
+
+ MOZ_CRASH("unreachable");
+}
+
+static bool
+DecodeExportSection(Decoder& d, ModuleGenerator& mg)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Export, &sectionStart, &sectionSize, "export"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ CStringSet dupSet;
+ if (!dupSet.init())
+ return false;
+
+ uint32_t numExports;
+ if (!d.readVarU32(&numExports))
+ return d.fail("failed to read number of exports");
+
+ if (numExports > MaxExports)
+ return d.fail("too many exports");
+
+ for (uint32_t i = 0; i < numExports; i++) {
+ if (!DecodeExport(d, mg, &dupSet))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "export"))
+ return false;
+
+ return true;
+}
+
+static bool
+DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
+{
+ uint32_t bodySize;
+ if (!d.readVarU32(&bodySize))
+ return d.fail("expected number of function body bytes");
+
+ if (d.bytesRemain() < bodySize)
+ return d.fail("function body length too big");
+
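+    // Remember where the body starts so that, after validation, we can check
+    // that exactly bodySize bytes were consumed and copy the raw body bytes
+    // into the FunctionGenerator for compilation.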
+ const uint8_t* bodyBegin = d.currentPosition();
+ const size_t offsetInModule = d.currentOffset();
+
+ FunctionGenerator fg;
+ if (!mg.startFuncDef(offsetInModule, &fg))
+ return false;
+
+ ValTypeVector locals;
+ const Sig& sig = mg.funcSig(funcIndex);
+ if (!locals.appendAll(sig.args()))
+ return false;
+
+ if (!DecodeLocalEntries(d, ModuleKind::Wasm, &locals))
+ return false;
+
+ FunctionDecoder f(mg, locals, d);
+
+ if (!f.iter().readFunctionStart(sig.ret()))
+ return false;
+
+ if (!DecodeFunctionBodyExprs(f))
+ return false;
+
+ if (!f.iter().readFunctionEnd())
+ return false;
+
+ if (d.currentPosition() != bodyBegin + bodySize)
+ return d.fail("function body length mismatch");
+
+ if (!fg.bytes().resize(bodySize))
+ return false;
+
+ memcpy(fg.bytes().begin(), bodyBegin, bodySize);
+
+ return mg.finishFuncDef(funcIndex, &fg);
+}
+
+static bool
+DecodeStartSection(Decoder& d, ModuleGenerator& mg)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Start, &sectionStart, &sectionSize, "start"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t funcIndex;
+ if (!d.readVarU32(&funcIndex))
+ return d.fail("failed to read start func index");
+
+ if (funcIndex >= mg.numFuncs())
+ return d.fail("unknown start function");
+
+ const Sig& sig = mg.funcSig(funcIndex);
+ if (!IsVoid(sig.ret()))
+ return d.fail("start function must not return anything");
+
+ if (sig.args().length())
+ return d.fail("start function must be nullary");
+
+ if (!mg.setStartFunction(funcIndex))
+ return false;
+
+ if (!d.finishSection(sectionStart, sectionSize, "start"))
+ return false;
+
+ return true;
+}
+
+static bool
+DecodeCodeSection(Decoder& d, ModuleGenerator& mg)
+{
+ if (!mg.startFuncDefs())
+ return false;
+
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Code, &sectionStart, &sectionSize, "code"))
+ return false;
+
+ if (sectionStart == Decoder::NotStarted) {
+ if (mg.numFuncDefs() != 0)
+ return d.fail("expected function bodies");
+
+ return mg.finishFuncDefs();
+ }
+
+ uint32_t numFuncDefs;
+ if (!d.readVarU32(&numFuncDefs))
+ return d.fail("expected function body count");
+
+ if (numFuncDefs != mg.numFuncDefs())
+ return d.fail("function body count does not match function signature count");
+
+ for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
+ if (!DecodeFunctionBody(d, mg, mg.numFuncImports() + funcDefIndex))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "code"))
+ return false;
+
+ return mg.finishFuncDefs();
+}
+
+static bool
+DecodeElemSection(Decoder& d, ModuleGenerator& mg)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startSection(SectionId::Elem, &sectionStart, &sectionSize, "elem"))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ uint32_t numSegments;
+ if (!d.readVarU32(&numSegments))
+ return d.fail("failed to read number of elem segments");
+
+ if (numSegments > MaxElemSegments)
+ return d.fail("too many elem segments");
+
+ for (uint32_t i = 0; i < numSegments; i++) {
+ uint32_t tableIndex;
+ if (!d.readVarU32(&tableIndex))
+ return d.fail("expected table index");
+
+ MOZ_ASSERT(mg.tables().length() <= 1);
+ if (tableIndex >= mg.tables().length())
+ return d.fail("table index out of range");
+
+ InitExpr offset;
+ if (!DecodeInitializerExpression(d, mg.globals(), ValType::I32, &offset))
+ return false;
+
+ uint32_t numElems;
+ if (!d.readVarU32(&numElems))
+ return d.fail("expected segment size");
+
+ Uint32Vector elemFuncIndices;
+ if (!elemFuncIndices.resize(numElems))
+ return false;
+
+ for (uint32_t i = 0; i < numElems; i++) {
+ if (!d.readVarU32(&elemFuncIndices[i]))
+ return d.fail("failed to read element function index");
+ if (elemFuncIndices[i] >= mg.numFuncs())
+ return d.fail("table element out of range");
+ }
+
+ if (!mg.addElemSegment(offset, Move(elemFuncIndices)))
+ return false;
+ }
+
+ if (!d.finishSection(sectionStart, sectionSize, "elem"))
+ return false;
+
+ return true;
+}
+
+static void
+MaybeDecodeNameSectionBody(Decoder& d, ModuleGenerator& mg)
+{
+ // For simplicity, ignore all failures, even OOM. Failure will simply result
+ // in the names section not being included for this module.
+
+ uint32_t numFuncNames;
+ if (!d.readVarU32(&numFuncNames))
+ return;
+
+ if (numFuncNames > MaxFuncs)
+ return;
+
+ NameInBytecodeVector funcNames;
+ if (!funcNames.resize(numFuncNames))
+ return;
+
+ for (uint32_t i = 0; i < numFuncNames; i++) {
+ uint32_t numBytes;
+ if (!d.readVarU32(&numBytes))
+ return;
+
+ NameInBytecode name;
+ name.offset = d.currentOffset();
+ name.length = numBytes;
+ funcNames[i] = name;
+
+ if (!d.readBytes(numBytes))
+ return;
+
+ // Skip local names for a function.
+ uint32_t numLocals;
+ if (!d.readVarU32(&numLocals))
+ return;
+ for (uint32_t j = 0; j < numLocals; j++) {
+ uint32_t numBytes;
+ if (!d.readVarU32(&numBytes))
+ return;
+ if (!d.readBytes(numBytes))
+ return;
+ }
+ }
+
+ mg.setFuncNames(Move(funcNames));
+}
+
+static bool
+DecodeDataSection(Decoder& d, ModuleGenerator& mg)
+{
+ DataSegmentVector dataSegments;
+ if (!DecodeDataSection(d, mg.usesMemory(), mg.minMemoryLength(), mg.globals(), &dataSegments))
+ return false;
+
+ mg.setDataSegments(Move(dataSegments));
+ return true;
+}
+
+static bool
+DecodeNameSection(Decoder& d, ModuleGenerator& mg)
+{
+ uint32_t sectionStart, sectionSize;
+ if (!d.startUserDefinedSection(NameSectionName, &sectionStart, &sectionSize))
+ return false;
+ if (sectionStart == Decoder::NotStarted)
+ return true;
+
+ // Once started, user-defined sections do not report validation errors.
+
+ MaybeDecodeNameSectionBody(d, mg);
+
+ d.finishUserDefinedSection(sectionStart, sectionSize);
+ return true;
+}
+
+bool
+CompileArgs::initFromContext(ExclusiveContext* cx, ScriptedCaller&& scriptedCaller)
+{
+ alwaysBaseline = cx->options().wasmAlwaysBaseline();
+ this->scriptedCaller = Move(scriptedCaller);
+ return assumptions.initBuildIdFromContext(cx);
+}
+
+SharedModule
+wasm::Compile(const ShareableBytes& bytecode, const CompileArgs& args, UniqueChars* error)
+{
+ MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+
+ Decoder d(bytecode.begin(), bytecode.end(), error);
+
+ auto init = js::MakeUnique<ModuleGeneratorData>();
+ if (!init)
+ return nullptr;
+
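+    // Decode the module's sections in the order defined by the binary format.
+    // Most Decode*Section helpers succeed without effect when their optional
+    // section is absent.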
+ if (!DecodePreamble(d))
+ return nullptr;
+
+ if (!DecodeTypeSection(d, &init->sigs))
+ return nullptr;
+
+ ImportVector imports;
+ if (!::DecodeImportSection(d, init.get(), &imports))
+ return nullptr;
+
+ if (!::DecodeFunctionSection(d, init.get()))
+ return nullptr;
+
+ if (!DecodeTableSection(d, init.get()))
+ return nullptr;
+
+ if (!::DecodeMemorySection(d, init.get()))
+ return nullptr;
+
+ if (!DecodeGlobalSection(d, init.get()))
+ return nullptr;
+
+ ModuleGenerator mg(Move(imports));
+ if (!mg.init(Move(init), args))
+ return nullptr;
+
+ if (!DecodeExportSection(d, mg))
+ return nullptr;
+
+ if (!DecodeStartSection(d, mg))
+ return nullptr;
+
+ if (!DecodeElemSection(d, mg))
+ return nullptr;
+
+ if (!DecodeCodeSection(d, mg))
+ return nullptr;
+
+ if (!::DecodeDataSection(d, mg))
+ return nullptr;
+
+ if (!DecodeNameSection(d, mg))
+ return nullptr;
+
+ if (!DecodeUnknownSections(d))
+ return nullptr;
+
+ MOZ_ASSERT(!*error, "unreported error in decoding");
+
+ return mg.finish(bytecode);
+}
diff --git a/js/src/wasm/WasmCompile.h b/js/src/wasm/WasmCompile.h
new file mode 100644
index 0000000000..87f2b16c67
--- /dev/null
+++ b/js/src/wasm/WasmCompile.h
@@ -0,0 +1,68 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_compile_h
+#define wasm_compile_h
+
+#include "wasm/WasmModule.h"
+
+namespace js {
+namespace wasm {
+
+// Describes the JS scripted caller of a request to compile a wasm module.
+
+struct ScriptedCaller
+{
+ UniqueChars filename;
+ unsigned line;
+ unsigned column;
+};
+
+// Describes all the parameters that control wasm compilation.
+
+struct CompileArgs
+{
+ Assumptions assumptions;
+ ScriptedCaller scriptedCaller;
+ bool alwaysBaseline;
+
+ CompileArgs(Assumptions&& assumptions, ScriptedCaller&& scriptedCaller)
+ : assumptions(Move(assumptions)),
+ scriptedCaller(Move(scriptedCaller)),
+ alwaysBaseline(false)
+ {}
+
+ // If CompileArgs is constructed without arguments, initFromContext() must
+ // be called to complete initialization.
+ CompileArgs() = default;
+ bool initFromContext(ExclusiveContext* cx, ScriptedCaller&& scriptedCaller);
+};
+
+// Compile the given WebAssembly bytecode with the given arguments into a
+// wasm::Module. On success, the Module is returned. On failure, the returned
+// SharedModule pointer is null and either:
+// - *error points to a string description of the error
+// - *error is null and the caller should report out-of-memory.
+
+SharedModule
+Compile(const ShareableBytes& bytecode, const CompileArgs& args, UniqueChars* error);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_compile_h
diff --git a/js/src/wasm/WasmFrameIterator.cpp b/js/src/wasm/WasmFrameIterator.cpp
new file mode 100644
index 0000000000..4b616b5bce
--- /dev/null
+++ b/js/src/wasm/WasmFrameIterator.cpp
@@ -0,0 +1,891 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmFrameIterator.h"
+
+#include "wasm/WasmInstance.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+using mozilla::Swap;
+
+/*****************************************************************************/
+// FrameIterator implementation
+
+static void*
+ReturnAddressFromFP(void* fp)
+{
+ return reinterpret_cast<Frame*>(fp)->returnAddress;
+}
+
+static uint8_t*
+CallerFPFromFP(void* fp)
+{
+ return reinterpret_cast<Frame*>(fp)->callerFP;
+}
+
+FrameIterator::FrameIterator()
+ : activation_(nullptr),
+ code_(nullptr),
+ callsite_(nullptr),
+ codeRange_(nullptr),
+ fp_(nullptr),
+ pc_(nullptr),
+ missingFrameMessage_(false)
+{
+ MOZ_ASSERT(done());
+}
+
+FrameIterator::FrameIterator(const WasmActivation& activation)
+ : activation_(&activation),
+ code_(nullptr),
+ callsite_(nullptr),
+ codeRange_(nullptr),
+ fp_(activation.fp()),
+ pc_(nullptr),
+ missingFrameMessage_(false)
+{
+ if (fp_) {
+ settle();
+ return;
+ }
+
+ void* pc = activation.resumePC();
+ if (!pc) {
+ MOZ_ASSERT(done());
+ return;
+ }
+ pc_ = (uint8_t*)pc;
+
+ code_ = activation_->compartment()->wasm.lookupCode(pc);
+ MOZ_ASSERT(code_);
+
+ const CodeRange* codeRange = code_->lookupRange(pc);
+ MOZ_ASSERT(codeRange);
+
+ if (codeRange->kind() == CodeRange::Function)
+ codeRange_ = codeRange;
+ else
+ missingFrameMessage_ = true;
+
+ MOZ_ASSERT(!done());
+}
+
+bool
+FrameIterator::done() const
+{
+ return !codeRange_ && !missingFrameMessage_;
+}
+
+void
+FrameIterator::operator++()
+{
+ MOZ_ASSERT(!done());
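+
+    // Either walk the chain of frames via fp_, or, when only a resume pc is
+    // known, pop the resume-pc frame and then the synthetic "frames may be
+    // missing" entry.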
+ if (fp_) {
+ DebugOnly<uint8_t*> oldfp = fp_;
+ fp_ += callsite_->stackDepth();
+ MOZ_ASSERT_IF(code_->profilingEnabled(), fp_ == CallerFPFromFP(oldfp));
+ settle();
+ } else if (codeRange_) {
+ MOZ_ASSERT(codeRange_);
+ codeRange_ = nullptr;
+ missingFrameMessage_ = true;
+ } else {
+ MOZ_ASSERT(missingFrameMessage_);
+ missingFrameMessage_ = false;
+ }
+}
+
+void
+FrameIterator::settle()
+{
+ void* returnAddress = ReturnAddressFromFP(fp_);
+
+ code_ = activation_->compartment()->wasm.lookupCode(returnAddress);
+ MOZ_ASSERT(code_);
+
+ codeRange_ = code_->lookupRange(returnAddress);
+ MOZ_ASSERT(codeRange_);
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function:
+ pc_ = (uint8_t*)returnAddress;
+ callsite_ = code_->lookupCallSite(returnAddress);
+ MOZ_ASSERT(callsite_);
+ break;
+ case CodeRange::Entry:
+ fp_ = nullptr;
+ pc_ = nullptr;
+ code_ = nullptr;
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ break;
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::TrapExit:
+ case CodeRange::Inline:
+ case CodeRange::FarJumpIsland:
+ MOZ_CRASH("Should not encounter an exit during iteration");
+ }
+}
+
+const char*
+FrameIterator::filename() const
+{
+ MOZ_ASSERT(!done());
+ return code_->metadata().filename.get();
+}
+
+const char16_t*
+FrameIterator::displayURL() const
+{
+ MOZ_ASSERT(!done());
+ return code_->metadata().displayURL();
+}
+
+bool
+FrameIterator::mutedErrors() const
+{
+ MOZ_ASSERT(!done());
+ return code_->metadata().mutedErrors();
+}
+
+JSAtom*
+FrameIterator::functionDisplayAtom() const
+{
+ MOZ_ASSERT(!done());
+
+ JSContext* cx = activation_->cx();
+
+ if (missingFrameMessage_) {
+ const char* msg = "asm.js/wasm frames may be missing; enable the profiler before running "
+ "to see all frames";
+ JSAtom* atom = Atomize(cx, msg, strlen(msg));
+ if (!atom) {
+ cx->clearPendingException();
+ return cx->names().empty;
+ }
+
+ return atom;
+ }
+
+ MOZ_ASSERT(codeRange_);
+
+ JSAtom* atom = code_->getFuncAtom(cx, codeRange_->funcIndex());
+ if (!atom) {
+ cx->clearPendingException();
+ return cx->names().empty;
+ }
+
+ return atom;
+}
+
+unsigned
+FrameIterator::lineOrBytecode() const
+{
+ MOZ_ASSERT(!done());
+ return callsite_ ? callsite_->lineOrBytecode()
+ : (codeRange_ ? codeRange_->funcLineOrBytecode() : 0);
+}
+
+/*****************************************************************************/
+// Prologue/epilogue code generation
+
+// These constants reflect statically-determined offsets in the profiling
+// prologue/epilogue. The offsets are dynamically asserted during code
+// generation.
+#if defined(JS_CODEGEN_X64)
+# if defined(DEBUG)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PostStorePrePopFP = 0;
+# endif
+static const unsigned PushedFP = 23;
+static const unsigned StoredFP = 30;
+#elif defined(JS_CODEGEN_X86)
+# if defined(DEBUG)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PostStorePrePopFP = 0;
+# endif
+static const unsigned PushedFP = 14;
+static const unsigned StoredFP = 17;
+#elif defined(JS_CODEGEN_ARM)
+static const unsigned PushedRetAddr = 4;
+static const unsigned PushedFP = 24;
+static const unsigned StoredFP = 28;
+static const unsigned PostStorePrePopFP = 4;
+#elif defined(JS_CODEGEN_ARM64)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PushedFP = 0;
+static const unsigned StoredFP = 0;
+static const unsigned PostStorePrePopFP = 0;
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+static const unsigned PushedRetAddr = 8;
+static const unsigned PushedFP = 32;
+static const unsigned StoredFP = 36;
+static const unsigned PostStorePrePopFP = 4;
+#elif defined(JS_CODEGEN_NONE)
+# if defined(DEBUG)
+static const unsigned PushedRetAddr = 0;
+static const unsigned PostStorePrePopFP = 0;
+# endif
+static const unsigned PushedFP = 1;
+static const unsigned StoredFP = 1;
+#else
+# error "Unknown architecture!"
+#endif
+
+static void
+PushRetAddr(MacroAssembler& masm)
+{
+#if defined(JS_CODEGEN_ARM)
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.push(ra);
+#else
+ // The x86/x64 call instruction pushes the return address.
+#endif
+}
+
+// Generate a prologue that maintains WasmActivation::fp as the virtual frame
+// pointer so that ProfilingFrameIterator can walk the stack at any pc in
+// generated code.
+static void
+GenerateProfilingPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets)
+{
+ Register scratch = ABINonArgReg0;
+
+ // ProfilingFrameIterator needs to know the offsets of several key
+ // instructions from entry. To save space, we make these offsets static
+ // constants and assert that they match the actual codegen below. On ARM,
+ // this requires AutoForbidPools to prevent a constant pool from being
+ // randomly inserted between two instructions.
+ {
+#if defined(JS_CODEGEN_ARM)
+ AutoForbidPools afp(&masm, /* number of instructions in scope = */ 7);
+#endif
+
+ offsets->begin = masm.currentOffset();
+
+ PushRetAddr(masm);
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - offsets->begin);
+
+ masm.loadWasmActivationFromSymbolicAddress(scratch);
+ masm.push(Address(scratch, WasmActivation::offsetOfFP()));
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - offsets->begin);
+
+ masm.storePtr(masm.getStackPointer(), Address(scratch, WasmActivation::offsetOfFP()));
+ MOZ_ASSERT_IF(!masm.oom(), StoredFP == masm.currentOffset() - offsets->begin);
+ }
+
+ if (reason != ExitReason::None)
+ masm.store32(Imm32(int32_t(reason)), Address(scratch, WasmActivation::offsetOfExitReason()));
+
+ if (framePushed)
+ masm.subFromStackPtr(Imm32(framePushed));
+}
+
+// Generate the inverse of GenerateProfilingPrologue.
+static void
+GenerateProfilingEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets)
+{
+ Register scratch = ABINonArgReturnReg0;
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ Register scratch2 = ABINonArgReturnReg1;
+#endif
+
+ if (framePushed)
+ masm.addToStackPtr(Imm32(framePushed));
+
+ masm.loadWasmActivationFromSymbolicAddress(scratch);
+
+ if (reason != ExitReason::None) {
+ masm.store32(Imm32(int32_t(ExitReason::None)),
+ Address(scratch, WasmActivation::offsetOfExitReason()));
+ }
+
+ // ProfilingFrameIterator assumes fixed offsets of the last few
+    // instructions from profilingReturn, so use AutoForbidPools to ensure
+    // that unintended instructions are not automatically inserted.
+ {
+#if defined(JS_CODEGEN_ARM)
+ AutoForbidPools afp(&masm, /* number of instructions in scope = */ 4);
+#endif
+
+ // sp protects the stack from clobber via asynchronous signal handlers
+ // and the async interrupt exit. Since activation.fp can be read at any
+ // time and still points to the current frame, be careful to only update
+ // sp after activation.fp has been repointed to the caller's frame.
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.loadPtr(Address(masm.getStackPointer(), 0), scratch2);
+ masm.storePtr(scratch2, Address(scratch, WasmActivation::offsetOfFP()));
+ DebugOnly<uint32_t> prePop = masm.currentOffset();
+        masm.addToStackPtr(Imm32(sizeof(void*)));
+ MOZ_ASSERT_IF(!masm.oom(), PostStorePrePopFP == masm.currentOffset() - prePop);
+#else
+ masm.pop(Address(scratch, WasmActivation::offsetOfFP()));
+ MOZ_ASSERT(PostStorePrePopFP == 0);
+#endif
+
+ offsets->profilingReturn = masm.currentOffset();
+ masm.ret();
+ }
+}
+
+// In profiling mode, we need to maintain fp so that we can unwind the stack at
+// any pc. In non-profiling mode, the only way to observe WasmActivation::fp is
+// to call out to C++ so, as an optimization, we don't update fp. To avoid
+// recompilation when the profiling mode is toggled, we generate both prologues
+// a priori and switch between prologues when the profiling mode is toggled.
+// Specifically, ToggleProfiling patches all callsites to either call the
+// profiling or non-profiling entry point.
+void
+wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
+ FuncOffsets* offsets)
+{
+#if defined(JS_CODEGEN_ARM)
+ // Flush pending pools so they do not get dumped between the 'begin' and
+ // 'entry' offsets since the difference must be less than UINT8_MAX.
+ masm.flushBuffer();
+#endif
+
+ masm.haltingAlign(CodeAlignment);
+
+ GenerateProfilingPrologue(masm, framePushed, ExitReason::None, offsets);
+ Label body;
+ masm.jump(&body);
+
+ // Generate table entry thunk:
+ masm.haltingAlign(CodeAlignment);
+ offsets->tableEntry = masm.currentOffset();
+ TrapOffset trapOffset(0); // ignored by masm.wasmEmitTrapOutOfLineCode
+ TrapDesc trap(trapOffset, Trap::IndirectCallBadSig, masm.framePushed());
+ switch (sigId.kind()) {
+ case SigIdDesc::Kind::Global: {
+ Register scratch = WasmTableCallScratchReg;
+ masm.loadWasmGlobalPtr(sigId.globalDataOffset(), scratch);
+ masm.branchPtr(Assembler::Condition::NotEqual, WasmTableCallSigReg, scratch, trap);
+ break;
+ }
+ case SigIdDesc::Kind::Immediate:
+ masm.branch32(Assembler::Condition::NotEqual, WasmTableCallSigReg, Imm32(sigId.immediate()), trap);
+ break;
+ case SigIdDesc::Kind::None:
+ break;
+ }
+ offsets->tableProfilingJump = masm.nopPatchableToNearJump().offset();
+
+ // Generate normal prologue:
+ masm.nopAlign(CodeAlignment);
+ offsets->nonProfilingEntry = masm.currentOffset();
+ PushRetAddr(masm);
+ masm.subFromStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
+
+ // Prologue join point, body begin:
+ masm.bind(&body);
+ masm.setFramePushed(framePushed);
+}
+
+// Similar to GenerateFunctionPrologue (see comment), we generate both a
+// profiling and non-profiling epilogue a priori. When the profiling mode is
+// toggled, ToggleProfiling patches the 'profiling jump' to either be a nop
+// (falling through to the normal prologue) or a jump (jumping to the profiling
+// epilogue).
+void
+wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
+{
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+
+#if defined(JS_CODEGEN_ARM)
+ // Flush pending pools so they do not get dumped between the profilingReturn
+ // and profilingJump/profilingEpilogue offsets since the difference must be
+ // less than UINT8_MAX.
+ masm.flushBuffer();
+#endif
+
+ // Generate a nop that is overwritten by a jump to the profiling epilogue
+ // when profiling is enabled.
+ offsets->profilingJump = masm.nopPatchableToNearJump().offset();
+
+ // Normal epilogue:
+ masm.addToStackPtr(Imm32(framePushed + FrameBytesAfterReturnAddress));
+ masm.ret();
+ masm.setFramePushed(0);
+
+ // Profiling epilogue:
+ offsets->profilingEpilogue = masm.currentOffset();
+ GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
+}
+
+void
+wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets)
+{
+ masm.haltingAlign(CodeAlignment);
+ GenerateProfilingPrologue(masm, framePushed, reason, offsets);
+ masm.setFramePushed(framePushed);
+}
+
+void
+wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets)
+{
+ // Inverse of GenerateExitPrologue:
+ MOZ_ASSERT(masm.framePushed() == framePushed);
+ GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
+ masm.setFramePushed(0);
+}
+
+/*****************************************************************************/
+// ProfilingFrameIterator
+
+ProfilingFrameIterator::ProfilingFrameIterator()
+ : activation_(nullptr),
+ code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ exitReason_(ExitReason::None)
+{
+ MOZ_ASSERT(done());
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation)
+ : activation_(&activation),
+ code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ exitReason_(ExitReason::None)
+{
+ // If profiling hasn't been enabled for this instance, then CallerFPFromFP
+ // will be trash, so ignore the entire activation. In practice, this only
+ // happens if profiling is enabled while the instance is on the stack (in
+ // which case profiling will be enabled when the instance becomes inactive
+ // and gets called again).
+ if (!activation_->compartment()->wasm.profilingEnabled()) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ initFromFP();
+}
+
+static inline void
+AssertMatchesCallSite(const WasmActivation& activation, void* callerPC, void* callerFP, void* fp)
+{
+#ifdef DEBUG
+ Code* code = activation.compartment()->wasm.lookupCode(callerPC);
+ MOZ_ASSERT(code);
+
+ const CodeRange* callerCodeRange = code->lookupRange(callerPC);
+ MOZ_ASSERT(callerCodeRange);
+
+ if (callerCodeRange->kind() == CodeRange::Entry) {
+ MOZ_ASSERT(callerFP == nullptr);
+ return;
+ }
+
+ const CallSite* callsite = code->lookupCallSite(callerPC);
+ MOZ_ASSERT(callsite);
+
+ MOZ_ASSERT(callerFP == (uint8_t*)fp + callsite->stackDepth());
+#endif
+}
+
+void
+ProfilingFrameIterator::initFromFP()
+{
+ uint8_t* fp = activation_->fp();
+ stackAddress_ = fp;
+
+ // If a signal was handled while entering an activation, the frame will
+ // still be null.
+ if (!fp) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ void* pc = ReturnAddressFromFP(fp);
+
+ code_ = activation_->compartment()->wasm.lookupCode(pc);
+ MOZ_ASSERT(code_);
+
+ codeRange_ = code_->lookupRange(pc);
+ MOZ_ASSERT(codeRange_);
+
+ // Since we don't have the pc for fp, start unwinding at the caller of fp
+ // (ReturnAddressFromFP(fp)). This means that the innermost frame is
+ // skipped. This is fine because:
+ // - for import exit calls, the innermost frame is a thunk, so the first
+ // frame that shows up is the function calling the import;
+ // - for Math and other builtin calls as well as interrupts, we note the absence
+ // of an exit reason and inject a fake "builtin" frame; and
+ // - for async interrupts, we just accept that we'll lose the innermost frame.
+ switch (codeRange_->kind()) {
+ case CodeRange::Entry:
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ break;
+ case CodeRange::Function:
+ fp = CallerFPFromFP(fp);
+ callerPC_ = ReturnAddressFromFP(fp);
+ callerFP_ = CallerFPFromFP(fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ break;
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::TrapExit:
+ case CodeRange::Inline:
+ case CodeRange::FarJumpIsland:
+ MOZ_CRASH("Unexpected CodeRange kind");
+ }
+
+ // The iterator inserts a pretend innermost frame for non-None ExitReasons.
+ // This allows the variety of exit reasons to show up in the callstack.
+ exitReason_ = activation_->exitReason();
+
+ // In the case of calls to builtins or asynchronous interrupts, no exit path
+ // is taken so the exitReason is None. Coerce these to the Native exit
+ // reason so that self-time is accounted for.
+ if (exitReason_ == ExitReason::None)
+ exitReason_ = ExitReason::Native;
+
+ MOZ_ASSERT(!done());
+}
+
+typedef JS::ProfilingFrameIterator::RegisterState RegisterState;
+
+static bool
+InThunk(const CodeRange& codeRange, uint32_t offsetInModule)
+{
+ if (codeRange.kind() == CodeRange::FarJumpIsland)
+ return true;
+
+ return codeRange.isFunction() &&
+ offsetInModule >= codeRange.funcTableEntry() &&
+ offsetInModule < codeRange.funcNonProfilingEntry();
+}
+
+ProfilingFrameIterator::ProfilingFrameIterator(const WasmActivation& activation,
+ const RegisterState& state)
+ : activation_(&activation),
+ code_(nullptr),
+ codeRange_(nullptr),
+ callerFP_(nullptr),
+ callerPC_(nullptr),
+ stackAddress_(nullptr),
+ exitReason_(ExitReason::None)
+{
+ // If profiling hasn't been enabled for this instance, then CallerFPFromFP
+ // will be trash, so ignore the entire activation. In practice, this only
+ // happens if profiling is enabled while the instance is on the stack (in
+ // which case profiling will be enabled when the instance becomes inactive
+ // and gets called again).
+ if (!activation_->compartment()->wasm.profilingEnabled()) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ // If pc isn't in the instance's code, we must have exited the code via an
+ // exit trampoline or signal handler.
+ code_ = activation_->compartment()->wasm.lookupCode(state.pc);
+ if (!code_) {
+ initFromFP();
+ return;
+ }
+
+ // Note: fp may be null while entering and leaving the activation.
+ uint8_t* fp = activation.fp();
+
+ const CodeRange* codeRange = code_->lookupRange(state.pc);
+ switch (codeRange->kind()) {
+ case CodeRange::Function:
+ case CodeRange::FarJumpIsland:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::TrapExit: {
+ // When the pc is inside the prologue/epilogue, the innermost call's
+ // Frame is not complete and thus fp points to the second-to-innermost
+ // call's Frame. Since fp can only tell you about its caller (via
+ // ReturnAddressFromFP(fp)), naively unwinding while pc is in the
+        // prologue/epilogue would skip the second-to-innermost call. To avoid
+ // this problem, we use the static structure of the code in the prologue
+ // and epilogue to do the Right Thing.
+ uint32_t offsetInModule = (uint8_t*)state.pc - code_->segment().base();
+ MOZ_ASSERT(offsetInModule >= codeRange->begin());
+ MOZ_ASSERT(offsetInModule < codeRange->end());
+ uint32_t offsetInCodeRange = offsetInModule - codeRange->begin();
+ void** sp = (void**)state.sp;
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ if (offsetInCodeRange < PushedRetAddr || InThunk(*codeRange, offsetInModule)) {
+ // First instruction of the ARM/MIPS function; the return address is
+ // still in lr and fp still holds the caller's fp.
+ callerPC_ = state.lr;
+ callerFP_ = fp;
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 2);
+ } else if (offsetInModule == codeRange->profilingReturn() - PostStorePrePopFP) {
+ // Second-to-last instruction of the ARM/MIPS function; fp points to
+ // the caller's fp; have not yet popped Frame.
+ callerPC_ = ReturnAddressFromFP(sp);
+ callerFP_ = CallerFPFromFP(sp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
+ } else
+#endif
+ if (offsetInCodeRange < PushedFP || offsetInModule == codeRange->profilingReturn() ||
+ InThunk(*codeRange, offsetInModule))
+ {
+ // The return address has been pushed on the stack but not fp; fp
+ // still points to the caller's fp.
+ callerPC_ = *sp;
+ callerFP_ = fp;
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp - 1);
+ } else if (offsetInCodeRange < StoredFP) {
+ // The full Frame has been pushed; fp still points to the caller's
+ // frame.
+ MOZ_ASSERT(fp == CallerFPFromFP(sp));
+ callerPC_ = ReturnAddressFromFP(sp);
+ callerFP_ = CallerFPFromFP(sp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, sp);
+ } else {
+ // Not in the prologue/epilogue.
+ callerPC_ = ReturnAddressFromFP(fp);
+ callerFP_ = CallerFPFromFP(fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ }
+ break;
+ }
+ case CodeRange::Entry: {
+        // The entry trampoline is the final frame in a WasmActivation. The entry
+ // trampoline also doesn't GeneratePrologue/Epilogue so we can't use
+ // the general unwinding logic above.
+ MOZ_ASSERT(!fp);
+ callerPC_ = nullptr;
+ callerFP_ = nullptr;
+ break;
+ }
+ case CodeRange::Inline: {
+        // The throw stub clears WasmActivation::fp on its way out.
+ if (!fp) {
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ // Most inline code stubs execute after the prologue/epilogue have
+ // completed so we can simply unwind based on fp. The only exception is
+ // the async interrupt stub, since it can be executed at any time.
+ // However, the async interrupt is super rare, so we can tolerate
+        // skipped frames. Thus, we simply unwind based on fp.
+ callerPC_ = ReturnAddressFromFP(fp);
+ callerFP_ = CallerFPFromFP(fp);
+ AssertMatchesCallSite(*activation_, callerPC_, callerFP_, fp);
+ break;
+ }
+ }
+
+ codeRange_ = codeRange;
+ stackAddress_ = state.sp;
+ MOZ_ASSERT(!done());
+}
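+
+// A minimal sketch of the prologue/epilogue case analysis above, assuming the
+// canonical frame layout (push return address, push caller fp, store fp); the
+// helper name below is illustrative and not defined in this file:
+//
+//   static void
+//   RecoverCaller(uint32_t offsetInCodeRange, void** sp, uint8_t* fp,
+//                 void** callerPC, uint8_t** callerFP)
+//   {
+//       if (offsetInCodeRange < PushedFP) {
+//           *callerPC = *sp;                      // ra pushed, fp not yet pushed
+//           *callerFP = fp;
+//       } else if (offsetInCodeRange < StoredFP) {
+//           *callerPC = ReturnAddressFromFP(sp);  // Frame pushed, fp not yet updated
+//           *callerFP = CallerFPFromFP(sp);
+//       } else {
+//           *callerPC = ReturnAddressFromFP(fp);  // steady state: unwind through fp
+//           *callerFP = CallerFPFromFP(fp);
+//       }
+//   }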
+
+void
+ProfilingFrameIterator::operator++()
+{
+ if (exitReason_ != ExitReason::None) {
+ MOZ_ASSERT(codeRange_);
+ exitReason_ = ExitReason::None;
+ MOZ_ASSERT(!done());
+ return;
+ }
+
+ if (!callerPC_) {
+ MOZ_ASSERT(!callerFP_);
+ codeRange_ = nullptr;
+ MOZ_ASSERT(done());
+ return;
+ }
+
+ code_ = activation_->compartment()->wasm.lookupCode(callerPC_);
+ MOZ_ASSERT(code_);
+
+ codeRange_ = code_->lookupRange(callerPC_);
+ MOZ_ASSERT(codeRange_);
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Entry:
+ MOZ_ASSERT(callerFP_ == nullptr);
+ callerPC_ = nullptr;
+ break;
+ case CodeRange::Function:
+ case CodeRange::ImportJitExit:
+ case CodeRange::ImportInterpExit:
+ case CodeRange::TrapExit:
+ case CodeRange::Inline:
+ case CodeRange::FarJumpIsland:
+ stackAddress_ = callerFP_;
+ callerPC_ = ReturnAddressFromFP(callerFP_);
+ AssertMatchesCallSite(*activation_, callerPC_, CallerFPFromFP(callerFP_), callerFP_);
+ callerFP_ = CallerFPFromFP(callerFP_);
+ break;
+ }
+
+ MOZ_ASSERT(!done());
+}
+
+const char*
+ProfilingFrameIterator::label() const
+{
+ MOZ_ASSERT(!done());
+
+ // Use the same string for both time inside and under so that the two
+ // entries will be coalesced by the profiler.
+ //
+ // NB: these labels are parsed for location by
+ // devtools/client/performance/modules/logic/frame-utils.js
+ const char* importJitDescription = "fast FFI trampoline (in asm.js)";
+ const char* importInterpDescription = "slow FFI trampoline (in asm.js)";
+ const char* nativeDescription = "native call (in asm.js)";
+ const char* trapDescription = "trap handling (in asm.js)";
+
+ switch (exitReason_) {
+ case ExitReason::None:
+ break;
+ case ExitReason::ImportJit:
+ return importJitDescription;
+ case ExitReason::ImportInterp:
+ return importInterpDescription;
+ case ExitReason::Native:
+ return nativeDescription;
+ case ExitReason::Trap:
+ return trapDescription;
+ }
+
+ switch (codeRange_->kind()) {
+ case CodeRange::Function: return code_->profilingLabel(codeRange_->funcIndex());
+ case CodeRange::Entry: return "entry trampoline (in asm.js)";
+ case CodeRange::ImportJitExit: return importJitDescription;
+ case CodeRange::ImportInterpExit: return importInterpDescription;
+ case CodeRange::TrapExit: return trapDescription;
+ case CodeRange::Inline: return "inline stub (in asm.js)";
+ case CodeRange::FarJumpIsland: return "interstitial (in asm.js)";
+ }
+
+ MOZ_CRASH("bad code range kind");
+}
+
+/*****************************************************************************/
+// Runtime patching to enable/disable profiling
+
+void
+wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
+{
+ if (callSite.kind() != CallSite::Func)
+ return;
+
+ uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ void* callee = X86Encoding::GetRel32Target(callerRetAddr);
+#elif defined(JS_CODEGEN_ARM)
+ uint8_t* caller = callerRetAddr - 4;
+ Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
+ BOffImm calleeOffset;
+ callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
+ void* callee = calleeOffset.getDest(callerInsn);
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_CRASH();
+ void* callee = nullptr;
+ (void)callerRetAddr;
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ uint8_t* caller = callerRetAddr - 2 * sizeof(uint32_t);
+ InstImm* callerInsn = reinterpret_cast<InstImm*>(caller);
+ BOffImm16 calleeOffset;
+ callerInsn->extractImm16(&calleeOffset);
+ void* callee = calleeOffset.getDest(reinterpret_cast<Instruction*>(caller));
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+ void* callee = nullptr;
+#else
+# error "Missing architecture"
+#endif
+
+ const CodeRange* codeRange = code.lookupRange(callee);
+ if (!codeRange->isFunction())
+ return;
+
+ uint8_t* from = code.segment().base() + codeRange->funcNonProfilingEntry();
+ uint8_t* to = code.segment().base() + codeRange->funcProfilingEntry();
+ if (!enabled)
+ Swap(from, to);
+
+ MOZ_ASSERT(callee == from);
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ X86Encoding::SetRel32(callerRetAddr, to);
+#elif defined(JS_CODEGEN_ARM)
+ new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
+#elif defined(JS_CODEGEN_ARM64)
+ (void)to;
+ MOZ_CRASH();
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ new (caller) InstImm(op_regimm, zero, rt_bgezal, BOffImm16(to - caller));
+#elif defined(JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Missing architecture"
+#endif
+}
+
+void
+wasm::ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled)
+{
+ const CodeRange& cr = code.metadata().codeRanges[callThunk.u.codeRangeIndex];
+ uint32_t calleeOffset = enabled ? cr.funcProfilingEntry() : cr.funcNonProfilingEntry();
+ MacroAssembler::repatchFarJump(code.segment().base(), callThunk.offset, calleeOffset);
+}
+
+void
+wasm::ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled)
+{
+ if (!codeRange.isFunction())
+ return;
+
+ uint8_t* codeBase = code.segment().base();
+ uint8_t* profilingEntry = codeBase + codeRange.funcProfilingEntry();
+ uint8_t* tableProfilingJump = codeBase + codeRange.funcTableProfilingJump();
+ uint8_t* profilingJump = codeBase + codeRange.funcProfilingJump();
+ uint8_t* profilingEpilogue = codeBase + codeRange.funcProfilingEpilogue();
+
+ if (enabled) {
+ MacroAssembler::patchNopToNearJump(tableProfilingJump, profilingEntry);
+ MacroAssembler::patchNopToNearJump(profilingJump, profilingEpilogue);
+ } else {
+ MacroAssembler::patchNearJumpToNop(tableProfilingJump);
+ MacroAssembler::patchNearJumpToNop(profilingJump);
+ }
+}
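+
+// Conceptually, enabling profiling rewrites the two patchable sites of a
+// function roughly as follows (a sketch of the effect of the patch calls above,
+// not literal emitted code):
+//
+//   funcTableEntry:                      funcTableEntry:
+//       nop                    ==>           jump funcProfilingEntry
+//   funcNonProfilingEntry:               funcNonProfilingEntry:
+//       ... function body ...                ... function body ...
+//       nop                    ==>           jump funcProfilingEpilogue
+//       ret                                  ret
+//
+// Disabling profiling patches both near jumps back to nops.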
diff --git a/js/src/wasm/WasmFrameIterator.h b/js/src/wasm/WasmFrameIterator.h
new file mode 100644
index 0000000000..a62a2d3ee5
--- /dev/null
+++ b/js/src/wasm/WasmFrameIterator.h
@@ -0,0 +1,142 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_frame_iterator_h
+#define wasm_frame_iterator_h
+
+#include "js/ProfilingFrameIterator.h"
+
+class JSAtom;
+
+namespace js {
+
+class WasmActivation;
+namespace jit { class MacroAssembler; }
+
+namespace wasm {
+
+class CallSite;
+class Code;
+class CodeRange;
+class SigIdDesc;
+struct CallThunk;
+struct FuncOffsets;
+struct ProfilingOffsets;
+struct TrapOffset;
+
+// Iterates over the frames of a single WasmActivation, called synchronously
+// from C++ in the thread running the asm.js code.
+//
+// The one exception is that this iterator may be called from the interrupt
+// callback which may be called asynchronously from asm.js code; in this case,
+// the backtrace may not be correct. That being said, we try our best to print
+// an informative message to the user and at least the name of the innermost
+// function stack frame.
+class FrameIterator
+{
+ const WasmActivation* activation_;
+ const Code* code_;
+ const CallSite* callsite_;
+ const CodeRange* codeRange_;
+ uint8_t* fp_;
+ uint8_t* pc_;
+ bool missingFrameMessage_;
+
+ void settle();
+
+ public:
+ explicit FrameIterator();
+ explicit FrameIterator(const WasmActivation& activation);
+ void operator++();
+ bool done() const;
+ const char* filename() const;
+ const char16_t* displayURL() const;
+ bool mutedErrors() const;
+ JSAtom* functionDisplayAtom() const;
+ unsigned lineOrBytecode() const;
+ inline void* fp() const { return fp_; }
+ inline uint8_t* pc() const { return pc_; }
+};
+
+// An ExitReason describes the possible reasons for leaving compiled wasm code
+// or the state of not having left compiled wasm code (ExitReason::None).
+enum class ExitReason : uint32_t
+{
+ None, // default state, the pc is in wasm code
+ ImportJit, // fast-path call directly into JIT code
+ ImportInterp, // slow-path call into C++ Invoke()
+ Native, // call to native C++ code (e.g., Math.sin, ToInt32(), interrupt)
+ Trap // call to trap handler for the trap in WasmActivation::trap
+};
+
+// Iterates over the frames of a single WasmActivation, given an
+// asynchronously-interrupted thread's state. If the activation's
+// module is not in profiling mode, the activation is skipped.
+class ProfilingFrameIterator
+{
+ const WasmActivation* activation_;
+ const Code* code_;
+ const CodeRange* codeRange_;
+ uint8_t* callerFP_;
+ void* callerPC_;
+ void* stackAddress_;
+ ExitReason exitReason_;
+
+ void initFromFP();
+
+ public:
+ ProfilingFrameIterator();
+ explicit ProfilingFrameIterator(const WasmActivation& activation);
+ ProfilingFrameIterator(const WasmActivation& activation,
+ const JS::ProfilingFrameIterator::RegisterState& state);
+ void operator++();
+ bool done() const { return !codeRange_; }
+
+ void* stackAddress() const { MOZ_ASSERT(!done()); return stackAddress_; }
+ const char* label() const;
+};
+
+// Prologue/epilogue code generation
+
+void
+GenerateExitPrologue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets);
+void
+GenerateExitEpilogue(jit::MacroAssembler& masm, unsigned framePushed, ExitReason reason,
+ ProfilingOffsets* offsets);
+void
+GenerateFunctionPrologue(jit::MacroAssembler& masm, unsigned framePushed, const SigIdDesc& sigId,
+ FuncOffsets* offsets);
+void
+GenerateFunctionEpilogue(jit::MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets);
+
+// Runtime patching to enable/disable profiling
+
+void
+ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled);
+
+void
+ToggleProfiling(const Code& code, const CallThunk& callThunk, bool enabled);
+
+void
+ToggleProfiling(const Code& code, const CodeRange& codeRange, bool enabled);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_frame_iterator_h
diff --git a/js/src/wasm/WasmGeneratedSourceMap.h b/js/src/wasm/WasmGeneratedSourceMap.h
new file mode 100644
index 0000000000..6a7306ecea
--- /dev/null
+++ b/js/src/wasm/WasmGeneratedSourceMap.h
@@ -0,0 +1,151 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_generated_source_map_h
+#define wasm_generated_source_map_h
+
+#include "mozilla/Vector.h"
+
+#include "vm/StringBuffer.h"
+
+namespace js {
+
+namespace wasm {
+
+// The generated source location for the AST node/expression. The offset field
+// refers to an offset in the binary format file.
+struct ExprLoc
+{
+ uint32_t lineno;
+ uint32_t column;
+ uint32_t offset;
+ ExprLoc() : lineno(0), column(0), offset(0) {}
+ ExprLoc(uint32_t lineno_, uint32_t column_, uint32_t offset_) : lineno(lineno_), column(column_), offset(offset_) {}
+};
+
+typedef mozilla::Vector<ExprLoc, 0, TempAllocPolicy> ExprLocVector;
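+
+// For example, an ExprLoc of { lineno: 7, column: 5, offset: 0x2a } records that
+// the expression encoded at byte offset 0x2a of the binary was printed starting
+// at line 7, column 5 of the generated text.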
+
+// The line and expression ranges of a WebAssembly function in the generated source.
+struct FunctionLoc
+{
+ size_t startExprsIndex;
+ size_t endExprsIndex;
+ uint32_t startLineno;
+ uint32_t endLineno;
+ FunctionLoc(size_t startExprsIndex_, size_t endExprsIndex_, uint32_t startLineno_, uint32_t endLineno_)
+ : startExprsIndex(startExprsIndex_),
+ endExprsIndex(endExprsIndex_),
+ startLineno(startLineno_),
+ endLineno(endLineno_)
+ {}
+};
+
+typedef mozilla::Vector<FunctionLoc, 0, TempAllocPolicy> FunctionLocVector;
+
+// The generated source map for a WebAssembly binary file. This map is generated
+// while building the text buffer (see BinaryToExperimentalText).
+class GeneratedSourceMap
+{
+ ExprLocVector exprlocs_;
+ FunctionLocVector functionlocs_;
+ uint32_t totalLines_;
+
+ public:
+ explicit GeneratedSourceMap(JSContext* cx)
+ : exprlocs_(cx),
+ functionlocs_(cx),
+ totalLines_(0)
+ {}
+ ExprLocVector& exprlocs() { return exprlocs_; }
+ FunctionLocVector& functionlocs() { return functionlocs_; }
+
+ uint32_t totalLines() { return totalLines_; }
+ void setTotalLines(uint32_t val) { totalLines_ = val; }
+};
+
+typedef UniquePtr<GeneratedSourceMap> UniqueGeneratedSourceMap;
+
+// A StringBuffer wrapper that tracks the current position (line and column)
+// within the generated source.
+class WasmPrintBuffer
+{
+ StringBuffer& stringBuffer_;
+ uint32_t lineno_;
+ uint32_t column_;
+
+ public:
+ explicit WasmPrintBuffer(StringBuffer& stringBuffer)
+ : stringBuffer_(stringBuffer),
+ lineno_(1),
+ column_(1)
+ {}
+ inline char processChar(char ch) {
+ if (ch == '\n') {
+ lineno_++; column_ = 1;
+ } else
+ column_++;
+ return ch;
+ }
+ inline char16_t processChar(char16_t ch) {
+ if (ch == '\n') {
+ lineno_++; column_ = 1;
+ } else
+ column_++;
+ return ch;
+ }
+ bool append(const char ch) {
+ return stringBuffer_.append(processChar(ch));
+ }
+ bool append(const char16_t ch) {
+ return stringBuffer_.append(processChar(ch));
+ }
+ bool append(const char* str, size_t length) {
+ for (size_t i = 0; i < length; i++)
+ processChar(str[i]);
+ return stringBuffer_.append(str, length);
+ }
+ bool append(const char16_t* begin, const char16_t* end) {
+ for (const char16_t* p = begin; p != end; p++)
+ processChar(*p);
+ return stringBuffer_.append(begin, end);
+ }
+ bool append(const char16_t* str, size_t length) {
+ return append(str, str + length);
+ }
+ template <size_t ArrayLength>
+ bool append(const char (&array)[ArrayLength]) {
+ static_assert(ArrayLength > 0, "null-terminated");
+ MOZ_ASSERT(array[ArrayLength - 1] == '\0');
+ return append(array, ArrayLength - 1);
+ }
+ char16_t getChar(size_t index) {
+ return stringBuffer_.getChar(index);
+ }
+ size_t length() {
+ return stringBuffer_.length();
+ }
+ StringBuffer& stringBuffer() { return stringBuffer_; }
+ uint32_t lineno() { return lineno_; }
+ uint32_t column() { return column_; }
+};
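+
+// A minimal usage sketch (assuming a live JSContext* cx): the wrapper is used
+// like a plain StringBuffer, but lineno()/column() stay in sync with whatever
+// has been appended, which is what source-map construction relies on.
+//
+//   StringBuffer sb(cx);
+//   WasmPrintBuffer buffer(sb);
+//   if (!buffer.append("(module\n"))     // afterwards: lineno() == 2, column() == 1
+//       return false;
+//   if (!buffer.append("  (func)"))      // afterwards: lineno() == 2, column() == 9
+//       return false;
+//   uint32_t line = buffer.lineno();     // record the position, e.g. for an ExprLoc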
+
+} // namespace wasm
+
+} // namespace js
+
+#endif // wasm_generated_source_map_h
diff --git a/js/src/wasm/WasmGenerator.cpp b/js/src/wasm/WasmGenerator.cpp
new file mode 100644
index 0000000000..e6f1edd991
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.cpp
@@ -0,0 +1,1174 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmGenerator.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/EnumeratedRange.h"
+
+#include <algorithm>
+
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmIonCompile.h"
+#include "wasm/WasmStubs.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::CheckedInt;
+using mozilla::MakeEnumeratedRange;
+
+// ****************************************************************************
+// ModuleGenerator
+
+static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
+static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
+static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
+
+ModuleGenerator::ModuleGenerator(ImportVector&& imports)
+ : alwaysBaseline_(false),
+ imports_(Move(imports)),
+ numSigs_(0),
+ numTables_(0),
+ lifo_(GENERATOR_LIFO_DEFAULT_CHUNK_SIZE),
+ masmAlloc_(&lifo_),
+ masm_(MacroAssembler::WasmToken(), masmAlloc_),
+ lastPatchedCallsite_(0),
+ startOfUnpatchedCallsites_(0),
+ parallel_(false),
+ outstanding_(0),
+ activeFuncDef_(nullptr),
+ startedFuncDefs_(false),
+ finishedFuncDefs_(false),
+ numFinishedFuncDefs_(0)
+{
+ MOZ_ASSERT(IsCompilingWasm());
+}
+
+ModuleGenerator::~ModuleGenerator()
+{
+ if (parallel_) {
+ // Wait for any outstanding jobs to fail or complete.
+ if (outstanding_) {
+ AutoLockHelperThreadState lock;
+ while (true) {
+ IonCompileTaskPtrVector& worklist = HelperThreadState().wasmWorklist(lock);
+ MOZ_ASSERT(outstanding_ >= worklist.length());
+ outstanding_ -= worklist.length();
+ worklist.clear();
+
+ IonCompileTaskPtrVector& finished = HelperThreadState().wasmFinishedList(lock);
+ MOZ_ASSERT(outstanding_ >= finished.length());
+ outstanding_ -= finished.length();
+ finished.clear();
+
+ uint32_t numFailed = HelperThreadState().harvestFailedWasmJobs(lock);
+ MOZ_ASSERT(outstanding_ >= numFailed);
+ outstanding_ -= numFailed;
+
+ if (!outstanding_)
+ break;
+
+ HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
+ }
+ }
+
+ MOZ_ASSERT(HelperThreadState().wasmCompilationInProgress);
+ HelperThreadState().wasmCompilationInProgress = false;
+ } else {
+ MOZ_ASSERT(!outstanding_);
+ }
+}
+
+bool
+ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
+ Metadata* maybeAsmJSMetadata)
+{
+ shared_ = Move(shared);
+ alwaysBaseline_ = args.alwaysBaseline;
+
+ if (!exportedFuncs_.init())
+ return false;
+
+ if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, shared_->funcSigs.length()))
+ return false;
+
+    linkData_.globalDataLength = AlignBytes(InitialGlobalDataBytes, sizeof(void*));
+
+ // asm.js passes in an AsmJSMetadata subclass to use instead.
+ if (maybeAsmJSMetadata) {
+ metadata_ = maybeAsmJSMetadata;
+ MOZ_ASSERT(isAsmJS());
+ } else {
+ metadata_ = js_new<Metadata>();
+ if (!metadata_)
+ return false;
+ MOZ_ASSERT(!isAsmJS());
+ }
+
+ if (args.scriptedCaller.filename) {
+ metadata_->filename = DuplicateString(args.scriptedCaller.filename.get());
+ if (!metadata_->filename)
+ return false;
+ }
+
+ if (!assumptions_.clone(args.assumptions))
+ return false;
+
+ // For asm.js, the Vectors in ModuleGeneratorData are max-sized reservations
+ // and will be initialized in a linear order via init* functions as the
+ // module is generated. For wasm, the Vectors are correctly-sized and
+ // already initialized.
+
+ if (!isAsmJS()) {
+ numSigs_ = shared_->sigs.length();
+ numTables_ = shared_->tables.length();
+
+ for (size_t i = 0; i < shared_->funcImportGlobalDataOffsets.length(); i++) {
+ shared_->funcImportGlobalDataOffsets[i] = linkData_.globalDataLength;
+ linkData_.globalDataLength += sizeof(FuncImportTls);
+ if (!addFuncImport(*shared_->funcSigs[i], shared_->funcImportGlobalDataOffsets[i]))
+ return false;
+ }
+
+ for (const Import& import : imports_) {
+ if (import.kind == DefinitionKind::Table) {
+ MOZ_ASSERT(shared_->tables.length() == 1);
+ shared_->tables[0].external = true;
+ break;
+ }
+ }
+
+ for (TableDesc& table : shared_->tables) {
+ if (!allocateGlobalBytes(sizeof(TableTls), sizeof(void*), &table.globalDataOffset))
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numSigs_; i++) {
+ SigWithId& sig = shared_->sigs[i];
+ if (SigIdDesc::isGlobal(sig)) {
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(sizeof(void*), sizeof(void*), &globalDataOffset))
+ return false;
+
+ sig.id = SigIdDesc::global(sig, globalDataOffset);
+
+ Sig copy;
+ if (!copy.clone(sig))
+ return false;
+
+ if (!metadata_->sigIds.emplaceBack(Move(copy), sig.id))
+ return false;
+ } else {
+ sig.id = SigIdDesc::immediate(sig);
+ }
+ }
+
+ for (GlobalDesc& global : shared_->globals) {
+ if (global.isConstant())
+ continue;
+ if (!allocateGlobal(&global))
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(shared_->sigs.length() == MaxSigs);
+ MOZ_ASSERT(shared_->tables.length() == MaxTables);
+ MOZ_ASSERT(shared_->asmJSSigToTableIndex.length() == MaxSigs);
+ }
+
+ return true;
+}
+
+bool
+ModuleGenerator::finishOutstandingTask()
+{
+ MOZ_ASSERT(parallel_);
+
+ IonCompileTask* task = nullptr;
+ {
+ AutoLockHelperThreadState lock;
+ while (true) {
+ MOZ_ASSERT(outstanding_ > 0);
+
+ if (HelperThreadState().wasmFailed(lock))
+ return false;
+
+ if (!HelperThreadState().wasmFinishedList(lock).empty()) {
+ outstanding_--;
+ task = HelperThreadState().wasmFinishedList(lock).popCopy();
+ break;
+ }
+
+ HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
+ }
+ }
+
+ return finishTask(task);
+}
+
+bool
+ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const
+{
+ return funcToCodeRange_[funcIndex] != BAD_CODE_RANGE;
+}
+
+const CodeRange&
+ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
+{
+ MOZ_ASSERT(funcIsCompiled(funcIndex));
+ const CodeRange& cr = metadata_->codeRanges[funcToCodeRange_[funcIndex]];
+ MOZ_ASSERT(cr.isFunction());
+ return cr;
+}
+
+static uint32_t
+JumpRange()
+{
+ return Min(JitOptions.jumpThreshold, JumpImmediateRange);
+}
+
+typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> OffsetMap;
+
+bool
+ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
+{
+ MacroAssembler::AutoPrepareForPatching patching(masm_);
+
+ masm_.haltingAlign(CodeAlignment);
+
+ // Create far jumps for calls that have relative offsets that may otherwise
+ // go out of range. Far jumps are created for two cases: direct calls
+ // between function definitions and calls to trap exits by trap out-of-line
+ // paths. Far jump code is shared when possible to reduce bloat. This method
+ // is called both between function bodies (at a frequency determined by the
+ // ISA's jump range) and once at the very end of a module's codegen after
+ // all possible calls/traps have been emitted.
+
+ OffsetMap existingCallFarJumps;
+ if (!existingCallFarJumps.init())
+ return false;
+
+ EnumeratedArray<Trap, Trap::Limit, Maybe<uint32_t>> existingTrapFarJumps;
+
+ for (; lastPatchedCallsite_ < masm_.callSites().length(); lastPatchedCallsite_++) {
+ const CallSiteAndTarget& cs = masm_.callSites()[lastPatchedCallsite_];
+ uint32_t callerOffset = cs.returnAddressOffset();
+ MOZ_RELEASE_ASSERT(callerOffset < INT32_MAX);
+
+ switch (cs.kind()) {
+ case CallSiteDesc::Dynamic:
+ case CallSiteDesc::Symbolic:
+ break;
+ case CallSiteDesc::Func: {
+ if (funcIsCompiled(cs.funcIndex())) {
+ uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNonProfilingEntry();
+ MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
+
+ if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
+ masm_.patchCall(callerOffset, calleeOffset);
+ break;
+ }
+ }
+
+ OffsetMap::AddPtr p = existingCallFarJumps.lookupForAdd(cs.funcIndex());
+ if (!p) {
+ Offsets offsets;
+ offsets.begin = masm_.currentOffset();
+ uint32_t jumpOffset = masm_.farJumpWithPatch().offset();
+ offsets.end = masm_.currentOffset();
+ if (masm_.oom())
+ return false;
+
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets))
+ return false;
+ if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin))
+ return false;
+
+ // Record calls' far jumps in metadata since they must be
+ // repatched at runtime when profiling mode is toggled.
+ if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcIndex()))
+ return false;
+ }
+
+ masm_.patchCall(callerOffset, p->value());
+ break;
+ }
+ case CallSiteDesc::TrapExit: {
+ if (maybeTrapExits) {
+ uint32_t calleeOffset = (*maybeTrapExits)[cs.trap()].begin;
+ MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
+
+ if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
+ masm_.patchCall(callerOffset, calleeOffset);
+ break;
+ }
+ }
+
+ if (!existingTrapFarJumps[cs.trap()]) {
+ Offsets offsets;
+ offsets.begin = masm_.currentOffset();
+ masm_.append(TrapFarJump(cs.trap(), masm_.farJumpWithPatch()));
+ offsets.end = masm_.currentOffset();
+ if (masm_.oom())
+ return false;
+
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets))
+ return false;
+ existingTrapFarJumps[cs.trap()] = Some(offsets.begin);
+ }
+
+ masm_.patchCall(callerOffset, *existingTrapFarJumps[cs.trap()]);
+ break;
+ }
+ }
+ }
+
+ return true;
+}
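+
+// Worked example of the range check above, with illustrative numbers: on ARM the
+// branch immediate reaches roughly +/-32MB, so JumpRange() is bounded by that. A
+// call site whose return address is at module offset 0x01000000 with a callee
+// entry at 0x23000000 has a displacement far beyond that range, so the call is
+// instead patched to jump to a FarJumpIsland emitted here; later call sites
+// targeting the same callee reuse the island via existingCallFarJumps.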
+
+bool
+ModuleGenerator::patchFarJumps(const TrapExitOffsetArray& trapExits)
+{
+ MacroAssembler::AutoPrepareForPatching patching(masm_);
+
+ for (CallThunk& callThunk : metadata_->callThunks) {
+ uint32_t funcIndex = callThunk.u.funcIndex;
+ callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
+ CodeOffset farJump(callThunk.offset);
+ masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
+ }
+
+ for (const TrapFarJump& farJump : masm_.trapFarJumps())
+ masm_.patchFarJump(farJump.jump, trapExits[farJump.trap].begin);
+
+ return true;
+}
+
+bool
+ModuleGenerator::finishTask(IonCompileTask* task)
+{
+ const FuncBytes& func = task->func();
+ FuncCompileResults& results = task->results();
+
+ masm_.haltingAlign(CodeAlignment);
+
+ // Before merging in the new function's code, if calls in a prior function
+ // body might go out of range, insert far jumps to extend the range.
+ if ((masm_.size() - startOfUnpatchedCallsites_) + results.masm().size() > JumpRange()) {
+ startOfUnpatchedCallsites_ = masm_.size();
+ if (!patchCallSites())
+ return false;
+ }
+
+ // Offset the recorded FuncOffsets by the offset of the function in the
+ // whole module's code segment.
+ uint32_t offsetInWhole = masm_.size();
+ results.offsets().offsetBy(offsetInWhole);
+
+ // Add the CodeRange for this function.
+ uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
+ if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
+ return false;
+
+ MOZ_ASSERT(!funcIsCompiled(func.index()));
+ funcToCodeRange_[func.index()] = funcCodeRangeIndex;
+
+ // Merge the compiled results into the whole-module masm.
+ mozilla::DebugOnly<size_t> sizeBefore = masm_.size();
+ if (!masm_.asmMergeWith(results.masm()))
+ return false;
+ MOZ_ASSERT(masm_.size() == offsetInWhole + results.masm().size());
+
+ freeTasks_.infallibleAppend(task);
+ return true;
+}
+
+bool
+ModuleGenerator::finishFuncExports()
+{
+ // In addition to all the functions that were explicitly exported, any
+ // element of an exported table is also exported.
+
+ for (ElemSegment& elems : elemSegments_) {
+ if (shared_->tables[elems.tableIndex].external) {
+ for (uint32_t funcIndex : elems.elemFuncIndices) {
+ if (!exportedFuncs_.put(funcIndex))
+ return false;
+ }
+ }
+ }
+
+ // ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
+    // FuncExportVector stored in Metadata needs to be sorted by
+ // function index to allow O(log(n)) lookup at runtime.
+
+ Uint32Vector sorted;
+ if (!sorted.reserve(exportedFuncs_.count()))
+ return false;
+
+ for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
+ sorted.infallibleAppend(r.front());
+
+ std::sort(sorted.begin(), sorted.end());
+
+ MOZ_ASSERT(metadata_->funcExports.empty());
+ if (!metadata_->funcExports.reserve(sorted.length()))
+ return false;
+
+ for (uint32_t funcIndex : sorted) {
+ Sig sig;
+ if (!sig.clone(funcSig(funcIndex)))
+ return false;
+
+ uint32_t codeRangeIndex = funcToCodeRange_[funcIndex];
+ metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex);
+ }
+
+ return true;
+}
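+
+// Because the resulting funcExports vector is sorted by function index, a
+// runtime lookup can be a plain binary search. A sketch, assuming FuncExport
+// exposes a funcIndex() accessor:
+//
+//   static const FuncExport*
+//   LookupFuncExport(const FuncExportVector& exports, uint32_t funcIndex)
+//   {
+//       size_t lo = 0, hi = exports.length();
+//       while (lo < hi) {
+//           size_t mid = lo + (hi - lo) / 2;
+//           if (exports[mid].funcIndex() < funcIndex)
+//               lo = mid + 1;
+//           else
+//               hi = mid;
+//       }
+//       return lo < exports.length() && exports[lo].funcIndex() == funcIndex
+//              ? &exports[lo]
+//              : nullptr;
+//   }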
+
+typedef Vector<Offsets, 0, SystemAllocPolicy> OffsetVector;
+typedef Vector<ProfilingOffsets, 0, SystemAllocPolicy> ProfilingOffsetVector;
+
+bool
+ModuleGenerator::finishCodegen()
+{
+ masm_.haltingAlign(CodeAlignment);
+ uint32_t offsetInWhole = masm_.size();
+
+ uint32_t numFuncExports = metadata_->funcExports.length();
+ MOZ_ASSERT(numFuncExports == exportedFuncs_.count());
+
+ // Generate stubs in a separate MacroAssembler since, otherwise, for modules
+ // larger than the JumpImmediateRange, even local uses of Label will fail
+ // due to the large absolute offsets temporarily stored by Label::bind().
+
+ OffsetVector entries;
+ ProfilingOffsetVector interpExits;
+ ProfilingOffsetVector jitExits;
+ TrapExitOffsetArray trapExits;
+ Offsets outOfBoundsExit;
+ Offsets unalignedAccessExit;
+ Offsets interruptExit;
+ Offsets throwStub;
+
+ {
+ TempAllocator alloc(&lifo_);
+ MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
+ Label throwLabel;
+
+ if (!entries.resize(numFuncExports))
+ return false;
+ for (uint32_t i = 0; i < numFuncExports; i++)
+ entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);
+
+ if (!interpExits.resize(numFuncImports()))
+ return false;
+ if (!jitExits.resize(numFuncImports()))
+ return false;
+ for (uint32_t i = 0; i < numFuncImports(); i++) {
+ interpExits[i] = GenerateImportInterpExit(masm, metadata_->funcImports[i], i, &throwLabel);
+ jitExits[i] = GenerateImportJitExit(masm, metadata_->funcImports[i], &throwLabel);
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit))
+ trapExits[trap] = GenerateTrapExit(masm, trap, &throwLabel);
+
+ outOfBoundsExit = GenerateOutOfBoundsExit(masm, &throwLabel);
+ unalignedAccessExit = GenerateUnalignedExit(masm, &throwLabel);
+ interruptExit = GenerateInterruptExit(masm, &throwLabel);
+ throwStub = GenerateThrowStub(masm, &throwLabel);
+
+ if (masm.oom() || !masm_.asmMergeWith(masm))
+ return false;
+ }
+
+ // Adjust each of the resulting Offsets (to account for being merged into
+ // masm_) and then create code ranges for all the stubs.
+
+ for (uint32_t i = 0; i < numFuncExports; i++) {
+ entries[i].offsetBy(offsetInWhole);
+ metadata_->funcExports[i].initEntryOffset(entries[i].begin);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
+ return false;
+ }
+
+ for (uint32_t i = 0; i < numFuncImports(); i++) {
+ interpExits[i].offsetBy(offsetInWhole);
+ metadata_->funcImports[i].initInterpExitOffset(interpExits[i].begin);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportInterpExit, interpExits[i]))
+ return false;
+
+ jitExits[i].offsetBy(offsetInWhole);
+ metadata_->funcImports[i].initJitExitOffset(jitExits[i].begin);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::ImportJitExit, jitExits[i]))
+ return false;
+ }
+
+ for (Trap trap : MakeEnumeratedRange(Trap::Limit)) {
+ trapExits[trap].offsetBy(offsetInWhole);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::TrapExit, trapExits[trap]))
+ return false;
+ }
+
+ outOfBoundsExit.offsetBy(offsetInWhole);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, outOfBoundsExit))
+ return false;
+
+ unalignedAccessExit.offsetBy(offsetInWhole);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, unalignedAccessExit))
+ return false;
+
+ interruptExit.offsetBy(offsetInWhole);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, interruptExit))
+ return false;
+
+ throwStub.offsetBy(offsetInWhole);
+ if (!metadata_->codeRanges.emplaceBack(CodeRange::Inline, throwStub))
+ return false;
+
+ // Fill in LinkData with the offsets of these stubs.
+
+ linkData_.outOfBoundsOffset = outOfBoundsExit.begin;
+ linkData_.interruptOffset = interruptExit.begin;
+
+ // Now that all other code has been emitted, patch all remaining callsites
+ // then far jumps. Patching callsites can generate far jumps so there is an
+ // ordering dependency.
+
+ if (!patchCallSites(&trapExits))
+ return false;
+
+ if (!patchFarJumps(trapExits))
+ return false;
+
+ // Code-generation is complete!
+
+ masm_.finish();
+ return !masm_.oom();
+}
+
+bool
+ModuleGenerator::finishLinkData(Bytes& code)
+{
+ // Inflate the global bytes up to page size so that the total bytes are a
+    // multiple of the page size (as required by the allocator functions).
+ linkData_.globalDataLength = AlignBytes(linkData_.globalDataLength, gc::SystemPageSize());
+
+ // Add links to absolute addresses identified symbolically.
+ for (size_t i = 0; i < masm_.numSymbolicAccesses(); i++) {
+ SymbolicAccess src = masm_.symbolicAccess(i);
+ if (!linkData_.symbolicLinks[src.target].append(src.patchAt.offset()))
+ return false;
+ }
+
+ // Relative link metadata: absolute addresses that refer to another point within
+ // the asm.js module.
+
+ // CodeLabels are used for switch cases and loads from floating-point /
+ // SIMD values in the constant pool.
+ for (size_t i = 0; i < masm_.numCodeLabels(); i++) {
+ CodeLabel cl = masm_.codeLabel(i);
+ LinkData::InternalLink inLink(LinkData::InternalLink::CodeLabel);
+ inLink.patchAtOffset = masm_.labelToPatchOffset(*cl.patchAt());
+ inLink.targetOffset = cl.target()->offset();
+ if (!linkData_.internalLinks.append(inLink))
+ return false;
+ }
+
+#if defined(JS_CODEGEN_X86)
+ // Global data accesses in x86 need to be patched with the absolute
+ // address of the global. Globals are allocated sequentially after the
+ // code section so we can just use an InternalLink.
+ for (GlobalAccess a : masm_.globalAccesses()) {
+ LinkData::InternalLink inLink(LinkData::InternalLink::RawPointer);
+ inLink.patchAtOffset = masm_.labelToPatchOffset(a.patchAt);
+ inLink.targetOffset = code.length() + a.globalDataOffset;
+ if (!linkData_.internalLinks.append(inLink))
+ return false;
+ }
+#elif defined(JS_CODEGEN_X64)
+ // Global data accesses on x64 use rip-relative addressing and thus we can
+ // patch here, now that we know the final codeLength.
+ for (GlobalAccess a : masm_.globalAccesses()) {
+ void* from = code.begin() + a.patchAt.offset();
+ void* to = code.end() + a.globalDataOffset;
+ X86Encoding::SetRel32(from, to);
+ }
+#else
+ // Global access is performed using the GlobalReg and requires no patching.
+ MOZ_ASSERT(masm_.globalAccesses().length() == 0);
+#endif
+
+ return true;
+}
+
+bool
+ModuleGenerator::addFuncImport(const Sig& sig, uint32_t globalDataOffset)
+{
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ Sig copy;
+ if (!copy.clone(sig))
+ return false;
+
+ return metadata_->funcImports.emplaceBack(Move(copy), globalDataOffset);
+}
+
+bool
+ModuleGenerator::allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOffset)
+{
+ CheckedInt<uint32_t> newGlobalDataLength(linkData_.globalDataLength);
+
+ newGlobalDataLength += ComputeByteAlignment(newGlobalDataLength.value(), align);
+ if (!newGlobalDataLength.isValid())
+ return false;
+
+ *globalDataOffset = newGlobalDataLength.value();
+ newGlobalDataLength += bytes;
+
+ if (!newGlobalDataLength.isValid())
+ return false;
+
+ linkData_.globalDataLength = newGlobalDataLength.value();
+ return true;
+}
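+
+// Worked example of the arithmetic above: with globalDataLength == 13, a request
+// for 8 bytes at 8-byte alignment first adds ComputeByteAlignment(13, 8) == 3
+// bytes of padding, yields *globalDataOffset == 16, and then bumps
+// globalDataLength to 24. Both additions are overflow-checked via CheckedInt.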
+
+bool
+ModuleGenerator::allocateGlobal(GlobalDesc* global)
+{
+ MOZ_ASSERT(!startedFuncDefs_);
+ unsigned width = 0;
+ switch (global->type()) {
+ case ValType::I32:
+ case ValType::F32:
+ width = 4;
+ break;
+ case ValType::I64:
+ case ValType::F64:
+ width = 8;
+ break;
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ width = 16;
+ break;
+ }
+
+ uint32_t offset;
+ if (!allocateGlobalBytes(width, width, &offset))
+ return false;
+
+ global->setOffset(offset);
+ return true;
+}
+
+bool
+ModuleGenerator::addGlobal(ValType type, bool isConst, uint32_t* index)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(!startedFuncDefs_);
+
+ *index = shared_->globals.length();
+ GlobalDesc global(type, !isConst, *index);
+ if (!allocateGlobal(&global))
+ return false;
+
+ return shared_->globals.append(global);
+}
+
+void
+ModuleGenerator::initSig(uint32_t sigIndex, Sig&& sig)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(sigIndex == numSigs_);
+ numSigs_++;
+
+ MOZ_ASSERT(shared_->sigs[sigIndex] == Sig());
+ shared_->sigs[sigIndex] = Move(sig);
+}
+
+const SigWithId&
+ModuleGenerator::sig(uint32_t index) const
+{
+ MOZ_ASSERT(index < numSigs_);
+ return shared_->sigs[index];
+}
+
+void
+ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
+
+ shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
+}
+
+void
+ModuleGenerator::initMemoryUsage(MemoryUsage memoryUsage)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(shared_->memoryUsage == MemoryUsage::None);
+
+ shared_->memoryUsage = memoryUsage;
+}
+
+void
+ModuleGenerator::bumpMinMemoryLength(uint32_t newMinMemoryLength)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(newMinMemoryLength >= shared_->minMemoryLength);
+
+ shared_->minMemoryLength = newMinMemoryLength;
+}
+
+bool
+ModuleGenerator::initImport(uint32_t funcIndex, uint32_t sigIndex)
+{
+ MOZ_ASSERT(isAsmJS());
+
+ MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
+ shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
+
+ uint32_t globalDataOffset;
+ if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*), &globalDataOffset))
+ return false;
+
+ MOZ_ASSERT(!shared_->funcImportGlobalDataOffsets[funcIndex]);
+ shared_->funcImportGlobalDataOffsets[funcIndex] = globalDataOffset;
+
+ MOZ_ASSERT(funcIndex == metadata_->funcImports.length());
+ return addFuncImport(sig(sigIndex), globalDataOffset);
+}
+
+uint32_t
+ModuleGenerator::numFuncImports() const
+{
+ // Until all functions have been validated, asm.js doesn't know the total
+ // number of imports.
+ MOZ_ASSERT_IF(isAsmJS(), finishedFuncDefs_);
+ return metadata_->funcImports.length();
+}
+
+uint32_t
+ModuleGenerator::numFuncDefs() const
+{
+ // asm.js overallocates the length of funcSigs and in general does not know
+ // the number of function definitions until it's done compiling.
+ MOZ_ASSERT(!isAsmJS());
+ return shared_->funcSigs.length() - numFuncImports();
+}
+
+uint32_t
+ModuleGenerator::numFuncs() const
+{
+ // asm.js pre-reserves a bunch of function index space which is
+ // incrementally filled in during function-body validation. Thus, there are
+ // a few possible interpretations of numFuncs() (total index space size vs.
+    // exact number of imports/definitions encountered so far), so to keep things
+    // simple we only define this quantity for wasm.
+ MOZ_ASSERT(!isAsmJS());
+ return shared_->funcSigs.length();
+}
+
+const SigWithId&
+ModuleGenerator::funcSig(uint32_t funcIndex) const
+{
+ MOZ_ASSERT(shared_->funcSigs[funcIndex]);
+ return *shared_->funcSigs[funcIndex];
+}
+
+bool
+ModuleGenerator::addFuncExport(UniqueChars fieldName, uint32_t funcIndex)
+{
+ return exportedFuncs_.put(funcIndex) &&
+ exports_.emplaceBack(Move(fieldName), funcIndex, DefinitionKind::Function);
+}
+
+bool
+ModuleGenerator::addTableExport(UniqueChars fieldName)
+{
+ MOZ_ASSERT(!startedFuncDefs_);
+ MOZ_ASSERT(shared_->tables.length() == 1);
+ shared_->tables[0].external = true;
+ return exports_.emplaceBack(Move(fieldName), DefinitionKind::Table);
+}
+
+bool
+ModuleGenerator::addMemoryExport(UniqueChars fieldName)
+{
+ return exports_.emplaceBack(Move(fieldName), DefinitionKind::Memory);
+}
+
+bool
+ModuleGenerator::addGlobalExport(UniqueChars fieldName, uint32_t globalIndex)
+{
+ return exports_.emplaceBack(Move(fieldName), globalIndex, DefinitionKind::Global);
+}
+
+bool
+ModuleGenerator::setStartFunction(uint32_t funcIndex)
+{
+ metadata_->startFuncIndex.emplace(funcIndex);
+ return exportedFuncs_.put(funcIndex);
+}
+
+bool
+ModuleGenerator::addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices)
+{
+ MOZ_ASSERT(!isAsmJS());
+ MOZ_ASSERT(!startedFuncDefs_);
+ MOZ_ASSERT(shared_->tables.length() == 1);
+
+ for (uint32_t funcIndex : elemFuncIndices) {
+ if (funcIndex < numFuncImports()) {
+ shared_->tables[0].external = true;
+ break;
+ }
+ }
+
+ return elemSegments_.emplaceBack(0, offset, Move(elemFuncIndices));
+}
+
+void
+ModuleGenerator::setDataSegments(DataSegmentVector&& segments)
+{
+ MOZ_ASSERT(dataSegments_.empty());
+ dataSegments_ = Move(segments);
+}
+
+bool
+ModuleGenerator::startFuncDefs()
+{
+ MOZ_ASSERT(!startedFuncDefs_);
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ // The wasmCompilationInProgress atomic ensures that there is only one
+ // parallel compilation in progress at a time. In the special case of
+ // asm.js, where the ModuleGenerator itself can be on a helper thread, this
+ // avoids the possibility of deadlock since at most 1 helper thread will be
+ // blocking on other helper threads and there are always >1 helper threads.
+ // With wasm, this restriction could be relaxed by moving the worklist state
+ // out of HelperThreadState since each independent compilation needs its own
+ // worklist pair. Alternatively, the deadlock could be avoided by having the
+ // ModuleGenerator thread make progress (on compile tasks) instead of
+ // blocking.
+
+ GlobalHelperThreadState& threads = HelperThreadState();
+ MOZ_ASSERT(threads.threadCount > 1);
+
+ uint32_t numTasks;
+ if (CanUseExtraThreads() && threads.wasmCompilationInProgress.compareExchange(false, true)) {
+#ifdef DEBUG
+ {
+ AutoLockHelperThreadState lock;
+ MOZ_ASSERT(!HelperThreadState().wasmFailed(lock));
+ MOZ_ASSERT(HelperThreadState().wasmWorklist(lock).empty());
+ MOZ_ASSERT(HelperThreadState().wasmFinishedList(lock).empty());
+ }
+#endif
+ parallel_ = true;
+ numTasks = 2 * threads.maxWasmCompilationThreads();
+ } else {
+ numTasks = 1;
+ }
+
+ if (!tasks_.initCapacity(numTasks))
+ return false;
+ for (size_t i = 0; i < numTasks; i++)
+ tasks_.infallibleEmplaceBack(*shared_, COMPILATION_LIFO_DEFAULT_CHUNK_SIZE);
+
+ if (!freeTasks_.reserve(numTasks))
+ return false;
+ for (size_t i = 0; i < numTasks; i++)
+ freeTasks_.infallibleAppend(&tasks_[i]);
+
+ startedFuncDefs_ = true;
+ MOZ_ASSERT(!finishedFuncDefs_);
+ return true;
+}
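+
+// For example, with 8 wasm compilation threads available, 16 IonCompileTasks are
+// allocated up front and handed out through freeTasks_ as function bodies
+// arrive; in the serial fallback a single task is reused for every function.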
+
+bool
+ModuleGenerator::startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg)
+{
+ MOZ_ASSERT(startedFuncDefs_);
+ MOZ_ASSERT(!activeFuncDef_);
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ if (freeTasks_.empty() && !finishOutstandingTask())
+ return false;
+
+ IonCompileTask* task = freeTasks_.popCopy();
+
+ task->reset(&fg->bytes_);
+ fg->bytes_.clear();
+ fg->lineOrBytecode_ = lineOrBytecode;
+ fg->m_ = this;
+ fg->task_ = task;
+ activeFuncDef_ = fg;
+ return true;
+}
+
+bool
+ModuleGenerator::finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg)
+{
+ MOZ_ASSERT(activeFuncDef_ == fg);
+
+ auto func = js::MakeUnique<FuncBytes>(Move(fg->bytes_),
+ funcIndex,
+ funcSig(funcIndex),
+ fg->lineOrBytecode_,
+ Move(fg->callSiteLineNums_));
+ if (!func)
+ return false;
+
+ auto mode = alwaysBaseline_ && BaselineCanCompile(fg)
+ ? IonCompileTask::CompileMode::Baseline
+ : IonCompileTask::CompileMode::Ion;
+
+ fg->task_->init(Move(func), mode);
+
+ if (parallel_) {
+ if (!StartOffThreadWasmCompile(fg->task_))
+ return false;
+ outstanding_++;
+ } else {
+ if (!CompileFunction(fg->task_))
+ return false;
+ if (!finishTask(fg->task_))
+ return false;
+ }
+
+ fg->m_ = nullptr;
+ fg->task_ = nullptr;
+ activeFuncDef_ = nullptr;
+ numFinishedFuncDefs_++;
+ return true;
+}
+
+bool
+ModuleGenerator::finishFuncDefs()
+{
+ MOZ_ASSERT(startedFuncDefs_);
+ MOZ_ASSERT(!activeFuncDef_);
+ MOZ_ASSERT(!finishedFuncDefs_);
+
+ while (outstanding_ > 0) {
+ if (!finishOutstandingTask())
+ return false;
+ }
+
+ linkData_.functionCodeLength = masm_.size();
+ finishedFuncDefs_ = true;
+
+ // Generate wrapper functions for every import. These wrappers turn imports
+ // into plain functions so they can be put into tables and re-exported.
+ // asm.js cannot do either and so no wrappers are generated.
+
+ if (!isAsmJS()) {
+ for (size_t funcIndex = 0; funcIndex < numFuncImports(); funcIndex++) {
+ const FuncImport& funcImport = metadata_->funcImports[funcIndex];
+ const SigWithId& sig = funcSig(funcIndex);
+
+ FuncOffsets offsets = GenerateImportFunction(masm_, funcImport, sig.id);
+ if (masm_.oom())
+ return false;
+
+ uint32_t codeRangeIndex = metadata_->codeRanges.length();
+ if (!metadata_->codeRanges.emplaceBack(funcIndex, /* bytecodeOffset = */ 0, offsets))
+ return false;
+
+ MOZ_ASSERT(!funcIsCompiled(funcIndex));
+ funcToCodeRange_[funcIndex] = codeRangeIndex;
+ }
+ }
+
+ // All function indices should have an associated code range at this point
+ // (except in asm.js, which doesn't have import wrapper functions).
+
+#ifdef DEBUG
+ if (isAsmJS()) {
+ MOZ_ASSERT(numFuncImports() < AsmJSFirstDefFuncIndex);
+ for (uint32_t i = 0; i < AsmJSFirstDefFuncIndex; i++)
+ MOZ_ASSERT(funcToCodeRange_[i] == BAD_CODE_RANGE);
+ for (uint32_t i = AsmJSFirstDefFuncIndex; i < numFinishedFuncDefs_; i++)
+ MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
+ } else {
+ MOZ_ASSERT(numFinishedFuncDefs_ == numFuncDefs());
+ for (uint32_t i = 0; i < numFuncs(); i++)
+ MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
+ }
+#endif
+
+ // Complete element segments with the code range index of every element, now
+ // that all functions have been compiled.
+
+ for (ElemSegment& elems : elemSegments_) {
+ Uint32Vector& codeRangeIndices = elems.elemCodeRangeIndices;
+
+ MOZ_ASSERT(codeRangeIndices.empty());
+ if (!codeRangeIndices.reserve(elems.elemFuncIndices.length()))
+ return false;
+
+ for (uint32_t funcIndex : elems.elemFuncIndices)
+ codeRangeIndices.infallibleAppend(funcToCodeRange_[funcIndex]);
+ }
+
+ return true;
+}
+
+void
+ModuleGenerator::setFuncNames(NameInBytecodeVector&& funcNames)
+{
+ MOZ_ASSERT(metadata_->funcNames.empty());
+ metadata_->funcNames = Move(funcNames);
+}
+
+bool
+ModuleGenerator::initSigTableLength(uint32_t sigIndex, uint32_t length)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(length != 0);
+ MOZ_ASSERT(length <= MaxTableElems);
+
+ MOZ_ASSERT(shared_->asmJSSigToTableIndex[sigIndex] == 0);
+ shared_->asmJSSigToTableIndex[sigIndex] = numTables_;
+
+ TableDesc& table = shared_->tables[numTables_++];
+ table.kind = TableKind::TypedFunction;
+ table.limits.initial = length;
+ table.limits.maximum = Some(length);
+ return allocateGlobalBytes(sizeof(TableTls), sizeof(void*), &table.globalDataOffset);
+}
+
+bool
+ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices)
+{
+ MOZ_ASSERT(isAsmJS());
+ MOZ_ASSERT(finishedFuncDefs_);
+
+ uint32_t tableIndex = shared_->asmJSSigToTableIndex[sigIndex];
+ MOZ_ASSERT(shared_->tables[tableIndex].limits.initial == elemFuncIndices.length());
+
+ Uint32Vector codeRangeIndices;
+ if (!codeRangeIndices.resize(elemFuncIndices.length()))
+ return false;
+ for (size_t i = 0; i < elemFuncIndices.length(); i++)
+ codeRangeIndices[i] = funcToCodeRange_[elemFuncIndices[i]];
+
+ InitExpr offset(Val(uint32_t(0)));
+ if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncIndices)))
+ return false;
+
+ elemSegments_.back().elemCodeRangeIndices = Move(codeRangeIndices);
+ return true;
+}
+
+SharedModule
+ModuleGenerator::finish(const ShareableBytes& bytecode)
+{
+ MOZ_ASSERT(!activeFuncDef_);
+ MOZ_ASSERT(finishedFuncDefs_);
+
+ if (!finishFuncExports())
+ return nullptr;
+
+ if (!finishCodegen())
+ return nullptr;
+
+ // Round up the code size to page size since this is eventually required by
+ // the executable-code allocator and for setting memory protection.
+ uint32_t bytesNeeded = masm_.bytesNeeded();
+ uint32_t padding = ComputeByteAlignment(bytesNeeded, gc::SystemPageSize());
+
+ // Use initLengthUninitialized so there is no round-up allocation nor time
+ // wasted zeroing memory.
+ Bytes code;
+ if (!code.initLengthUninitialized(bytesNeeded + padding))
+ return nullptr;
+
+ // Delay flushing of the icache until CodeSegment::create since there is
+ // more patching to do before this code becomes executable.
+ {
+ AutoFlushICache afc("ModuleGenerator::finish", /* inhibit = */ true);
+ masm_.executableCopy(code.begin());
+ }
+
+ // Zero the padding, since we used resizeUninitialized above.
+    // Zero the padding, since we used initLengthUninitialized above.
+
+ // Convert the CallSiteAndTargetVector (needed during generation) to a
+ // CallSiteVector (what is stored in the Module).
+ if (!metadata_->callSites.appendAll(masm_.callSites()))
+ return nullptr;
+
+ // The MacroAssembler has accumulated all the memory accesses during codegen.
+ metadata_->memoryAccesses = masm_.extractMemoryAccesses();
+ metadata_->memoryPatches = masm_.extractMemoryPatches();
+ metadata_->boundsChecks = masm_.extractBoundsChecks();
+
+ // Copy over data from the ModuleGeneratorData.
+ metadata_->memoryUsage = shared_->memoryUsage;
+ metadata_->minMemoryLength = shared_->minMemoryLength;
+ metadata_->maxMemoryLength = shared_->maxMemoryLength;
+ metadata_->tables = Move(shared_->tables);
+ metadata_->globals = Move(shared_->globals);
+
+ // These Vectors can get large and the excess capacity can be significant,
+ // so realloc them down to size.
+ metadata_->memoryAccesses.podResizeToFit();
+ metadata_->memoryPatches.podResizeToFit();
+ metadata_->boundsChecks.podResizeToFit();
+ metadata_->codeRanges.podResizeToFit();
+ metadata_->callSites.podResizeToFit();
+ metadata_->callThunks.podResizeToFit();
+
+ // For asm.js, the tables vector is over-allocated (to avoid resize during
+    // parallel compilation). Shrink it back down to fit.
+ if (isAsmJS() && !metadata_->tables.resize(numTables_))
+ return nullptr;
+
+ // Assert CodeRanges are sorted.
+#ifdef DEBUG
+ uint32_t lastEnd = 0;
+ for (const CodeRange& codeRange : metadata_->codeRanges) {
+ MOZ_ASSERT(codeRange.begin() >= lastEnd);
+ lastEnd = codeRange.end();
+ }
+#endif
+
+ if (!finishLinkData(code))
+ return nullptr;
+
+ return SharedModule(js_new<Module>(Move(assumptions_),
+ Move(code),
+ Move(linkData_),
+ Move(imports_),
+ Move(exports_),
+ Move(dataSegments_),
+ Move(elemSegments_),
+ *metadata_,
+ bytecode));
+}
diff --git a/js/src/wasm/WasmGenerator.h b/js/src/wasm/WasmGenerator.h
new file mode 100644
index 0000000000..d1badf6790
--- /dev/null
+++ b/js/src/wasm/WasmGenerator.h
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_generator_h
+#define wasm_generator_h
+
+#include "jit/MacroAssembler.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmModule.h"
+
+namespace js {
+namespace wasm {
+
+class FunctionGenerator;
+
+// The ModuleGeneratorData holds all the state shared between the
+// ModuleGenerator thread and background compile threads. The background
+// threads are given a read-only view of the ModuleGeneratorData and the
+// ModuleGenerator is careful to initialize, and never subsequently mutate,
+// any given datum before being read by a background thread. In particular,
+// once created, the Vectors are never resized.
+
+struct ModuleGeneratorData
+{
+ ModuleKind kind;
+ MemoryUsage memoryUsage;
+ mozilla::Atomic<uint32_t> minMemoryLength;
+ Maybe<uint32_t> maxMemoryLength;
+
+ SigWithIdVector sigs;
+ SigWithIdPtrVector funcSigs;
+ Uint32Vector funcImportGlobalDataOffsets;
+ GlobalDescVector globals;
+ TableDescVector tables;
+ Uint32Vector asmJSSigToTableIndex;
+
+ explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
+ : kind(kind),
+ memoryUsage(MemoryUsage::None),
+ minMemoryLength(0)
+ {}
+
+ bool isAsmJS() const {
+ return kind == ModuleKind::AsmJS;
+ }
+ bool funcIsImport(uint32_t funcIndex) const {
+ return funcIndex < funcImportGlobalDataOffsets.length();
+ }
+};
+
+typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
+
+// A ModuleGenerator encapsulates the creation of a wasm module. During the
+// lifetime of a ModuleGenerator, a sequence of FunctionGenerators are created
+// and destroyed to compile the individual function bodies. After generating all
+// functions, ModuleGenerator::finish() must be called to complete the
+// compilation and extract the resulting wasm module.
+
+class MOZ_STACK_CLASS ModuleGenerator
+{
+ typedef HashSet<uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy> Uint32Set;
+ typedef Vector<IonCompileTask, 0, SystemAllocPolicy> IonCompileTaskVector;
+ typedef Vector<IonCompileTask*, 0, SystemAllocPolicy> IonCompileTaskPtrVector;
+ typedef EnumeratedArray<Trap, Trap::Limit, ProfilingOffsets> TrapExitOffsetArray;
+
+ // Constant parameters
+ bool alwaysBaseline_;
+
+ // Data that is moved into the result of finish()
+ Assumptions assumptions_;
+ LinkData linkData_;
+ MutableMetadata metadata_;
+ ExportVector exports_;
+ ImportVector imports_;
+ DataSegmentVector dataSegments_;
+ ElemSegmentVector elemSegments_;
+
+ // Data scoped to the ModuleGenerator's lifetime
+ UniqueModuleGeneratorData shared_;
+ uint32_t numSigs_;
+ uint32_t numTables_;
+ LifoAlloc lifo_;
+ jit::JitContext jcx_;
+ jit::TempAllocator masmAlloc_;
+ jit::MacroAssembler masm_;
+ Uint32Vector funcToCodeRange_;
+ Uint32Set exportedFuncs_;
+ uint32_t lastPatchedCallsite_;
+ uint32_t startOfUnpatchedCallsites_;
+
+ // Parallel compilation
+ bool parallel_;
+ uint32_t outstanding_;
+ IonCompileTaskVector tasks_;
+ IonCompileTaskPtrVector freeTasks_;
+
+ // Assertions
+ DebugOnly<FunctionGenerator*> activeFuncDef_;
+ DebugOnly<bool> startedFuncDefs_;
+ DebugOnly<bool> finishedFuncDefs_;
+ DebugOnly<uint32_t> numFinishedFuncDefs_;
+
+ bool funcIsCompiled(uint32_t funcIndex) const;
+ const CodeRange& funcCodeRange(uint32_t funcIndex) const;
+ MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
+ MOZ_MUST_USE bool patchFarJumps(const TrapExitOffsetArray& trapExits);
+ MOZ_MUST_USE bool finishTask(IonCompileTask* task);
+ MOZ_MUST_USE bool finishOutstandingTask();
+ MOZ_MUST_USE bool finishFuncExports();
+ MOZ_MUST_USE bool finishCodegen();
+ MOZ_MUST_USE bool finishLinkData(Bytes& code);
+ MOZ_MUST_USE bool addFuncImport(const Sig& sig, uint32_t globalDataOffset);
+ MOZ_MUST_USE bool allocateGlobalBytes(uint32_t bytes, uint32_t align, uint32_t* globalDataOff);
+ MOZ_MUST_USE bool allocateGlobal(GlobalDesc* global);
+
+ public:
+ explicit ModuleGenerator(ImportVector&& imports);
+ ~ModuleGenerator();
+
+ MOZ_MUST_USE bool init(UniqueModuleGeneratorData shared, const CompileArgs& args,
+ Metadata* maybeAsmJSMetadata = nullptr);
+
+ bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
+ jit::MacroAssembler& masm() { return masm_; }
+
+ // Memory:
+ bool usesMemory() const { return UsesMemory(shared_->memoryUsage); }
+ uint32_t minMemoryLength() const { return shared_->minMemoryLength; }
+
+ // Tables:
+ uint32_t numTables() const { return numTables_; }
+ const TableDescVector& tables() const { return shared_->tables; }
+
+ // Signatures:
+ uint32_t numSigs() const { return numSigs_; }
+ const SigWithId& sig(uint32_t sigIndex) const;
+ const SigWithId& funcSig(uint32_t funcIndex) const;
+
+ // Globals:
+ const GlobalDescVector& globals() const { return shared_->globals; }
+
+    // Function declarations:
+ uint32_t numFuncImports() const;
+ uint32_t numFuncDefs() const;
+ uint32_t numFuncs() const;
+
+ // Exports:
+ MOZ_MUST_USE bool addFuncExport(UniqueChars fieldName, uint32_t funcIndex);
+ MOZ_MUST_USE bool addTableExport(UniqueChars fieldName);
+ MOZ_MUST_USE bool addMemoryExport(UniqueChars fieldName);
+ MOZ_MUST_USE bool addGlobalExport(UniqueChars fieldName, uint32_t globalIndex);
+
+ // Function definitions:
+ MOZ_MUST_USE bool startFuncDefs();
+ MOZ_MUST_USE bool startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg);
+ MOZ_MUST_USE bool finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg);
+ MOZ_MUST_USE bool finishFuncDefs();
+
+ // Start function:
+ bool setStartFunction(uint32_t funcIndex);
+
+ // Segments:
+ void setDataSegments(DataSegmentVector&& segments);
+ MOZ_MUST_USE bool addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices);
+
+ // Function names:
+ void setFuncNames(NameInBytecodeVector&& funcNames);
+
+ // asm.js lazy initialization:
+ void initSig(uint32_t sigIndex, Sig&& sig);
+ void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
+ MOZ_MUST_USE bool initImport(uint32_t funcIndex, uint32_t sigIndex);
+ MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t length);
+ MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
+ void initMemoryUsage(MemoryUsage memoryUsage);
+ void bumpMinMemoryLength(uint32_t newMinMemoryLength);
+ MOZ_MUST_USE bool addGlobal(ValType type, bool isConst, uint32_t* index);
+
+ // Finish compilation, provided the list of imports and source bytecode.
+    // Both of these Vectors may be empty (e.g., because asm.js handles imports
+    // and source differently).
+ SharedModule finish(const ShareableBytes& bytecode);
+};
+
+// A FunctionGenerator encapsulates the generation of a single function body.
+// ModuleGenerator::startFuncDef must be called after construction and before
+// doing anything else. After the body is complete, ModuleGenerator::finishFuncDef
+// must be called before the FunctionGenerator is destroyed and the next function is
+// started.
+
+class MOZ_STACK_CLASS FunctionGenerator
+{
+ friend class ModuleGenerator;
+
+ ModuleGenerator* m_;
+ IonCompileTask* task_;
+ bool usesSimd_;
+ bool usesAtomics_;
+
+ // Data created during function generation, then handed over to the
+    // FuncBytes in ModuleGenerator::finishFuncDef().
+ Bytes bytes_;
+ Uint32Vector callSiteLineNums_;
+
+ uint32_t lineOrBytecode_;
+
+ public:
+ FunctionGenerator()
+ : m_(nullptr), task_(nullptr), usesSimd_(false), usesAtomics_(false), lineOrBytecode_(0)
+ {}
+
+ bool usesSimd() const {
+ return usesSimd_;
+ }
+ void setUsesSimd() {
+ usesSimd_ = true;
+ }
+
+ bool usesAtomics() const {
+ return usesAtomics_;
+ }
+ void setUsesAtomics() {
+ usesAtomics_ = true;
+ }
+
+ Bytes& bytes() {
+ return bytes_;
+ }
+ MOZ_MUST_USE bool addCallSiteLineNum(uint32_t lineno) {
+ return callSiteLineNums_.append(lineno);
+ }
+};
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_generator_h
diff --git a/js/src/wasm/WasmInstance.cpp b/js/src/wasm/WasmInstance.cpp
new file mode 100644
index 0000000000..88af4f0db6
--- /dev/null
+++ b/js/src/wasm/WasmInstance.cpp
@@ -0,0 +1,849 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmInstance.h"
+
+#include "jit/BaselineJIT.h"
+#include "jit/JitCommon.h"
+#include "wasm/WasmModule.h"
+
+#include "jsobjinlines.h"
+
+#include "vm/ArrayBufferObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::BinarySearch;
+using mozilla::BitwiseCast;
+using mozilla::IsNaN;
+using mozilla::Swap;
+
+class SigIdSet
+{
+ typedef HashMap<const Sig*, uint32_t, SigHashPolicy, SystemAllocPolicy> Map;
+ Map map_;
+
+ public:
+ ~SigIdSet() {
+ MOZ_ASSERT_IF(!JSRuntime::hasLiveRuntimes(), !map_.initialized() || map_.empty());
+ }
+
+ bool ensureInitialized(JSContext* cx) {
+ if (!map_.initialized() && !map_.init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+ }
+
+ bool allocateSigId(JSContext* cx, const Sig& sig, const void** sigId) {
+ Map::AddPtr p = map_.lookupForAdd(sig);
+ if (p) {
+ MOZ_ASSERT(p->value() > 0);
+ p->value()++;
+ *sigId = p->key();
+ return true;
+ }
+
+ UniquePtr<Sig> clone = MakeUnique<Sig>();
+ if (!clone || !clone->clone(sig) || !map_.add(p, clone.get(), 1)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ *sigId = clone.release();
+ MOZ_ASSERT(!(uintptr_t(*sigId) & SigIdDesc::ImmediateBit));
+ return true;
+ }
+
+ void deallocateSigId(const Sig& sig, const void* sigId) {
+ Map::Ptr p = map_.lookup(sig);
+ MOZ_RELEASE_ASSERT(p && p->key() == sigId && p->value() > 0);
+
+ p->value()--;
+ if (!p->value()) {
+ js_delete(p->key());
+ map_.remove(p);
+ }
+ }
+};
+
+ExclusiveData<SigIdSet>* sigIdSet = nullptr;
+
+bool
+js::wasm::InitInstanceStaticData()
+{
+ MOZ_ASSERT(!sigIdSet);
+ sigIdSet = js_new<ExclusiveData<SigIdSet>>(mutexid::WasmSigIdSet);
+ return sigIdSet != nullptr;
+}
+
+void
+js::wasm::ShutDownInstanceStaticData()
+{
+ MOZ_ASSERT(sigIdSet);
+ js_delete(sigIdSet);
+ sigIdSet = nullptr;
+}
+
+const void**
+Instance::addressOfSigId(const SigIdDesc& sigId) const
+{
+ MOZ_ASSERT(sigId.globalDataOffset() >= InitialGlobalDataBytes);
+ return (const void**)(codeSegment().globalData() + sigId.globalDataOffset());
+}
+
+FuncImportTls&
+Instance::funcImportTls(const FuncImport& fi)
+{
+ MOZ_ASSERT(fi.tlsDataOffset() >= InitialGlobalDataBytes);
+ return *(FuncImportTls*)(codeSegment().globalData() + fi.tlsDataOffset());
+}
+
+TableTls&
+Instance::tableTls(const TableDesc& td) const
+{
+ MOZ_ASSERT(td.globalDataOffset >= InitialGlobalDataBytes);
+ return *(TableTls*)(codeSegment().globalData() + td.globalDataOffset);
+}
+
+bool
+Instance::callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
+ MutableHandleValue rval)
+{
+ const FuncImport& fi = metadata().funcImports[funcImportIndex];
+
+ InvokeArgs args(cx);
+ if (!args.init(cx, argc))
+ return false;
+
+ bool hasI64Arg = false;
+ MOZ_ASSERT(fi.sig().args().length() == argc);
+ for (size_t i = 0; i < argc; i++) {
+ switch (fi.sig().args()[i]) {
+ case ValType::I32:
+ args[i].set(Int32Value(*(int32_t*)&argv[i]));
+ break;
+ case ValType::F32:
+ args[i].set(JS::CanonicalizedDoubleValue(*(float*)&argv[i]));
+ break;
+ case ValType::F64:
+ args[i].set(JS::CanonicalizedDoubleValue(*(double*)&argv[i]));
+ break;
+ case ValType::I64: {
+ if (!JitOptions.wasmTestMode) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64);
+ return false;
+ }
+ RootedObject obj(cx, CreateI64Object(cx, *(int64_t*)&argv[i]));
+ if (!obj)
+ return false;
+ args[i].set(ObjectValue(*obj));
+ hasI64Arg = true;
+ break;
+ }
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ MOZ_CRASH("unhandled type in callImport");
+ }
+ }
+
+ FuncImportTls& import = funcImportTls(fi);
+ RootedFunction importFun(cx, &import.obj->as<JSFunction>());
+ RootedValue fval(cx, ObjectValue(*import.obj));
+ RootedValue thisv(cx, UndefinedValue());
+ if (!Call(cx, fval, thisv, args, rval))
+ return false;
+
+ // Throw an error if returning i64 and not in test mode.
+ if (!JitOptions.wasmTestMode && fi.sig().ret() == ExprType::I64) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64);
+ return false;
+ }
+
+ // Don't try to optimize if the function has at least one i64 arg or if
+ // it returns an int64. GenerateJitExit relies on this, as does the
+ // type inference code below in this function.
+ if (hasI64Arg || fi.sig().ret() == ExprType::I64)
+ return true;
+
+ // The import may already have become optimized.
+ void* jitExitCode = codeBase() + fi.jitExitCodeOffset();
+ if (import.code == jitExitCode)
+ return true;
+
+ // Test if the function is JIT compiled.
+ if (!importFun->hasScript())
+ return true;
+
+ JSScript* script = importFun->nonLazyScript();
+ if (!script->hasBaselineScript()) {
+ MOZ_ASSERT(!script->hasIonScript());
+ return true;
+ }
+
+ // Don't enable jit entry when we have a pending ion builder.
+ // Take the interpreter path which will link it and enable
+ // the fast path on the next call.
+ if (script->baselineScript()->hasPendingIonBuilder())
+ return true;
+
+ // Currently we can't rectify arguments. Therefore disable if argc is too low.
+ if (importFun->nargs() > fi.sig().args().length())
+ return true;
+
+ // Ensure the argument types are included in the argument TypeSets stored in
+ // the TypeScript. This is necessary for Ion, because the import will use
+ // the skip-arg-checks entry point.
+ //
+ // Note that the TypeScript is never discarded while the script has a
+ // BaselineScript, so if those checks hold now they must hold at least until
+ // the BaselineScript is discarded and when that happens the import is
+ // patched back.
+ if (!TypeScript::ThisTypes(script)->hasType(TypeSet::UndefinedType()))
+ return true;
+ for (uint32_t i = 0; i < importFun->nargs(); i++) {
+ TypeSet::Type type = TypeSet::UnknownType();
+ switch (fi.sig().args()[i]) {
+ case ValType::I32: type = TypeSet::Int32Type(); break;
+ case ValType::I64: MOZ_CRASH("can't happen because of above guard");
+ case ValType::F32: type = TypeSet::DoubleType(); break;
+ case ValType::F64: type = TypeSet::DoubleType(); break;
+ case ValType::I8x16: MOZ_CRASH("NYI");
+ case ValType::I16x8: MOZ_CRASH("NYI");
+ case ValType::I32x4: MOZ_CRASH("NYI");
+ case ValType::F32x4: MOZ_CRASH("NYI");
+ case ValType::B8x16: MOZ_CRASH("NYI");
+ case ValType::B16x8: MOZ_CRASH("NYI");
+ case ValType::B32x4: MOZ_CRASH("NYI");
+ }
+ if (!TypeScript::ArgTypes(script, i)->hasType(type))
+ return true;
+ }
+
+ // Let's optimize it!
+ if (!script->baselineScript()->addDependentWasmImport(cx, *this, funcImportIndex))
+ return false;
+
+ import.code = jitExitCode;
+ import.baselineScript = script->baselineScript();
+ return true;
+}
+
+/* static */ int32_t
+Instance::callImport_void(Instance* instance, int32_t funcImportIndex, int32_t argc, uint64_t* argv)
+{
+ JSContext* cx = instance->cx();
+ RootedValue rval(cx);
+ return instance->callImport(cx, funcImportIndex, argc, argv, &rval);
+}
+
+/* static */ int32_t
+Instance::callImport_i32(Instance* instance, int32_t funcImportIndex, int32_t argc, uint64_t* argv)
+{
+ JSContext* cx = instance->cx();
+ RootedValue rval(cx);
+ if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
+ return false;
+
+ return ToInt32(cx, rval, (int32_t*)argv);
+}
+
+/* static */ int32_t
+Instance::callImport_i64(Instance* instance, int32_t funcImportIndex, int32_t argc, uint64_t* argv)
+{
+ JSContext* cx = instance->cx();
+ RootedValue rval(cx);
+ if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
+ return false;
+
+ return ReadI64Object(cx, rval, (int64_t*)argv);
+}
+
+/* static */ int32_t
+Instance::callImport_f64(Instance* instance, int32_t funcImportIndex, int32_t argc, uint64_t* argv)
+{
+ JSContext* cx = instance->cx();
+ RootedValue rval(cx);
+ if (!instance->callImport(cx, funcImportIndex, argc, argv, &rval))
+ return false;
+
+ return ToNumber(cx, rval, (double*)argv);
+}
+
+/* static */ uint32_t
+Instance::growMemory_i32(Instance* instance, uint32_t delta)
+{
+ MOZ_ASSERT(!instance->isAsmJS());
+
+ JSContext* cx = instance->cx();
+ RootedWasmMemoryObject memory(cx, instance->memory_);
+
+ uint32_t ret = WasmMemoryObject::grow(memory, delta, cx);
+
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(instance->tlsData_.memoryBase ==
+ instance->memory_->buffer().dataPointerEither());
+
+ return ret;
+}
+
+/* static */ uint32_t
+Instance::currentMemory_i32(Instance* instance)
+{
+ uint32_t byteLength = instance->memoryLength();
+ MOZ_ASSERT(byteLength % wasm::PageSize == 0);
+ return byteLength / wasm::PageSize;
+}
+
+Instance::Instance(JSContext* cx,
+ Handle<WasmInstanceObject*> object,
+ UniqueCode code,
+ HandleWasmMemoryObject memory,
+ SharedTableVector&& tables,
+ Handle<FunctionVector> funcImports,
+ const ValVector& globalImports)
+ : compartment_(cx->compartment()),
+ object_(object),
+ code_(Move(code)),
+ memory_(memory),
+ tables_(Move(tables))
+{
+ MOZ_ASSERT(funcImports.length() == metadata().funcImports.length());
+ MOZ_ASSERT(tables_.length() == metadata().tables.length());
+
+ tlsData_.cx = cx;
+ tlsData_.instance = this;
+ tlsData_.globalData = code_->segment().globalData();
+ tlsData_.memoryBase = memory ? memory->buffer().dataPointerEither().unwrap() : nullptr;
+ tlsData_.stackLimit = *(void**)cx->stackLimitAddressForJitCode(StackForUntrustedScript);
+
+ for (size_t i = 0; i < metadata().funcImports.length(); i++) {
+ HandleFunction f = funcImports[i];
+ const FuncImport& fi = metadata().funcImports[i];
+ FuncImportTls& import = funcImportTls(fi);
+ if (!isAsmJS() && IsExportedWasmFunction(f)) {
+ WasmInstanceObject* calleeInstanceObj = ExportedFunctionToInstanceObject(f);
+ const CodeRange& codeRange = calleeInstanceObj->getExportedFunctionCodeRange(f);
+ Instance& calleeInstance = calleeInstanceObj->instance();
+ import.tls = &calleeInstance.tlsData_;
+ import.code = calleeInstance.codeSegment().base() + codeRange.funcNonProfilingEntry();
+ import.baselineScript = nullptr;
+ import.obj = calleeInstanceObj;
+ } else {
+ import.tls = &tlsData_;
+ import.code = codeBase() + fi.interpExitCodeOffset();
+ import.baselineScript = nullptr;
+ import.obj = f;
+ }
+ }
+
+ for (size_t i = 0; i < tables_.length(); i++) {
+ const TableDesc& td = metadata().tables[i];
+ TableTls& table = tableTls(td);
+ table.length = tables_[i]->length();
+ table.base = tables_[i]->base();
+ }
+
+ uint8_t* globalData = code_->segment().globalData();
+
+ for (size_t i = 0; i < metadata().globals.length(); i++) {
+ const GlobalDesc& global = metadata().globals[i];
+ if (global.isConstant())
+ continue;
+
+ uint8_t* globalAddr = globalData + global.offset();
+ switch (global.kind()) {
+ case GlobalKind::Import: {
+ globalImports[global.importIndex()].writePayload(globalAddr);
+ break;
+ }
+ case GlobalKind::Variable: {
+ const InitExpr& init = global.initExpr();
+ switch (init.kind()) {
+ case InitExpr::Kind::Constant: {
+ init.val().writePayload(globalAddr);
+ break;
+ }
+ case InitExpr::Kind::GetGlobal: {
+ const GlobalDesc& imported = metadata().globals[init.globalIndex()];
+ globalImports[imported.importIndex()].writePayload(globalAddr);
+ break;
+ }
+ }
+ break;
+ }
+ case GlobalKind::Constant: {
+ MOZ_CRASH("skipped at the top");
+ }
+ }
+ }
+}
+
+bool
+Instance::init(JSContext* cx)
+{
+ if (memory_ && memory_->movingGrowable() && !memory_->addMovingGrowObserver(cx, object_))
+ return false;
+
+ for (const SharedTable& table : tables_) {
+ if (table->movingGrowable() && !table->addMovingGrowObserver(cx, object_))
+ return false;
+ }
+
+ if (!metadata().sigIds.empty()) {
+ ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet->lock();
+
+ if (!lockedSigIdSet->ensureInitialized(cx))
+ return false;
+
+ for (const SigWithId& sig : metadata().sigIds) {
+ const void* sigId;
+ if (!lockedSigIdSet->allocateSigId(cx, sig, &sigId))
+ return false;
+
+ *addressOfSigId(sig.id) = sigId;
+ }
+ }
+
+ return true;
+}
+
+Instance::~Instance()
+{
+ compartment_->wasm.unregisterInstance(*this);
+
+ for (unsigned i = 0; i < metadata().funcImports.length(); i++) {
+ FuncImportTls& import = funcImportTls(metadata().funcImports[i]);
+ if (import.baselineScript)
+ import.baselineScript->removeDependentWasmImport(*this, i);
+ }
+
+ if (!metadata().sigIds.empty()) {
+ ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet->lock();
+
+ for (const SigWithId& sig : metadata().sigIds) {
+ if (const void* sigId = *addressOfSigId(sig.id))
+ lockedSigIdSet->deallocateSigId(sig, sigId);
+ }
+ }
+}
+
+size_t
+Instance::memoryMappedSize() const
+{
+ return memory_->buffer().wasmMappedSize();
+}
+
+bool
+Instance::memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const
+{
+ MOZ_ASSERT(numBytes > 0);
+
+ if (!metadata().usesMemory())
+ return false;
+
+ uint8_t* base = memoryBase().unwrap(/* comparison */);
+ if (addr < base)
+ return false;
+
+ size_t lastByteOffset = addr - base + (numBytes - 1);
+ return lastByteOffset >= memoryLength() && lastByteOffset < memoryMappedSize();
+}
+
+void
+Instance::tracePrivate(JSTracer* trc)
+{
+    // This method is only called from WasmInstanceObject, so TraceEdge is used
+    // here solely so that the pointer can be updated during a moving GC.
+    // TraceWeakEdge may sound better, but it is less efficient given that
+ // we know object_ is already marked.
+ MOZ_ASSERT(!gc::IsAboutToBeFinalized(&object_));
+ TraceEdge(trc, &object_, "wasm instance object");
+
+ for (const FuncImport& fi : metadata().funcImports)
+ TraceNullableEdge(trc, &funcImportTls(fi).obj, "wasm import");
+
+ for (const SharedTable& table : tables_)
+ table->trace(trc);
+
+ TraceNullableEdge(trc, &memory_, "wasm buffer");
+}
+
+void
+Instance::trace(JSTracer* trc)
+{
+ // Technically, instead of having this method, the caller could use
+ // Instance::object() to get the owning WasmInstanceObject to mark,
+ // but this method is simpler and more efficient. The trace hook of
+ // WasmInstanceObject will call Instance::tracePrivate at which point we
+ // can mark the rest of the children.
+ TraceEdge(trc, &object_, "wasm instance object");
+}
+
+SharedMem<uint8_t*>
+Instance::memoryBase() const
+{
+ MOZ_ASSERT(metadata().usesMemory());
+ MOZ_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither());
+ return memory_->buffer().dataPointerEither();
+}
+
+size_t
+Instance::memoryLength() const
+{
+ return memory_->buffer().byteLength();
+}
+
+WasmInstanceObject*
+Instance::objectUnbarriered() const
+{
+ return object_.unbarrieredGet();
+}
+
+WasmInstanceObject*
+Instance::object() const
+{
+ return object_;
+}
+
+bool
+Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
+{
+ // If there has been a moving grow, this Instance should have been notified.
+ MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());
+
+ if (!cx->compartment()->wasm.ensureProfilingState(cx))
+ return false;
+
+ const FuncExport& func = metadata().lookupFuncExport(funcIndex);
+
+ // The calling convention for an external call into wasm is to pass an
+ // array of 16-byte values where each value contains either a coerced int32
+ // (in the low word), a double value (in the low dword) or a SIMD vector
+ // value, with the coercions specified by the wasm signature. The external
+ // entry point unpacks this array into the system-ABI-specified registers
+ // and stack memory and then calls into the internal entry point. The return
+ // value is stored in the first element of the array (which, therefore, must
+ // have length >= 1).
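+    //
+    // For example, for a hypothetical export with signature (i32, f64) -> f64,
+    // the array would be laid out roughly as follows (each ExportArg slot is
+    // 16 bytes wide; the unused high bytes of a slot are ignored):
+    //
+    //   exportArgs[0]: the coerced int32 arg 0 in the low 4 bytes; after the
+    //                  call, the f64 return value is written back here
+    //   exportArgs[1]: the double arg 1 in the low 8 bytes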
+ Vector<ExportArg, 8> exportArgs(cx);
+ if (!exportArgs.resize(Max<size_t>(1, func.sig().args().length())))
+ return false;
+
+ RootedValue v(cx);
+ for (unsigned i = 0; i < func.sig().args().length(); ++i) {
+ v = i < args.length() ? args[i] : UndefinedValue();
+ switch (func.sig().arg(i)) {
+ case ValType::I32:
+ if (!ToInt32(cx, v, (int32_t*)&exportArgs[i]))
+ return false;
+ break;
+ case ValType::I64:
+ if (!JitOptions.wasmTestMode) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64);
+ return false;
+ }
+ if (!ReadI64Object(cx, v, (int64_t*)&exportArgs[i]))
+ return false;
+ break;
+ case ValType::F32:
+ if (JitOptions.wasmTestMode && v.isObject()) {
+ if (!ReadCustomFloat32NaNObject(cx, v, (uint32_t*)&exportArgs[i]))
+ return false;
+ break;
+ }
+ if (!RoundFloat32(cx, v, (float*)&exportArgs[i]))
+ return false;
+ break;
+ case ValType::F64:
+ if (JitOptions.wasmTestMode && v.isObject()) {
+ if (!ReadCustomDoubleNaNObject(cx, v, (uint64_t*)&exportArgs[i]))
+ return false;
+ break;
+ }
+ if (!ToNumber(cx, v, (double*)&exportArgs[i]))
+ return false;
+ break;
+ case ValType::I8x16: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Int8x16>(cx, v, &simd))
+ return false;
+ memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
+ break;
+ }
+ case ValType::I16x8: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Int16x8>(cx, v, &simd))
+ return false;
+ memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
+ break;
+ }
+ case ValType::I32x4: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Int32x4>(cx, v, &simd))
+ return false;
+ memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
+ break;
+ }
+ case ValType::F32x4: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Float32x4>(cx, v, &simd))
+ return false;
+ memcpy(&exportArgs[i], simd.asFloat32x4(), Simd128DataSize);
+ break;
+ }
+ case ValType::B8x16: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Bool8x16>(cx, v, &simd))
+ return false;
+ // Bool8x16 uses the same representation as Int8x16.
+ memcpy(&exportArgs[i], simd.asInt8x16(), Simd128DataSize);
+ break;
+ }
+ case ValType::B16x8: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Bool16x8>(cx, v, &simd))
+ return false;
+ // Bool16x8 uses the same representation as Int16x8.
+ memcpy(&exportArgs[i], simd.asInt16x8(), Simd128DataSize);
+ break;
+ }
+ case ValType::B32x4: {
+ SimdConstant simd;
+ if (!ToSimdConstant<Bool32x4>(cx, v, &simd))
+ return false;
+ // Bool32x4 uses the same representation as Int32x4.
+ memcpy(&exportArgs[i], simd.asInt32x4(), Simd128DataSize);
+ break;
+ }
+ }
+ }
+
+ {
+ // Push a WasmActivation to describe the wasm frames we're about to push
+ // when running this module. Additionally, push a JitActivation so that
+ // the optimized wasm-to-Ion FFI call path (which we want to be very
+ // fast) can avoid doing so. The JitActivation is marked as inactive so
+ // stack iteration will skip over it.
+ WasmActivation activation(cx);
+ JitActivation jitActivation(cx, /* active */ false);
+
+ // Call the per-exported-function trampoline created by GenerateEntry.
+ auto funcPtr = JS_DATA_TO_FUNC_PTR(ExportFuncPtr, codeBase() + func.entryOffset());
+ if (!CALL_GENERATED_2(funcPtr, exportArgs.begin(), &tlsData_))
+ return false;
+ }
+
+ if (isAsmJS() && args.isConstructing()) {
+ // By spec, when a JS function is called as a constructor and this
+        // function returns a primitive type, which is the case for all asm.js
+ // exported functions, the returned value is discarded and an empty
+ // object is returned instead.
+ PlainObject* obj = NewBuiltinClassInstance<PlainObject>(cx);
+ if (!obj)
+ return false;
+ args.rval().set(ObjectValue(*obj));
+ return true;
+ }
+
+ void* retAddr = &exportArgs[0];
+ JSObject* retObj = nullptr;
+ switch (func.sig().ret()) {
+ case ExprType::Void:
+ args.rval().set(UndefinedValue());
+ break;
+ case ExprType::I32:
+ args.rval().set(Int32Value(*(int32_t*)retAddr));
+ break;
+ case ExprType::I64:
+ if (!JitOptions.wasmTestMode) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_I64);
+ return false;
+ }
+ retObj = CreateI64Object(cx, *(int64_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::F32:
+ if (JitOptions.wasmTestMode && IsNaN(*(float*)retAddr)) {
+ retObj = CreateCustomNaNObject(cx, (float*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ }
+ args.rval().set(NumberValue(*(float*)retAddr));
+ break;
+ case ExprType::F64:
+ if (JitOptions.wasmTestMode && IsNaN(*(double*)retAddr)) {
+ retObj = CreateCustomNaNObject(cx, (double*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ }
+ args.rval().set(NumberValue(*(double*)retAddr));
+ break;
+ case ExprType::I8x16:
+ retObj = CreateSimd<Int8x16>(cx, (int8_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::I16x8:
+ retObj = CreateSimd<Int16x8>(cx, (int16_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::I32x4:
+ retObj = CreateSimd<Int32x4>(cx, (int32_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::F32x4:
+ retObj = CreateSimd<Float32x4>(cx, (float*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::B8x16:
+ retObj = CreateSimd<Bool8x16>(cx, (int8_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::B16x8:
+ retObj = CreateSimd<Bool16x8>(cx, (int16_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::B32x4:
+ retObj = CreateSimd<Bool32x4>(cx, (int32_t*)retAddr);
+ if (!retObj)
+ return false;
+ break;
+ case ExprType::Limit:
+ MOZ_CRASH("Limit");
+ }
+
+ if (retObj)
+ args.rval().set(ObjectValue(*retObj));
+
+ return true;
+}
+
+void
+Instance::onMovingGrowMemory(uint8_t* prevMemoryBase)
+{
+ MOZ_ASSERT(!isAsmJS());
+ ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
+ tlsData_.memoryBase = buffer.dataPointer();
+ code_->segment().onMovingGrow(prevMemoryBase, metadata(), buffer);
+}
+
+void
+Instance::onMovingGrowTable()
+{
+ MOZ_ASSERT(!isAsmJS());
+ MOZ_ASSERT(tables_.length() == 1);
+ TableTls& table = tableTls(metadata().tables[0]);
+ table.length = tables_[0]->length();
+ table.base = tables_[0]->base();
+}
+
+void
+Instance::deoptimizeImportExit(uint32_t funcImportIndex)
+{
+ const FuncImport& fi = metadata().funcImports[funcImportIndex];
+ FuncImportTls& import = funcImportTls(fi);
+ import.code = codeBase() + fi.interpExitCodeOffset();
+ import.baselineScript = nullptr;
+}
+
+static void
+UpdateEntry(const Code& code, bool profilingEnabled, void** entry)
+{
+ const CodeRange& codeRange = *code.lookupRange(*entry);
+ void* from = code.segment().base() + codeRange.funcNonProfilingEntry();
+ void* to = code.segment().base() + codeRange.funcProfilingEntry();
+
+ if (!profilingEnabled)
+ Swap(from, to);
+
+ MOZ_ASSERT(*entry == from);
+ *entry = to;
+}
+
+bool
+Instance::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
+{
+ if (code_->profilingEnabled() == newProfilingEnabled)
+ return true;
+
+ if (!code_->ensureProfilingState(cx, newProfilingEnabled))
+ return false;
+
+ // Imported wasm functions and typed function tables point directly to
+ // either the profiling or non-profiling prologue and must therefore be
+ // updated when the profiling mode is toggled.
+
+ for (const FuncImport& fi : metadata().funcImports) {
+ FuncImportTls& import = funcImportTls(fi);
+ if (import.obj && import.obj->is<WasmInstanceObject>()) {
+ Code& code = import.obj->as<WasmInstanceObject>().instance().code();
+ UpdateEntry(code, newProfilingEnabled, &import.code);
+ }
+ }
+
+ for (const SharedTable& table : tables_) {
+ if (!table->isTypedFunction())
+ continue;
+
+ // This logic will have to be generalized to match the import logic
+ // above if wasm can create typed function tables since a single table
+ // can contain elements from multiple instances.
+ MOZ_ASSERT(metadata().kind == ModuleKind::AsmJS);
+
+ void** array = table->internalArray();
+ uint32_t length = table->length();
+ for (size_t i = 0; i < length; i++) {
+ if (array[i])
+ UpdateEntry(*code_, newProfilingEnabled, &array[i]);
+ }
+ }
+
+ return true;
+}
+
+void
+Instance::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ Table::SeenSet* seenTables,
+ size_t* code,
+ size_t* data) const
+{
+ *data += mallocSizeOf(this);
+
+ code_->addSizeOfMisc(mallocSizeOf, seenMetadata, seenBytes, code, data);
+
+ for (const SharedTable& table : tables_)
+ *data += table->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenTables);
+}
diff --git a/js/src/wasm/WasmInstance.h b/js/src/wasm/WasmInstance.h
new file mode 100644
index 0000000000..8d6ee0b737
--- /dev/null
+++ b/js/src/wasm/WasmInstance.h
@@ -0,0 +1,145 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_instance_h
+#define wasm_instance_h
+
+#include "gc/Barrier.h"
+#include "wasm/WasmCode.h"
+#include "wasm/WasmTable.h"
+
+namespace js {
+namespace wasm {
+
+// Instance represents a wasm instance and provides all the support for runtime
+// execution of code in the instance. Instances share various immutable data
+// structures with the Module from which they were instantiated and other
+// instances instantiated from the same Module. However, an Instance has no
+// direct reference to its source Module, which allows a Module to be destroyed
+// while it still has live Instances.
+
+class Instance
+{
+ JSCompartment* const compartment_;
+ ReadBarrieredWasmInstanceObject object_;
+ const UniqueCode code_;
+ GCPtrWasmMemoryObject memory_;
+ SharedTableVector tables_;
+ TlsData tlsData_;
+
+ // Internal helpers:
+ const void** addressOfSigId(const SigIdDesc& sigId) const;
+ FuncImportTls& funcImportTls(const FuncImport& fi);
+ TableTls& tableTls(const TableDesc& td) const;
+
+ // Import call slow paths which are called directly from wasm code.
+ friend void* AddressOf(SymbolicAddress, ExclusiveContext*);
+ static int32_t callImport_void(Instance*, int32_t, int32_t, uint64_t*);
+ static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
+ static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
+ static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
+ static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
+ static uint32_t currentMemory_i32(Instance* instance);
+ bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
+ MutableHandleValue rval);
+
+ // Only WasmInstanceObject can call the private trace function.
+ friend class js::WasmInstanceObject;
+ void tracePrivate(JSTracer* trc);
+
+ public:
+ Instance(JSContext* cx,
+ HandleWasmInstanceObject object,
+ UniqueCode code,
+ HandleWasmMemoryObject memory,
+ SharedTableVector&& tables,
+ Handle<FunctionVector> funcImports,
+ const ValVector& globalImports);
+ ~Instance();
+ bool init(JSContext* cx);
+ void trace(JSTracer* trc);
+
+ JSContext* cx() const { return tlsData_.cx; }
+ JSCompartment* compartment() const { return compartment_; }
+ Code& code() { return *code_; }
+ const Code& code() const { return *code_; }
+ const CodeSegment& codeSegment() const { return code_->segment(); }
+ uint8_t* codeBase() const { return code_->segment().base(); }
+ const Metadata& metadata() const { return code_->metadata(); }
+ bool isAsmJS() const { return metadata().isAsmJS(); }
+ const SharedTableVector& tables() const { return tables_; }
+ SharedMem<uint8_t*> memoryBase() const;
+ size_t memoryLength() const;
+ size_t memoryMappedSize() const;
+ bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const;
+ TlsData& tlsData() { return tlsData_; }
+
+ // This method returns a pointer to the GC object that owns this Instance.
+ // Instances may be reached via weak edges (e.g., Compartment::instances_)
+    // so this performs a read-barrier on the returned object unless the barrier
+ // is explicitly waived.
+
+ WasmInstanceObject* object() const;
+ WasmInstanceObject* objectUnbarriered() const;
+
+    // Execute the given export with the JS call arguments, storing the return
+ // value in args.rval.
+
+ MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcIndex, CallArgs args);
+
+ // Initially, calls to imports in wasm code call out through the generic
+ // callImport method. If the imported callee gets JIT compiled and the types
+ // match up, callImport will patch the code to instead call through a thunk
+ // directly into the JIT code. If the JIT code is released, the Instance must
+ // be notified so it can go back to the generic callImport.
+
+ void deoptimizeImportExit(uint32_t funcImportIndex);
+
+ // Called by simulators to check whether accessing 'numBytes' starting at
+ // 'addr' would trigger a fault and be safely handled by signal handlers.
+
+ bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes);
+
+ // Called by Wasm(Memory|Table)Object when a moving resize occurs:
+
+ void onMovingGrowMemory(uint8_t* prevMemoryBase);
+ void onMovingGrowTable();
+
+ // See Code::ensureProfilingState comment.
+
+ MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ Table::SeenSet* seenTables,
+ size_t* code,
+ size_t* data) const;
+};
+
+typedef UniquePtr<Instance> UniqueInstance;
+
+bool InitInstanceStaticData();
+void ShutDownInstanceStaticData();
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_instance_h
diff --git a/js/src/wasm/WasmIonCompile.cpp b/js/src/wasm/WasmIonCompile.cpp
new file mode 100644
index 0000000000..dd7e84ca15
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.cpp
@@ -0,0 +1,3811 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmIonCompile.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "jit/CodeGenerator.h"
+
+#include "wasm/WasmBaselineCompile.h"
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmBinaryIterator.h"
+#include "wasm/WasmGenerator.h"
+#include "wasm/WasmSignalHandlers.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::DebugOnly;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::Nothing;
+using mozilla::Some;
+
+namespace {
+
+typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
+
+struct IonCompilePolicy : OpIterPolicy
+{
+ // Producing output is what we're all about here.
+ static const bool Output = true;
+
+ // We store SSA definitions in the value stack.
+ typedef MDefinition* Value;
+
+ // We store loop headers and then/else blocks in the control flow stack.
+ typedef MBasicBlock* ControlItem;
+};
+
+typedef OpIter<IonCompilePolicy> IonOpIter;
+
+class FunctionCompiler;
+
+// TlsUsage describes how the TLS register is used during a function call.
+
+enum class TlsUsage {
+ Unused, // No particular action is taken with respect to the TLS register.
+ Need, // The TLS register must be reloaded just before the call.
+ CallerSaved // Same, plus space must be allocated to save/restore the TLS
+ // register.
+};
+
+static bool
+NeedsTls(TlsUsage usage) {
+ return usage == TlsUsage::Need || usage == TlsUsage::CallerSaved;
+}
+
+// CallCompileState describes a call that is being compiled. Due to expression
+// nesting, multiple calls can be in the middle of compilation at the same time
+// and these are tracked in a stack by FunctionCompiler.
+
+class CallCompileState
+{
+ // The line or bytecode of the call.
+ uint32_t lineOrBytecode_;
+
+ // A generator object that is passed each argument as it is compiled.
+ ABIArgGenerator abi_;
+
+ // The maximum number of bytes used by "child" calls, i.e., calls that occur
+ // while evaluating the arguments of the call represented by this
+ // CallCompileState.
+ uint32_t maxChildStackBytes_;
+
+ // Set by FunctionCompiler::finishCall(), tells the MWasmCall by how
+ // much to bump the stack pointer before making the call. See
+ // FunctionCompiler::startCall() comment below.
+ uint32_t spIncrement_;
+
+ // Set by FunctionCompiler::finishCall(), tells a potentially-inter-module
+ // call the offset of the reserved space in which it can save the caller's
+ // WasmTlsReg.
+ uint32_t tlsStackOffset_;
+
+ // Accumulates the register arguments while compiling arguments.
+ MWasmCall::Args regArgs_;
+
+ // Reserved argument for passing Instance* to builtin instance method calls.
+ ABIArg instanceArg_;
+
+ // Accumulates the stack arguments while compiling arguments. This is only
+ // necessary to track when childClobbers_ is true so that the stack offsets
+ // can be updated.
+ Vector<MWasmStackArg*, 0, SystemAllocPolicy> stackArgs_;
+
+ // Set by child calls (i.e., calls that execute while evaluating a parent's
+ // operands) to indicate that the child and parent call cannot reuse the
+ // same stack space -- the parent must store its stack arguments below the
+ // child's and increment sp when performing its call.
+ bool childClobbers_;
+
+ // Only FunctionCompiler should be directly manipulating CallCompileState.
+ friend class FunctionCompiler;
+
+ public:
+ CallCompileState(FunctionCompiler& f, uint32_t lineOrBytecode)
+ : lineOrBytecode_(lineOrBytecode),
+ maxChildStackBytes_(0),
+ spIncrement_(0),
+ tlsStackOffset_(MWasmCall::DontSaveTls),
+ childClobbers_(false)
+ { }
+};
+
+// Encapsulates the compilation of a single function in an asm.js or wasm
+// module. The function compiler handles the creation and final backend
+// compilation of the MIR graph.
+class FunctionCompiler
+{
+ struct ControlFlowPatch {
+ MControlInstruction* ins;
+ uint32_t index;
+ ControlFlowPatch(MControlInstruction* ins, uint32_t index)
+ : ins(ins),
+ index(index)
+ {}
+ };
+
+ typedef Vector<ControlFlowPatch, 0, SystemAllocPolicy> ControlFlowPatchVector;
+ typedef Vector<ControlFlowPatchVector, 0, SystemAllocPolicy> ControlFlowPatchsVector;
+ typedef Vector<CallCompileState*, 0, SystemAllocPolicy> CallCompileStateVector;
+
+ const ModuleGeneratorData& mg_;
+ IonOpIter iter_;
+ const FuncBytes& func_;
+ const ValTypeVector& locals_;
+ size_t lastReadCallSite_;
+
+ TempAllocator& alloc_;
+ MIRGraph& graph_;
+ const CompileInfo& info_;
+ MIRGenerator& mirGen_;
+
+ MInstruction* dummyIns_;
+
+ MBasicBlock* curBlock_;
+ CallCompileStateVector callStack_;
+ uint32_t maxStackArgBytes_;
+
+ uint32_t loopDepth_;
+ uint32_t blockDepth_;
+ ControlFlowPatchsVector blockPatches_;
+
+ FuncCompileResults& compileResults_;
+
+ // TLS pointer argument to the current function.
+ MWasmParameter* tlsPointer_;
+
+ public:
+ FunctionCompiler(const ModuleGeneratorData& mg,
+ Decoder& decoder,
+ const FuncBytes& func,
+ const ValTypeVector& locals,
+ MIRGenerator& mirGen,
+ FuncCompileResults& compileResults)
+ : mg_(mg),
+ iter_(decoder, func.lineOrBytecode()),
+ func_(func),
+ locals_(locals),
+ lastReadCallSite_(0),
+ alloc_(mirGen.alloc()),
+ graph_(mirGen.graph()),
+ info_(mirGen.info()),
+ mirGen_(mirGen),
+ dummyIns_(nullptr),
+ curBlock_(nullptr),
+ maxStackArgBytes_(0),
+ loopDepth_(0),
+ blockDepth_(0),
+ compileResults_(compileResults),
+ tlsPointer_(nullptr)
+ {}
+
+ const ModuleGeneratorData& mg() const { return mg_; }
+ IonOpIter& iter() { return iter_; }
+ TempAllocator& alloc() const { return alloc_; }
+ MacroAssembler& masm() const { return compileResults_.masm(); }
+ const Sig& sig() const { return func_.sig(); }
+
+ TrapOffset trapOffset() const {
+ return iter_.trapOffset();
+ }
+ Maybe<TrapOffset> trapIfNotAsmJS() const {
+ return mg_.isAsmJS() ? Nothing() : Some(iter_.trapOffset());
+ }
+
+ bool init()
+ {
+ // Prepare the entry block for MIR generation:
+
+ const ValTypeVector& args = func_.sig().args();
+
+ if (!mirGen_.ensureBallast())
+ return false;
+ if (!newBlock(/* prev */ nullptr, &curBlock_))
+ return false;
+
+ for (ABIArgValTypeIter i(args); !i.done(); i++) {
+ MWasmParameter* ins = MWasmParameter::New(alloc(), *i, i.mirType());
+ curBlock_->add(ins);
+ curBlock_->initSlot(info().localSlot(i.index()), ins);
+ if (!mirGen_.ensureBallast())
+ return false;
+ }
+
+ // Set up a parameter that receives the hidden TLS pointer argument.
+ tlsPointer_ = MWasmParameter::New(alloc(), ABIArg(WasmTlsReg), MIRType::Pointer);
+ curBlock_->add(tlsPointer_);
+ if (!mirGen_.ensureBallast())
+ return false;
+
+ for (size_t i = args.length(); i < locals_.length(); i++) {
+ MInstruction* ins = nullptr;
+ switch (locals_[i]) {
+ case ValType::I32:
+ ins = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
+ break;
+ case ValType::I64:
+ ins = MConstant::NewInt64(alloc(), 0);
+ break;
+ case ValType::F32:
+ ins = MConstant::New(alloc(), Float32Value(0.f), MIRType::Float32);
+ break;
+ case ValType::F64:
+ ins = MConstant::New(alloc(), DoubleValue(0.0), MIRType::Double);
+ break;
+ case ValType::I8x16:
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Int8x16);
+ break;
+ case ValType::I16x8:
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Int16x8);
+ break;
+ case ValType::I32x4:
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Int32x4);
+ break;
+ case ValType::F32x4:
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0.f), MIRType::Float32x4);
+ break;
+ case ValType::B8x16:
+ // Bool8x16 uses the same data layout as Int8x16.
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX16(0), MIRType::Bool8x16);
+ break;
+ case ValType::B16x8:
+ // Bool16x8 uses the same data layout as Int16x8.
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX8(0), MIRType::Bool16x8);
+ break;
+ case ValType::B32x4:
+ // Bool32x4 uses the same data layout as Int32x4.
+ ins = MSimdConstant::New(alloc(), SimdConstant::SplatX4(0), MIRType::Bool32x4);
+ break;
+ }
+
+ curBlock_->add(ins);
+ curBlock_->initSlot(info().localSlot(i), ins);
+ if (!mirGen_.ensureBallast())
+ return false;
+ }
+
+ dummyIns_ = MConstant::New(alloc(), Int32Value(0), MIRType::Int32);
+ curBlock_->add(dummyIns_);
+
+ addInterruptCheck();
+
+ return true;
+ }
+
+ void finish()
+ {
+ mirGen().initWasmMaxStackArgBytes(maxStackArgBytes_);
+
+ MOZ_ASSERT(callStack_.empty());
+ MOZ_ASSERT(loopDepth_ == 0);
+ MOZ_ASSERT(blockDepth_ == 0);
+#ifdef DEBUG
+ for (ControlFlowPatchVector& patches : blockPatches_)
+ MOZ_ASSERT(patches.empty());
+#endif
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(done(), "all bytes must be consumed");
+ MOZ_ASSERT(func_.callSiteLineNums().length() == lastReadCallSite_);
+ }
+
+ /************************* Read-only interface (after local scope setup) */
+
+ MIRGenerator& mirGen() const { return mirGen_; }
+ MIRGraph& mirGraph() const { return graph_; }
+ const CompileInfo& info() const { return info_; }
+
+ MDefinition* getLocalDef(unsigned slot)
+ {
+ if (inDeadCode())
+ return nullptr;
+ return curBlock_->getSlot(info().localSlot(slot));
+ }
+
+ const ValTypeVector& locals() const { return locals_; }
+
+ /***************************** Code generation (after local scope setup) */
+
+ MDefinition* constant(const SimdConstant& v, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+ MInstruction* constant;
+ constant = MSimdConstant::New(alloc(), v, type);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ MDefinition* constant(const Value& v, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+ MConstant* constant = MConstant::New(alloc(), v, type);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ MDefinition* constant(int64_t i)
+ {
+ if (inDeadCode())
+ return nullptr;
+ MConstant* constant = MConstant::NewInt64(alloc(), i);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ MDefinition* constant(RawF32 f)
+ {
+ if (inDeadCode())
+ return nullptr;
+ MConstant* constant = MConstant::New(alloc(), f);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ MDefinition* constant(RawF64 d)
+ {
+ if (inDeadCode())
+ return nullptr;
+ MConstant* constant = MConstant::New(alloc(), d);
+ curBlock_->add(constant);
+ return constant;
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op)
+ {
+ if (inDeadCode())
+ return nullptr;
+ T* ins = T::New(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* unary(MDefinition* op, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+ T* ins = T::New(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs)
+ {
+ if (inDeadCode())
+ return nullptr;
+ T* ins = T::New(alloc(), lhs, rhs);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* binary(MDefinition* lhs, MDefinition* rhs, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+ T* ins = T::New(alloc(), lhs, rhs, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ bool mustPreserveNaN(MIRType type)
+ {
+ return IsFloatingPointType(type) && mg().kind == ModuleKind::Wasm;
+ }
+
+ MDefinition* sub(MDefinition* lhs, MDefinition* rhs, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ // wasm can't fold x - 0.0 because of NaN with custom payloads.
+ MSub* ins = MSub::New(alloc(), lhs, rhs, type, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* unarySimd(MDefinition* input, MSimdUnaryArith::Operation op, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(input->type()) && input->type() == type);
+ MInstruction* ins = MSimdUnaryArith::New(alloc(), input, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryArith::Operation op,
+ MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+ MOZ_ASSERT(lhs->type() == type);
+ return MSimdBinaryArith::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
+ }
+
+ MDefinition* binarySimd(MDefinition* lhs, MDefinition* rhs, MSimdBinaryBitwise::Operation op,
+ MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+ MOZ_ASSERT(lhs->type() == type);
+ auto* ins = MSimdBinaryBitwise::New(alloc(), lhs, rhs, op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* binarySimdComp(MDefinition* lhs, MDefinition* rhs, MSimdBinaryComp::Operation op,
+ SimdSign sign)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ return MSimdBinaryComp::AddLegalized(alloc(), curBlock_, lhs, rhs, op, sign);
+ }
+
+ MDefinition* binarySimdSaturating(MDefinition* lhs, MDefinition* rhs,
+ MSimdBinarySaturating::Operation op, SimdSign sign)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ auto* ins = MSimdBinarySaturating::New(alloc(), lhs, rhs, op, sign);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* binarySimdShift(MDefinition* lhs, MDefinition* rhs, MSimdShift::Operation op)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ return MSimdShift::AddLegalized(alloc(), curBlock_, lhs, rhs, op);
+ }
+
+ MDefinition* swizzleSimd(MDefinition* vector, const uint8_t lanes[], MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(vector->type() == type);
+ MSimdSwizzle* ins = MSimdSwizzle::New(alloc(), vector, lanes);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* shuffleSimd(MDefinition* lhs, MDefinition* rhs, const uint8_t lanes[],
+ MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(lhs->type() == type);
+ MInstruction* ins = MSimdShuffle::New(alloc(), lhs, rhs, lanes);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* insertElementSimd(MDefinition* vec, MDefinition* val, unsigned lane, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(vec->type()) && vec->type() == type);
+ MOZ_ASSERT(SimdTypeToLaneArgumentType(vec->type()) == val->type());
+ MSimdInsertElement* ins = MSimdInsertElement::New(alloc(), vec, val, lane);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* selectSimd(MDefinition* mask, MDefinition* lhs, MDefinition* rhs, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(mask->type()));
+ MOZ_ASSERT(IsSimdType(lhs->type()) && rhs->type() == lhs->type());
+ MOZ_ASSERT(lhs->type() == type);
+ MSimdSelect* ins = MSimdSelect::New(alloc(), mask, lhs, rhs);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* simdAllTrue(MDefinition* boolVector)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MSimdAllTrue* ins = MSimdAllTrue::New(alloc(), boolVector, MIRType::Int32);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* simdAnyTrue(MDefinition* boolVector)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MSimdAnyTrue* ins = MSimdAnyTrue::New(alloc(), boolVector, MIRType::Int32);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // fromXXXBits()
+ MDefinition* bitcastSimd(MDefinition* vec, MIRType from, MIRType to)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(vec->type() == from);
+ MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
+ auto* ins = MSimdReinterpretCast::New(alloc(), vec, to);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ // Int <--> Float conversions.
+ MDefinition* convertSimd(MDefinition* vec, MIRType from, MIRType to, SimdSign sign)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(from) && IsSimdType(to) && from != to);
+ return MSimdConvert::AddLegalized(alloc(), curBlock_, vec, to, sign, trapOffset());
+ }
+
+ MDefinition* splatSimd(MDefinition* v, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(type));
+ MOZ_ASSERT(SimdTypeToLaneArgumentType(type) == v->type());
+ MSimdSplat* ins = MSimdSplat::New(alloc(), v, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* minMax(MDefinition* lhs, MDefinition* rhs, MIRType type, bool isMax) {
+ if (inDeadCode())
+ return nullptr;
+
+ if (mustPreserveNaN(type)) {
+            // Convert signaling NaNs to quiet NaNs.
+ MDefinition* zero = constant(DoubleValue(0.0), type);
+ lhs = sub(lhs, zero, type);
+ rhs = sub(rhs, zero, type);
+ }
+
+ MMinMax* ins = MMinMax::NewWasm(alloc(), lhs, rhs, type, isMax);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* mul(MDefinition* lhs, MDefinition* rhs, MIRType type, MMul::Mode mode)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ // wasm can't fold x * 1.0 because of NaN with custom payloads.
+ auto* ins = MMul::NewWasm(alloc(), lhs, rhs, type, mode, mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* div(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
+ {
+ if (inDeadCode())
+ return nullptr;
+ bool trapOnError = !mg().isAsmJS();
+ auto* ins = MDiv::New(alloc(), lhs, rhs, type, unsignd, trapOnError, trapOffset(),
+ mustPreserveNaN(type));
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* mod(MDefinition* lhs, MDefinition* rhs, MIRType type, bool unsignd)
+ {
+ if (inDeadCode())
+ return nullptr;
+ bool trapOnError = !mg().isAsmJS();
+ auto* ins = MMod::New(alloc(), lhs, rhs, type, unsignd, trapOnError, trapOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* bitnot(MDefinition* op)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MBitNot::NewInt32(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* select(MDefinition* trueExpr, MDefinition* falseExpr, MDefinition* condExpr)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MWasmSelect::New(alloc(), trueExpr, falseExpr, condExpr);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* extendI32(MDefinition* op, bool isUnsigned)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MExtendInt32ToInt64::New(alloc(), op, isUnsigned);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* convertI64ToFloatingPoint(MDefinition* op, MIRType type, bool isUnsigned)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MInt64ToFloatingPoint::New(alloc(), op, type, isUnsigned);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* rotate(MDefinition* input, MDefinition* count, MIRType type, bool left)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MRotate::New(alloc(), input, count, type, left);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template <class T>
+ MDefinition* truncate(MDefinition* op, bool isUnsigned)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = T::New(alloc(), op, isUnsigned, trapOffset());
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ MDefinition* compare(MDefinition* lhs, MDefinition* rhs, JSOp op, MCompare::CompareType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MCompare::New(alloc(), lhs, rhs, op, type);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ void assign(unsigned slot, MDefinition* def)
+ {
+ if (inDeadCode())
+ return;
+ curBlock_->setSlot(info().localSlot(slot), def);
+ }
+
+ private:
+ void checkOffsetAndBounds(MemoryAccessDesc* access, MDefinition** base)
+ {
+ // If the offset is bigger than the guard region, a separate instruction
+ // is necessary to add the offset to the base and check for overflow.
+ if (access->offset() >= OffsetGuardLimit || !JitOptions.wasmFoldOffsets) {
+ auto* ins = MWasmAddOffset::New(alloc(), *base, access->offset(), trapOffset());
+ curBlock_->add(ins);
+
+ *base = ins;
+ access->clearOffset();
+ }
+
+#ifndef WASM_HUGE_MEMORY
+ curBlock_->add(MWasmBoundsCheck::New(alloc(), *base, trapOffset()));
+#endif
+ }
+
+ public:
+ MDefinition* load(MDefinition* base, MemoryAccessDesc access, ValType result)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MInstruction* load = nullptr;
+ if (access.isPlainAsmJS()) {
+ MOZ_ASSERT(access.offset() == 0);
+ load = MAsmJSLoadHeap::New(alloc(), base, access.type());
+ } else {
+ checkOffsetAndBounds(&access, &base);
+ load = MWasmLoad::New(alloc(), base, access, ToMIRType(result));
+ }
+
+ curBlock_->add(load);
+ return load;
+ }
+
+ void store(MDefinition* base, MemoryAccessDesc access, MDefinition* v)
+ {
+ if (inDeadCode())
+ return;
+
+ MInstruction* store = nullptr;
+ if (access.isPlainAsmJS()) {
+ MOZ_ASSERT(access.offset() == 0);
+ store = MAsmJSStoreHeap::New(alloc(), base, access.type(), v);
+ } else {
+ checkOffsetAndBounds(&access, &base);
+ store = MWasmStore::New(alloc(), base, access, v);
+ }
+
+ curBlock_->add(store);
+ }
+
+ MDefinition* atomicCompareExchangeHeap(MDefinition* base, MemoryAccessDesc access,
+ MDefinition* oldv, MDefinition* newv)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ checkOffsetAndBounds(&access, &base);
+ auto* cas = MAsmJSCompareExchangeHeap::New(alloc(), base, access, oldv, newv, tlsPointer_);
+ curBlock_->add(cas);
+ return cas;
+ }
+
+ MDefinition* atomicExchangeHeap(MDefinition* base, MemoryAccessDesc access,
+ MDefinition* value)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ checkOffsetAndBounds(&access, &base);
+ auto* cas = MAsmJSAtomicExchangeHeap::New(alloc(), base, access, value, tlsPointer_);
+ curBlock_->add(cas);
+ return cas;
+ }
+
+ MDefinition* atomicBinopHeap(js::jit::AtomicOp op,
+ MDefinition* base, MemoryAccessDesc access,
+ MDefinition* v)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ checkOffsetAndBounds(&access, &base);
+ auto* binop = MAsmJSAtomicBinopHeap::New(alloc(), op, base, access, v, tlsPointer_);
+ curBlock_->add(binop);
+ return binop;
+ }
+
+ MDefinition* loadGlobalVar(unsigned globalDataOffset, bool isConst, MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ auto* load = MWasmLoadGlobalVar::New(alloc(), type, globalDataOffset, isConst);
+ curBlock_->add(load);
+ return load;
+ }
+
+ void storeGlobalVar(uint32_t globalDataOffset, MDefinition* v)
+ {
+ if (inDeadCode())
+ return;
+ curBlock_->add(MWasmStoreGlobalVar::New(alloc(), globalDataOffset, v));
+ }
+
+ void addInterruptCheck()
+ {
+        // We rely on signal handlers for interrupts in asm.js and wasm code.
+ MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
+ }
+
+ MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(base->type()));
+ MOZ_ASSERT(!IsSimdType(type));
+ auto* ins = MSimdExtractElement::New(alloc(), base, type, lane, sign);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ template<typename T>
+ MDefinition* constructSimd(MDefinition* x, MDefinition* y, MDefinition* z, MDefinition* w,
+ MIRType type)
+ {
+ if (inDeadCode())
+ return nullptr;
+
+ MOZ_ASSERT(IsSimdType(type));
+ T* ins = T::New(alloc(), type, x, y, z, w);
+ curBlock_->add(ins);
+ return ins;
+ }
+
+ /***************************************************************** Calls */
+
+ // The IonMonkey backend maintains a single stack offset (from the stack
+ // pointer to the base of the frame) by adding the total amount of spill
+ // space required plus the maximum stack required for argument passing.
+ // Since we do not use IonMonkey's MPrepareCall/MPassArg/MCall, we must
+ // manually accumulate, for the entire function, the maximum required stack
+ // space for argument passing. (This is passed to the CodeGenerator via
+ // MIRGenerator::maxWasmStackArgBytes.) Naively, this would just be the
+ // maximum of the stack space required for each individual call (as
+ // determined by the call ABI). However, as an optimization, arguments are
+ // stored to the stack immediately after evaluation (to decrease live
+ // ranges and reduce spilling). This introduces the complexity that,
+ // between evaluating an argument and making the call, another argument
+ // evaluation could perform a call that also needs to store to the stack.
+ // When this occurs childClobbers_ = true and the parent expression's
+ // arguments are stored above the maximum depth clobbered by a child
+ // expression.
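+    //
+    // A hypothetical example (byte counts invented for illustration): an outer
+    // call needs 16 bytes of stack arguments and, while evaluating one of them
+    // -- after another has already been stored to the stack -- a nested call
+    // consumes 24 bytes of stack arguments. The nested finishCall() reports 24
+    // via propagateMaxStackArgBytes(), which marks the outer call with
+    // childClobbers_. The outer finishCall() then sets spIncrement_ to
+    // AlignBytes(24, WasmStackAlignment), shifts its own stack arguments up by
+    // that amount, and reports 16 + spIncrement_ rather than max(16, 24).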
+
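+    // Callers drive these helpers in a fixed order; a rough sketch (names such
+    // as `lineOrBytecode`, `def` and `type` are placeholders, see the Emit*
+    // functions below for real uses):
+    //
+    //   CallCompileState call(f, lineOrBytecode);
+    //   if (!f.startCall(&call))
+    //       return false;
+    //   if (!f.passArg(def, type, &call))   // once per argument
+    //       return false;
+    //   if (!f.finishCall(&call, tls))
+    //       return false;
+    //   MDefinition* result;
+    //   if (!f.callDirect(sig, funcIndex, call, &result))
+    //       return false;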
+ bool startCall(CallCompileState* call)
+ {
+ // Always push calls to maintain the invariant that if we're inDeadCode
+ // in finishCall, we have something to pop.
+ return callStack_.append(call);
+ }
+
+ bool passInstance(CallCompileState* args)
+ {
+ if (inDeadCode())
+ return true;
+
+ // Should only pass an instance once.
+ MOZ_ASSERT(args->instanceArg_ == ABIArg());
+ args->instanceArg_ = args->abi_.next(MIRType::Pointer);
+ return true;
+ }
+
+ bool passArg(MDefinition* argDef, ValType type, CallCompileState* call)
+ {
+ if (inDeadCode())
+ return true;
+
+ ABIArg arg = call->abi_.next(ToMIRType(type));
+ switch (arg.kind()) {
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR: {
+ auto mirLow = MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ true);
+ curBlock_->add(mirLow);
+ auto mirHigh = MWrapInt64ToInt32::New(alloc(), argDef, /* bottomHalf = */ false);
+ curBlock_->add(mirHigh);
+ return call->regArgs_.append(MWasmCall::Arg(AnyRegister(arg.gpr64().low), mirLow)) &&
+ call->regArgs_.append(MWasmCall::Arg(AnyRegister(arg.gpr64().high), mirHigh));
+ }
+#endif
+ case ABIArg::GPR:
+ case ABIArg::FPU:
+ return call->regArgs_.append(MWasmCall::Arg(arg.reg(), argDef));
+ case ABIArg::Stack: {
+ auto* mir = MWasmStackArg::New(alloc(), arg.offsetFromArgBase(), argDef);
+ curBlock_->add(mir);
+ return call->stackArgs_.append(mir);
+ }
+ default:
+ MOZ_CRASH("Unknown ABIArg kind.");
+ }
+ }
+
+ void propagateMaxStackArgBytes(uint32_t stackBytes)
+ {
+ if (callStack_.empty()) {
+ // Outermost call
+ maxStackArgBytes_ = Max(maxStackArgBytes_, stackBytes);
+ return;
+ }
+
+ // Non-outermost call
+ CallCompileState* outer = callStack_.back();
+ outer->maxChildStackBytes_ = Max(outer->maxChildStackBytes_, stackBytes);
+ if (stackBytes && !outer->stackArgs_.empty())
+ outer->childClobbers_ = true;
+ }
+
+ bool finishCall(CallCompileState* call, TlsUsage tls)
+ {
+ MOZ_ALWAYS_TRUE(callStack_.popCopy() == call);
+
+ if (inDeadCode()) {
+ propagateMaxStackArgBytes(call->maxChildStackBytes_);
+ return true;
+ }
+
+ if (NeedsTls(tls)) {
+ if (!call->regArgs_.append(MWasmCall::Arg(AnyRegister(WasmTlsReg), tlsPointer_)))
+ return false;
+ }
+
+ uint32_t stackBytes = call->abi_.stackBytesConsumedSoFar();
+
+ // If this is a potentially-inter-module call, allocate an extra word of
+ // stack space to save/restore the caller's WasmTlsReg during the call.
+ // Record the stack offset before including spIncrement since MWasmCall
+ // will use this offset after having bumped the stack pointer.
+ if (tls == TlsUsage::CallerSaved) {
+ call->tlsStackOffset_ = stackBytes;
+ stackBytes += sizeof(void*);
+ }
+
+ if (call->childClobbers_) {
+ call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, WasmStackAlignment);
+ for (MWasmStackArg* stackArg : call->stackArgs_)
+ stackArg->incrementOffset(call->spIncrement_);
+
+ // If instanceArg_ is not initialized then instanceArg_.kind() != ABIArg::Stack
+ if (call->instanceArg_.kind() == ABIArg::Stack) {
+ call->instanceArg_ = ABIArg(call->instanceArg_.offsetFromArgBase() +
+ call->spIncrement_);
+ }
+
+ stackBytes += call->spIncrement_;
+ } else {
+ call->spIncrement_ = 0;
+ stackBytes = Max(stackBytes, call->maxChildStackBytes_);
+ }
+
+ propagateMaxStackArgBytes(stackBytes);
+ return true;
+ }
+
+ bool callDirect(const Sig& sig, uint32_t funcIndex, const CallCompileState& call,
+ MDefinition** def)
+ {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Func);
+ MIRType ret = ToMIRType(sig.ret());
+ auto callee = CalleeDesc::function(funcIndex);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
+ call.spIncrement_, MWasmCall::DontSaveTls);
+ if (!ins)
+ return false;
+
+ curBlock_->add(ins);
+ *def = ins;
+ return true;
+ }
+
+ bool callIndirect(uint32_t sigIndex, MDefinition* index, const CallCompileState& call,
+ MDefinition** def)
+ {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ const SigWithId& sig = mg_.sigs[sigIndex];
+
+ CalleeDesc callee;
+ if (mg_.isAsmJS()) {
+ MOZ_ASSERT(sig.id.kind() == SigIdDesc::Kind::None);
+ const TableDesc& table = mg_.tables[mg_.asmJSSigToTableIndex[sigIndex]];
+ MOZ_ASSERT(IsPowerOfTwo(table.limits.initial));
+ MOZ_ASSERT(!table.external);
+ MOZ_ASSERT(call.tlsStackOffset_ == MWasmCall::DontSaveTls);
+
+ MConstant* mask = MConstant::New(alloc(), Int32Value(table.limits.initial - 1));
+ curBlock_->add(mask);
+ MBitAnd* maskedIndex = MBitAnd::New(alloc(), index, mask, MIRType::Int32);
+ curBlock_->add(maskedIndex);
+
+ index = maskedIndex;
+ callee = CalleeDesc::asmJSTable(table);
+ } else {
+ MOZ_ASSERT(sig.id.kind() != SigIdDesc::Kind::None);
+ MOZ_ASSERT(mg_.tables.length() == 1);
+ const TableDesc& table = mg_.tables[0];
+ MOZ_ASSERT(table.external == (call.tlsStackOffset_ != MWasmCall::DontSaveTls));
+
+ callee = CalleeDesc::wasmTable(table, sig.id);
+ }
+
+ CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(sig.ret()),
+ call.spIncrement_, call.tlsStackOffset_, index);
+ if (!ins)
+ return false;
+
+ curBlock_->add(ins);
+ *def = ins;
+ return true;
+ }
+
+ bool callImport(unsigned globalDataOffset, const CallCompileState& call, ExprType ret,
+ MDefinition** def)
+ {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ MOZ_ASSERT(call.tlsStackOffset_ != MWasmCall::DontSaveTls);
+
+ CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Dynamic);
+ auto callee = CalleeDesc::import(globalDataOffset);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
+ call.spIncrement_, call.tlsStackOffset_);
+ if (!ins)
+ return false;
+
+ curBlock_->add(ins);
+ *def = ins;
+ return true;
+ }
+
+ bool builtinCall(SymbolicAddress builtin, const CallCompileState& call, ValType ret,
+ MDefinition** def)
+ {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
+ auto callee = CalleeDesc::builtin(builtin);
+ auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ToMIRType(ret),
+ call.spIncrement_, MWasmCall::DontSaveTls);
+ if (!ins)
+ return false;
+
+ curBlock_->add(ins);
+ *def = ins;
+ return true;
+ }
+
+ bool builtinInstanceMethodCall(SymbolicAddress builtin, const CallCompileState& call,
+ ValType ret, MDefinition** def)
+ {
+ if (inDeadCode()) {
+ *def = nullptr;
+ return true;
+ }
+
+ CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Symbolic);
+ auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
+ call.instanceArg_, call.regArgs_,
+ ToMIRType(ret), call.spIncrement_,
+ call.tlsStackOffset_);
+ if (!ins)
+ return false;
+
+ curBlock_->add(ins);
+ *def = ins;
+ return true;
+ }
+
+ /*********************************************** Control flow generation */
+
+ inline bool inDeadCode() const {
+ return curBlock_ == nullptr;
+ }
+
+ void returnExpr(MDefinition* operand)
+ {
+ if (inDeadCode())
+ return;
+
+ MWasmReturn* ins = MWasmReturn::New(alloc(), operand, tlsPointer_);
+ curBlock_->end(ins);
+ curBlock_ = nullptr;
+ }
+
+ void returnVoid()
+ {
+ if (inDeadCode())
+ return;
+
+ MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc(), tlsPointer_);
+ curBlock_->end(ins);
+ curBlock_ = nullptr;
+ }
+
+ void unreachableTrap()
+ {
+ if (inDeadCode())
+ return;
+
+ auto* ins = MWasmTrap::New(alloc(), wasm::Trap::Unreachable, trapOffset());
+ curBlock_->end(ins);
+ curBlock_ = nullptr;
+ }
+
+ private:
+ static bool hasPushed(MBasicBlock* block)
+ {
+ uint32_t numPushed = block->stackDepth() - block->info().firstStackSlot();
+ MOZ_ASSERT(numPushed == 0 || numPushed == 1);
+ return numPushed;
+ }
+
+ static MDefinition* peekPushedDef(MBasicBlock* block)
+ {
+ MOZ_ASSERT(hasPushed(block));
+ return block->getSlot(block->stackDepth() - 1);
+ }
+
+ public:
+ void pushDef(MDefinition* def)
+ {
+ if (inDeadCode())
+ return;
+ MOZ_ASSERT(!hasPushed(curBlock_));
+ if (def && def->type() != MIRType::None)
+ curBlock_->push(def);
+ }
+
+ MDefinition* popDefIfPushed(bool shouldReturn = true)
+ {
+ if (!hasPushed(curBlock_))
+ return nullptr;
+ MDefinition* def = curBlock_->pop();
+ MOZ_ASSERT_IF(def->type() == MIRType::Value, !shouldReturn);
+ return shouldReturn ? def : nullptr;
+ }
+
+ template <typename GetBlock>
+ bool ensurePushInvariants(const GetBlock& getBlock, size_t numBlocks)
+ {
+        // Preserve the invariant that, across all the iterated MBasicBlocks,
+        // either every block has a pushed expression of the same type (to
+        // prevent creating used phis with type Value) or no block has a pushed
+        // expression at all. This is required by MBasicBlock::addPredecessor.
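+        //
+        // For example (hypothetical): if one join predecessor pushed an Int32
+        // definition and another pushed nothing, the blocks with nothing
+        // pushed get dummyIns_ pushed below, and the function returns false so
+        // that the caller discards the popped value at the join.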
+ if (numBlocks < 2)
+ return true;
+
+ MBasicBlock* block = getBlock(0);
+
+ bool allPushed = hasPushed(block);
+ if (allPushed) {
+ MIRType type = peekPushedDef(block)->type();
+ for (size_t i = 1; allPushed && i < numBlocks; i++) {
+ block = getBlock(i);
+ allPushed = hasPushed(block) && peekPushedDef(block)->type() == type;
+ }
+ }
+
+ if (!allPushed) {
+ for (size_t i = 0; i < numBlocks; i++) {
+ block = getBlock(i);
+ if (!hasPushed(block))
+ block->push(dummyIns_);
+ }
+ }
+
+ return allPushed;
+ }
+
+ private:
+ void addJoinPredecessor(MDefinition* def, MBasicBlock** joinPred)
+ {
+ *joinPred = curBlock_;
+ if (inDeadCode())
+ return;
+ pushDef(def);
+ }
+
+ public:
+ bool branchAndStartThen(MDefinition* cond, MBasicBlock** elseBlock)
+ {
+ if (inDeadCode()) {
+ *elseBlock = nullptr;
+ } else {
+ MBasicBlock* thenBlock;
+ if (!newBlock(curBlock_, &thenBlock))
+ return false;
+ if (!newBlock(curBlock_, elseBlock))
+ return false;
+
+ curBlock_->end(MTest::New(alloc(), cond, thenBlock, *elseBlock));
+
+ curBlock_ = thenBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred)
+ {
+ MDefinition* ifDef;
+ if (!finishBlock(&ifDef))
+ return false;
+
+ if (!elseBlock) {
+ *thenJoinPred = nullptr;
+ } else {
+ addJoinPredecessor(ifDef, thenJoinPred);
+
+ curBlock_ = elseBlock;
+ mirGraph().moveBlockToEnd(curBlock_);
+ }
+
+ return startBlock();
+ }
+
+ bool joinIfElse(MBasicBlock* thenJoinPred, MDefinition** def)
+ {
+ MDefinition* elseDef;
+ if (!finishBlock(&elseDef))
+ return false;
+
+ if (!thenJoinPred && inDeadCode()) {
+ *def = nullptr;
+ } else {
+ MBasicBlock* elseJoinPred;
+ addJoinPredecessor(elseDef, &elseJoinPred);
+
+ mozilla::Array<MBasicBlock*, 2> blocks;
+ size_t numJoinPreds = 0;
+ if (thenJoinPred)
+ blocks[numJoinPreds++] = thenJoinPred;
+ if (elseJoinPred)
+ blocks[numJoinPreds++] = elseJoinPred;
+
+ auto getBlock = [&](size_t i) -> MBasicBlock* { return blocks[i]; };
+ bool yieldsValue = ensurePushInvariants(getBlock, numJoinPreds);
+
+ if (numJoinPreds == 0) {
+ *def = nullptr;
+ return true;
+ }
+
+ MBasicBlock* join;
+ if (!goToNewBlock(blocks[0], &join))
+ return false;
+ for (size_t i = 1; i < numJoinPreds; ++i) {
+ if (!goToExistingBlock(blocks[i], join))
+ return false;
+ }
+
+ curBlock_ = join;
+ *def = popDefIfPushed(yieldsValue);
+ }
+
+ return true;
+ }
+
+ bool startBlock()
+ {
+ MOZ_ASSERT_IF(blockDepth_ < blockPatches_.length(), blockPatches_[blockDepth_].empty());
+ blockDepth_++;
+ return true;
+ }
+
+ bool finishBlock(MDefinition** def)
+ {
+ MOZ_ASSERT(blockDepth_);
+ uint32_t topLabel = --blockDepth_;
+ return bindBranches(topLabel, def);
+ }
+
+ bool startLoop(MBasicBlock** loopHeader)
+ {
+ *loopHeader = nullptr;
+
+ blockDepth_++;
+ loopDepth_++;
+
+ if (inDeadCode())
+ return true;
+
+ // Create the loop header.
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_ - 1);
+ *loopHeader = MBasicBlock::New(mirGraph(), info(), curBlock_,
+ MBasicBlock::PENDING_LOOP_HEADER);
+ if (!*loopHeader)
+ return false;
+
+ (*loopHeader)->setLoopDepth(loopDepth_);
+ mirGraph().addBlock(*loopHeader);
+ curBlock_->end(MGoto::New(alloc(), *loopHeader));
+
+ MBasicBlock* body;
+ if (!goToNewBlock(*loopHeader, &body))
+ return false;
+ curBlock_ = body;
+ return true;
+ }
+
+ private:
+ void fixupRedundantPhis(MBasicBlock* b)
+ {
+ for (size_t i = 0, depth = b->stackDepth(); i < depth; i++) {
+ MDefinition* def = b->getSlot(i);
+ if (def->isUnused())
+ b->setSlot(i, def->toPhi()->getOperand(0));
+ }
+ }
+
+ bool setLoopBackedge(MBasicBlock* loopEntry, MBasicBlock* loopBody, MBasicBlock* backedge)
+ {
+ if (!loopEntry->setBackedgeWasm(backedge))
+ return false;
+
+ // Flag all redundant phis as unused.
+ for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd(); phi++) {
+ MOZ_ASSERT(phi->numOperands() == 2);
+ if (phi->getOperand(0) == phi->getOperand(1))
+ phi->setUnused();
+ }
+
+ // Fix up phis stored in the slots Vector of pending blocks.
+ for (ControlFlowPatchVector& patches : blockPatches_) {
+ for (ControlFlowPatch& p : patches) {
+ MBasicBlock* block = p.ins->block();
+ if (block->loopDepth() >= loopEntry->loopDepth())
+ fixupRedundantPhis(block);
+ }
+ }
+
+ // The loop body, if any, might be referencing recycled phis too.
+ if (loopBody)
+ fixupRedundantPhis(loopBody);
+
+ // Discard redundant phis and add to the free list.
+ for (MPhiIterator phi = loopEntry->phisBegin(); phi != loopEntry->phisEnd(); ) {
+ MPhi* entryDef = *phi++;
+ if (!entryDef->isUnused())
+ continue;
+
+ entryDef->justReplaceAllUsesWith(entryDef->getOperand(0));
+ loopEntry->discardPhi(entryDef);
+ mirGraph().addPhiToFreeList(entryDef);
+ }
+
+ return true;
+ }
+
+ public:
+ bool closeLoop(MBasicBlock* loopHeader, MDefinition** loopResult)
+ {
+ MOZ_ASSERT(blockDepth_ >= 1);
+ MOZ_ASSERT(loopDepth_);
+
+ uint32_t headerLabel = blockDepth_ - 1;
+
+ if (!loopHeader) {
+ MOZ_ASSERT(inDeadCode());
+ MOZ_ASSERT(headerLabel >= blockPatches_.length() || blockPatches_[headerLabel].empty());
+ blockDepth_--;
+ loopDepth_--;
+ *loopResult = nullptr;
+ return true;
+ }
+
+ // Op::Loop doesn't have an implicit backedge so temporarily set
+ // aside the end of the loop body to bind backedges.
+ MBasicBlock* loopBody = curBlock_;
+ curBlock_ = nullptr;
+
+ // As explained in bug 1253544, Ion apparently has an invariant that
+ // there is only one backedge to loop headers. To handle wasm's ability
+ // to have multiple backedges to the same loop header, we bind all those
+ // branches as forward jumps to a single backward jump. This is
+ // unfortunate but the optimizer is able to fold these into single jumps
+ // to backedges.
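+        //
+        // For example (illustrative): a loop body containing two br
+        // instructions that target the loop header yields two forward edges
+        // into the single backedge block created by bindBranches() below, and
+        // only that block ends with the actual backward MGoto to the header.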
+ MDefinition* _;
+ if (!bindBranches(headerLabel, &_))
+ return false;
+
+ MOZ_ASSERT(loopHeader->loopDepth() == loopDepth_);
+
+ if (curBlock_) {
+ // We're on the loop backedge block, created by bindBranches.
+ if (hasPushed(curBlock_))
+ curBlock_->pop();
+
+ MOZ_ASSERT(curBlock_->loopDepth() == loopDepth_);
+ curBlock_->end(MGoto::New(alloc(), loopHeader));
+ if (!setLoopBackedge(loopHeader, loopBody, curBlock_))
+ return false;
+ }
+
+ curBlock_ = loopBody;
+
+ loopDepth_--;
+
+        // If the loop depth is still that of the inner loop body, correct it.
+ if (curBlock_ && curBlock_->loopDepth() != loopDepth_) {
+ MBasicBlock* out;
+ if (!goToNewBlock(curBlock_, &out))
+ return false;
+ curBlock_ = out;
+ }
+
+ blockDepth_ -= 1;
+ *loopResult = inDeadCode() ? nullptr : popDefIfPushed();
+ return true;
+ }
+
+ bool addControlFlowPatch(MControlInstruction* ins, uint32_t relative, uint32_t index) {
+ MOZ_ASSERT(relative < blockDepth_);
+ uint32_t absolute = blockDepth_ - 1 - relative;
+
+ if (absolute >= blockPatches_.length() && !blockPatches_.resize(absolute + 1))
+ return false;
+
+ return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
+ }
+
+ bool br(uint32_t relativeDepth, MDefinition* maybeValue)
+ {
+ if (inDeadCode())
+ return true;
+
+ MGoto* jump = MGoto::New(alloc());
+ if (!addControlFlowPatch(jump, relativeDepth, MGoto::TargetIndex))
+ return false;
+
+ pushDef(maybeValue);
+
+ curBlock_->end(jump);
+ curBlock_ = nullptr;
+ return true;
+ }
+
+ bool brIf(uint32_t relativeDepth, MDefinition* maybeValue, MDefinition* condition)
+ {
+ if (inDeadCode())
+ return true;
+
+ MBasicBlock* joinBlock = nullptr;
+ if (!newBlock(curBlock_, &joinBlock))
+ return false;
+
+ MTest* test = MTest::New(alloc(), condition, joinBlock);
+ if (!addControlFlowPatch(test, relativeDepth, MTest::TrueBranchIndex))
+ return false;
+
+ pushDef(maybeValue);
+
+ curBlock_->end(test);
+ curBlock_ = joinBlock;
+ return true;
+ }
+
+ bool brTable(MDefinition* operand, uint32_t defaultDepth, const Uint32Vector& depths,
+ MDefinition* maybeValue)
+ {
+ if (inDeadCode())
+ return true;
+
+ size_t numCases = depths.length();
+ MOZ_ASSERT(numCases <= INT32_MAX);
+ MOZ_ASSERT(numCases);
+
+ MTableSwitch* table = MTableSwitch::New(alloc(), operand, 0, int32_t(numCases - 1));
+
+ size_t defaultIndex;
+ if (!table->addDefault(nullptr, &defaultIndex))
+ return false;
+ if (!addControlFlowPatch(table, defaultDepth, defaultIndex))
+ return false;
+
+ typedef HashMap<uint32_t, uint32_t, DefaultHasher<uint32_t>, SystemAllocPolicy>
+ IndexToCaseMap;
+
+ IndexToCaseMap indexToCase;
+ if (!indexToCase.init() || !indexToCase.put(defaultDepth, defaultIndex))
+ return false;
+
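+    // Successors are deduplicated by depth. For example (hypothetical), a
+    // br_table whose case depths are [1, 1, 0] with default depth 0 ends up
+    // with only two successors on the MTableSwitch: one shared by the two
+    // depth-1 cases and one shared by the depth-0 case and the default.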
+ for (size_t i = 0; i < numCases; i++) {
+ uint32_t depth = depths[i];
+
+ size_t caseIndex;
+ IndexToCaseMap::AddPtr p = indexToCase.lookupForAdd(depth);
+ if (!p) {
+ if (!table->addSuccessor(nullptr, &caseIndex))
+ return false;
+ if (!addControlFlowPatch(table, depth, caseIndex))
+ return false;
+ if (!indexToCase.add(p, depth, caseIndex))
+ return false;
+ } else {
+ caseIndex = p->value();
+ }
+
+ if (!table->addCase(caseIndex))
+ return false;
+ }
+
+ pushDef(maybeValue);
+
+ curBlock_->end(table);
+ curBlock_ = nullptr;
+
+ return true;
+ }
+
+ /************************************************************ DECODING ***/
+
+ uint32_t readCallSiteLineOrBytecode() {
+ if (!func_.callSiteLineNums().empty())
+ return func_.callSiteLineNums()[lastReadCallSite_++];
+ return iter_.trapOffset().bytecodeOffset;
+ }
+
+ bool done() const { return iter_.done(); }
+
+ /*************************************************************************/
+ private:
+ bool newBlock(MBasicBlock* pred, MBasicBlock** block)
+ {
+ *block = MBasicBlock::New(mirGraph(), info(), pred, MBasicBlock::NORMAL);
+ if (!*block)
+ return false;
+ mirGraph().addBlock(*block);
+ (*block)->setLoopDepth(loopDepth_);
+ return true;
+ }
+
+ bool goToNewBlock(MBasicBlock* pred, MBasicBlock** block)
+ {
+ if (!newBlock(pred, block))
+ return false;
+ pred->end(MGoto::New(alloc(), *block));
+ return true;
+ }
+
+ bool goToExistingBlock(MBasicBlock* prev, MBasicBlock* next)
+ {
+ MOZ_ASSERT(prev);
+ MOZ_ASSERT(next);
+ prev->end(MGoto::New(alloc(), next));
+ return next->addPredecessor(alloc(), prev);
+ }
+
+ bool bindBranches(uint32_t absolute, MDefinition** def)
+ {
+ if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
+ *def = inDeadCode() ? nullptr : popDefIfPushed();
+ return true;
+ }
+
+ ControlFlowPatchVector& patches = blockPatches_[absolute];
+
+ auto getBlock = [&](size_t i) -> MBasicBlock* {
+ if (i < patches.length())
+ return patches[i].ins->block();
+ return curBlock_;
+ };
+
+ bool yieldsValue = ensurePushInvariants(getBlock, patches.length() + !!curBlock_);
+
+ MBasicBlock* join = nullptr;
+ MControlInstruction* ins = patches[0].ins;
+ MBasicBlock* pred = ins->block();
+ if (!newBlock(pred, &join))
+ return false;
+
+ pred->mark();
+ ins->replaceSuccessor(patches[0].index, join);
+
+ for (size_t i = 1; i < patches.length(); i++) {
+ ins = patches[i].ins;
+
+ pred = ins->block();
+ if (!pred->isMarked()) {
+ if (!join->addPredecessor(alloc(), pred))
+ return false;
+ pred->mark();
+ }
+
+ ins->replaceSuccessor(patches[i].index, join);
+ }
+
+ MOZ_ASSERT_IF(curBlock_, !curBlock_->isMarked());
+ for (uint32_t i = 0; i < join->numPredecessors(); i++)
+ join->getPredecessor(i)->unmark();
+
+ if (curBlock_ && !goToExistingBlock(curBlock_, join))
+ return false;
+
+ curBlock_ = join;
+
+ *def = popDefIfPushed(yieldsValue);
+
+ patches.clear();
+ return true;
+ }
+};
+
+template <>
+MDefinition* FunctionCompiler::unary<MToFloat32>(MDefinition* op)
+{
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MToFloat32::New(alloc(), op, mustPreserveNaN(op->type()));
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MNot>(MDefinition* op)
+{
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MNot::NewInt32(alloc(), op);
+ curBlock_->add(ins);
+ return ins;
+}
+
+template <>
+MDefinition* FunctionCompiler::unary<MAbs>(MDefinition* op, MIRType type)
+{
+ if (inDeadCode())
+ return nullptr;
+ auto* ins = MAbs::NewWasm(alloc(), op, type);
+ curBlock_->add(ins);
+ return ins;
+}
+
+} // end anonymous namespace
+
+static bool
+EmitBlock(FunctionCompiler& f)
+{
+ return f.iter().readBlock() &&
+ f.startBlock();
+}
+
+static bool
+EmitLoop(FunctionCompiler& f)
+{
+ if (!f.iter().readLoop())
+ return false;
+
+ MBasicBlock* loopHeader;
+ if (!f.startLoop(&loopHeader))
+ return false;
+
+ f.addInterruptCheck();
+
+ f.iter().controlItem() = loopHeader;
+ return true;
+}
+
+static bool
+EmitIf(FunctionCompiler& f)
+{
+ MDefinition* condition = nullptr;
+ if (!f.iter().readIf(&condition))
+ return false;
+
+ MBasicBlock* elseBlock;
+ if (!f.branchAndStartThen(condition, &elseBlock))
+ return false;
+
+ f.iter().controlItem() = elseBlock;
+ return true;
+}
+
+static bool
+EmitElse(FunctionCompiler& f)
+{
+ MBasicBlock* block = f.iter().controlItem();
+
+ ExprType thenType;
+ MDefinition* thenValue;
+ if (!f.iter().readElse(&thenType, &thenValue))
+ return false;
+
+ if (!IsVoid(thenType))
+ f.pushDef(thenValue);
+
+ if (!f.switchToElse(block, &f.iter().controlItem()))
+ return false;
+
+ return true;
+}
+
+static bool
+EmitEnd(FunctionCompiler& f)
+{
+ MBasicBlock* block = f.iter().controlItem();
+
+ LabelKind kind;
+ ExprType type;
+ MDefinition* value;
+ if (!f.iter().readEnd(&kind, &type, &value))
+ return false;
+
+ if (!IsVoid(type))
+ f.pushDef(value);
+
+ MDefinition* def = nullptr;
+ switch (kind) {
+ case LabelKind::Block:
+ if (!f.finishBlock(&def))
+ return false;
+ break;
+ case LabelKind::Loop:
+ if (!f.closeLoop(block, &def))
+ return false;
+ break;
+ case LabelKind::Then:
+ case LabelKind::UnreachableThen:
+ // If we didn't see an Else, create a trivial else block so that we create
+ // a diamond anyway, to preserve Ion invariants.
+ if (!f.switchToElse(block, &block))
+ return false;
+
+ if (!f.joinIfElse(block, &def))
+ return false;
+ break;
+ case LabelKind::Else:
+ if (!f.joinIfElse(block, &def))
+ return false;
+ break;
+ }
+
+ if (!IsVoid(type)) {
+ MOZ_ASSERT_IF(!f.inDeadCode(), def);
+ f.iter().setResult(def);
+ }
+
+ return true;
+}
+
+static bool
+EmitBr(FunctionCompiler& f)
+{
+ uint32_t relativeDepth;
+ ExprType type;
+ MDefinition* value;
+ if (!f.iter().readBr(&relativeDepth, &type, &value))
+ return false;
+
+ if (IsVoid(type)) {
+ if (!f.br(relativeDepth, nullptr))
+ return false;
+ } else {
+ if (!f.br(relativeDepth, value))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+EmitBrIf(FunctionCompiler& f)
+{
+ uint32_t relativeDepth;
+ ExprType type;
+ MDefinition* value;
+ MDefinition* condition;
+ if (!f.iter().readBrIf(&relativeDepth, &type, &value, &condition))
+ return false;
+
+ if (IsVoid(type)) {
+ if (!f.brIf(relativeDepth, nullptr, condition))
+ return false;
+ } else {
+ if (!f.brIf(relativeDepth, value, condition))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+EmitBrTable(FunctionCompiler& f)
+{
+ uint32_t tableLength;
+ ExprType type;
+ MDefinition* value;
+ MDefinition* index;
+ if (!f.iter().readBrTable(&tableLength, &type, &value, &index))
+ return false;
+
+ Uint32Vector depths;
+ if (!depths.reserve(tableLength))
+ return false;
+
+ for (size_t i = 0; i < tableLength; ++i) {
+ uint32_t depth;
+ if (!f.iter().readBrTableEntry(&type, &value, &depth))
+ return false;
+ depths.infallibleAppend(depth);
+ }
+
+ // Read the default label.
+ uint32_t defaultDepth;
+ if (!f.iter().readBrTableDefault(&type, &value, &defaultDepth))
+ return false;
+
+ MDefinition* maybeValue = IsVoid(type) ? nullptr : value;
+
+ // If all the targets are the same, or there are no targets, we can just
+ // use a goto. This is not just an optimization: MaybeFoldConditionBlock
+ // assumes that tables have more than one successor.
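+    // (For instance, a br_table whose entries and default all name the same
+    // depth is emitted as a plain unconditional branch below.)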
+ bool allSameDepth = true;
+ for (uint32_t depth : depths) {
+ if (depth != defaultDepth) {
+ allSameDepth = false;
+ break;
+ }
+ }
+
+ if (allSameDepth)
+ return f.br(defaultDepth, maybeValue);
+
+ return f.brTable(index, defaultDepth, depths, maybeValue);
+}
+
+static bool
+EmitReturn(FunctionCompiler& f)
+{
+ MDefinition* value;
+ if (!f.iter().readReturn(&value))
+ return false;
+
+ if (f.inDeadCode())
+ return true;
+
+ if (IsVoid(f.sig().ret())) {
+ f.returnVoid();
+ return true;
+ }
+
+ f.returnExpr(value);
+ return true;
+}
+
+static bool
+EmitCallArgs(FunctionCompiler& f, const Sig& sig, TlsUsage tls, CallCompileState* call)
+{
+ MOZ_ASSERT(NeedsTls(tls));
+
+ if (!f.startCall(call))
+ return false;
+
+ MDefinition* arg;
+ const ValTypeVector& argTypes = sig.args();
+ uint32_t numArgs = argTypes.length();
+ for (size_t i = 0; i < numArgs; ++i) {
+ ValType argType = argTypes[i];
+ if (!f.iter().readCallArg(argType, numArgs, i, &arg))
+ return false;
+ if (!f.passArg(arg, argType, call))
+ return false;
+ }
+
+ if (!f.iter().readCallArgsEnd(numArgs))
+ return false;
+
+ return f.finishCall(call, tls);
+}
+
+static bool
+EmitCall(FunctionCompiler& f)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t funcIndex;
+ if (!f.iter().readCall(&funcIndex))
+ return false;
+
+ if (f.inDeadCode())
+ return true;
+
+ const Sig& sig = *f.mg().funcSigs[funcIndex];
+ bool import = f.mg().funcIsImport(funcIndex);
+
+ CallCompileState call(f, lineOrBytecode);
+ if (!EmitCallArgs(f, sig, import ? TlsUsage::CallerSaved : TlsUsage::Need, &call))
+ return false;
+
+ if (!f.iter().readCallReturn(sig.ret()))
+ return false;
+
+ MDefinition* def;
+ if (import) {
+ uint32_t globalDataOffset = f.mg().funcImportGlobalDataOffsets[funcIndex];
+ if (!f.callImport(globalDataOffset, call, sig.ret(), &def))
+ return false;
+ } else {
+ if (!f.callDirect(sig, funcIndex, call, &def))
+ return false;
+ }
+
+ if (IsVoid(sig.ret()))
+ return true;
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool
+EmitCallIndirect(FunctionCompiler& f, bool oldStyle)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ uint32_t sigIndex;
+ MDefinition* callee;
+ if (oldStyle) {
+ if (!f.iter().readOldCallIndirect(&sigIndex))
+ return false;
+ } else {
+ if (!f.iter().readCallIndirect(&sigIndex, &callee))
+ return false;
+ }
+
+ if (f.inDeadCode())
+ return true;
+
+ const Sig& sig = f.mg().sigs[sigIndex];
+
+ TlsUsage tls = !f.mg().isAsmJS() && f.mg().tables[0].external
+ ? TlsUsage::CallerSaved
+ : TlsUsage::Need;
+
+ CallCompileState call(f, lineOrBytecode);
+ if (!EmitCallArgs(f, sig, tls, &call))
+ return false;
+
+ if (oldStyle) {
+ if (!f.iter().readOldCallIndirectCallee(&callee))
+ return false;
+ }
+
+ if (!f.iter().readCallReturn(sig.ret()))
+ return false;
+
+ MDefinition* def;
+ if (!f.callIndirect(sigIndex, callee, call, &def))
+ return false;
+
+ if (IsVoid(sig.ret()))
+ return true;
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool
+EmitGetLocal(FunctionCompiler& f)
+{
+ uint32_t id;
+ if (!f.iter().readGetLocal(f.locals(), &id))
+ return false;
+
+ f.iter().setResult(f.getLocalDef(id));
+ return true;
+}
+
+static bool
+EmitSetLocal(FunctionCompiler& f)
+{
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetLocal(f.locals(), &id, &value))
+ return false;
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool
+EmitTeeLocal(FunctionCompiler& f)
+{
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeLocal(f.locals(), &id, &value))
+ return false;
+
+ f.assign(id, value);
+ return true;
+}
+
+static bool
+EmitGetGlobal(FunctionCompiler& f)
+{
+ uint32_t id;
+ if (!f.iter().readGetGlobal(f.mg().globals, &id))
+ return false;
+
+ const GlobalDesc& global = f.mg().globals[id];
+ if (!global.isConstant()) {
+ f.iter().setResult(f.loadGlobalVar(global.offset(), !global.isMutable(),
+ ToMIRType(global.type())));
+ return true;
+ }
+
+ Val value = global.constantValue();
+ MIRType mirType = ToMIRType(value.type());
+
+ MDefinition* result;
+ switch (value.type()) {
+ case ValType::I32:
+ result = f.constant(Int32Value(value.i32()), mirType);
+ break;
+ case ValType::I64:
+ result = f.constant(int64_t(value.i64()));
+ break;
+ case ValType::F32:
+ result = f.constant(value.f32());
+ break;
+ case ValType::F64:
+ result = f.constant(value.f64());
+ break;
+ case ValType::I8x16:
+ result = f.constant(SimdConstant::CreateX16(value.i8x16()), mirType);
+ break;
+ case ValType::I16x8:
+ result = f.constant(SimdConstant::CreateX8(value.i16x8()), mirType);
+ break;
+ case ValType::I32x4:
+ result = f.constant(SimdConstant::CreateX4(value.i32x4()), mirType);
+ break;
+ case ValType::F32x4:
+ result = f.constant(SimdConstant::CreateX4(value.f32x4()), mirType);
+ break;
+ default:
+ MOZ_CRASH("unexpected type in EmitGetGlobal");
+ }
+
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool
+EmitSetGlobal(FunctionCompiler& f)
+{
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readSetGlobal(f.mg().globals, &id, &value))
+ return false;
+
+ const GlobalDesc& global = f.mg().globals[id];
+ MOZ_ASSERT(global.isMutable());
+
+ f.storeGlobalVar(global.offset(), value);
+ return true;
+}
+
+static bool
+EmitTeeGlobal(FunctionCompiler& f)
+{
+ uint32_t id;
+ MDefinition* value;
+ if (!f.iter().readTeeGlobal(f.mg().globals, &id, &value))
+ return false;
+
+ const GlobalDesc& global = f.mg().globals[id];
+ MOZ_ASSERT(global.isMutable());
+
+ f.storeGlobalVar(global.offset(), value);
+ return true;
+}
+
+template <typename MIRClass>
+static bool
+EmitUnary(FunctionCompiler& f, ValType operandType)
+{
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input))
+ return false;
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool
+EmitConversion(FunctionCompiler& f, ValType operandType, ValType resultType)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input))
+ return false;
+
+ f.iter().setResult(f.unary<MIRClass>(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool
+EmitUnaryWithType(FunctionCompiler& f, ValType operandType, MIRType mirType)
+{
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input))
+ return false;
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
+template <typename MIRClass>
+static bool
+EmitConversionWithType(FunctionCompiler& f,
+ ValType operandType, ValType resultType, MIRType mirType)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input))
+ return false;
+
+ f.iter().setResult(f.unary<MIRClass>(input, mirType));
+ return true;
+}
+
+static bool
+EmitTruncate(FunctionCompiler& f, ValType operandType, ValType resultType,
+ bool isUnsigned)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input))
+ return false;
+
+ if (resultType == ValType::I32) {
+ if (f.mg().isAsmJS())
+ f.iter().setResult(f.unary<MTruncateToInt32>(input));
+ else
+ f.iter().setResult(f.truncate<MWasmTruncateToInt32>(input, isUnsigned));
+ } else {
+ MOZ_ASSERT(resultType == ValType::I64);
+ MOZ_ASSERT(!f.mg().isAsmJS());
+ f.iter().setResult(f.truncate<MWasmTruncateToInt64>(input, isUnsigned));
+ }
+ return true;
+}
+
+static bool
+EmitExtendI32(FunctionCompiler& f, bool isUnsigned)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I32, ValType::I64, &input))
+ return false;
+
+ f.iter().setResult(f.extendI32(input, isUnsigned));
+ return true;
+}
+
+static bool
+EmitConvertI64ToFloatingPoint(FunctionCompiler& f,
+ ValType resultType, MIRType mirType, bool isUnsigned)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(ValType::I64, resultType, &input))
+ return false;
+
+ f.iter().setResult(f.convertI64ToFloatingPoint(input, mirType, isUnsigned));
+ return true;
+}
+
+static bool
+EmitReinterpret(FunctionCompiler& f, ValType resultType, ValType operandType, MIRType mirType)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(operandType, resultType, &input))
+ return false;
+
+ f.iter().setResult(f.unary<MWasmReinterpret>(input, mirType));
+ return true;
+}
+
+static bool
+EmitAdd(FunctionCompiler& f, ValType type, MIRType mirType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binary<MAdd>(lhs, rhs, mirType));
+ return true;
+}
+
+static bool
+EmitSub(FunctionCompiler& f, ValType type, MIRType mirType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.sub(lhs, rhs, mirType));
+ return true;
+}
+
+static bool
+EmitRotate(FunctionCompiler& f, ValType type, bool isLeftRotation)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs))
+ return false;
+
+ MDefinition* result = f.rotate(lhs, rhs, ToMIRType(type), isLeftRotation);
+ f.iter().setResult(result);
+ return true;
+}
+
+static bool
+EmitBitNot(FunctionCompiler& f, ValType operandType)
+{
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input))
+ return false;
+
+ f.iter().setResult(f.bitnot(input));
+ return true;
+}
+
+template <typename MIRClass>
+static bool
+EmitBitwise(FunctionCompiler& f, ValType operandType, MIRType mirType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binary<MIRClass>(lhs, rhs, mirType));
+ return true;
+}
+
+static bool
+EmitMul(FunctionCompiler& f, ValType operandType, MIRType mirType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.mul(lhs, rhs, mirType,
+ mirType == MIRType::Int32 ? MMul::Integer : MMul::Normal));
+ return true;
+}
+
+static bool
+EmitDiv(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isUnsigned)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.div(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool
+EmitRem(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isUnsigned)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.mod(lhs, rhs, mirType, isUnsigned));
+ return true;
+}
+
+static bool
+EmitMinMax(FunctionCompiler& f, ValType operandType, MIRType mirType, bool isMax)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.minMax(lhs, rhs, mirType, isMax));
+ return true;
+}
+
+static bool
+EmitCopySign(FunctionCompiler& f, ValType operandType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binary<MCopySign>(lhs, rhs, ToMIRType(operandType)));
+ return true;
+}
+
+static bool
+EmitComparison(FunctionCompiler& f,
+ ValType operandType, JSOp compareOp, MCompare::CompareType compareType)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readComparison(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.compare(lhs, rhs, compareOp, compareType));
+ return true;
+}
+
+static bool
+EmitSelect(FunctionCompiler& f)
+{
+ ValType type;
+ MDefinition* trueValue;
+ MDefinition* falseValue;
+ MDefinition* condition;
+ if (!f.iter().readSelect(&type, &trueValue, &falseValue, &condition))
+ return false;
+
+ f.iter().setResult(f.select(trueValue, falseValue, condition));
+ return true;
+}
+
+static bool
+EmitLoad(FunctionCompiler& f, ValType type, Scalar::Type viewType)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoad(type, Scalar::byteSize(viewType), &addr))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.trapIfNotAsmJS());
+ f.iter().setResult(f.load(addr.base, access, type));
+ return true;
+}
+
+static bool
+EmitStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readStore(resultType, Scalar::byteSize(viewType), &addr, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.trapIfNotAsmJS());
+
+ f.store(addr.base, access, value);
+ return true;
+}
+
+static bool
+EmitTeeStore(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.trapIfNotAsmJS());
+
+ f.store(addr.base, access, value);
+ return true;
+}
+
+static bool
+EmitTeeStoreWithCoercion(FunctionCompiler& f, ValType resultType, Scalar::Type viewType)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
+ return false;
+
+ if (resultType == ValType::F32 && viewType == Scalar::Float64)
+ value = f.unary<MToDouble>(value);
+ else if (resultType == ValType::F64 && viewType == Scalar::Float32)
+ value = f.unary<MToFloat32>(value);
+ else
+ MOZ_CRASH("unexpected coerced store");
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, f.trapIfNotAsmJS());
+
+ f.store(addr.base, access, value);
+ return true;
+}
+
+static bool
+EmitUnaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType operandType)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState call(f, lineOrBytecode);
+ if (!f.startCall(&call))
+ return false;
+
+ MDefinition* input;
+ if (!f.iter().readUnary(operandType, &input))
+ return false;
+
+ if (!f.passArg(input, operandType, &call))
+ return false;
+
+ if (!f.finishCall(&call, TlsUsage::Unused))
+ return false;
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, call, operandType, &def))
+ return false;
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool
+EmitBinaryMathBuiltinCall(FunctionCompiler& f, SymbolicAddress callee, ValType operandType)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState call(f, lineOrBytecode);
+ if (!f.startCall(&call))
+ return false;
+
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(operandType, &lhs, &rhs))
+ return false;
+
+ if (!f.passArg(lhs, operandType, &call))
+ return false;
+
+ if (!f.passArg(rhs, operandType, &call))
+ return false;
+
+ if (!f.finishCall(&call, TlsUsage::Unused))
+ return false;
+
+ MDefinition* def;
+ if (!f.builtinCall(callee, call, operandType, &def))
+ return false;
+
+ f.iter().setResult(def);
+ return true;
+}
+
+static bool
+EmitAtomicsLoad(FunctionCompiler& f)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ Scalar::Type viewType;
+ if (!f.iter().readAtomicLoad(&addr, &viewType))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), 0,
+ MembarBeforeLoad, MembarAfterLoad);
+
+ f.iter().setResult(f.load(addr.base, access, ValType::I32));
+ return true;
+}
+
+static bool
+EmitAtomicsStore(FunctionCompiler& f)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ Scalar::Type viewType;
+ MDefinition* value;
+ if (!f.iter().readAtomicStore(&addr, &viewType, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), 0,
+ MembarBeforeStore, MembarAfterStore);
+
+ f.store(addr.base, access, value);
+ f.iter().setResult(value);
+ return true;
+}
+
+static bool
+EmitAtomicsBinOp(FunctionCompiler& f)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ Scalar::Type viewType;
+ jit::AtomicOp op;
+ MDefinition* value;
+ if (!f.iter().readAtomicBinOp(&addr, &viewType, &op, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()));
+
+ f.iter().setResult(f.atomicBinopHeap(op, addr.base, access, value));
+ return true;
+}
+
+static bool
+EmitAtomicsCompareExchange(FunctionCompiler& f)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ Scalar::Type viewType;
+ MDefinition* oldValue;
+ MDefinition* newValue;
+ if (!f.iter().readAtomicCompareExchange(&addr, &viewType, &oldValue, &newValue))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()));
+
+ f.iter().setResult(f.atomicCompareExchangeHeap(addr.base, access, oldValue, newValue));
+ return true;
+}
+
+static bool
+EmitAtomicsExchange(FunctionCompiler& f)
+{
+ LinearMemoryAddress<MDefinition*> addr;
+ Scalar::Type viewType;
+ MDefinition* value;
+ if (!f.iter().readAtomicExchange(&addr, &viewType, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()));
+
+ f.iter().setResult(f.atomicExchangeHeap(addr.base, access, value));
+ return true;
+}
+
+static bool
+EmitSimdUnary(FunctionCompiler& f, ValType type, SimdOperation simdOp)
+{
+ MSimdUnaryArith::Operation op;
+ switch (simdOp) {
+ case SimdOperation::Fn_abs:
+ op = MSimdUnaryArith::abs;
+ break;
+ case SimdOperation::Fn_neg:
+ op = MSimdUnaryArith::neg;
+ break;
+ case SimdOperation::Fn_not:
+ op = MSimdUnaryArith::not_;
+ break;
+ case SimdOperation::Fn_sqrt:
+ op = MSimdUnaryArith::sqrt;
+ break;
+ case SimdOperation::Fn_reciprocalApproximation:
+ op = MSimdUnaryArith::reciprocalApproximation;
+ break;
+ case SimdOperation::Fn_reciprocalSqrtApproximation:
+ op = MSimdUnaryArith::reciprocalSqrtApproximation;
+ break;
+ default:
+ MOZ_CRASH("not a simd unary arithmetic operation");
+ }
+
+ MDefinition* input;
+ if (!f.iter().readUnary(type, &input))
+ return false;
+
+ f.iter().setResult(f.unarySimd(input, op, ToMIRType(type)));
+ return true;
+}
+
+template<class OpKind>
+inline bool
+EmitSimdBinary(FunctionCompiler& f, ValType type, OpKind op)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binarySimd(lhs, rhs, op, ToMIRType(type)));
+ return true;
+}
+
+static bool
+EmitSimdBinaryComp(FunctionCompiler& f, ValType operandType, MSimdBinaryComp::Operation op,
+ SimdSign sign)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readSimdComparison(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binarySimdComp(lhs, rhs, op, sign));
+ return true;
+}
+
+static bool
+EmitSimdBinarySaturating(FunctionCompiler& f, ValType type, MSimdBinarySaturating::Operation op,
+ SimdSign sign)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readBinary(type, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binarySimdSaturating(lhs, rhs, op, sign));
+ return true;
+}
+
+static bool
+EmitSimdShift(FunctionCompiler& f, ValType operandType, MSimdShift::Operation op)
+{
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readSimdShiftByScalar(operandType, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.binarySimdShift(lhs, rhs, op));
+ return true;
+}
+
+static ValType
+SimdToLaneType(ValType type)
+{
+ switch (type) {
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4: return ValType::I32;
+ case ValType::F32x4: return ValType::F32;
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4: return ValType::I32; // Boolean lanes are Int32 in asm.
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ break;
+ }
+ MOZ_CRASH("bad simd type");
+}
+
+static bool
+EmitExtractLane(FunctionCompiler& f, ValType operandType, SimdSign sign)
+{
+ uint8_t lane;
+ MDefinition* vector;
+ if (!f.iter().readExtractLane(operandType, &lane, &vector))
+ return false;
+
+ f.iter().setResult(f.extractSimdElement(lane, vector,
+ ToMIRType(SimdToLaneType(operandType)), sign));
+ return true;
+}
+
+// Emit an I32 expression and then convert it to a boolean SIMD lane value, i.e. -1 or 0.
+static MDefinition*
+EmitSimdBooleanLaneExpr(FunctionCompiler& f, MDefinition* i32)
+{
+ // Compute !i32 - 1 to force the value range into {0, -1}.
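+    // (If i32 is zero, !0 == 1 and 1 - 1 == 0; if i32 is non-zero, !i32 == 0
+    // and 0 - 1 == -1, so every non-zero input becomes the all-ones value.)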
+ MDefinition* noti32 = f.unary<MNot>(i32);
+ return f.binary<MSub>(noti32, f.constant(Int32Value(1), MIRType::Int32), MIRType::Int32);
+}
+
+static bool
+EmitSimdReplaceLane(FunctionCompiler& f, ValType simdType)
+{
+ if (IsSimdBoolType(simdType))
+ f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
+
+ uint8_t lane;
+ MDefinition* vector;
+ MDefinition* scalar;
+ if (!f.iter().readReplaceLane(simdType, &lane, &vector, &scalar))
+ return false;
+
+ f.iter().setResult(f.insertElementSimd(vector, scalar, lane, ToMIRType(simdType)));
+ return true;
+}
+
+inline bool
+EmitSimdBitcast(FunctionCompiler& f, ValType fromType, ValType toType)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(fromType, toType, &input))
+ return false;
+
+ f.iter().setResult(f.bitcastSimd(input, ToMIRType(fromType), ToMIRType(toType)));
+ return true;
+}
+
+inline bool
+EmitSimdConvert(FunctionCompiler& f, ValType fromType, ValType toType, SimdSign sign)
+{
+ MDefinition* input;
+ if (!f.iter().readConversion(fromType, toType, &input))
+ return false;
+
+ f.iter().setResult(f.convertSimd(input, ToMIRType(fromType), ToMIRType(toType), sign));
+ return true;
+}
+
+static bool
+EmitSimdSwizzle(FunctionCompiler& f, ValType simdType)
+{
+ uint8_t lanes[16];
+ MDefinition* vector;
+ if (!f.iter().readSwizzle(simdType, &lanes, &vector))
+ return false;
+
+ f.iter().setResult(f.swizzleSimd(vector, lanes, ToMIRType(simdType)));
+ return true;
+}
+
+static bool
+EmitSimdShuffle(FunctionCompiler& f, ValType simdType)
+{
+ uint8_t lanes[16];
+ MDefinition* lhs;
+ MDefinition* rhs;
+ if (!f.iter().readShuffle(simdType, &lanes, &lhs, &rhs))
+ return false;
+
+ f.iter().setResult(f.shuffleSimd(lhs, rhs, lanes, ToMIRType(simdType)));
+ return true;
+}
+
+static inline Scalar::Type
+SimdExprTypeToViewType(ValType type, unsigned* defaultNumElems)
+{
+ switch (type) {
+ case ValType::I8x16: *defaultNumElems = 16; return Scalar::Int8x16;
+ case ValType::I16x8: *defaultNumElems = 8; return Scalar::Int16x8;
+ case ValType::I32x4: *defaultNumElems = 4; return Scalar::Int32x4;
+ case ValType::F32x4: *defaultNumElems = 4; return Scalar::Float32x4;
+ default: break;
+ }
+ MOZ_CRASH("type not handled in SimdExprTypeToViewType");
+}
+
+static bool
+EmitSimdLoad(FunctionCompiler& f, ValType resultType, unsigned numElems)
+{
+ unsigned defaultNumElems;
+ Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
+
+ if (!numElems)
+ numElems = defaultNumElems;
+
+ LinearMemoryAddress<MDefinition*> addr;
+ if (!f.iter().readLoad(resultType, Scalar::byteSize(viewType), &addr))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), numElems);
+
+ f.iter().setResult(f.load(addr.base, access, resultType));
+ return true;
+}
+
+static bool
+EmitSimdStore(FunctionCompiler& f, ValType resultType, unsigned numElems)
+{
+ unsigned defaultNumElems;
+ Scalar::Type viewType = SimdExprTypeToViewType(resultType, &defaultNumElems);
+
+ if (!numElems)
+ numElems = defaultNumElems;
+
+ LinearMemoryAddress<MDefinition*> addr;
+ MDefinition* value;
+ if (!f.iter().readTeeStore(resultType, Scalar::byteSize(viewType), &addr, &value))
+ return false;
+
+ MemoryAccessDesc access(viewType, addr.align, addr.offset, Some(f.trapOffset()), numElems);
+
+ f.store(addr.base, access, value);
+ return true;
+}
+
+static bool
+EmitSimdSelect(FunctionCompiler& f, ValType simdType)
+{
+ MDefinition* trueValue;
+ MDefinition* falseValue;
+ MDefinition* condition;
+ if (!f.iter().readSimdSelect(simdType, &trueValue, &falseValue, &condition))
+ return false;
+
+ f.iter().setResult(f.selectSimd(condition, trueValue, falseValue,
+ ToMIRType(simdType)));
+ return true;
+}
+
+static bool
+EmitSimdAllTrue(FunctionCompiler& f, ValType operandType)
+{
+ MDefinition* input;
+ if (!f.iter().readSimdBooleanReduction(operandType, &input))
+ return false;
+
+ f.iter().setResult(f.simdAllTrue(input));
+ return true;
+}
+
+static bool
+EmitSimdAnyTrue(FunctionCompiler& f, ValType operandType)
+{
+ MDefinition* input;
+ if (!f.iter().readSimdBooleanReduction(operandType, &input))
+ return false;
+
+ f.iter().setResult(f.simdAnyTrue(input));
+ return true;
+}
+
+static bool
+EmitSimdSplat(FunctionCompiler& f, ValType simdType)
+{
+ if (IsSimdBoolType(simdType))
+ f.iter().setResult(EmitSimdBooleanLaneExpr(f, f.iter().getResult()));
+
+ MDefinition* input;
+ if (!f.iter().readSplat(simdType, &input))
+ return false;
+
+ f.iter().setResult(f.splatSimd(input, ToMIRType(simdType)));
+ return true;
+}
+
+// Build a SIMD vector by inserting lanes one at a time into an initial constant.
+static bool
+EmitSimdChainedCtor(FunctionCompiler& f, ValType valType, MIRType type, const SimdConstant& init)
+{
+ const unsigned length = SimdTypeToLength(type);
+ MDefinition* val = f.constant(init, type);
+ for (unsigned i = 0; i < length; i++) {
+        MDefinition* scalar = nullptr;
+ if (!f.iter().readSimdCtorArg(ValType::I32, length, i, &scalar))
+ return false;
+ val = f.insertElementSimd(val, scalar, i, type);
+ }
+ if (!f.iter().readSimdCtorArgsEnd(length) || !f.iter().readSimdCtorReturn(valType))
+ return false;
+ f.iter().setResult(val);
+ return true;
+}
+
+// Build a boolean SIMD vector by inserting lanes one at a time into an initial constant.
+static bool
+EmitSimdBooleanChainedCtor(FunctionCompiler& f, ValType valType, MIRType type,
+ const SimdConstant& init)
+{
+ const unsigned length = SimdTypeToLength(type);
+ MDefinition* val = f.constant(init, type);
+ for (unsigned i = 0; i < length; i++) {
+        MDefinition* scalar = nullptr;
+ if (!f.iter().readSimdCtorArg(ValType::I32, length, i, &scalar))
+ return false;
+ val = f.insertElementSimd(val, EmitSimdBooleanLaneExpr(f, scalar), i, type);
+ }
+ if (!f.iter().readSimdCtorArgsEnd(length) || !f.iter().readSimdCtorReturn(valType))
+ return false;
+ f.iter().setResult(val);
+ return true;
+}
+
+static bool
+EmitSimdCtor(FunctionCompiler& f, ValType type)
+{
+ if (!f.iter().readSimdCtor())
+ return false;
+
+ switch (type) {
+ case ValType::I8x16:
+ return EmitSimdChainedCtor(f, type, MIRType::Int8x16, SimdConstant::SplatX16(0));
+ case ValType::I16x8:
+ return EmitSimdChainedCtor(f, type, MIRType::Int16x8, SimdConstant::SplatX8(0));
+ case ValType::I32x4: {
+ MDefinition* args[4];
+ for (unsigned i = 0; i < 4; i++) {
+ if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &args[i]))
+ return false;
+ }
+ if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
+ return false;
+ f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+ MIRType::Int32x4));
+ return true;
+ }
+ case ValType::F32x4: {
+ MDefinition* args[4];
+ for (unsigned i = 0; i < 4; i++) {
+ if (!f.iter().readSimdCtorArg(ValType::F32, 4, i, &args[i]))
+ return false;
+ }
+ if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
+ return false;
+ f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+ MIRType::Float32x4));
+ return true;
+ }
+ case ValType::B8x16:
+ return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool8x16, SimdConstant::SplatX16(0));
+ case ValType::B16x8:
+ return EmitSimdBooleanChainedCtor(f, type, MIRType::Bool16x8, SimdConstant::SplatX8(0));
+ case ValType::B32x4: {
+ MDefinition* args[4];
+ for (unsigned i = 0; i < 4; i++) {
+ MDefinition* i32;
+ if (!f.iter().readSimdCtorArg(ValType::I32, 4, i, &i32))
+ return false;
+ args[i] = EmitSimdBooleanLaneExpr(f, i32);
+ }
+ if (!f.iter().readSimdCtorArgsEnd(4) || !f.iter().readSimdCtorReturn(type))
+ return false;
+ f.iter().setResult(f.constructSimd<MSimdValueX4>(args[0], args[1], args[2], args[3],
+ MIRType::Bool32x4));
+ return true;
+ }
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ break;
+ }
+ MOZ_CRASH("unexpected SIMD type");
+}
+
+static bool
+EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
+{
+ switch (op) {
+ case SimdOperation::Constructor:
+ return EmitSimdCtor(f, type);
+ case SimdOperation::Fn_extractLane:
+ return EmitExtractLane(f, type, sign);
+ case SimdOperation::Fn_replaceLane:
+ return EmitSimdReplaceLane(f, type);
+ case SimdOperation::Fn_check:
+ MOZ_CRASH("only used in asm.js' type system");
+ case SimdOperation::Fn_splat:
+ return EmitSimdSplat(f, type);
+ case SimdOperation::Fn_select:
+ return EmitSimdSelect(f, type);
+ case SimdOperation::Fn_swizzle:
+ return EmitSimdSwizzle(f, type);
+ case SimdOperation::Fn_shuffle:
+ return EmitSimdShuffle(f, type);
+ case SimdOperation::Fn_load:
+ return EmitSimdLoad(f, type, 0);
+ case SimdOperation::Fn_load1:
+ return EmitSimdLoad(f, type, 1);
+ case SimdOperation::Fn_load2:
+ return EmitSimdLoad(f, type, 2);
+ case SimdOperation::Fn_store:
+ return EmitSimdStore(f, type, 0);
+ case SimdOperation::Fn_store1:
+ return EmitSimdStore(f, type, 1);
+ case SimdOperation::Fn_store2:
+ return EmitSimdStore(f, type, 2);
+ case SimdOperation::Fn_allTrue:
+ return EmitSimdAllTrue(f, type);
+ case SimdOperation::Fn_anyTrue:
+ return EmitSimdAnyTrue(f, type);
+ case SimdOperation::Fn_abs:
+ case SimdOperation::Fn_neg:
+ case SimdOperation::Fn_not:
+ case SimdOperation::Fn_sqrt:
+ case SimdOperation::Fn_reciprocalApproximation:
+ case SimdOperation::Fn_reciprocalSqrtApproximation:
+ return EmitSimdUnary(f, type, op);
+ case SimdOperation::Fn_shiftLeftByScalar:
+ return EmitSimdShift(f, type, MSimdShift::lsh);
+ case SimdOperation::Fn_shiftRightByScalar:
+ return EmitSimdShift(f, type, MSimdShift::rshForSign(sign));
+#define _CASE(OP) \
+ case SimdOperation::Fn_##OP: \
+ return EmitSimdBinaryComp(f, type, MSimdBinaryComp::OP, sign);
+ FOREACH_COMP_SIMD_OP(_CASE)
+#undef _CASE
+ case SimdOperation::Fn_and:
+ return EmitSimdBinary(f, type, MSimdBinaryBitwise::and_);
+ case SimdOperation::Fn_or:
+ return EmitSimdBinary(f, type, MSimdBinaryBitwise::or_);
+ case SimdOperation::Fn_xor:
+ return EmitSimdBinary(f, type, MSimdBinaryBitwise::xor_);
+#define _CASE(OP) \
+ case SimdOperation::Fn_##OP: \
+ return EmitSimdBinary(f, type, MSimdBinaryArith::Op_##OP);
+ FOREACH_NUMERIC_SIMD_BINOP(_CASE)
+ FOREACH_FLOAT_SIMD_BINOP(_CASE)
+#undef _CASE
+ case SimdOperation::Fn_addSaturate:
+ return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::add, sign);
+ case SimdOperation::Fn_subSaturate:
+ return EmitSimdBinarySaturating(f, type, MSimdBinarySaturating::sub, sign);
+ case SimdOperation::Fn_fromFloat32x4:
+ return EmitSimdConvert(f, ValType::F32x4, type, sign);
+ case SimdOperation::Fn_fromInt32x4:
+ return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Signed);
+ case SimdOperation::Fn_fromUint32x4:
+ return EmitSimdConvert(f, ValType::I32x4, type, SimdSign::Unsigned);
+ case SimdOperation::Fn_fromInt8x16Bits:
+ case SimdOperation::Fn_fromUint8x16Bits:
+ return EmitSimdBitcast(f, ValType::I8x16, type);
+ case SimdOperation::Fn_fromUint16x8Bits:
+ case SimdOperation::Fn_fromInt16x8Bits:
+ return EmitSimdBitcast(f, ValType::I16x8, type);
+ case SimdOperation::Fn_fromInt32x4Bits:
+ case SimdOperation::Fn_fromUint32x4Bits:
+ return EmitSimdBitcast(f, ValType::I32x4, type);
+ case SimdOperation::Fn_fromFloat32x4Bits:
+ return EmitSimdBitcast(f, ValType::F32x4, type);
+ case SimdOperation::Fn_load3:
+ case SimdOperation::Fn_store3:
+ case SimdOperation::Fn_fromFloat64x2Bits:
+ MOZ_CRASH("NYI");
+ }
+ MOZ_CRASH("unexpected opcode");
+}
+
+static bool
+EmitGrowMemory(FunctionCompiler& f)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState args(f, lineOrBytecode);
+ if (!f.startCall(&args))
+ return false;
+
+ if (!f.passInstance(&args))
+ return false;
+
+ MDefinition* delta;
+ if (!f.iter().readGrowMemory(&delta))
+ return false;
+
+ if (!f.passArg(delta, ValType::I32, &args))
+ return false;
+
+ // As a short-cut, pretend this is an inter-module call so that any pinned
+ // heap pointer will be reloaded after the call. This hack will go away once
+ // we can stop pinning registers.
+ f.finishCall(&args, TlsUsage::CallerSaved);
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
+ return false;
+
+ f.iter().setResult(ret);
+ return true;
+}
+
+static bool
+EmitCurrentMemory(FunctionCompiler& f)
+{
+ uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
+
+ CallCompileState args(f, lineOrBytecode);
+
+ if (!f.iter().readCurrentMemory())
+ return false;
+
+ if (!f.startCall(&args))
+ return false;
+
+ if (!f.passInstance(&args))
+ return false;
+
+ f.finishCall(&args, TlsUsage::Need);
+
+ MDefinition* ret;
+ if (!f.builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, args, ValType::I32, &ret))
+ return false;
+
+ f.iter().setResult(ret);
+ return true;
+}
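+
+// A note on the two memory operators above: EmitGrowMemory and EmitCurrentMemory
+// both lower to out-of-line calls to instance builtins (SymbolicAddress::GrowMemory
+// and SymbolicAddress::CurrentMemory), passing the instance pointer explicitly via
+// passInstance(); only the TlsUsage handed to finishCall() differs between them.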
+
+static bool
+EmitExpr(FunctionCompiler& f)
+{
+ if (!f.mirGen().ensureBallast())
+ return false;
+
+ uint16_t u16;
+ MOZ_ALWAYS_TRUE(f.iter().readOp(&u16));
+ Op op = Op(u16);
+
+ switch (op) {
+ // Control opcodes
+ case Op::Nop:
+ return f.iter().readNop();
+ case Op::Drop:
+ return f.iter().readDrop();
+ case Op::Block:
+ return EmitBlock(f);
+ case Op::Loop:
+ return EmitLoop(f);
+ case Op::If:
+ return EmitIf(f);
+ case Op::Else:
+ return EmitElse(f);
+ case Op::End:
+ return EmitEnd(f);
+ case Op::Br:
+ return EmitBr(f);
+ case Op::BrIf:
+ return EmitBrIf(f);
+ case Op::BrTable:
+ return EmitBrTable(f);
+ case Op::Return:
+ return EmitReturn(f);
+ case Op::Unreachable:
+ if (!f.iter().readUnreachable())
+ return false;
+ f.unreachableTrap();
+ return true;
+
+ // Calls
+ case Op::Call:
+ return EmitCall(f);
+ case Op::CallIndirect:
+ return EmitCallIndirect(f, /* oldStyle = */ false);
+ case Op::OldCallIndirect:
+ return EmitCallIndirect(f, /* oldStyle = */ true);
+
+ // Locals and globals
+ case Op::GetLocal:
+ return EmitGetLocal(f);
+ case Op::SetLocal:
+ return EmitSetLocal(f);
+ case Op::TeeLocal:
+ return EmitTeeLocal(f);
+ case Op::GetGlobal:
+ return EmitGetGlobal(f);
+ case Op::SetGlobal:
+ return EmitSetGlobal(f);
+ case Op::TeeGlobal:
+ return EmitTeeGlobal(f);
+
+ // Select
+ case Op::Select:
+ return EmitSelect(f);
+
+ // I32
+ case Op::I32Const: {
+ int32_t i32;
+ if (!f.iter().readI32Const(&i32))
+ return false;
+
+ f.iter().setResult(f.constant(Int32Value(i32), MIRType::Int32));
+ return true;
+ }
+ case Op::I32Add:
+ return EmitAdd(f, ValType::I32, MIRType::Int32);
+ case Op::I32Sub:
+ return EmitSub(f, ValType::I32, MIRType::Int32);
+ case Op::I32Mul:
+ return EmitMul(f, ValType::I32, MIRType::Int32);
+ case Op::I32DivS:
+ case Op::I32DivU:
+ return EmitDiv(f, ValType::I32, MIRType::Int32, op == Op::I32DivU);
+ case Op::I32RemS:
+ case Op::I32RemU:
+ return EmitRem(f, ValType::I32, MIRType::Int32, op == Op::I32RemU);
+ case Op::I32Min:
+ case Op::I32Max:
+ return EmitMinMax(f, ValType::I32, MIRType::Int32, op == Op::I32Max);
+ case Op::I32Eqz:
+ return EmitConversion<MNot>(f, ValType::I32, ValType::I32);
+ case Op::I32TruncSF32:
+ case Op::I32TruncUF32:
+ return EmitTruncate(f, ValType::F32, ValType::I32, op == Op::I32TruncUF32);
+ case Op::I32TruncSF64:
+ case Op::I32TruncUF64:
+ return EmitTruncate(f, ValType::F64, ValType::I32, op == Op::I32TruncUF64);
+ case Op::I32WrapI64:
+ return EmitConversion<MWrapInt64ToInt32>(f, ValType::I64, ValType::I32);
+ case Op::I32ReinterpretF32:
+ return EmitReinterpret(f, ValType::I32, ValType::F32, MIRType::Int32);
+ case Op::I32Clz:
+ return EmitUnaryWithType<MClz>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Ctz:
+ return EmitUnaryWithType<MCtz>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Popcnt:
+ return EmitUnaryWithType<MPopcnt>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Abs:
+ return EmitUnaryWithType<MAbs>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Neg:
+ return EmitUnaryWithType<MAsmJSNeg>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Or:
+ return EmitBitwise<MBitOr>(f, ValType::I32, MIRType::Int32);
+ case Op::I32And:
+ return EmitBitwise<MBitAnd>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Xor:
+ return EmitBitwise<MBitXor>(f, ValType::I32, MIRType::Int32);
+ case Op::I32Shl:
+ return EmitBitwise<MLsh>(f, ValType::I32, MIRType::Int32);
+ case Op::I32ShrS:
+ return EmitBitwise<MRsh>(f, ValType::I32, MIRType::Int32);
+ case Op::I32ShrU:
+ return EmitBitwise<MUrsh>(f, ValType::I32, MIRType::Int32);
+ case Op::I32BitNot:
+ return EmitBitNot(f, ValType::I32);
+ case Op::I32Load8S:
+ return EmitLoad(f, ValType::I32, Scalar::Int8);
+ case Op::I32Load8U:
+ return EmitLoad(f, ValType::I32, Scalar::Uint8);
+ case Op::I32Load16S:
+ return EmitLoad(f, ValType::I32, Scalar::Int16);
+ case Op::I32Load16U:
+ return EmitLoad(f, ValType::I32, Scalar::Uint16);
+ case Op::I32Load:
+ return EmitLoad(f, ValType::I32, Scalar::Int32);
+ case Op::I32Store8:
+ return EmitStore(f, ValType::I32, Scalar::Int8);
+ case Op::I32TeeStore8:
+ return EmitTeeStore(f, ValType::I32, Scalar::Int8);
+ case Op::I32Store16:
+ return EmitStore(f, ValType::I32, Scalar::Int16);
+ case Op::I32TeeStore16:
+ return EmitTeeStore(f, ValType::I32, Scalar::Int16);
+ case Op::I32Store:
+ return EmitStore(f, ValType::I32, Scalar::Int32);
+ case Op::I32TeeStore:
+ return EmitTeeStore(f, ValType::I32, Scalar::Int32);
+ case Op::I32Rotr:
+ case Op::I32Rotl:
+ return EmitRotate(f, ValType::I32, op == Op::I32Rotl);
+
+ // I64
+ case Op::I64Const: {
+ int64_t i64;
+ if (!f.iter().readI64Const(&i64))
+ return false;
+
+ f.iter().setResult(f.constant(i64));
+ return true;
+ }
+ case Op::I64Add:
+ return EmitAdd(f, ValType::I64, MIRType::Int64);
+ case Op::I64Sub:
+ return EmitSub(f, ValType::I64, MIRType::Int64);
+ case Op::I64Mul:
+ return EmitMul(f, ValType::I64, MIRType::Int64);
+ case Op::I64DivS:
+ case Op::I64DivU:
+ return EmitDiv(f, ValType::I64, MIRType::Int64, op == Op::I64DivU);
+ case Op::I64RemS:
+ case Op::I64RemU:
+ return EmitRem(f, ValType::I64, MIRType::Int64, op == Op::I64RemU);
+ case Op::I64TruncSF32:
+ case Op::I64TruncUF32:
+ return EmitTruncate(f, ValType::F32, ValType::I64, op == Op::I64TruncUF32);
+ case Op::I64TruncSF64:
+ case Op::I64TruncUF64:
+ return EmitTruncate(f, ValType::F64, ValType::I64, op == Op::I64TruncUF64);
+ case Op::I64ExtendSI32:
+ case Op::I64ExtendUI32:
+ return EmitExtendI32(f, op == Op::I64ExtendUI32);
+ case Op::I64ReinterpretF64:
+ return EmitReinterpret(f, ValType::I64, ValType::F64, MIRType::Int64);
+ case Op::I64Or:
+ return EmitBitwise<MBitOr>(f, ValType::I64, MIRType::Int64);
+ case Op::I64And:
+ return EmitBitwise<MBitAnd>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Xor:
+ return EmitBitwise<MBitXor>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Shl:
+ return EmitBitwise<MLsh>(f, ValType::I64, MIRType::Int64);
+ case Op::I64ShrS:
+ return EmitBitwise<MRsh>(f, ValType::I64, MIRType::Int64);
+ case Op::I64ShrU:
+ return EmitBitwise<MUrsh>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Rotr:
+ case Op::I64Rotl:
+ return EmitRotate(f, ValType::I64, op == Op::I64Rotl);
+ case Op::I64Eqz:
+ return EmitConversion<MNot>(f, ValType::I64, ValType::I32);
+ case Op::I64Clz:
+ return EmitUnaryWithType<MClz>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Ctz:
+ return EmitUnaryWithType<MCtz>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Popcnt:
+ return EmitUnaryWithType<MPopcnt>(f, ValType::I64, MIRType::Int64);
+ case Op::I64Load8S:
+ return EmitLoad(f, ValType::I64, Scalar::Int8);
+ case Op::I64Load8U:
+ return EmitLoad(f, ValType::I64, Scalar::Uint8);
+ case Op::I64Load16S:
+ return EmitLoad(f, ValType::I64, Scalar::Int16);
+ case Op::I64Load16U:
+ return EmitLoad(f, ValType::I64, Scalar::Uint16);
+ case Op::I64Load32S:
+ return EmitLoad(f, ValType::I64, Scalar::Int32);
+ case Op::I64Load32U:
+ return EmitLoad(f, ValType::I64, Scalar::Uint32);
+ case Op::I64Load:
+ return EmitLoad(f, ValType::I64, Scalar::Int64);
+ case Op::I64Store8:
+ return EmitStore(f, ValType::I64, Scalar::Int8);
+ case Op::I64TeeStore8:
+ return EmitTeeStore(f, ValType::I64, Scalar::Int8);
+ case Op::I64Store16:
+ return EmitStore(f, ValType::I64, Scalar::Int16);
+ case Op::I64TeeStore16:
+ return EmitTeeStore(f, ValType::I64, Scalar::Int16);
+ case Op::I64Store32:
+ return EmitStore(f, ValType::I64, Scalar::Int32);
+ case Op::I64TeeStore32:
+ return EmitTeeStore(f, ValType::I64, Scalar::Int32);
+ case Op::I64Store:
+ return EmitStore(f, ValType::I64, Scalar::Int64);
+ case Op::I64TeeStore:
+ return EmitTeeStore(f, ValType::I64, Scalar::Int64);
+
+ // F32
+ case Op::F32Const: {
+ RawF32 f32;
+ if (!f.iter().readF32Const(&f32))
+ return false;
+
+ f.iter().setResult(f.constant(f32));
+ return true;
+ }
+ case Op::F32Add:
+ return EmitAdd(f, ValType::F32, MIRType::Float32);
+ case Op::F32Sub:
+ return EmitSub(f, ValType::F32, MIRType::Float32);
+ case Op::F32Mul:
+ return EmitMul(f, ValType::F32, MIRType::Float32);
+ case Op::F32Div:
+ return EmitDiv(f, ValType::F32, MIRType::Float32, /* isUnsigned = */ false);
+ case Op::F32Min:
+ case Op::F32Max:
+ return EmitMinMax(f, ValType::F32, MIRType::Float32, op == Op::F32Max);
+ case Op::F32CopySign:
+ return EmitCopySign(f, ValType::F32);
+ case Op::F32Neg:
+ return EmitUnaryWithType<MAsmJSNeg>(f, ValType::F32, MIRType::Float32);
+ case Op::F32Abs:
+ return EmitUnaryWithType<MAbs>(f, ValType::F32, MIRType::Float32);
+ case Op::F32Sqrt:
+ return EmitUnaryWithType<MSqrt>(f, ValType::F32, MIRType::Float32);
+ case Op::F32Ceil:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::CeilF, ValType::F32);
+ case Op::F32Floor:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::FloorF, ValType::F32);
+ case Op::F32Trunc:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::TruncF, ValType::F32);
+ case Op::F32Nearest:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::NearbyIntF, ValType::F32);
+ case Op::F32DemoteF64:
+ return EmitConversion<MToFloat32>(f, ValType::F64, ValType::F32);
+ case Op::F32ConvertSI32:
+ return EmitConversion<MToFloat32>(f, ValType::I32, ValType::F32);
+ case Op::F32ConvertUI32:
+ return EmitConversion<MWasmUnsignedToFloat32>(f, ValType::I32, ValType::F32);
+ case Op::F32ConvertSI64:
+ case Op::F32ConvertUI64:
+ return EmitConvertI64ToFloatingPoint(f, ValType::F32, MIRType::Float32,
+ op == Op::F32ConvertUI64);
+ case Op::F32ReinterpretI32:
+ return EmitReinterpret(f, ValType::F32, ValType::I32, MIRType::Float32);
+
+ case Op::F32Load:
+ return EmitLoad(f, ValType::F32, Scalar::Float32);
+ case Op::F32Store:
+ return EmitStore(f, ValType::F32, Scalar::Float32);
+ case Op::F32TeeStore:
+ return EmitTeeStore(f, ValType::F32, Scalar::Float32);
+ case Op::F32TeeStoreF64:
+ return EmitTeeStoreWithCoercion(f, ValType::F32, Scalar::Float64);
+
+ // F64
+ case Op::F64Const: {
+ RawF64 f64;
+ if (!f.iter().readF64Const(&f64))
+ return false;
+
+ f.iter().setResult(f.constant(f64));
+ return true;
+ }
+ case Op::F64Add:
+ return EmitAdd(f, ValType::F64, MIRType::Double);
+ case Op::F64Sub:
+ return EmitSub(f, ValType::F64, MIRType::Double);
+ case Op::F64Mul:
+ return EmitMul(f, ValType::F64, MIRType::Double);
+ case Op::F64Div:
+ return EmitDiv(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false);
+ case Op::F64Mod:
+ return EmitRem(f, ValType::F64, MIRType::Double, /* isUnsigned = */ false);
+ case Op::F64Min:
+ case Op::F64Max:
+ return EmitMinMax(f, ValType::F64, MIRType::Double, op == Op::F64Max);
+ case Op::F64CopySign:
+ return EmitCopySign(f, ValType::F64);
+ case Op::F64Neg:
+ return EmitUnaryWithType<MAsmJSNeg>(f, ValType::F64, MIRType::Double);
+ case Op::F64Abs:
+ return EmitUnaryWithType<MAbs>(f, ValType::F64, MIRType::Double);
+ case Op::F64Sqrt:
+ return EmitUnaryWithType<MSqrt>(f, ValType::F64, MIRType::Double);
+ case Op::F64Ceil:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::CeilD, ValType::F64);
+ case Op::F64Floor:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::FloorD, ValType::F64);
+ case Op::F64Trunc:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::TruncD, ValType::F64);
+ case Op::F64Nearest:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::NearbyIntD, ValType::F64);
+ case Op::F64Sin:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::SinD, ValType::F64);
+ case Op::F64Cos:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::CosD, ValType::F64);
+ case Op::F64Tan:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::TanD, ValType::F64);
+ case Op::F64Asin:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::ASinD, ValType::F64);
+ case Op::F64Acos:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::ACosD, ValType::F64);
+ case Op::F64Atan:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::ATanD, ValType::F64);
+ case Op::F64Exp:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::ExpD, ValType::F64);
+ case Op::F64Log:
+ return EmitUnaryMathBuiltinCall(f, SymbolicAddress::LogD, ValType::F64);
+ case Op::F64Pow:
+ return EmitBinaryMathBuiltinCall(f, SymbolicAddress::PowD, ValType::F64);
+ case Op::F64Atan2:
+ return EmitBinaryMathBuiltinCall(f, SymbolicAddress::ATan2D, ValType::F64);
+ case Op::F64PromoteF32:
+ return EmitConversion<MToDouble>(f, ValType::F32, ValType::F64);
+ case Op::F64ConvertSI32:
+ return EmitConversion<MToDouble>(f, ValType::I32, ValType::F64);
+ case Op::F64ConvertUI32:
+ return EmitConversion<MWasmUnsignedToDouble>(f, ValType::I32, ValType::F64);
+ case Op::F64ConvertSI64:
+ case Op::F64ConvertUI64:
+ return EmitConvertI64ToFloatingPoint(f, ValType::F64, MIRType::Double,
+ op == Op::F64ConvertUI64);
+ case Op::F64Load:
+ return EmitLoad(f, ValType::F64, Scalar::Float64);
+ case Op::F64Store:
+ return EmitStore(f, ValType::F64, Scalar::Float64);
+ case Op::F64TeeStore:
+ return EmitTeeStore(f, ValType::F64, Scalar::Float64);
+ case Op::F64TeeStoreF32:
+ return EmitTeeStoreWithCoercion(f, ValType::F64, Scalar::Float32);
+ case Op::F64ReinterpretI64:
+ return EmitReinterpret(f, ValType::F64, ValType::I64, MIRType::Double);
+
+ // Comparisons
+ case Op::I32Eq:
+ return EmitComparison(f, ValType::I32, JSOP_EQ, MCompare::Compare_Int32);
+ case Op::I32Ne:
+ return EmitComparison(f, ValType::I32, JSOP_NE, MCompare::Compare_Int32);
+ case Op::I32LtS:
+ return EmitComparison(f, ValType::I32, JSOP_LT, MCompare::Compare_Int32);
+ case Op::I32LeS:
+ return EmitComparison(f, ValType::I32, JSOP_LE, MCompare::Compare_Int32);
+ case Op::I32GtS:
+ return EmitComparison(f, ValType::I32, JSOP_GT, MCompare::Compare_Int32);
+ case Op::I32GeS:
+ return EmitComparison(f, ValType::I32, JSOP_GE, MCompare::Compare_Int32);
+ case Op::I32LtU:
+ return EmitComparison(f, ValType::I32, JSOP_LT, MCompare::Compare_UInt32);
+ case Op::I32LeU:
+ return EmitComparison(f, ValType::I32, JSOP_LE, MCompare::Compare_UInt32);
+ case Op::I32GtU:
+ return EmitComparison(f, ValType::I32, JSOP_GT, MCompare::Compare_UInt32);
+ case Op::I32GeU:
+ return EmitComparison(f, ValType::I32, JSOP_GE, MCompare::Compare_UInt32);
+ case Op::I64Eq:
+ return EmitComparison(f, ValType::I64, JSOP_EQ, MCompare::Compare_Int64);
+ case Op::I64Ne:
+ return EmitComparison(f, ValType::I64, JSOP_NE, MCompare::Compare_Int64);
+ case Op::I64LtS:
+ return EmitComparison(f, ValType::I64, JSOP_LT, MCompare::Compare_Int64);
+ case Op::I64LeS:
+ return EmitComparison(f, ValType::I64, JSOP_LE, MCompare::Compare_Int64);
+ case Op::I64GtS:
+ return EmitComparison(f, ValType::I64, JSOP_GT, MCompare::Compare_Int64);
+ case Op::I64GeS:
+ return EmitComparison(f, ValType::I64, JSOP_GE, MCompare::Compare_Int64);
+ case Op::I64LtU:
+ return EmitComparison(f, ValType::I64, JSOP_LT, MCompare::Compare_UInt64);
+ case Op::I64LeU:
+ return EmitComparison(f, ValType::I64, JSOP_LE, MCompare::Compare_UInt64);
+ case Op::I64GtU:
+ return EmitComparison(f, ValType::I64, JSOP_GT, MCompare::Compare_UInt64);
+ case Op::I64GeU:
+ return EmitComparison(f, ValType::I64, JSOP_GE, MCompare::Compare_UInt64);
+ case Op::F32Eq:
+ return EmitComparison(f, ValType::F32, JSOP_EQ, MCompare::Compare_Float32);
+ case Op::F32Ne:
+ return EmitComparison(f, ValType::F32, JSOP_NE, MCompare::Compare_Float32);
+ case Op::F32Lt:
+ return EmitComparison(f, ValType::F32, JSOP_LT, MCompare::Compare_Float32);
+ case Op::F32Le:
+ return EmitComparison(f, ValType::F32, JSOP_LE, MCompare::Compare_Float32);
+ case Op::F32Gt:
+ return EmitComparison(f, ValType::F32, JSOP_GT, MCompare::Compare_Float32);
+ case Op::F32Ge:
+ return EmitComparison(f, ValType::F32, JSOP_GE, MCompare::Compare_Float32);
+ case Op::F64Eq:
+ return EmitComparison(f, ValType::F64, JSOP_EQ, MCompare::Compare_Double);
+ case Op::F64Ne:
+ return EmitComparison(f, ValType::F64, JSOP_NE, MCompare::Compare_Double);
+ case Op::F64Lt:
+ return EmitComparison(f, ValType::F64, JSOP_LT, MCompare::Compare_Double);
+ case Op::F64Le:
+ return EmitComparison(f, ValType::F64, JSOP_LE, MCompare::Compare_Double);
+ case Op::F64Gt:
+ return EmitComparison(f, ValType::F64, JSOP_GT, MCompare::Compare_Double);
+ case Op::F64Ge:
+ return EmitComparison(f, ValType::F64, JSOP_GE, MCompare::Compare_Double);
+
+ // SIMD
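+      // The CASE/ENUMERATE macros below expand, for each SIMD type, one case for
+      // the type's constructor plus one case per operation in the matching
+      // FORALL_* list; every expanded case forwards to EmitSimdOp with the
+      // appropriate ValType and SimdSign.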
+#define CASE(TYPE, OP, SIGN) \
+ case Op::TYPE##OP: \
+ return EmitSimdOp(f, ValType::TYPE, SimdOperation::Fn_##OP, SIGN);
+#define I8x16CASE(OP) CASE(I8x16, OP, SimdSign::Signed)
+#define I16x8CASE(OP) CASE(I16x8, OP, SimdSign::Signed)
+#define I32x4CASE(OP) CASE(I32x4, OP, SimdSign::Signed)
+#define F32x4CASE(OP) CASE(F32x4, OP, SimdSign::NotApplicable)
+#define B8x16CASE(OP) CASE(B8x16, OP, SimdSign::NotApplicable)
+#define B16x8CASE(OP) CASE(B16x8, OP, SimdSign::NotApplicable)
+#define B32x4CASE(OP) CASE(B32x4, OP, SimdSign::NotApplicable)
+#define ENUMERATE(TYPE, FORALL, DO) \
+ case Op::TYPE##Constructor: \
+ return EmitSimdOp(f, ValType::TYPE, SimdOperation::Constructor, SimdSign::NotApplicable); \
+ FORALL(DO)
+
+ ENUMERATE(I8x16, FORALL_INT8X16_ASMJS_OP, I8x16CASE)
+ ENUMERATE(I16x8, FORALL_INT16X8_ASMJS_OP, I16x8CASE)
+ ENUMERATE(I32x4, FORALL_INT32X4_ASMJS_OP, I32x4CASE)
+ ENUMERATE(F32x4, FORALL_FLOAT32X4_ASMJS_OP, F32x4CASE)
+ ENUMERATE(B8x16, FORALL_BOOL_SIMD_OP, B8x16CASE)
+ ENUMERATE(B16x8, FORALL_BOOL_SIMD_OP, B16x8CASE)
+ ENUMERATE(B32x4, FORALL_BOOL_SIMD_OP, B32x4CASE)
+
+#undef CASE
+#undef I8x16CASE
+#undef I16x8CASE
+#undef I32x4CASE
+#undef F32x4CASE
+#undef B8x16CASE
+#undef B16x8CASE
+#undef B32x4CASE
+#undef ENUMERATE
+
+ case Op::I8x16Const: {
+ I8x16 i8x16;
+ if (!f.iter().readI8x16Const(&i8x16))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Int8x16));
+ return true;
+ }
+ case Op::I16x8Const: {
+ I16x8 i16x8;
+ if (!f.iter().readI16x8Const(&i16x8))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Int16x8));
+ return true;
+ }
+ case Op::I32x4Const: {
+ I32x4 i32x4;
+ if (!f.iter().readI32x4Const(&i32x4))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Int32x4));
+ return true;
+ }
+ case Op::F32x4Const: {
+ F32x4 f32x4;
+ if (!f.iter().readF32x4Const(&f32x4))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX4(f32x4), MIRType::Float32x4));
+ return true;
+ }
+ case Op::B8x16Const: {
+ I8x16 i8x16;
+ if (!f.iter().readB8x16Const(&i8x16))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX16(i8x16), MIRType::Bool8x16));
+ return true;
+ }
+ case Op::B16x8Const: {
+ I16x8 i16x8;
+ if (!f.iter().readB16x8Const(&i16x8))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX8(i16x8), MIRType::Bool16x8));
+ return true;
+ }
+ case Op::B32x4Const: {
+ I32x4 i32x4;
+ if (!f.iter().readB32x4Const(&i32x4))
+ return false;
+
+ f.iter().setResult(f.constant(SimdConstant::CreateX4(i32x4), MIRType::Bool32x4));
+ return true;
+ }
+
+ // SIMD unsigned integer operations.
+ case Op::I8x16addSaturateU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_addSaturate, SimdSign::Unsigned);
+ case Op::I8x16subSaturateU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_subSaturate, SimdSign::Unsigned);
+ case Op::I8x16shiftRightByScalarU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
+ case Op::I8x16lessThanU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
+ case Op::I8x16lessThanOrEqualU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
+ case Op::I8x16greaterThanU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
+ case Op::I8x16greaterThanOrEqualU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
+ case Op::I8x16extractLaneU:
+ return EmitSimdOp(f, ValType::I8x16, SimdOperation::Fn_extractLane, SimdSign::Unsigned);
+
+ case Op::I16x8addSaturateU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_addSaturate, SimdSign::Unsigned);
+ case Op::I16x8subSaturateU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_subSaturate, SimdSign::Unsigned);
+ case Op::I16x8shiftRightByScalarU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
+ case Op::I16x8lessThanU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
+ case Op::I16x8lessThanOrEqualU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
+ case Op::I16x8greaterThanU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
+ case Op::I16x8greaterThanOrEqualU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
+ case Op::I16x8extractLaneU:
+ return EmitSimdOp(f, ValType::I16x8, SimdOperation::Fn_extractLane, SimdSign::Unsigned);
+
+ case Op::I32x4shiftRightByScalarU:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_shiftRightByScalar, SimdSign::Unsigned);
+ case Op::I32x4lessThanU:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThan, SimdSign::Unsigned);
+ case Op::I32x4lessThanOrEqualU:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_lessThanOrEqual, SimdSign::Unsigned);
+ case Op::I32x4greaterThanU:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThan, SimdSign::Unsigned);
+ case Op::I32x4greaterThanOrEqualU:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_greaterThanOrEqual, SimdSign::Unsigned);
+ case Op::I32x4fromFloat32x4U:
+ return EmitSimdOp(f, ValType::I32x4, SimdOperation::Fn_fromFloat32x4, SimdSign::Unsigned);
+
+ // Atomics
+ case Op::I32AtomicsLoad:
+ return EmitAtomicsLoad(f);
+ case Op::I32AtomicsStore:
+ return EmitAtomicsStore(f);
+ case Op::I32AtomicsBinOp:
+ return EmitAtomicsBinOp(f);
+ case Op::I32AtomicsCompareExchange:
+ return EmitAtomicsCompareExchange(f);
+ case Op::I32AtomicsExchange:
+ return EmitAtomicsExchange(f);
+ // Memory Operators
+ case Op::GrowMemory:
+ return EmitGrowMemory(f);
+ case Op::CurrentMemory:
+ return EmitCurrentMemory(f);
+ case Op::Limit:;
+ }
+
+ MOZ_CRASH("unexpected wasm opcode");
+}
+
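+// IonCompileFunction drives the per-function Ion pipeline: decode the local
+// entries, build the MIR graph by iterating EmitExpr over the function body via
+// a FunctionCompiler, then run OptimizeMIR, lower to LIR, and emit machine code
+// into the task's MacroAssembler with CodeGenerator::generateWasm.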
+bool
+wasm::IonCompileFunction(IonCompileTask* task)
+{
+ MOZ_ASSERT(task->mode() == IonCompileTask::CompileMode::Ion);
+
+ const FuncBytes& func = task->func();
+ FuncCompileResults& results = task->results();
+
+ Decoder d(func.bytes());
+
+ // Build the local types vector.
+
+ ValTypeVector locals;
+ if (!locals.appendAll(func.sig().args()))
+ return false;
+ if (!DecodeLocalEntries(d, task->mg().kind, &locals))
+ return false;
+
+ // Set up for Ion compilation.
+
+ JitContext jitContext(&results.alloc());
+ const JitCompileOptions options;
+ MIRGraph graph(&results.alloc());
+ CompileInfo compileInfo(locals.length());
+ MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
+ IonOptimizations.get(OptimizationLevel::Wasm));
+ mir.initMinWasmHeapLength(task->mg().minMemoryLength);
+
+ // Capture the prologue's trap site before decoding the function.
+
+ TrapOffset prologueTrapOffset;
+
+ // Build MIR graph
+ {
+ FunctionCompiler f(task->mg(), d, func, locals, mir, results);
+ if (!f.init())
+ return false;
+
+ prologueTrapOffset = f.iter().trapOffset();
+
+ if (!f.startBlock())
+ return false;
+
+ if (!f.iter().readFunctionStart(f.sig().ret()))
+ return false;
+
+ while (!f.done()) {
+ if (!EmitExpr(f))
+ return false;
+ }
+
+ if (f.inDeadCode() || IsVoid(f.sig().ret()))
+ f.returnVoid();
+ else
+ f.returnExpr(f.iter().getResult());
+
+ if (!f.iter().readFunctionEnd())
+ return false;
+
+ f.finish();
+ }
+
+ // Compile MIR graph
+ {
+ jit::SpewBeginFunction(&mir, nullptr);
+ jit::AutoSpewEndFunction spewEndFunction(&mir);
+
+ if (!OptimizeMIR(&mir))
+ return false;
+
+ LIRGraph* lir = GenerateLIR(&mir);
+ if (!lir)
+ return false;
+
+ SigIdDesc sigId = task->mg().funcSigs[func.index()]->id;
+
+ CodeGenerator codegen(&mir, lir, &results.masm());
+ if (!codegen.generateWasm(sigId, prologueTrapOffset, &results.offsets()))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+wasm::CompileFunction(IonCompileTask* task)
+{
+ TraceLoggerThread* logger = TraceLoggerForCurrentThread();
+ AutoTraceLog logCompile(logger, TraceLogger_WasmCompilation);
+
+ switch (task->mode()) {
+ case wasm::IonCompileTask::CompileMode::Ion:
+ return wasm::IonCompileFunction(task);
+ case wasm::IonCompileTask::CompileMode::Baseline:
+ return wasm::BaselineCompileFunction(task);
+ case wasm::IonCompileTask::CompileMode::None:
+ break;
+ }
+
+ MOZ_CRASH("Uninitialized task");
+}
diff --git a/js/src/wasm/WasmIonCompile.h b/js/src/wasm/WasmIonCompile.h
new file mode 100644
index 0000000000..0af380ad8c
--- /dev/null
+++ b/js/src/wasm/WasmIonCompile.h
@@ -0,0 +1,159 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_ion_compile_h
+#define wasm_ion_compile_h
+
+#include "jit/MacroAssembler.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+struct ModuleGeneratorData;
+
+typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
+typedef jit::ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
+typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
+
+// The FuncBytes class represents a single, concurrently-compilable function.
+// A FuncBytes object is composed of the wasm function body bytes along with the
+// ambient metadata about the function that is necessary to compile it.
+
+class FuncBytes
+{
+ Bytes bytes_;
+ uint32_t index_;
+ const SigWithId& sig_;
+ uint32_t lineOrBytecode_;
+ Uint32Vector callSiteLineNums_;
+
+ public:
+ FuncBytes(Bytes&& bytes,
+ uint32_t index,
+ const SigWithId& sig,
+ uint32_t lineOrBytecode,
+ Uint32Vector&& callSiteLineNums)
+ : bytes_(Move(bytes)),
+ index_(index),
+ sig_(sig),
+ lineOrBytecode_(lineOrBytecode),
+ callSiteLineNums_(Move(callSiteLineNums))
+ {}
+
+ Bytes& bytes() { return bytes_; }
+ const Bytes& bytes() const { return bytes_; }
+ uint32_t index() const { return index_; }
+ const SigWithId& sig() const { return sig_; }
+ uint32_t lineOrBytecode() const { return lineOrBytecode_; }
+ const Uint32Vector& callSiteLineNums() const { return callSiteLineNums_; }
+};
+
+typedef UniquePtr<FuncBytes> UniqueFuncBytes;
+
+// The FuncCompileResults class contains the results of compiling a single
+// function body, ready to be merged into the whole-module MacroAssembler.
+
+class FuncCompileResults
+{
+ jit::TempAllocator alloc_;
+ jit::MacroAssembler masm_;
+ FuncOffsets offsets_;
+
+ FuncCompileResults(const FuncCompileResults&) = delete;
+ FuncCompileResults& operator=(const FuncCompileResults&) = delete;
+
+ public:
+ explicit FuncCompileResults(LifoAlloc& lifo)
+ : alloc_(&lifo),
+ masm_(jit::MacroAssembler::WasmToken(), alloc_)
+ {}
+
+ jit::TempAllocator& alloc() { return alloc_; }
+ jit::MacroAssembler& masm() { return masm_; }
+ FuncOffsets& offsets() { return offsets_; }
+};
+
+// An IonCompileTask represents the task of compiling a single function body. An
+// IonCompileTask is filled with the wasm code to be compiled on the main
+// validation thread, sent off to an Ion compilation helper thread, which creates
+// the FuncCompileResults, and finally sent back to the validation thread. To
+// save time allocating and freeing memory, IonCompileTasks are reset() and
+// reused.
+
+class IonCompileTask
+{
+ public:
+ enum class CompileMode { None, Baseline, Ion };
+
+ private:
+ const ModuleGeneratorData& mg_;
+ LifoAlloc lifo_;
+ UniqueFuncBytes func_;
+ CompileMode mode_;
+ Maybe<FuncCompileResults> results_;
+
+ IonCompileTask(const IonCompileTask&) = delete;
+ IonCompileTask& operator=(const IonCompileTask&) = delete;
+
+ public:
+ IonCompileTask(const ModuleGeneratorData& mg, size_t defaultChunkSize)
+ : mg_(mg), lifo_(defaultChunkSize), func_(nullptr), mode_(CompileMode::None)
+ {}
+ LifoAlloc& lifo() {
+ return lifo_;
+ }
+ const ModuleGeneratorData& mg() const {
+ return mg_;
+ }
+ void init(UniqueFuncBytes func, CompileMode mode) {
+ MOZ_ASSERT(!func_);
+ func_ = Move(func);
+ results_.emplace(lifo_);
+ mode_ = mode;
+ }
+ CompileMode mode() const {
+ return mode_;
+ }
+ const FuncBytes& func() const {
+ MOZ_ASSERT(func_);
+ return *func_;
+ }
+ FuncCompileResults& results() {
+ return *results_;
+ }
+ void reset(Bytes* recycled) {
+ if (func_)
+ *recycled = Move(func_->bytes());
+ func_.reset(nullptr);
+ results_.reset();
+ lifo_.releaseAll();
+ mode_ = CompileMode::None;
+ }
+};
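+
+// Illustrative lifecycle of an IonCompileTask (a non-normative sketch; the
+// variable names below are examples, not part of this interface):
+//
+//   IonCompileTask task(mg, chunkSize);
+//   task.init(Move(funcBytes), IonCompileTask::CompileMode::Ion);
+//   if (!CompileFunction(&task))
+//       return false;
+//   // task.results() now holds the generated code and offsets.
+//   Bytes recycled;
+//   task.reset(&recycled);   // recover the function bytes for reuse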
+
+MOZ_MUST_USE bool
+IonCompileFunction(IonCompileTask* task);
+
+bool
+CompileFunction(IonCompileTask* task);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_ion_compile_h
diff --git a/js/src/wasm/WasmJS.cpp b/js/src/wasm/WasmJS.cpp
new file mode 100644
index 0000000000..07d6331b8b
--- /dev/null
+++ b/js/src/wasm/WasmJS.cpp
@@ -0,0 +1,2048 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmJS.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/Maybe.h"
+
+#include "jsprf.h"
+
+#include "builtin/Promise.h"
+#include "jit/JitOptions.h"
+#include "vm/Interpreter.h"
+#include "vm/String.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmModule.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#include "jsobjinlines.h"
+
+#include "vm/NativeObject-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+using mozilla::CheckedInt;
+using mozilla::IsNaN;
+using mozilla::IsSame;
+using mozilla::Nothing;
+
+bool
+wasm::HasCompilerSupport(ExclusiveContext* cx)
+{
+ if (gc::SystemPageSize() > wasm::PageSize)
+ return false;
+
+ if (!cx->jitSupportsFloatingPoint())
+ return false;
+
+ if (!cx->jitSupportsUnalignedAccesses())
+ return false;
+
+ if (!wasm::HaveSignalHandlers())
+ return false;
+
+#if defined(JS_CODEGEN_ARM)
+ // movw/t are required for the loadWasmActivationFromSymbolicAddress in
+ // GenerateProfilingPrologue/Epilogue to avoid using the constant pool.
+ if (!HasMOVWT())
+ return false;
+#endif
+
+#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
+ return false;
+#else
+ return true;
+#endif
+}
+
+bool
+wasm::HasSupport(ExclusiveContext* cx)
+{
+ return cx->options().wasm() && HasCompilerSupport(cx);
+}
+
+// ============================================================================
+// Imports
+
+template<typename T>
+JSObject*
+js::wasm::CreateCustomNaNObject(JSContext* cx, T* addr)
+{
+ MOZ_ASSERT(IsNaN(*addr));
+
+ RootedObject obj(cx, JS_NewPlainObject(cx));
+ if (!obj)
+ return nullptr;
+
+ int32_t* i32 = (int32_t*)addr;
+ RootedValue intVal(cx, Int32Value(i32[0]));
+ if (!JS_DefineProperty(cx, obj, "nan_low", intVal, JSPROP_ENUMERATE))
+ return nullptr;
+
+ if (IsSame<double, T>::value) {
+ intVal = Int32Value(i32[1]);
+ if (!JS_DefineProperty(cx, obj, "nan_high", intVal, JSPROP_ENUMERATE))
+ return nullptr;
+ }
+
+ return obj;
+}
+
+template JSObject* js::wasm::CreateCustomNaNObject(JSContext* cx, float* addr);
+template JSObject* js::wasm::CreateCustomNaNObject(JSContext* cx, double* addr);
+
+bool
+js::wasm::ReadCustomFloat32NaNObject(JSContext* cx, HandleValue v, uint32_t* ret)
+{
+ RootedObject obj(cx, &v.toObject());
+ RootedValue val(cx);
+
+ int32_t i32;
+ if (!JS_GetProperty(cx, obj, "nan_low", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32))
+ return false;
+
+ *ret = i32;
+ return true;
+}
+
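+// ReadCustomDoubleNaNObject rebuilds a 64-bit bit pattern by placing the
+// object's "nan_high" property in the upper 32 bits and its "nan_low" property
+// in the lower 32 bits.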
+bool
+js::wasm::ReadCustomDoubleNaNObject(JSContext* cx, HandleValue v, uint64_t* ret)
+{
+ RootedObject obj(cx, &v.toObject());
+ RootedValue val(cx);
+
+ int32_t i32;
+ if (!JS_GetProperty(cx, obj, "nan_high", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32))
+ return false;
+ *ret = uint32_t(i32);
+ *ret <<= 32;
+
+ if (!JS_GetProperty(cx, obj, "nan_low", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32))
+ return false;
+ *ret |= uint32_t(i32);
+
+ return true;
+}
+
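+// Int64 values cannot in general be represented exactly as JS numbers, so
+// CreateI64Object and ReadI64Object below convert between an int64_t and a
+// plain JS object with 32-bit "low" and "high" properties (used, for example,
+// when importing i64 globals in wasm test mode; see GetImports below).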
+JSObject*
+wasm::CreateI64Object(JSContext* cx, int64_t i64)
+{
+ RootedObject result(cx, JS_NewPlainObject(cx));
+ if (!result)
+ return nullptr;
+
+ RootedValue val(cx, Int32Value(uint32_t(i64)));
+ if (!JS_DefineProperty(cx, result, "low", val, JSPROP_ENUMERATE))
+ return nullptr;
+
+ val = Int32Value(uint32_t(i64 >> 32));
+ if (!JS_DefineProperty(cx, result, "high", val, JSPROP_ENUMERATE))
+ return nullptr;
+
+ return result;
+}
+
+bool
+wasm::ReadI64Object(JSContext* cx, HandleValue v, int64_t* i64)
+{
+ if (!v.isObject()) {
+ JS_ReportErrorASCII(cx, "i64 JS value must be an object");
+ return false;
+ }
+
+ RootedObject obj(cx, &v.toObject());
+
+ int32_t* i32 = (int32_t*)i64;
+
+ RootedValue val(cx);
+ if (!JS_GetProperty(cx, obj, "low", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32[0]))
+ return false;
+
+ if (!JS_GetProperty(cx, obj, "high", &val))
+ return false;
+ if (!ToInt32(cx, val, &i32[1]))
+ return false;
+
+ return true;
+}
+
+static bool
+ThrowBadImportArg(JSContext* cx)
+{
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_ARG);
+ return false;
+}
+
+static bool
+ThrowBadImportField(JSContext* cx, const char* field, const char* str)
+{
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_FIELD, field, str);
+ return false;
+}
+
+static bool
+GetProperty(JSContext* cx, HandleObject obj, const char* chars, MutableHandleValue v)
+{
+ JSAtom* atom = AtomizeUTF8Chars(cx, chars, strlen(chars));
+ if (!atom)
+ return false;
+
+ RootedId id(cx, AtomToId(atom));
+ return GetProperty(cx, obj, obj, id, v);
+}
+
+static bool
+GetImports(JSContext* cx,
+ const Module& module,
+ HandleObject importObj,
+ MutableHandle<FunctionVector> funcImports,
+ MutableHandleWasmTableObject tableImport,
+ MutableHandleWasmMemoryObject memoryImport,
+ ValVector* globalImports)
+{
+ const ImportVector& imports = module.imports();
+ if (!imports.empty() && !importObj)
+ return ThrowBadImportArg(cx);
+
+ const Metadata& metadata = module.metadata();
+
+ uint32_t globalIndex = 0;
+ const GlobalDescVector& globals = metadata.globals;
+ for (const Import& import : imports) {
+ RootedValue v(cx);
+ if (!GetProperty(cx, importObj, import.module.get(), &v))
+ return false;
+
+ if (!v.isObject())
+ return ThrowBadImportField(cx, import.module.get(), "an Object");
+
+ RootedObject obj(cx, &v.toObject());
+ if (!GetProperty(cx, obj, import.field.get(), &v))
+ return false;
+
+ switch (import.kind) {
+ case DefinitionKind::Function:
+ if (!IsFunctionObject(v))
+ return ThrowBadImportField(cx, import.field.get(), "a Function");
+
+ if (!funcImports.append(&v.toObject().as<JSFunction>()))
+ return false;
+
+ break;
+ case DefinitionKind::Table:
+ if (!v.isObject() || !v.toObject().is<WasmTableObject>())
+ return ThrowBadImportField(cx, import.field.get(), "a Table");
+
+ MOZ_ASSERT(!tableImport);
+ tableImport.set(&v.toObject().as<WasmTableObject>());
+ break;
+ case DefinitionKind::Memory:
+ if (!v.isObject() || !v.toObject().is<WasmMemoryObject>())
+ return ThrowBadImportField(cx, import.field.get(), "a Memory");
+
+ MOZ_ASSERT(!memoryImport);
+ memoryImport.set(&v.toObject().as<WasmMemoryObject>());
+ break;
+
+ case DefinitionKind::Global:
+ Val val;
+ const GlobalDesc& global = globals[globalIndex++];
+ MOZ_ASSERT(global.importIndex() == globalIndex - 1);
+ MOZ_ASSERT(!global.isMutable());
+ switch (global.type()) {
+ case ValType::I32: {
+ if (!v.isNumber())
+ return ThrowBadImportField(cx, import.field.get(), "a number");
+ int32_t i32;
+ if (!ToInt32(cx, v, &i32))
+ return false;
+ val = Val(uint32_t(i32));
+ break;
+ }
+ case ValType::I64: {
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in JS");
+ int64_t i64;
+ if (!ReadI64Object(cx, v, &i64))
+ return false;
+ val = Val(uint64_t(i64));
+ break;
+ }
+ case ValType::F32: {
+ if (JitOptions.wasmTestMode && v.isObject()) {
+ uint32_t bits;
+ if (!ReadCustomFloat32NaNObject(cx, v, &bits))
+ return false;
+ val = Val(RawF32::fromBits(bits));
+ break;
+ }
+ if (!v.isNumber())
+ return ThrowBadImportField(cx, import.field.get(), "a number");
+ double d;
+ if (!ToNumber(cx, v, &d))
+ return false;
+ val = Val(RawF32(float(d)));
+ break;
+ }
+ case ValType::F64: {
+ if (JitOptions.wasmTestMode && v.isObject()) {
+ uint64_t bits;
+ if (!ReadCustomDoubleNaNObject(cx, v, &bits))
+ return false;
+ val = Val(RawF64::fromBits(bits));
+ break;
+ }
+ if (!v.isNumber())
+ return ThrowBadImportField(cx, import.field.get(), "a number");
+ double d;
+ if (!ToNumber(cx, v, &d))
+ return false;
+ val = Val(RawF64(d));
+ break;
+ }
+ default: {
+ MOZ_CRASH("unexpected import value type");
+ }
+ }
+ if (!globalImports->append(val))
+ return false;
+ }
+ }
+
+ MOZ_ASSERT(globalIndex == globals.length() || !globals[globalIndex].isImport());
+
+ return true;
+}
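+
+// For reference, GetImports expects an import object shaped like the following
+// (the module/field names here are examples only):
+//
+//   { env: { f: someFunction, mem: aWasmMemoryObject, tbl: aWasmTableObject, g: 42 } }
+//
+// where each import's `module` name selects the outer property ("env") and its
+// `field` name selects the inner one.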
+
+// ============================================================================
+// Fuzzing support
+
+static bool
+DescribeScriptedCaller(JSContext* cx, ScriptedCaller* scriptedCaller)
+{
+ // Note: JS::DescribeScriptedCaller returns whether a scripted caller was
+ // found, not whether an error was thrown. This wrapper function converts
+ // back to the more ordinary false-if-error form.
+
+ JS::AutoFilename af;
+ if (JS::DescribeScriptedCaller(cx, &af, &scriptedCaller->line, &scriptedCaller->column)) {
+ scriptedCaller->filename = DuplicateString(cx, af.get());
+ if (!scriptedCaller->filename)
+ return false;
+ }
+
+ return true;
+}
+
+bool
+wasm::Eval(JSContext* cx, Handle<TypedArrayObject*> code, HandleObject importObj,
+ MutableHandleWasmInstanceObject instanceObj)
+{
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly))
+ return false;
+
+ MutableBytes bytecode = cx->new_<ShareableBytes>();
+ if (!bytecode)
+ return false;
+
+ if (!bytecode->append((uint8_t*)code->viewDataEither().unwrap(), code->byteLength())) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ ScriptedCaller scriptedCaller;
+ if (!DescribeScriptedCaller(cx, &scriptedCaller))
+ return false;
+
+ CompileArgs compileArgs;
+ if (!compileArgs.initFromContext(cx, Move(scriptedCaller)))
+ return false;
+
+ UniqueChars error;
+ SharedModule module = Compile(*bytecode, compileArgs, &error);
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_COMPILE_ERROR,
+ error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+ RootedWasmTableObject table(cx);
+ RootedWasmMemoryObject memory(cx);
+ ValVector globals;
+ if (!GetImports(cx, *module, importObj, &funcs, &table, &memory, &globals))
+ return false;
+
+ return module->instantiate(cx, funcs, table, memory, globals, nullptr, instanceObj);
+}
+
+// ============================================================================
+// Common functions
+
+static bool
+ToNonWrappingUint32(JSContext* cx, HandleValue v, uint32_t max, const char* kind, const char* noun,
+ uint32_t* u32)
+{
+ double dbl;
+ if (!ToInteger(cx, v, &dbl))
+ return false;
+
+ if (dbl < 0 || dbl > max) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_UINT32,
+ kind, noun);
+ return false;
+ }
+
+ *u32 = uint32_t(dbl);
+ MOZ_ASSERT(double(*u32) == dbl);
+ return true;
+}
+
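+// GetLimits reads a WebAssembly limits descriptor such as { initial: 1, maximum: 10 }:
+// "initial" is required, "maximum" is optional, both are range-checked against
+// `max` via ToNonWrappingUint32 above, and, when "maximum" is present, "initial"
+// must not exceed it.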
+static bool
+GetLimits(JSContext* cx, HandleObject obj, uint32_t max, const char* kind,
+ Limits* limits)
+{
+ JSAtom* initialAtom = Atomize(cx, "initial", strlen("initial"));
+ if (!initialAtom)
+ return false;
+ RootedId initialId(cx, AtomToId(initialAtom));
+
+ RootedValue initialVal(cx);
+ if (!GetProperty(cx, obj, obj, initialId, &initialVal))
+ return false;
+
+ if (!ToNonWrappingUint32(cx, initialVal, max, kind, "initial size", &limits->initial))
+ return false;
+
+ JSAtom* maximumAtom = Atomize(cx, "maximum", strlen("maximum"));
+ if (!maximumAtom)
+ return false;
+ RootedId maximumId(cx, AtomToId(maximumAtom));
+
+ bool found;
+ if (HasProperty(cx, obj, maximumId, &found) && found) {
+ RootedValue maxVal(cx);
+ if (!GetProperty(cx, obj, obj, maximumId, &maxVal))
+ return false;
+
+ limits->maximum.emplace();
+ if (!ToNonWrappingUint32(cx, maxVal, max, kind, "maximum size", limits->maximum.ptr()))
+ return false;
+
+ if (limits->initial > *limits->maximum) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_UINT32,
+ kind, "maximum size");
+ return false;
+ }
+ }
+
+ return true;
+}
+
+// ============================================================================
+// WebAssembly.Module class and methods
+
+const ClassOps WasmModuleObject::classOps_ =
+{
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* getProperty */
+ nullptr, /* setProperty */
+ nullptr, /* enumerate */
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmModuleObject::finalize
+};
+
+const Class WasmModuleObject::class_ =
+{
+ "WebAssembly.Module",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmModuleObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmModuleObject::classOps_,
+};
+
+const JSPropertySpec WasmModuleObject::properties[] =
+{ JS_PS_END };
+
+const JSFunctionSpec WasmModuleObject::methods[] =
+{ JS_FS_END };
+
+const JSFunctionSpec WasmModuleObject::static_methods[] =
+{
+ JS_FN("imports", WasmModuleObject::imports, 1, 0),
+ JS_FN("exports", WasmModuleObject::exports, 1, 0),
+ JS_FS_END
+};
+
+/* static */ void
+WasmModuleObject::finalize(FreeOp* fop, JSObject* obj)
+{
+ obj->as<WasmModuleObject>().module().Release();
+}
+
+static bool
+IsModuleObject(JSObject* obj, Module** module)
+{
+ JSObject* unwrapped = CheckedUnwrap(obj);
+ if (!unwrapped || !unwrapped->is<WasmModuleObject>())
+ return false;
+
+ *module = &unwrapped->as<WasmModuleObject>().module();
+ return true;
+}
+
+static bool
+GetModuleArg(JSContext* cx, CallArgs args, const char* name, Module** module)
+{
+ if (!args.requireAtLeast(cx, name, 1))
+ return false;
+
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), module)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ return true;
+}
+
+struct KindNames
+{
+ RootedPropertyName kind;
+ RootedPropertyName table;
+ RootedPropertyName memory;
+
+ explicit KindNames(JSContext* cx) : kind(cx), table(cx), memory(cx) {}
+};
+
+static bool
+InitKindNames(JSContext* cx, KindNames* names)
+{
+ JSAtom* kind = Atomize(cx, "kind", strlen("kind"));
+ if (!kind)
+ return false;
+ names->kind = kind->asPropertyName();
+
+ JSAtom* table = Atomize(cx, "table", strlen("table"));
+ if (!table)
+ return false;
+ names->table = table->asPropertyName();
+
+ JSAtom* memory = Atomize(cx, "memory", strlen("memory"));
+ if (!memory)
+ return false;
+ names->memory = memory->asPropertyName();
+
+ return true;
+}
+
+static JSString*
+KindToString(JSContext* cx, const KindNames& names, DefinitionKind kind)
+{
+ switch (kind) {
+ case DefinitionKind::Function:
+ return cx->names().function;
+ case DefinitionKind::Table:
+ return names.table;
+ case DefinitionKind::Memory:
+ return names.memory;
+ case DefinitionKind::Global:
+ return cx->names().global;
+ }
+
+ MOZ_CRASH("invalid kind");
+}
+
+static JSString*
+UTF8CharsToString(JSContext* cx, const char* chars)
+{
+ return NewStringCopyUTF8Z<CanGC>(cx, JS::ConstUTF8CharsZ(chars, strlen(chars)));
+}
+
+/* static */ bool
+WasmModuleObject::imports(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Module* module;
+ if (!GetModuleArg(cx, args, "WebAssembly.Module.imports", &module))
+ return false;
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names))
+ return false;
+
+ AutoValueVector elems(cx);
+ if (!elems.reserve(module->imports().length()))
+ return false;
+
+ for (const Import& import : module->imports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(3))
+ return false;
+
+ JSString* moduleStr = UTF8CharsToString(cx, import.module.get());
+ if (!moduleStr)
+ return false;
+ props.infallibleAppend(IdValuePair(NameToId(cx->names().module), StringValue(moduleStr)));
+
+ JSString* nameStr = UTF8CharsToString(cx, import.field.get());
+ if (!nameStr)
+ return false;
+ props.infallibleAppend(IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, import.kind);
+ if (!kindStr)
+ return false;
+ props.infallibleAppend(IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+ JSObject* obj = ObjectGroup::newPlainObject(cx, props.begin(), props.length(), GenericObject);
+ if (!obj)
+ return false;
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr)
+ return false;
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */ bool
+WasmModuleObject::exports(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ Module* module;
+ if (!GetModuleArg(cx, args, "WebAssembly.Module.exports", &module))
+ return false;
+
+ KindNames names(cx);
+ if (!InitKindNames(cx, &names))
+ return false;
+
+ AutoValueVector elems(cx);
+ if (!elems.reserve(module->exports().length()))
+ return false;
+
+ for (const Export& exp : module->exports()) {
+ Rooted<IdValueVector> props(cx, IdValueVector(cx));
+ if (!props.reserve(2))
+ return false;
+
+ JSString* nameStr = UTF8CharsToString(cx, exp.fieldName());
+ if (!nameStr)
+ return false;
+ props.infallibleAppend(IdValuePair(NameToId(cx->names().name), StringValue(nameStr)));
+
+ JSString* kindStr = KindToString(cx, names, exp.kind());
+ if (!kindStr)
+ return false;
+ props.infallibleAppend(IdValuePair(NameToId(names.kind), StringValue(kindStr)));
+
+ JSObject* obj = ObjectGroup::newPlainObject(cx, props.begin(), props.length(), GenericObject);
+ if (!obj)
+ return false;
+
+ elems.infallibleAppend(ObjectValue(*obj));
+ }
+
+ JSObject* arr = NewDenseCopiedArray(cx, elems.length(), elems.begin());
+ if (!arr)
+ return false;
+
+ args.rval().setObject(*arr);
+ return true;
+}
+
+/* static */ WasmModuleObject*
+WasmModuleObject::create(ExclusiveContext* cx, Module& module, HandleObject proto)
+{
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmModuleObject>(cx, proto);
+ if (!obj)
+ return nullptr;
+
+ obj->initReservedSlot(MODULE_SLOT, PrivateValue(&module));
+ module.AddRef();
+ return obj;
+}
+
+static bool
+GetBufferSource(JSContext* cx, JSObject* obj, unsigned errorNumber, MutableBytes* bytecode)
+{
+ *bytecode = cx->new_<ShareableBytes>();
+ if (!*bytecode)
+ return false;
+
+ JSObject* unwrapped = CheckedUnwrap(obj);
+
+ size_t byteLength = 0;
+ uint8_t* ptr = nullptr;
+ if (unwrapped && unwrapped->is<TypedArrayObject>()) {
+ TypedArrayObject& view = unwrapped->as<TypedArrayObject>();
+ byteLength = view.byteLength();
+ ptr = (uint8_t*)view.viewDataEither().unwrap();
+ } else if (unwrapped && unwrapped->is<ArrayBufferObject>()) {
+ ArrayBufferObject& buffer = unwrapped->as<ArrayBufferObject>();
+ byteLength = buffer.byteLength();
+ ptr = buffer.dataPointer();
+ } else {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, errorNumber);
+ return false;
+ }
+
+ if (!(*bytecode)->append(ptr, byteLength)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+InitCompileArgs(JSContext* cx, CompileArgs* compileArgs)
+{
+ ScriptedCaller scriptedCaller;
+ if (!DescribeScriptedCaller(cx, &scriptedCaller))
+ return false;
+
+ return compileArgs->initFromContext(cx, Move(scriptedCaller));
+}
+
+/* static */ bool
+WasmModuleObject::construct(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, callArgs, "Module"))
+ return false;
+
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.Module", 1))
+ return false;
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG, &bytecode))
+ return false;
+
+ CompileArgs compileArgs;
+ if (!InitCompileArgs(cx, &compileArgs))
+ return false;
+
+ UniqueChars error;
+ SharedModule module = Compile(*bytecode, compileArgs, &error);
+ if (!module) {
+ if (error) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_COMPILE_ERROR,
+ error.get());
+ return false;
+ }
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, *module, proto));
+ if (!moduleObj)
+ return false;
+
+ callArgs.rval().setObject(*moduleObj);
+ return true;
+}
+
+Module&
+WasmModuleObject::module() const
+{
+ MOZ_ASSERT(is<WasmModuleObject>());
+ return *(Module*)getReservedSlot(MODULE_SLOT).toPrivate();
+}
+
+// ============================================================================
+// WebAssembly.Instance class and methods
+
+const ClassOps WasmInstanceObject::classOps_ =
+{
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* getProperty */
+ nullptr, /* setProperty */
+ nullptr, /* enumerate */
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmInstanceObject::finalize,
+ nullptr, /* call */
+ nullptr, /* hasInstance */
+ nullptr, /* construct */
+ WasmInstanceObject::trace
+};
+
+const Class WasmInstanceObject::class_ =
+{
+ "WebAssembly.Instance",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmInstanceObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmInstanceObject::classOps_,
+};
+
+const JSPropertySpec WasmInstanceObject::properties[] =
+{ JS_PS_END };
+
+const JSFunctionSpec WasmInstanceObject::methods[] =
+{ JS_FS_END };
+
+const JSFunctionSpec WasmInstanceObject::static_methods[] =
+{ JS_FS_END };
+
+bool
+WasmInstanceObject::isNewborn() const
+{
+ MOZ_ASSERT(is<WasmInstanceObject>());
+ return getReservedSlot(INSTANCE_SLOT).isUndefined();
+}
+
+/* static */ void
+WasmInstanceObject::finalize(FreeOp* fop, JSObject* obj)
+{
+ fop->delete_(&obj->as<WasmInstanceObject>().exports());
+ if (!obj->as<WasmInstanceObject>().isNewborn())
+ fop->delete_(&obj->as<WasmInstanceObject>().instance());
+}
+
+/* static */ void
+WasmInstanceObject::trace(JSTracer* trc, JSObject* obj)
+{
+ if (!obj->as<WasmInstanceObject>().isNewborn())
+ obj->as<WasmInstanceObject>().instance().tracePrivate(trc);
+}
+
+/* static */ WasmInstanceObject*
+WasmInstanceObject::create(JSContext* cx,
+ UniqueCode code,
+ HandleWasmMemoryObject memory,
+ SharedTableVector&& tables,
+ Handle<FunctionVector> funcImports,
+ const ValVector& globalImports,
+ HandleObject proto)
+{
+ UniquePtr<WeakExportMap> exports = js::MakeUnique<WeakExportMap>(cx->zone(), ExportMap());
+ if (!exports || !exports->init()) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ AutoSetNewObjectMetadata metadata(cx);
+ RootedWasmInstanceObject obj(cx, NewObjectWithGivenProto<WasmInstanceObject>(cx, proto));
+ if (!obj)
+ return nullptr;
+
+ obj->setReservedSlot(EXPORTS_SLOT, PrivateValue(exports.release()));
+ MOZ_ASSERT(obj->isNewborn());
+
+ MOZ_ASSERT(obj->isTenured(), "assumed by WasmTableObject write barriers");
+
+ // Root the Instance via WasmInstanceObject before any possible GC.
+ auto* instance = cx->new_<Instance>(cx,
+ obj,
+ Move(code),
+ memory,
+ Move(tables),
+ funcImports,
+ globalImports);
+ if (!instance)
+ return nullptr;
+
+ obj->initReservedSlot(INSTANCE_SLOT, PrivateValue(instance));
+ MOZ_ASSERT(!obj->isNewborn());
+
+ if (!instance->init(cx))
+ return nullptr;
+
+ return obj;
+}
+
+static bool
+Instantiate(JSContext* cx, const Module& module, HandleObject importObj,
+ MutableHandleWasmInstanceObject instanceObj)
+{
+ RootedObject instanceProto(cx, &cx->global()->getPrototype(JSProto_WasmInstance).toObject());
+
+ Rooted<FunctionVector> funcs(cx, FunctionVector(cx));
+ RootedWasmTableObject table(cx);
+ RootedWasmMemoryObject memory(cx);
+ ValVector globals;
+ if (!GetImports(cx, module, importObj, &funcs, &table, &memory, &globals))
+ return false;
+
+ return module.instantiate(cx, funcs, table, memory, globals, instanceProto, instanceObj);
+}
+
+/* static */ bool
+WasmInstanceObject::construct(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Instance"))
+ return false;
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Instance", 1))
+ return false;
+
+ Module* module;
+ if (!args[0].isObject() || !IsModuleObject(&args[0].toObject(), &module)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_MOD_ARG);
+ return false;
+ }
+
+ RootedObject importObj(cx);
+ if (!args.get(1).isUndefined()) {
+ if (!args[1].isObject())
+ return ThrowBadImportArg(cx);
+ importObj = &args[1].toObject();
+ }
+
+ RootedWasmInstanceObject instanceObj(cx);
+ if (!Instantiate(cx, *module, importObj, &instanceObj))
+ return false;
+
+ args.rval().setObject(*instanceObj);
+ return true;
+}
+
+Instance&
+WasmInstanceObject::instance() const
+{
+ MOZ_ASSERT(!isNewborn());
+ return *(Instance*)getReservedSlot(INSTANCE_SLOT).toPrivate();
+}
+
+WasmInstanceObject::WeakExportMap&
+WasmInstanceObject::exports() const
+{
+ return *(WeakExportMap*)getReservedSlot(EXPORTS_SLOT).toPrivate();
+}
+
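+// WasmCall is the shared native entry point installed on every exported
+// wasm/asm.js JSFunction; it recovers the owning Instance and the function
+// index from the function's extended slots and forwards to Instance::callExport.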
+static bool
+WasmCall(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ RootedFunction callee(cx, &args.callee().as<JSFunction>());
+
+ Instance& instance = ExportedFunctionToInstance(callee);
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(callee);
+ return instance.callExport(cx, funcIndex, args);
+}
+
+/* static */ bool
+WasmInstanceObject::getExportedFunction(JSContext* cx, HandleWasmInstanceObject instanceObj,
+ uint32_t funcIndex, MutableHandleFunction fun)
+{
+ if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
+ fun.set(p->value());
+ return true;
+ }
+
+ const Instance& instance = instanceObj->instance();
+ unsigned numArgs = instance.metadata().lookupFuncExport(funcIndex).sig().args().length();
+
+ // asm.js needs to act like a normal JS function, which means having the
+ // name from the original source and being callable as a constructor.
+ if (instance.isAsmJS()) {
+ RootedAtom name(cx, instance.code().getFuncAtom(cx, funcIndex));
+ if (!name)
+ return false;
+
+ fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED,
+ SingletonObject, JSFunction::ASMJS_CTOR));
+ if (!fun)
+ return false;
+ } else {
+ RootedAtom name(cx, NumberToAtom(cx, funcIndex));
+ if (!name)
+ return false;
+
+ fun.set(NewNativeFunction(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED));
+ if (!fun)
+ return false;
+ }
+
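+ // Stash the instance and function index in the function's extended slots
+ // so that ExportedFunctionToInstance/ToFuncIndex can recover them later.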
+ fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
+ fun->setExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT, Int32Value(funcIndex));
+
+ if (!instanceObj->exports().putNew(funcIndex, fun)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+const CodeRange&
+WasmInstanceObject::getExportedFunctionCodeRange(HandleFunction fun)
+{
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
+ MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun);
+ const Metadata& metadata = instance().metadata();
+ return metadata.codeRanges[metadata.lookupFuncExport(funcIndex).codeRangeIndex()];
+}
+
+bool
+wasm::IsExportedFunction(JSFunction* fun)
+{
+ return fun->maybeNative() == WasmCall;
+}
+
+bool
+wasm::IsExportedWasmFunction(JSFunction* fun)
+{
+ return IsExportedFunction(fun) && !ExportedFunctionToInstance(fun).isAsmJS();
+}
+
+bool
+wasm::IsExportedFunction(const Value& v, MutableHandleFunction f)
+{
+ if (!v.isObject())
+ return false;
+
+ JSObject& obj = v.toObject();
+ if (!obj.is<JSFunction>() || !IsExportedFunction(&obj.as<JSFunction>()))
+ return false;
+
+ f.set(&obj.as<JSFunction>());
+ return true;
+}
+
+Instance&
+wasm::ExportedFunctionToInstance(JSFunction* fun)
+{
+ return ExportedFunctionToInstanceObject(fun)->instance();
+}
+
+WasmInstanceObject*
+wasm::ExportedFunctionToInstanceObject(JSFunction* fun)
+{
+ MOZ_ASSERT(IsExportedFunction(fun));
+ const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT);
+ return &v.toObject().as<WasmInstanceObject>();
+}
+
+uint32_t
+wasm::ExportedFunctionToFuncIndex(JSFunction* fun)
+{
+ MOZ_ASSERT(IsExportedFunction(fun));
+ const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT);
+ return v.toInt32();
+}
+
+// ============================================================================
+// WebAssembly.Memory class and methods
+
+const ClassOps WasmMemoryObject::classOps_ =
+{
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* getProperty */
+ nullptr, /* setProperty */
+ nullptr, /* enumerate */
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmMemoryObject::finalize
+};
+
+const Class WasmMemoryObject::class_ =
+{
+ "WebAssembly.Memory",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmMemoryObject::classOps_
+};
+
+/* static */ void
+WasmMemoryObject::finalize(FreeOp* fop, JSObject* obj)
+{
+ WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
+ if (memory.hasObservers())
+ fop->delete_(&memory.observers());
+}
+
+/* static */ WasmMemoryObject*
+WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShared buffer,
+ HandleObject proto)
+{
+ AutoSetNewObjectMetadata metadata(cx);
+ auto* obj = NewObjectWithGivenProto<WasmMemoryObject>(cx, proto);
+ if (!obj)
+ return nullptr;
+
+ obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
+ MOZ_ASSERT(!obj->hasObservers());
+ return obj;
+}
+
+/* static */ bool
+WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Memory"))
+ return false;
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Memory", 1))
+ return false;
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_DESC_ARG, "memory");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+ Limits limits;
+ if (!GetLimits(cx, obj, UINT32_MAX / PageSize, "Memory", &limits))
+ return false;
+
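+ // Limits are expressed in pages; convert them to byte sizes before
+ // creating the buffer.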
+ limits.initial *= PageSize;
+ if (limits.maximum)
+ limits.maximum = Some(*limits.maximum * PageSize);
+
+ RootedArrayBufferObject buffer(cx,
+ ArrayBufferObject::createForWasm(cx, limits.initial, limits.maximum));
+ if (!buffer)
+ return false;
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
+ RootedWasmMemoryObject memoryObj(cx, WasmMemoryObject::create(cx, buffer, proto));
+ if (!memoryObj)
+ return false;
+
+ args.rval().setObject(*memoryObj);
+ return true;
+}
+
+static bool
+IsMemory(HandleValue v)
+{
+ return v.isObject() && v.toObject().is<WasmMemoryObject>();
+}
+
+/* static */ bool
+WasmMemoryObject::bufferGetterImpl(JSContext* cx, const CallArgs& args)
+{
+ args.rval().setObject(args.thisv().toObject().as<WasmMemoryObject>().buffer());
+ return true;
+}
+
+/* static */ bool
+WasmMemoryObject::bufferGetter(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, bufferGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmMemoryObject::properties[] =
+{
+ JS_PSG("buffer", WasmMemoryObject::bufferGetter, 0),
+ JS_PS_END
+};
+
+/* static */ bool
+WasmMemoryObject::growImpl(JSContext* cx, const CallArgs& args)
+{
+ RootedWasmMemoryObject memory(cx, &args.thisv().toObject().as<WasmMemoryObject>());
+
+ uint32_t delta;
+ if (!ToNonWrappingUint32(cx, args.get(0), UINT32_MAX, "Memory", "grow delta", &delta))
+ return false;
+
+ uint32_t ret = grow(memory, delta, cx);
+
+ if (ret == uint32_t(-1)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW, "memory");
+ return false;
+ }
+
+ args.rval().setInt32(ret);
+ return true;
+}
+
+/* static */ bool
+WasmMemoryObject::grow(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsMemory, growImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmMemoryObject::methods[] =
+{
+ JS_FN("grow", WasmMemoryObject::grow, 1, 0),
+ JS_FS_END
+};
+
+const JSFunctionSpec WasmMemoryObject::static_methods[] =
+{ JS_FS_END };
+
+ArrayBufferObjectMaybeShared&
+WasmMemoryObject::buffer() const
+{
+ return getReservedSlot(BUFFER_SLOT).toObject().as<ArrayBufferObjectMaybeShared>();
+}
+
+bool
+WasmMemoryObject::hasObservers() const
+{
+ return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
+}
+
+WasmMemoryObject::WeakInstanceSet&
+WasmMemoryObject::observers() const
+{
+ MOZ_ASSERT(hasObservers());
+ return *reinterpret_cast<WeakInstanceSet*>(getReservedSlot(OBSERVERS_SLOT).toPrivate());
+}
+
+WasmMemoryObject::WeakInstanceSet*
+WasmMemoryObject::getOrCreateObservers(JSContext* cx)
+{
+ if (!hasObservers()) {
+ auto observers = MakeUnique<WeakInstanceSet>(cx->zone(), InstanceSet());
+ if (!observers || !observers->init()) {
+ ReportOutOfMemory(cx);
+ return nullptr;
+ }
+
+ setReservedSlot(OBSERVERS_SLOT, PrivateValue(observers.release()));
+ }
+
+ return &observers();
+}
+
+bool
+WasmMemoryObject::movingGrowable() const
+{
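+ // With a huge-memory reservation, growth is always in place; otherwise a
+ // buffer with no declared maximum may have to move when grown.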
+#ifdef WASM_HUGE_MEMORY
+ return false;
+#else
+ return !buffer().wasmMaxSize();
+#endif
+}
+
+bool
+WasmMemoryObject::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance)
+{
+ MOZ_ASSERT(movingGrowable());
+
+ WeakInstanceSet* observers = getOrCreateObservers(cx);
+ if (!observers)
+ return false;
+
+ if (!observers->putNew(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+/* static */ uint32_t
+WasmMemoryObject::grow(HandleWasmMemoryObject memory, uint32_t delta, JSContext* cx)
+{
+ RootedArrayBufferObject oldBuf(cx, &memory->buffer().as<ArrayBufferObject>());
+
+ MOZ_ASSERT(oldBuf->byteLength() % PageSize == 0);
+ uint32_t oldNumPages = oldBuf->byteLength() / PageSize;
+
+ CheckedInt<uint32_t> newSize = oldNumPages;
+ newSize += delta;
+ newSize *= PageSize;
+ if (!newSize.isValid())
+ return -1;
+
+ RootedArrayBufferObject newBuf(cx);
+ uint8_t* prevMemoryBase = nullptr;
+
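+ // A buffer with a declared maximum (or a huge-memory reservation) can be
+ // grown in place; otherwise the buffer may move, so remember the old base
+ // for notifying observers below.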
+ if (Maybe<uint32_t> maxSize = oldBuf->wasmMaxSize()) {
+ if (newSize.value() > maxSize.value())
+ return -1;
+
+ if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf, &newBuf, cx))
+ return -1;
+ } else {
+#ifdef WASM_HUGE_MEMORY
+ if (!ArrayBufferObject::wasmGrowToSizeInPlace(newSize.value(), oldBuf, &newBuf, cx))
+ return -1;
+#else
+ MOZ_ASSERT(memory->movingGrowable());
+ prevMemoryBase = oldBuf->dataPointer();
+ if (!ArrayBufferObject::wasmMovingGrowToSize(newSize.value(), oldBuf, &newBuf, cx))
+ return -1;
+#endif
+ }
+
+ memory->setReservedSlot(BUFFER_SLOT, ObjectValue(*newBuf));
+
+ // Only notify moving-grow-observers after the BUFFER_SLOT has been updated
+ // since observers will call buffer().
+ if (memory->hasObservers()) {
+ MOZ_ASSERT(prevMemoryBase);
+ for (InstanceSet::Range r = memory->observers().all(); !r.empty(); r.popFront())
+ r.front()->instance().onMovingGrowMemory(prevMemoryBase);
+ }
+
+ return oldNumPages;
+}
+
+// ============================================================================
+// WebAssembly.Table class and methods
+
+const ClassOps WasmTableObject::classOps_ =
+{
+ nullptr, /* addProperty */
+ nullptr, /* delProperty */
+ nullptr, /* getProperty */
+ nullptr, /* setProperty */
+ nullptr, /* enumerate */
+ nullptr, /* resolve */
+ nullptr, /* mayResolve */
+ WasmTableObject::finalize,
+ nullptr, /* call */
+ nullptr, /* hasInstance */
+ nullptr, /* construct */
+ WasmTableObject::trace
+};
+
+const Class WasmTableObject::class_ =
+{
+ "WebAssembly.Table",
+ JSCLASS_DELAY_METADATA_BUILDER |
+ JSCLASS_HAS_RESERVED_SLOTS(WasmTableObject::RESERVED_SLOTS) |
+ JSCLASS_FOREGROUND_FINALIZE,
+ &WasmTableObject::classOps_
+};
+
+bool
+WasmTableObject::isNewborn() const
+{
+ MOZ_ASSERT(is<WasmTableObject>());
+ return getReservedSlot(TABLE_SLOT).isUndefined();
+}
+
+/* static */ void
+WasmTableObject::finalize(FreeOp* fop, JSObject* obj)
+{
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn())
+ tableObj.table().Release();
+}
+
+/* static */ void
+WasmTableObject::trace(JSTracer* trc, JSObject* obj)
+{
+ WasmTableObject& tableObj = obj->as<WasmTableObject>();
+ if (!tableObj.isNewborn())
+ tableObj.table().tracePrivate(trc);
+}
+
+/* static */ WasmTableObject*
+WasmTableObject::create(JSContext* cx, Limits limits)
+{
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmTable).toObject());
+
+ AutoSetNewObjectMetadata metadata(cx);
+ RootedWasmTableObject obj(cx, NewObjectWithGivenProto<WasmTableObject>(cx, proto));
+ if (!obj)
+ return nullptr;
+
+ MOZ_ASSERT(obj->isNewborn());
+
+ TableDesc td(TableKind::AnyFunction, limits);
+ td.external = true;
+
+ SharedTable table = Table::create(cx, td, obj);
+ if (!table)
+ return nullptr;
+
+ obj->initReservedSlot(TABLE_SLOT, PrivateValue(table.forget().take()));
+
+ MOZ_ASSERT(!obj->isNewborn());
+ return obj;
+}
+
+/* static */ bool
+WasmTableObject::construct(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+
+ if (!ThrowIfNotConstructing(cx, args, "Table"))
+ return false;
+
+ if (!args.requireAtLeast(cx, "WebAssembly.Table", 1))
+ return false;
+
+ if (!args.get(0).isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_DESC_ARG, "table");
+ return false;
+ }
+
+ RootedObject obj(cx, &args[0].toObject());
+
+ JSAtom* elementAtom = Atomize(cx, "element", strlen("element"));
+ if (!elementAtom)
+ return false;
+ RootedId elementId(cx, AtomToId(elementAtom));
+
+ RootedValue elementVal(cx);
+ if (!GetProperty(cx, obj, obj, elementId, &elementVal))
+ return false;
+
+ if (!elementVal.isString()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_ELEMENT);
+ return false;
+ }
+
+ JSLinearString* elementStr = elementVal.toString()->ensureLinear(cx);
+ if (!elementStr)
+ return false;
+
+ if (!StringEqualsAscii(elementStr, "anyfunc")) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_ELEMENT);
+ return false;
+ }
+
+ Limits limits;
+ if (!GetLimits(cx, obj, UINT32_MAX, "Table", &limits))
+ return false;
+
+ RootedWasmTableObject table(cx, WasmTableObject::create(cx, limits));
+ if (!table)
+ return false;
+
+ args.rval().setObject(*table);
+ return true;
+}
+
+static bool
+IsTable(HandleValue v)
+{
+ return v.isObject() && v.toObject().is<WasmTableObject>();
+}
+
+/* static */ bool
+WasmTableObject::lengthGetterImpl(JSContext* cx, const CallArgs& args)
+{
+ args.rval().setNumber(args.thisv().toObject().as<WasmTableObject>().table().length());
+ return true;
+}
+
+/* static */ bool
+WasmTableObject::lengthGetter(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, lengthGetterImpl>(cx, args);
+}
+
+const JSPropertySpec WasmTableObject::properties[] =
+{
+ JS_PSG("length", WasmTableObject::lengthGetter, 0),
+ JS_PS_END
+};
+
+/* static */ bool
+WasmTableObject::getImpl(JSContext* cx, const CallArgs& args)
+{
+ RootedWasmTableObject tableObj(cx, &args.thisv().toObject().as<WasmTableObject>());
+ const Table& table = tableObj->table();
+
+ uint32_t index;
+ if (!ToNonWrappingUint32(cx, args.get(0), table.length() - 1, "Table", "get index", &index))
+ return false;
+
+ ExternalTableElem& elem = table.externalArray()[index];
+ if (!elem.code) {
+ args.rval().setNull();
+ return true;
+ }
+
+ Instance& instance = *elem.tls->instance;
+ const CodeRange& codeRange = *instance.code().lookupRange(elem.code);
+ MOZ_ASSERT(codeRange.isFunction());
+
+ RootedWasmInstanceObject instanceObj(cx, instance.object());
+ RootedFunction fun(cx);
+ if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange.funcIndex(), &fun))
+ return false;
+
+ args.rval().setObject(*fun);
+ return true;
+}
+
+/* static */ bool
+WasmTableObject::get(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, getImpl>(cx, args);
+}
+
+/* static */ bool
+WasmTableObject::setImpl(JSContext* cx, const CallArgs& args)
+{
+ RootedWasmTableObject tableObj(cx, &args.thisv().toObject().as<WasmTableObject>());
+ Table& table = tableObj->table();
+
+ if (!args.requireAtLeast(cx, "set", 2))
+ return false;
+
+ uint32_t index;
+ if (!ToNonWrappingUint32(cx, args.get(0), table.length() - 1, "Table", "set index", &index))
+ return false;
+
+ RootedFunction value(cx);
+ if (!IsExportedFunction(args[1], &value) && !args[1].isNull()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_TABLE_VALUE);
+ return false;
+ }
+
+ if (value) {
+ RootedWasmInstanceObject instanceObj(cx, ExportedFunctionToInstanceObject(value));
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(value);
+
+#ifdef DEBUG
+ RootedFunction f(cx);
+ MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
+ MOZ_ASSERT(value == f);
+#endif
+
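+ // Look up the function's code range in the exporting instance and store
+ // its table-entry address, together with that instance, in the table.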
+ Instance& instance = instanceObj->instance();
+ const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
+ const CodeRange& codeRange = instance.metadata().codeRanges[funcExport.codeRangeIndex()];
+ void* code = instance.codeSegment().base() + codeRange.funcTableEntry();
+ table.set(index, code, instance);
+ } else {
+ table.setNull(index);
+ }
+
+ args.rval().setUndefined();
+ return true;
+}
+
+/* static */ bool
+WasmTableObject::set(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, setImpl>(cx, args);
+}
+
+/* static */ bool
+WasmTableObject::growImpl(JSContext* cx, const CallArgs& args)
+{
+ RootedWasmTableObject table(cx, &args.thisv().toObject().as<WasmTableObject>());
+
+ uint32_t delta;
+ if (!ToNonWrappingUint32(cx, args.get(0), UINT32_MAX, "Table", "grow delta", &delta))
+ return false;
+
+ uint32_t ret = table->table().grow(delta, cx);
+
+ if (ret == uint32_t(-1)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_GROW, "table");
+ return false;
+ }
+
+ args.rval().setInt32(ret);
+ return true;
+}
+
+/* static */ bool
+WasmTableObject::grow(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ return CallNonGenericMethod<IsTable, growImpl>(cx, args);
+}
+
+const JSFunctionSpec WasmTableObject::methods[] =
+{
+ JS_FN("get", WasmTableObject::get, 1, 0),
+ JS_FN("set", WasmTableObject::set, 2, 0),
+ JS_FN("grow", WasmTableObject::grow, 1, 0),
+ JS_FS_END
+};
+
+const JSFunctionSpec WasmTableObject::static_methods[] =
+{ JS_FS_END };
+
+Table&
+WasmTableObject::table() const
+{
+ return *(Table*)getReservedSlot(TABLE_SLOT).toPrivate();
+}
+
+// ============================================================================
+// WebAssembly class and static methods
+
+#if JS_HAS_TOSOURCE
+static bool
+WebAssembly_toSource(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setString(cx->names().WebAssembly);
+ return true;
+}
+#endif
+
+#ifdef SPIDERMONKEY_PROMISE
+static bool
+Nop(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs args = CallArgsFromVp(argc, vp);
+ args.rval().setUndefined();
+ return true;
+}
+
+static bool
+Reject(JSContext* cx, const CompileArgs& args, UniqueChars error, Handle<PromiseObject*> promise)
+{
+ if (!error) {
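+ // A null error message signals OOM during compilation: report it and
+ // reject the promise with the resulting pending exception.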
+ ReportOutOfMemory(cx);
+
+ RootedValue rejectionValue(cx);
+ if (!cx->getPendingException(&rejectionValue))
+ return false;
+
+ return promise->reject(cx, rejectionValue);
+ }
+
+ RootedObject stack(cx, promise->allocationSite());
+ RootedString filename(cx, JS_NewStringCopyZ(cx, args.scriptedCaller.filename.get()));
+ if (!filename)
+ return false;
+
+ unsigned line = args.scriptedCaller.line;
+ unsigned column = args.scriptedCaller.column;
+
+ // Ideally we'd report a JSMSG_WASM_COMPILE_ERROR here, but there's no easy
+ // way to create an ErrorObject for an arbitrary error code with multiple
+ // replacements.
+ UniqueChars str(JS_smprintf("wasm validation error: %s", error.get()));
+ if (!str)
+ return false;
+
+ RootedString message(cx, NewLatin1StringZ(cx, Move(str)));
+ if (!message)
+ return false;
+
+ RootedObject errorObj(cx,
+ ErrorObject::create(cx, JSEXN_WASMCOMPILEERROR, stack, filename, line, column, nullptr, message));
+ if (!errorObj)
+ return false;
+
+ RootedValue rejectionValue(cx, ObjectValue(*errorObj));
+ return promise->reject(cx, rejectionValue);
+}
+
+static bool
+ResolveCompilation(JSContext* cx, Module& module, Handle<PromiseObject*> promise)
+{
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, module, proto));
+ if (!moduleObj)
+ return false;
+
+ RootedValue resolutionValue(cx, ObjectValue(*moduleObj));
+ return promise->resolve(cx, resolutionValue);
+}
+
+struct CompileTask : PromiseTask
+{
+ MutableBytes bytecode;
+ CompileArgs compileArgs;
+ UniqueChars error;
+ SharedModule module;
+
+ CompileTask(JSContext* cx, Handle<PromiseObject*> promise)
+ : PromiseTask(cx, promise)
+ {}
+
+ void execute() override {
+ module = Compile(*bytecode, compileArgs, &error);
+ }
+
+ bool finishPromise(JSContext* cx, Handle<PromiseObject*> promise) override {
+ return module
+ ? ResolveCompilation(cx, *module, promise)
+ : Reject(cx, compileArgs, Move(error), promise);
+ }
+};
+
+static bool
+RejectWithPendingException(JSContext* cx, Handle<PromiseObject*> promise)
+{
+ if (!cx->isExceptionPending())
+ return false;
+
+ RootedValue rejectionValue(cx);
+ if (!GetAndClearException(cx, &rejectionValue))
+ return false;
+
+ return promise->reject(cx, rejectionValue);
+}
+
+static bool
+RejectWithPendingException(JSContext* cx, Handle<PromiseObject*> promise, CallArgs& callArgs)
+{
+ if (!RejectWithPendingException(cx, promise))
+ return false;
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool
+GetBufferSource(JSContext* cx, CallArgs callArgs, const char* name, MutableBytes* bytecode)
+{
+ if (!callArgs.requireAtLeast(cx, name, 1))
+ return false;
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_BUF_ARG);
+ return false;
+ }
+
+ return GetBufferSource(cx, &callArgs[0].toObject(), JSMSG_WASM_BAD_BUF_ARG, bytecode);
+}
+
+static bool
+WebAssembly_compile(JSContext* cx, unsigned argc, Value* vp)
+{
+ if (!cx->startAsyncTaskCallback || !cx->finishAsyncTaskCallback) {
+ JS_ReportErrorASCII(cx, "WebAssembly.compile not supported in this runtime.");
+ return false;
+ }
+
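+ // The promise is created with a no-op executor; the CompileTask resolves
+ // or rejects it explicitly once compilation finishes.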
+ RootedFunction nopFun(cx, NewNativeFunction(cx, Nop, 0, nullptr));
+ if (!nopFun)
+ return false;
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::create(cx, nopFun));
+ if (!promise)
+ return false;
+
+ auto task = cx->make_unique<CompileTask>(cx, promise);
+ if (!task)
+ return false;
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.compile", &task->bytecode))
+ return RejectWithPendingException(cx, promise, callArgs);
+
+ if (!InitCompileArgs(cx, &task->compileArgs))
+ return false;
+
+ if (!StartPromiseTask(cx, Move(task)))
+ return false;
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+
+static bool
+ResolveInstantiation(JSContext* cx, Module& module, HandleObject importObj,
+ Handle<PromiseObject*> promise)
+{
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ RootedObject moduleObj(cx, WasmModuleObject::create(cx, module, proto));
+ if (!moduleObj)
+ return false;
+
+ RootedWasmInstanceObject instanceObj(cx);
+ if (!Instantiate(cx, module, importObj, &instanceObj))
+ return RejectWithPendingException(cx, promise);
+
+ RootedObject resultObj(cx, JS_NewPlainObject(cx));
+ if (!resultObj)
+ return false;
+
+ RootedValue val(cx, ObjectValue(*moduleObj));
+ if (!JS_DefineProperty(cx, resultObj, "module", val, JSPROP_ENUMERATE))
+ return false;
+
+ val = ObjectValue(*instanceObj);
+ if (!JS_DefineProperty(cx, resultObj, "instance", val, JSPROP_ENUMERATE))
+ return false;
+
+ val = ObjectValue(*resultObj);
+ return promise->resolve(cx, val);
+}
+
+struct InstantiateTask : CompileTask
+{
+ PersistentRootedObject importObj;
+
+ InstantiateTask(JSContext* cx, Handle<PromiseObject*> promise, HandleObject importObj)
+ : CompileTask(cx, promise),
+ importObj(cx, importObj)
+ {}
+
+ bool finishPromise(JSContext* cx, Handle<PromiseObject*> promise) override {
+ return module
+ ? ResolveInstantiation(cx, *module, importObj, promise)
+ : Reject(cx, compileArgs, Move(error), promise);
+ }
+};
+
+static bool
+GetInstantiateArgs(JSContext* cx, CallArgs callArgs, MutableHandleObject firstArg,
+ MutableHandleObject importObj)
+{
+ if (!callArgs.requireAtLeast(cx, "WebAssembly.instantiate", 1))
+ return false;
+
+ if (!callArgs[0].isObject()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_BUF_MOD_ARG);
+ return false;
+ }
+
+ firstArg.set(&callArgs[0].toObject());
+
+ if (!callArgs.get(1).isUndefined()) {
+ if (!callArgs[1].isObject())
+ return ThrowBadImportArg(cx);
+ importObj.set(&callArgs[1].toObject());
+ }
+
+ return true;
+}
+
+static bool
+WebAssembly_instantiate(JSContext* cx, unsigned argc, Value* vp)
+{
+ if (!cx->startAsyncTaskCallback || !cx->finishAsyncTaskCallback) {
+ JS_ReportErrorASCII(cx, "WebAssembly.instantiate not supported in this runtime.");
+ return false;
+ }
+
+ RootedFunction nopFun(cx, NewNativeFunction(cx, Nop, 0, nullptr));
+ if (!nopFun)
+ return false;
+
+ Rooted<PromiseObject*> promise(cx, PromiseObject::create(cx, nopFun));
+ if (!promise)
+ return false;
+
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ RootedObject firstArg(cx);
+ RootedObject importObj(cx);
+ if (!GetInstantiateArgs(cx, callArgs, &firstArg, &importObj))
+ return RejectWithPendingException(cx, promise, callArgs);
+
+ Module* module;
+ if (IsModuleObject(firstArg, &module)) {
+ RootedWasmInstanceObject instanceObj(cx);
+ if (!Instantiate(cx, *module, importObj, &instanceObj))
+ return RejectWithPendingException(cx, promise, callArgs);
+
+ RootedValue resolutionValue(cx, ObjectValue(*instanceObj));
+ if (!promise->resolve(cx, resolutionValue))
+ return false;
+ } else {
+ auto task = cx->make_unique<InstantiateTask>(cx, promise, importObj);
+ if (!task)
+ return false;
+
+ if (!GetBufferSource(cx, firstArg, JSMSG_WASM_BAD_BUF_MOD_ARG, &task->bytecode))
+ return RejectWithPendingException(cx, promise, callArgs);
+
+ if (!InitCompileArgs(cx, &task->compileArgs))
+ return false;
+
+ if (!StartPromiseTask(cx, Move(task)))
+ return false;
+ }
+
+ callArgs.rval().setObject(*promise);
+ return true;
+}
+#endif
+
+static bool
+WebAssembly_validate(JSContext* cx, unsigned argc, Value* vp)
+{
+ CallArgs callArgs = CallArgsFromVp(argc, vp);
+
+ MutableBytes bytecode;
+ if (!GetBufferSource(cx, callArgs, "WebAssembly.validate", &bytecode))
+ return false;
+
+ CompileArgs compileArgs;
+ if (!InitCompileArgs(cx, &compileArgs))
+ return false;
+
+ UniqueChars error;
+ bool validated = !!Compile(*bytecode, compileArgs, &error);
+
+ // If the reason for validation failure was OOM (signalled by null error
+ // message), report out-of-memory so that validate's return is always
+ // correct.
+ if (!validated && !error) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ callArgs.rval().setBoolean(validated);
+ return true;
+}
+
+static const JSFunctionSpec WebAssembly_static_methods[] =
+{
+#if JS_HAS_TOSOURCE
+ JS_FN(js_toSource_str, WebAssembly_toSource, 0, 0),
+#endif
+#ifdef SPIDERMONKEY_PROMISE
+ JS_FN("compile", WebAssembly_compile, 1, 0),
+ JS_FN("instantiate", WebAssembly_instantiate, 2, 0),
+#endif
+ JS_FN("validate", WebAssembly_validate, 1, 0),
+ JS_FS_END
+};
+
+const Class js::WebAssemblyClass =
+{
+ js_WebAssembly_str,
+ JSCLASS_HAS_CACHED_PROTO(JSProto_WebAssembly)
+};
+
+template <class Class>
+static bool
+InitConstructor(JSContext* cx, HandleObject wasm, const char* name, MutableHandleObject proto)
+{
+ proto.set(NewBuiltinClassInstance<PlainObject>(cx, SingletonObject));
+ if (!proto)
+ return false;
+
+ if (!DefinePropertiesAndFunctions(cx, proto, Class::properties, Class::methods))
+ return false;
+
+ RootedAtom className(cx, Atomize(cx, name, strlen(name)));
+ if (!className)
+ return false;
+
+ RootedFunction ctor(cx, NewNativeConstructor(cx, Class::construct, 1, className));
+ if (!ctor)
+ return false;
+
+ if (!DefinePropertiesAndFunctions(cx, ctor, nullptr, Class::static_methods))
+ return false;
+
+ if (!LinkConstructorAndPrototype(cx, ctor, proto))
+ return false;
+
+ RootedId id(cx, AtomToId(className));
+ RootedValue ctorValue(cx, ObjectValue(*ctor));
+ return DefineProperty(cx, wasm, id, ctorValue, nullptr, nullptr, 0);
+}
+
+static bool
+InitErrorClass(JSContext* cx, HandleObject wasm, const char* name, JSExnType exn)
+{
+ Handle<GlobalObject*> global = cx->global();
+ RootedObject proto(cx, GlobalObject::getOrCreateCustomErrorPrototype(cx, global, exn));
+ if (!proto)
+ return false;
+
+ RootedAtom className(cx, Atomize(cx, name, strlen(name)));
+ if (!className)
+ return false;
+
+ RootedId id(cx, AtomToId(className));
+ RootedValue ctorValue(cx, global->getConstructor(GetExceptionProtoKey(exn)));
+ return DefineProperty(cx, wasm, id, ctorValue, nullptr, nullptr, 0);
+}
+
+JSObject*
+js::InitWebAssemblyClass(JSContext* cx, HandleObject obj)
+{
+ MOZ_RELEASE_ASSERT(HasSupport(cx));
+
+ Handle<GlobalObject*> global = obj.as<GlobalObject>();
+ MOZ_ASSERT(!global->isStandardClassResolved(JSProto_WebAssembly));
+
+ RootedObject proto(cx, global->getOrCreateObjectPrototype(cx));
+ if (!proto)
+ return nullptr;
+
+ RootedObject wasm(cx, NewObjectWithGivenProto(cx, &WebAssemblyClass, proto, SingletonObject));
+ if (!wasm)
+ return nullptr;
+
+ if (!JS_DefineFunctions(cx, wasm, WebAssembly_static_methods))
+ return nullptr;
+
+ RootedObject moduleProto(cx), instanceProto(cx), memoryProto(cx), tableProto(cx);
+ if (!InitConstructor<WasmModuleObject>(cx, wasm, "Module", &moduleProto))
+ return nullptr;
+ if (!InitConstructor<WasmInstanceObject>(cx, wasm, "Instance", &instanceProto))
+ return nullptr;
+ if (!InitConstructor<WasmMemoryObject>(cx, wasm, "Memory", &memoryProto))
+ return nullptr;
+ if (!InitConstructor<WasmTableObject>(cx, wasm, "Table", &tableProto))
+ return nullptr;
+ if (!InitErrorClass(cx, wasm, "CompileError", JSEXN_WASMCOMPILEERROR))
+ return nullptr;
+ if (!InitErrorClass(cx, wasm, "RuntimeError", JSEXN_WASMRUNTIMEERROR))
+ return nullptr;
+
+ // Perform the final fallible write of the WebAssembly object to a global
+ // object property. Only after that succeeds are the constructor and
+ // prototypes written to the JSProto slots. This keeps initialization
+ // atomic, so a failed initialization can simply be retried.
+
+ if (!JS_DefineProperty(cx, global, js_WebAssembly_str, wasm, JSPROP_RESOLVING))
+ return nullptr;
+
+ global->setPrototype(JSProto_WasmModule, ObjectValue(*moduleProto));
+ global->setPrototype(JSProto_WasmInstance, ObjectValue(*instanceProto));
+ global->setPrototype(JSProto_WasmMemory, ObjectValue(*memoryProto));
+ global->setPrototype(JSProto_WasmTable, ObjectValue(*tableProto));
+ global->setConstructor(JSProto_WebAssembly, ObjectValue(*wasm));
+
+ MOZ_ASSERT(global->isStandardClassResolved(JSProto_WebAssembly));
+ return wasm;
+}
diff --git a/js/src/wasm/WasmJS.h b/js/src/wasm/WasmJS.h
new file mode 100644
index 0000000000..5480797c5e
--- /dev/null
+++ b/js/src/wasm/WasmJS.h
@@ -0,0 +1,267 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_js_h
+#define wasm_js_h
+
+#include "gc/Policy.h"
+#include "vm/NativeObject.h"
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+class TypedArrayObject;
+
+namespace wasm {
+
+// Creates a testing-only NaN JS object whose nan_low (and, for doubles,
+// nan_high) fields hold the NaN payload, for T=float or T=double.
+
+template<typename T>
+JSObject*
+CreateCustomNaNObject(JSContext* cx, T* addr);
+
+// Converts a testing-only NaN JS object with a nan_low field to a float32 NaN
+// with nan_low as the payload.
+
+bool
+ReadCustomFloat32NaNObject(JSContext* cx, HandleValue v, uint32_t* ret);
+
+// Converts a testing-only NaN JS object with nan_{low,high} components to a
+// double NaN whose payload is (uint64_t(nan_high) << 32) | nan_low.
+
+bool
+ReadCustomDoubleNaNObject(JSContext* cx, HandleValue v, uint64_t* ret);
+
+// Creates a JS object containing two fields (low: low 32 bits; high: high 32
+// bits) of a given Int64 value. For testing purposes only.
+
+JSObject*
+CreateI64Object(JSContext* cx, int64_t i64);
+
+// Reads an int64 from a JS object with the same shape as described in the
+// comment above. For testing purposes only.
+
+bool
+ReadI64Object(JSContext* cx, HandleValue v, int64_t* i64);
+
+// Return whether WebAssembly can be compiled on this platform.
+// This must be checked and must be true to call any of the top-level wasm
+// eval/compile methods.
+
+bool
+HasCompilerSupport(ExclusiveContext* cx);
+
+// Return whether WebAssembly is enabled on this platform.
+
+bool
+HasSupport(ExclusiveContext* cx);
+
+// Compiles the binary wasm module given as a typed array of bytes and links
+// the module's imports with the given import object.
+
+MOZ_MUST_USE bool
+Eval(JSContext* cx, Handle<TypedArrayObject*> code, HandleObject importObj,
+ MutableHandleWasmInstanceObject instanceObj);
+
+// The field name of the export object on the instance object.
+
+extern const char InstanceExportField[];
+
+// These accessors can be used to probe JS values for being an exported wasm
+// function.
+
+extern bool
+IsExportedFunction(JSFunction* fun);
+
+extern bool
+IsExportedWasmFunction(JSFunction* fun);
+
+extern bool
+IsExportedFunction(const Value& v, MutableHandleFunction f);
+
+extern Instance&
+ExportedFunctionToInstance(JSFunction* fun);
+
+extern WasmInstanceObject*
+ExportedFunctionToInstanceObject(JSFunction* fun);
+
+extern uint32_t
+ExportedFunctionToFuncIndex(JSFunction* fun);
+
+} // namespace wasm
+
+// The class of the WebAssembly global namespace object.
+
+extern const Class WebAssemblyClass;
+
+JSObject*
+InitWebAssemblyClass(JSContext* cx, HandleObject global);
+
+// The class of WebAssembly.Module. Each WasmModuleObject owns a
+// wasm::Module. These objects are used both as content-facing JS objects and as
+// internal implementation details of asm.js.
+
+class WasmModuleObject : public NativeObject
+{
+ static const unsigned MODULE_SLOT = 0;
+ static const ClassOps classOps_;
+ static void finalize(FreeOp* fop, JSObject* obj);
+ static bool imports(JSContext* cx, unsigned argc, Value* vp);
+ static bool exports(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const Class class_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmModuleObject* create(ExclusiveContext* cx,
+ wasm::Module& module,
+ HandleObject proto = nullptr);
+ wasm::Module& module() const;
+};
+
+// The class of WebAssembly.Instance. Each WasmInstanceObject owns a
+// wasm::Instance. These objects are used both as content-facing JS objects and
+// as internal implementation details of asm.js.
+
+class WasmInstanceObject : public NativeObject
+{
+ static const unsigned INSTANCE_SLOT = 0;
+ static const unsigned EXPORTS_SLOT = 1;
+ static const ClassOps classOps_;
+ bool isNewborn() const;
+ static void finalize(FreeOp* fop, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+
+ // ExportMap maps from function definition index to exported function
+ // object. The map is weak so it does not keep function objects alive; its
+ // only purpose is to ensure that a given function index always yields the
+ // same function object.
+ using ExportMap = GCHashMap<uint32_t,
+ ReadBarrieredFunction,
+ DefaultHasher<uint32_t>,
+ SystemAllocPolicy>;
+ using WeakExportMap = JS::WeakCache<ExportMap>;
+ WeakExportMap& exports() const;
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const Class class_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmInstanceObject* create(JSContext* cx,
+ UniquePtr<wasm::Code> code,
+ HandleWasmMemoryObject memory,
+ Vector<RefPtr<wasm::Table>, 0, SystemAllocPolicy>&& tables,
+ Handle<FunctionVector> funcImports,
+ const wasm::ValVector& globalImports,
+ HandleObject proto);
+ wasm::Instance& instance() const;
+
+ static bool getExportedFunction(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ uint32_t funcIndex,
+ MutableHandleFunction fun);
+
+ const wasm::CodeRange& getExportedFunctionCodeRange(HandleFunction fun);
+};
+
+// The class of WebAssembly.Memory. A WasmMemoryObject references an ArrayBuffer
+// or SharedArrayBuffer object which owns the actual memory.
+
+class WasmMemoryObject : public NativeObject
+{
+ static const unsigned BUFFER_SLOT = 0;
+ static const unsigned OBSERVERS_SLOT = 1;
+ static const ClassOps classOps_;
+ static void finalize(FreeOp* fop, JSObject* obj);
+ static bool bufferGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool bufferGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+
+ using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject,
+ MovableCellHasher<ReadBarrieredWasmInstanceObject>,
+ SystemAllocPolicy>;
+ using WeakInstanceSet = JS::WeakCache<InstanceSet>;
+ bool hasObservers() const;
+ WeakInstanceSet& observers() const;
+ WeakInstanceSet* getOrCreateObservers(JSContext* cx);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 2;
+ static const Class class_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ static WasmMemoryObject* create(ExclusiveContext* cx,
+ Handle<ArrayBufferObjectMaybeShared*> buffer,
+ HandleObject proto);
+ ArrayBufferObjectMaybeShared& buffer() const;
+
+ bool movingGrowable() const;
+ bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+ static uint32_t grow(HandleWasmMemoryObject memory, uint32_t delta, JSContext* cx);
+};
+
+// The class of WebAssembly.Table. A WasmTableObject holds a refcount on a
+// wasm::Table, allowing a Table to be shared between multiple Instances
+// (eventually between multiple threads).
+
+class WasmTableObject : public NativeObject
+{
+ static const unsigned TABLE_SLOT = 0;
+ static const ClassOps classOps_;
+ bool isNewborn() const;
+ static void finalize(FreeOp* fop, JSObject* obj);
+ static void trace(JSTracer* trc, JSObject* obj);
+ static bool lengthGetterImpl(JSContext* cx, const CallArgs& args);
+ static bool lengthGetter(JSContext* cx, unsigned argc, Value* vp);
+ static bool getImpl(JSContext* cx, const CallArgs& args);
+ static bool get(JSContext* cx, unsigned argc, Value* vp);
+ static bool setImpl(JSContext* cx, const CallArgs& args);
+ static bool set(JSContext* cx, unsigned argc, Value* vp);
+ static bool growImpl(JSContext* cx, const CallArgs& args);
+ static bool grow(JSContext* cx, unsigned argc, Value* vp);
+
+ public:
+ static const unsigned RESERVED_SLOTS = 1;
+ static const Class class_;
+ static const JSPropertySpec properties[];
+ static const JSFunctionSpec methods[];
+ static const JSFunctionSpec static_methods[];
+ static bool construct(JSContext*, unsigned, Value*);
+
+ // Note that, after creation, a WasmTableObject's table() is not initialized
+ // and must be initialized before use.
+
+ static WasmTableObject* create(JSContext* cx, wasm::Limits limits);
+ wasm::Table& table() const;
+};
+
+} // namespace js
+
+#endif // wasm_js_h
diff --git a/js/src/wasm/WasmModule.cpp b/js/src/wasm/WasmModule.cpp
new file mode 100644
index 0000000000..be7ddba8f6
--- /dev/null
+++ b/js/src/wasm/WasmModule.cpp
@@ -0,0 +1,1069 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmModule.h"
+
+#include "jsnspr.h"
+
+#include "jit/JitOptions.h"
+#include "wasm/WasmCompile.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+#include "wasm/WasmSerialize.h"
+
+#include "jsatominlines.h"
+
+#include "vm/ArrayBufferObject-inl.h"
+#include "vm/Debugger-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::IsNaN;
+
+const char wasm::InstanceExportField[] = "exports";
+
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+// On MIPS, CodeLabels are instruction immediates so InternalLinks only
+// patch instruction immediates.
+LinkData::InternalLink::InternalLink(Kind kind)
+{
+ MOZ_ASSERT(kind == CodeLabel || kind == InstructionImmediate);
+}
+
+bool
+LinkData::InternalLink::isRawPointerPatch()
+{
+ return false;
+}
+#else
+// On the rest, CodeLabels are raw pointers so InternalLinks only patch
+// raw pointers.
+LinkData::InternalLink::InternalLink(Kind kind)
+{
+ MOZ_ASSERT(kind == CodeLabel || kind == RawPointer);
+}
+
+bool
+LinkData::InternalLink::isRawPointerPatch()
+{
+ return true;
+}
+#endif
+
+size_t
+LinkData::SymbolicLinkArray::serializedSize() const
+{
+ size_t size = 0;
+ for (const Uint32Vector& offsets : *this)
+ size += SerializedPodVectorSize(offsets);
+ return size;
+}
+
+uint8_t*
+LinkData::SymbolicLinkArray::serialize(uint8_t* cursor) const
+{
+ for (const Uint32Vector& offsets : *this)
+ cursor = SerializePodVector(cursor, offsets);
+ return cursor;
+}
+
+const uint8_t*
+LinkData::SymbolicLinkArray::deserialize(const uint8_t* cursor)
+{
+ for (Uint32Vector& offsets : *this) {
+ cursor = DeserializePodVector(cursor, &offsets);
+ if (!cursor)
+ return nullptr;
+ }
+ return cursor;
+}
+
+size_t
+LinkData::SymbolicLinkArray::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ size_t size = 0;
+ for (const Uint32Vector& offsets : *this)
+ size += offsets.sizeOfExcludingThis(mallocSizeOf);
+ return size;
+}
+
+size_t
+LinkData::serializedSize() const
+{
+ return sizeof(pod()) +
+ SerializedPodVectorSize(internalLinks) +
+ symbolicLinks.serializedSize();
+}
+
+uint8_t*
+LinkData::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
+ cursor = SerializePodVector(cursor, internalLinks);
+ cursor = symbolicLinks.serialize(cursor);
+ return cursor;
+}
+
+const uint8_t*
+LinkData::deserialize(const uint8_t* cursor)
+{
+ (cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
+ (cursor = DeserializePodVector(cursor, &internalLinks)) &&
+ (cursor = symbolicLinks.deserialize(cursor));
+ return cursor;
+}
+
+size_t
+LinkData::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return internalLinks.sizeOfExcludingThis(mallocSizeOf) +
+ symbolicLinks.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+Import::serializedSize() const
+{
+ return module.serializedSize() +
+ field.serializedSize() +
+ sizeof(kind);
+}
+
+uint8_t*
+Import::serialize(uint8_t* cursor) const
+{
+ cursor = module.serialize(cursor);
+ cursor = field.serialize(cursor);
+ cursor = WriteScalar<DefinitionKind>(cursor, kind);
+ return cursor;
+}
+
+const uint8_t*
+Import::deserialize(const uint8_t* cursor)
+{
+ (cursor = module.deserialize(cursor)) &&
+ (cursor = field.deserialize(cursor)) &&
+ (cursor = ReadScalar<DefinitionKind>(cursor, &kind));
+ return cursor;
+}
+
+size_t
+Import::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return module.sizeOfExcludingThis(mallocSizeOf) +
+ field.sizeOfExcludingThis(mallocSizeOf);
+}
+
+Export::Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind)
+ : fieldName_(Move(fieldName))
+{
+ pod.kind_ = kind;
+ pod.index_ = index;
+}
+
+Export::Export(UniqueChars fieldName, DefinitionKind kind)
+ : fieldName_(Move(fieldName))
+{
+ pod.kind_ = kind;
+ pod.index_ = 0;
+}
+
+uint32_t
+Export::funcIndex() const
+{
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Function);
+ return pod.index_;
+}
+
+uint32_t
+Export::globalIndex() const
+{
+ MOZ_ASSERT(pod.kind_ == DefinitionKind::Global);
+ return pod.index_;
+}
+
+size_t
+Export::serializedSize() const
+{
+ return fieldName_.serializedSize() +
+ sizeof(pod);
+}
+
+uint8_t*
+Export::serialize(uint8_t* cursor) const
+{
+ cursor = fieldName_.serialize(cursor);
+ cursor = WriteBytes(cursor, &pod, sizeof(pod));
+ return cursor;
+}
+
+const uint8_t*
+Export::deserialize(const uint8_t* cursor)
+{
+ (cursor = fieldName_.deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &pod, sizeof(pod)));
+ return cursor;
+}
+
+size_t
+Export::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return fieldName_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+size_t
+ElemSegment::serializedSize() const
+{
+ return sizeof(tableIndex) +
+ sizeof(offset) +
+ SerializedPodVectorSize(elemFuncIndices) +
+ SerializedPodVectorSize(elemCodeRangeIndices);
+}
+
+uint8_t*
+ElemSegment::serialize(uint8_t* cursor) const
+{
+ cursor = WriteBytes(cursor, &tableIndex, sizeof(tableIndex));
+ cursor = WriteBytes(cursor, &offset, sizeof(offset));
+ cursor = SerializePodVector(cursor, elemFuncIndices);
+ cursor = SerializePodVector(cursor, elemCodeRangeIndices);
+ return cursor;
+}
+
+const uint8_t*
+ElemSegment::deserialize(const uint8_t* cursor)
+{
+ (cursor = ReadBytes(cursor, &tableIndex, sizeof(tableIndex))) &&
+ (cursor = ReadBytes(cursor, &offset, sizeof(offset))) &&
+ (cursor = DeserializePodVector(cursor, &elemFuncIndices)) &&
+ (cursor = DeserializePodVector(cursor, &elemCodeRangeIndices));
+ return cursor;
+}
+
+size_t
+ElemSegment::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return elemFuncIndices.sizeOfExcludingThis(mallocSizeOf) +
+ elemCodeRangeIndices.sizeOfExcludingThis(mallocSizeOf);
+}
+
+/* virtual */ void
+Module::serializedSize(size_t* maybeBytecodeSize, size_t* maybeCompiledSize) const
+{
+ if (maybeBytecodeSize)
+ *maybeBytecodeSize = bytecode_->bytes.length();
+
+ if (maybeCompiledSize) {
+ *maybeCompiledSize = assumptions_.serializedSize() +
+ SerializedPodVectorSize(code_) +
+ linkData_.serializedSize() +
+ SerializedVectorSize(imports_) +
+ SerializedVectorSize(exports_) +
+ SerializedPodVectorSize(dataSegments_) +
+ SerializedVectorSize(elemSegments_) +
+ metadata_->serializedSize();
+ }
+}
+
+/* virtual */ void
+Module::serialize(uint8_t* maybeBytecodeBegin, size_t maybeBytecodeSize,
+ uint8_t* maybeCompiledBegin, size_t maybeCompiledSize) const
+{
+ MOZ_ASSERT(!!maybeBytecodeBegin == !!maybeBytecodeSize);
+ MOZ_ASSERT(!!maybeCompiledBegin == !!maybeCompiledSize);
+
+ if (maybeBytecodeBegin) {
+ // Bytecode deserialization is not guarded by Assumptions and so must not
+ // change incompatibly between builds. For simplicity, the bytecode file
+ // format is therefore just a .wasm file, so backwards compatibility
+ // follows from the backwards compatibility of the wasm binary format.
+
+ const Bytes& bytes = bytecode_->bytes;
+ uint8_t* bytecodeEnd = WriteBytes(maybeBytecodeBegin, bytes.begin(), bytes.length());
+ MOZ_RELEASE_ASSERT(bytecodeEnd == maybeBytecodeBegin + maybeBytecodeSize);
+ }
+
+ if (maybeCompiledBegin) {
+ // Assumptions must be serialized at the beginning of the compiled bytes so
+ // that Module::assumptionsMatch can detect a build-id mismatch before any
+ // other decoding occurs.
+
+ uint8_t* cursor = maybeCompiledBegin;
+ cursor = assumptions_.serialize(cursor);
+ cursor = SerializePodVector(cursor, code_);
+ cursor = linkData_.serialize(cursor);
+ cursor = SerializeVector(cursor, imports_);
+ cursor = SerializeVector(cursor, exports_);
+ cursor = SerializePodVector(cursor, dataSegments_);
+ cursor = SerializeVector(cursor, elemSegments_);
+ cursor = metadata_->serialize(cursor);
+ MOZ_RELEASE_ASSERT(cursor == maybeCompiledBegin + maybeCompiledSize);
+ }
+}
+
+/* static */ bool
+Module::assumptionsMatch(const Assumptions& current, const uint8_t* compiledBegin,
+ size_t compiledSize)
+{
+ Assumptions cached;
+ if (!cached.deserialize(compiledBegin, compiledSize))
+ return false;
+
+ return current == cached;
+}
+
+/* static */ SharedModule
+Module::deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
+ const uint8_t* compiledBegin, size_t compiledSize,
+ Metadata* maybeMetadata)
+{
+ MutableBytes bytecode = js_new<ShareableBytes>();
+ if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeSize))
+ return nullptr;
+
+ memcpy(bytecode->bytes.begin(), bytecodeBegin, bytecodeSize);
+
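+ // Deserialize the compiled fields in exactly the order Module::serialize
+ // wrote them.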
+ Assumptions assumptions;
+ const uint8_t* cursor = assumptions.deserialize(compiledBegin, compiledSize);
+ if (!cursor)
+ return nullptr;
+
+ Bytes code;
+ cursor = DeserializePodVector(cursor, &code);
+ if (!cursor)
+ return nullptr;
+
+ LinkData linkData;
+ cursor = linkData.deserialize(cursor);
+ if (!cursor)
+ return nullptr;
+
+ ImportVector imports;
+ cursor = DeserializeVector(cursor, &imports);
+ if (!cursor)
+ return nullptr;
+
+ ExportVector exports;
+ cursor = DeserializeVector(cursor, &exports);
+ if (!cursor)
+ return nullptr;
+
+ DataSegmentVector dataSegments;
+ cursor = DeserializePodVector(cursor, &dataSegments);
+ if (!cursor)
+ return nullptr;
+
+ ElemSegmentVector elemSegments;
+ cursor = DeserializeVector(cursor, &elemSegments);
+ if (!cursor)
+ return nullptr;
+
+ MutableMetadata metadata;
+ if (maybeMetadata) {
+ metadata = maybeMetadata;
+ } else {
+ metadata = js_new<Metadata>();
+ if (!metadata)
+ return nullptr;
+ }
+ cursor = metadata->deserialize(cursor);
+ if (!cursor)
+ return nullptr;
+
+ MOZ_RELEASE_ASSERT(cursor == compiledBegin + compiledSize);
+ MOZ_RELEASE_ASSERT(!!maybeMetadata == metadata->isAsmJS());
+
+ return js_new<Module>(Move(assumptions),
+ Move(code),
+ Move(linkData),
+ Move(imports),
+ Move(exports),
+ Move(dataSegments),
+ Move(elemSegments),
+ *metadata,
+ *bytecode);
+}
+
+/* virtual */ JSObject*
+Module::createObject(JSContext* cx)
+{
+ if (!GlobalObject::ensureConstructor(cx, cx->global(), JSProto_WebAssembly))
+ return nullptr;
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmModule).toObject());
+ return WasmModuleObject::create(cx, *this, proto);
+}
+
+struct MemUnmap
+{
+ uint32_t size;
+ MemUnmap() : size(0) {}
+ explicit MemUnmap(uint32_t size) : size(size) {}
+ void operator()(uint8_t* p) { MOZ_ASSERT(size); PR_MemUnmap(p, size); }
+};
+
+typedef UniquePtr<uint8_t, MemUnmap> UniqueMapping;
+
+static UniqueMapping
+MapFile(PRFileDesc* file, PRFileInfo* info)
+{
+ if (PR_GetOpenFileInfo(file, info) != PR_SUCCESS)
+ return nullptr;
+
+ PRFileMap* map = PR_CreateFileMap(file, info->size, PR_PROT_READONLY);
+ if (!map)
+ return nullptr;
+
+ // PRFileMap objects do not need to be kept alive after the memory has been
+ // mapped, so unconditionally close the PRFileMap, regardless of whether
+ // PR_MemMap succeeds.
+ uint8_t* memory = (uint8_t*)PR_MemMap(map, 0, info->size);
+ PR_CloseFileMap(map);
+ return UniqueMapping(memory, MemUnmap(info->size));
+}
+
+bool
+wasm::CompiledModuleAssumptionsMatch(PRFileDesc* compiled, JS::BuildIdCharVector&& buildId)
+{
+ PRFileInfo info;
+ UniqueMapping mapping = MapFile(compiled, &info);
+ if (!mapping)
+ return false;
+
+ Assumptions assumptions(Move(buildId));
+ return Module::assumptionsMatch(assumptions, mapping.get(), info.size);
+}
+
+SharedModule
+wasm::DeserializeModule(PRFileDesc* bytecodeFile, PRFileDesc* maybeCompiledFile,
+ JS::BuildIdCharVector&& buildId, UniqueChars filename,
+ unsigned line, unsigned column)
+{
+ PRFileInfo bytecodeInfo;
+ UniqueMapping bytecodeMapping = MapFile(bytecodeFile, &bytecodeInfo);
+ if (!bytecodeMapping)
+ return nullptr;
+
+ if (PRFileDesc* compiledFile = maybeCompiledFile) {
+ PRFileInfo compiledInfo;
+ UniqueMapping compiledMapping = MapFile(compiledFile, &compiledInfo);
+ if (!compiledMapping)
+ return nullptr;
+
+ return Module::deserialize(bytecodeMapping.get(), bytecodeInfo.size,
+ compiledMapping.get(), compiledInfo.size);
+ }
+
+ // There is no usable compiled file (it is either absent or its assumptions
+ // don't match), so recompile from bytecode. The bytecode file format is
+ // simply that of a .wasm (see Module::serialize).
+
+ MutableBytes bytecode = js_new<ShareableBytes>();
+ if (!bytecode || !bytecode->bytes.initLengthUninitialized(bytecodeInfo.size))
+ return nullptr;
+
+ memcpy(bytecode->bytes.begin(), bytecodeMapping.get(), bytecodeInfo.size);
+
+ ScriptedCaller scriptedCaller;
+ scriptedCaller.filename = Move(filename);
+ scriptedCaller.line = line;
+ scriptedCaller.column = column;
+
+ CompileArgs args(Assumptions(Move(buildId)), Move(scriptedCaller));
+
+ UniqueChars error;
+ return Compile(*bytecode, Move(args), &error);
+}
+
+/* virtual */ void
+Module::addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ size_t* code,
+ size_t* data) const
+{
+ *data += mallocSizeOf(this) +
+ assumptions_.sizeOfExcludingThis(mallocSizeOf) +
+ code_.sizeOfExcludingThis(mallocSizeOf) +
+ linkData_.sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(imports_, mallocSizeOf) +
+ SizeOfVectorExcludingThis(exports_, mallocSizeOf) +
+ dataSegments_.sizeOfExcludingThis(mallocSizeOf) +
+ SizeOfVectorExcludingThis(elemSegments_, mallocSizeOf) +
+ metadata_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenMetadata) +
+ bytecode_->sizeOfIncludingThisIfNotSeen(mallocSizeOf, seenBytes);
+}
+
+
+// Extracts the machine code as a JS object. The result has a "code" property
+// that is a Uint8Array and a "segments" property that is an array of objects.
+// Each segment object contains offsets into the "code" array and basic
+// information about a code segment/function body.
+bool
+Module::extractCode(JSContext* cx, MutableHandleValue vp)
+{
+ RootedPlainObject result(cx, NewBuiltinClassInstance<PlainObject>(cx));
+ if (!result)
+ return false;
+
+ RootedObject code(cx, JS_NewUint8Array(cx, code_.length()));
+ if (!code)
+ return false;
+
+ memcpy(code->as<TypedArrayObject>().viewDataUnshared(), code_.begin(), code_.length());
+
+ RootedValue value(cx, ObjectValue(*code));
+ if (!JS_DefineProperty(cx, result, "code", value, JSPROP_ENUMERATE))
+ return false;
+
+ RootedObject segments(cx, NewDenseEmptyArray(cx));
+ if (!segments)
+ return false;
+
+ for (const CodeRange& p : metadata_->codeRanges) {
+ RootedObject segment(cx, NewObjectWithGivenProto<PlainObject>(cx, nullptr));
+ if (!segment)
+ return false;
+
+ value.setNumber((uint32_t)p.begin());
+ if (!JS_DefineProperty(cx, segment, "begin", value, JSPROP_ENUMERATE))
+ return false;
+
+ value.setNumber((uint32_t)p.end());
+ if (!JS_DefineProperty(cx, segment, "end", value, JSPROP_ENUMERATE))
+ return false;
+
+ value.setNumber((uint32_t)p.kind());
+ if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE))
+ return false;
+
+ if (p.isFunction()) {
+ value.setNumber((uint32_t)p.funcIndex());
+ if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE))
+ return false;
+
+ value.setNumber((uint32_t)p.funcNonProfilingEntry());
+ if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE))
+ return false;
+
+ value.setNumber((uint32_t)p.funcProfilingEpilogue());
+ if (!JS_DefineProperty(cx, segment, "funcBodyEnd", value, JSPROP_ENUMERATE))
+ return false;
+ }
+
+ if (!NewbornArrayPush(cx, segments, ObjectValue(*segment)))
+ return false;
+ }
+
+ value.setObject(*segments);
+ if (!JS_DefineProperty(cx, result, "segments", value, JSPROP_ENUMERATE))
+ return false;
+
+ vp.setObject(*result);
+ return true;
+}
+
+static uint32_t
+EvaluateInitExpr(const ValVector& globalImports, InitExpr initExpr)
+{
+ switch (initExpr.kind()) {
+ case InitExpr::Kind::Constant:
+ return initExpr.val().i32();
+ case InitExpr::Kind::GetGlobal:
+ return globalImports[initExpr.globalIndex()].i32();
+ }
+
+ MOZ_CRASH("bad initializer expression");
+}
+
+bool
+Module::initSegments(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ Handle<FunctionVector> funcImports,
+ HandleWasmMemoryObject memoryObj,
+ const ValVector& globalImports) const
+{
+ Instance& instance = instanceObj->instance();
+ const SharedTableVector& tables = instance.tables();
+
+ // Perform all error checks up front so that this function never leaves
+ // memories or tables partially initialized when an error is reported.
+
+ for (const ElemSegment& seg : elemSegments_) {
+ uint32_t numElems = seg.elemCodeRangeIndices.length();
+ if (!numElems)
+ continue;
+
+ uint32_t tableLength = tables[seg.tableIndex]->length();
+ uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
+
+ if (offset > tableLength || tableLength - offset < numElems) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_FIT,
+ "elem", "table");
+ return false;
+ }
+ }
+
+ if (memoryObj) {
+ for (const DataSegment& seg : dataSegments_) {
+ if (!seg.length)
+ continue;
+
+ uint32_t memoryLength = memoryObj->buffer().byteLength();
+ uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
+
+ if (offset > memoryLength || memoryLength - offset < seg.length) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_FIT,
+ "data", "memory");
+ return false;
+ }
+ }
+ } else {
+ MOZ_ASSERT(dataSegments_.empty());
+ }
+
+ // Now that initialization can't fail partway through, write data/elem
+ // segments into memories/tables.
+
+ for (const ElemSegment& seg : elemSegments_) {
+ Table& table = *tables[seg.tableIndex];
+ uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
+ bool profilingEnabled = instance.code().profilingEnabled();
+ const CodeRangeVector& codeRanges = metadata().codeRanges;
+ uint8_t* codeBase = instance.codeBase();
+
+ for (uint32_t i = 0; i < seg.elemCodeRangeIndices.length(); i++) {
+ uint32_t funcIndex = seg.elemFuncIndices[i];
+ if (funcIndex < funcImports.length() && IsExportedWasmFunction(funcImports[funcIndex])) {
+ MOZ_ASSERT(!metadata().isAsmJS());
+ MOZ_ASSERT(!table.isTypedFunction());
+
+ HandleFunction f = funcImports[funcIndex];
+ WasmInstanceObject* exportInstanceObj = ExportedFunctionToInstanceObject(f);
+ const CodeRange& cr = exportInstanceObj->getExportedFunctionCodeRange(f);
+ Instance& exportInstance = exportInstanceObj->instance();
+ table.set(offset + i, exportInstance.codeBase() + cr.funcTableEntry(), exportInstance);
+ } else {
+ const CodeRange& cr = codeRanges[seg.elemCodeRangeIndices[i]];
+ uint32_t entryOffset = table.isTypedFunction()
+ ? profilingEnabled
+ ? cr.funcProfilingEntry()
+ : cr.funcNonProfilingEntry()
+ : cr.funcTableEntry();
+ table.set(offset + i, codeBase + entryOffset, instance);
+ }
+ }
+ }
+
+ if (memoryObj) {
+ uint8_t* memoryBase = memoryObj->buffer().dataPointerEither().unwrap(/* memcpy */);
+
+ for (const DataSegment& seg : dataSegments_) {
+ MOZ_ASSERT(seg.bytecodeOffset <= bytecode_->length());
+ MOZ_ASSERT(seg.length <= bytecode_->length() - seg.bytecodeOffset);
+ uint32_t offset = EvaluateInitExpr(globalImports, seg.offset);
+ memcpy(memoryBase + offset, bytecode_->begin() + seg.bytecodeOffset, seg.length);
+ }
+ }
+
+ return true;
+}
+
+bool
+Module::instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const
+{
+ MOZ_ASSERT(funcImports.length() == metadata_->funcImports.length());
+
+ if (metadata().isAsmJS())
+ return true;
+
+ for (size_t i = 0; i < metadata_->funcImports.length(); i++) {
+ HandleFunction f = funcImports[i];
+ if (!IsExportedFunction(f) || ExportedFunctionToInstance(f).isAsmJS())
+ continue;
+
+ uint32_t funcIndex = ExportedFunctionToFuncIndex(f);
+ Instance& instance = ExportedFunctionToInstance(f);
+ const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
+
+ if (funcExport.sig() != metadata_->funcImports[i].sig()) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_SIG);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool
+CheckLimits(JSContext* cx, uint32_t declaredMin, Maybe<uint32_t> declaredMax, uint32_t actualLength,
+ Maybe<uint32_t> actualMax, bool isAsmJS, const char* kind)
+{
+ if (isAsmJS) {
+ MOZ_ASSERT(actualLength >= declaredMin);
+ MOZ_ASSERT(!declaredMax);
+ MOZ_ASSERT(actualLength == actualMax.value());
+ return true;
+ }
+
+ if (actualLength < declaredMin || actualLength > declaredMax.valueOr(UINT32_MAX)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, kind);
+ return false;
+ }
+
+ if ((actualMax && declaredMax && *actualMax > *declaredMax) || (!actualMax && declaredMax)) {
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_MAX, kind);
+ return false;
+ }
+
+ return true;
+}
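+
+// Worked example (illustrative): importing a table of current length 2 that
+// carries no maximum into a module declaring limits {initial: 1, maximum: 4}
+// passes the size check above but fails the maximum check, since the import
+// has no maximum while the module declares one (!actualMax && declaredMax).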
+
+// asm.js module instantiation supplies its own buffer, but for wasm, create and
+// initialize the buffer if one is requested. Either way, the buffer is wrapped
+// in a WebAssembly.Memory object which is what the Instance stores.
+bool
+Module::instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const
+{
+ if (!metadata_->usesMemory()) {
+ MOZ_ASSERT(!memory);
+ MOZ_ASSERT(dataSegments_.empty());
+ return true;
+ }
+
+ uint32_t declaredMin = metadata_->minMemoryLength;
+ Maybe<uint32_t> declaredMax = metadata_->maxMemoryLength;
+
+ if (memory) {
+ ArrayBufferObjectMaybeShared& buffer = memory->buffer();
+ MOZ_ASSERT_IF(metadata_->isAsmJS(), buffer.isPreparedForAsmJS());
+ MOZ_ASSERT_IF(!metadata_->isAsmJS(), buffer.as<ArrayBufferObject>().isWasm());
+
+ if (!CheckLimits(cx, declaredMin, declaredMax, buffer.byteLength(), buffer.wasmMaxSize(),
+ metadata_->isAsmJS(), "Memory")) {
+ return false;
+ }
+ } else {
+ MOZ_ASSERT(!metadata_->isAsmJS());
+ MOZ_ASSERT(metadata_->memoryUsage == MemoryUsage::Unshared);
+
+ RootedArrayBufferObjectMaybeShared buffer(cx,
+ ArrayBufferObject::createForWasm(cx, declaredMin, declaredMax));
+ if (!buffer)
+ return false;
+
+ RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmMemory).toObject());
+
+ memory.set(WasmMemoryObject::create(cx, buffer, proto));
+ if (!memory)
+ return false;
+ }
+
+ return true;
+}
+
+bool
+Module::instantiateTable(JSContext* cx, MutableHandleWasmTableObject tableObj,
+ SharedTableVector* tables) const
+{
+ if (tableObj) {
+ MOZ_ASSERT(!metadata_->isAsmJS());
+
+ MOZ_ASSERT(metadata_->tables.length() == 1);
+ const TableDesc& td = metadata_->tables[0];
+ MOZ_ASSERT(td.external);
+
+ Table& table = tableObj->table();
+ if (!CheckLimits(cx, td.limits.initial, td.limits.maximum, table.length(), table.maximum(),
+ metadata_->isAsmJS(), "Table")) {
+ return false;
+ }
+
+ if (!tables->append(&table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ } else {
+ for (const TableDesc& td : metadata_->tables) {
+ SharedTable table;
+ if (td.external) {
+ MOZ_ASSERT(!tableObj);
+ MOZ_ASSERT(td.kind == TableKind::AnyFunction);
+
+ tableObj.set(WasmTableObject::create(cx, td.limits));
+ if (!tableObj)
+ return false;
+
+ table = &tableObj->table();
+ } else {
+ table = Table::create(cx, td, /* HandleWasmTableObject = */ nullptr);
+ if (!table)
+ return false;
+ }
+
+ if (!tables->emplaceBack(table)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ }
+ }
+
+ return true;
+}
+
+static bool
+GetFunctionExport(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ Handle<FunctionVector> funcImports,
+ const Export& exp,
+ MutableHandleValue val)
+{
+ if (exp.funcIndex() < funcImports.length() &&
+ IsExportedWasmFunction(funcImports[exp.funcIndex()]))
+ {
+ val.setObject(*funcImports[exp.funcIndex()]);
+ return true;
+ }
+
+ RootedFunction fun(cx);
+ if (!instanceObj->getExportedFunction(cx, instanceObj, exp.funcIndex(), &fun))
+ return false;
+
+ val.setObject(*fun);
+ return true;
+}
+
+static bool
+GetGlobalExport(JSContext* cx, const GlobalDescVector& globals, uint32_t globalIndex,
+ const ValVector& globalImports, MutableHandleValue jsval)
+{
+ const GlobalDesc& global = globals[globalIndex];
+
+ // Imports are located upfront in the globals array.
+ Val val;
+ switch (global.kind()) {
+ case GlobalKind::Import: val = globalImports[globalIndex]; break;
+ case GlobalKind::Variable: MOZ_CRASH("mutable variables can't be exported");
+ case GlobalKind::Constant: val = global.constantValue(); break;
+ }
+
+ switch (global.type()) {
+ case ValType::I32: {
+ jsval.set(Int32Value(val.i32()));
+ return true;
+ }
+ case ValType::I64: {
+ MOZ_ASSERT(JitOptions.wasmTestMode, "no int64 in asm.js/wasm");
+ RootedObject obj(cx, CreateI64Object(cx, val.i64()));
+ if (!obj)
+ return false;
+ jsval.set(ObjectValue(*obj));
+ return true;
+ }
+ case ValType::F32: {
+ float f = val.f32().fp();
+ if (JitOptions.wasmTestMode && IsNaN(f)) {
+ uint32_t bits = val.f32().bits();
+ RootedObject obj(cx, CreateCustomNaNObject(cx, (float*)&bits));
+ if (!obj)
+ return false;
+ jsval.set(ObjectValue(*obj));
+ return true;
+ }
+ jsval.set(DoubleValue(double(f)));
+ return true;
+ }
+ case ValType::F64: {
+ double d = val.f64().fp();
+ if (JitOptions.wasmTestMode && IsNaN(d)) {
+ uint64_t bits = val.f64().bits();
+ RootedObject obj(cx, CreateCustomNaNObject(cx, (double*)&bits));
+ if (!obj)
+ return false;
+ jsval.set(ObjectValue(*obj));
+ return true;
+ }
+ jsval.set(DoubleValue(d));
+ return true;
+ }
+ default: {
+ break;
+ }
+ }
+ MOZ_CRASH("unexpected type when creating global exports");
+}
+
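+// CreateExportObject builds the JS object reflecting the module's exports. As
+// a rough illustration, a wasm module exporting a function "add" and a memory
+// "mem" yields a frozen object of the shape { add: <exported function>,
+// mem: <WebAssembly.Memory> }, while an asm.js module with a single export
+// whose field name is empty returns that exported function directly.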
+static bool
+CreateExportObject(JSContext* cx,
+ HandleWasmInstanceObject instanceObj,
+ Handle<FunctionVector> funcImports,
+ HandleWasmTableObject tableObj,
+ HandleWasmMemoryObject memoryObj,
+ const ValVector& globalImports,
+ const ExportVector& exports,
+ MutableHandleObject exportObj)
+{
+ const Instance& instance = instanceObj->instance();
+ const Metadata& metadata = instance.metadata();
+
+ if (metadata.isAsmJS() && exports.length() == 1 && strlen(exports[0].fieldName()) == 0) {
+ RootedValue val(cx);
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exports[0], &val))
+ return false;
+ exportObj.set(&val.toObject());
+ return true;
+ }
+
+ if (metadata.isAsmJS())
+ exportObj.set(NewBuiltinClassInstance<PlainObject>(cx));
+ else
+ exportObj.set(NewObjectWithGivenProto<PlainObject>(cx, nullptr));
+ if (!exportObj)
+ return false;
+
+ for (const Export& exp : exports) {
+ JSAtom* atom = AtomizeUTF8Chars(cx, exp.fieldName(), strlen(exp.fieldName()));
+ if (!atom)
+ return false;
+
+ RootedId id(cx, AtomToId(atom));
+ RootedValue val(cx);
+ switch (exp.kind()) {
+ case DefinitionKind::Function:
+ if (!GetFunctionExport(cx, instanceObj, funcImports, exp, &val))
+ return false;
+ break;
+ case DefinitionKind::Table:
+ val = ObjectValue(*tableObj);
+ break;
+ case DefinitionKind::Memory:
+ val = ObjectValue(*memoryObj);
+ break;
+ case DefinitionKind::Global:
+ if (!GetGlobalExport(cx, metadata.globals, exp.globalIndex(), globalImports, &val))
+ return false;
+ break;
+ }
+
+ if (!JS_DefinePropertyById(cx, exportObj, id, val, JSPROP_ENUMERATE))
+ return false;
+ }
+
+ if (!metadata.isAsmJS()) {
+ if (!JS_FreezeObject(cx, exportObj))
+ return false;
+ }
+
+ return true;
+}
+
+bool
+Module::instantiate(JSContext* cx,
+ Handle<FunctionVector> funcImports,
+ HandleWasmTableObject tableImport,
+ HandleWasmMemoryObject memoryImport,
+ const ValVector& globalImports,
+ HandleObject instanceProto,
+ MutableHandleWasmInstanceObject instance) const
+{
+ if (!instantiateFunctions(cx, funcImports))
+ return false;
+
+ RootedWasmMemoryObject memory(cx, memoryImport);
+ if (!instantiateMemory(cx, &memory))
+ return false;
+
+ RootedWasmTableObject table(cx, tableImport);
+ SharedTableVector tables;
+ if (!instantiateTable(cx, &table, &tables))
+ return false;
+
+ // To support viewing the source of an instance (Instance::createText), the
+ // instance must hold onto a ref of the bytecode (keeping it alive). This
+ // wastes memory for most users, so we try to only save the source when a
+ // developer actually cares: when the compartment is debuggable (which is
+ // true when the web console is open) or a names section is present (since
+    // this is going to be stripped for non-developer builds).
+
+ const ShareableBytes* maybeBytecode = nullptr;
+ if (cx->compartment()->isDebuggee() || !metadata_->funcNames.empty())
+ maybeBytecode = bytecode_.get();
+
+ auto codeSegment = CodeSegment::create(cx, code_, linkData_, *metadata_, memory);
+ if (!codeSegment)
+ return false;
+
+ auto code = cx->make_unique<Code>(Move(codeSegment), *metadata_, maybeBytecode);
+ if (!code)
+ return false;
+
+ instance.set(WasmInstanceObject::create(cx,
+ Move(code),
+ memory,
+ Move(tables),
+ funcImports,
+ globalImports,
+ instanceProto));
+ if (!instance)
+ return false;
+
+ RootedObject exportObj(cx);
+ if (!CreateExportObject(cx, instance, funcImports, table, memory, globalImports, exports_, &exportObj))
+ return false;
+
+ JSAtom* atom = Atomize(cx, InstanceExportField, strlen(InstanceExportField));
+ if (!atom)
+ return false;
+ RootedId id(cx, AtomToId(atom));
+
+ RootedValue val(cx, ObjectValue(*exportObj));
+ if (!JS_DefinePropertyById(cx, instance, id, val, JSPROP_ENUMERATE))
+ return false;
+
+ // Register the instance with the JSCompartment so that it can find out
+ // about global events like profiling being enabled in the compartment.
+ // Registration does not require a fully-initialized instance and must
+ // precede initSegments as the final pre-requisite for a live instance.
+
+ if (!cx->compartment()->wasm.registerInstance(cx, instance))
+ return false;
+
+ // Perform initialization as the final step after the instance is fully
+ // constructed since this can make the instance live to content (even if the
+ // start function fails).
+
+ if (!initSegments(cx, instance, funcImports, memory, globalImports))
+ return false;
+
+    // Now that the instance is fully live and initialized, call the start function.
+ // Note that failure may cause instantiation to throw, but the instance may
+ // still be live via edges created by initSegments or the start function.
+
+ if (metadata_->startFuncIndex) {
+ FixedInvokeArgs<0> args(cx);
+ if (!instance->instance().callExport(cx, *metadata_->startFuncIndex, args))
+ return false;
+ }
+
+ uint32_t mode = uint32_t(metadata().isAsmJS() ? Telemetry::ASMJS : Telemetry::WASM);
+ cx->runtime()->addTelemetry(JS_TELEMETRY_AOT_USAGE, mode);
+
+ return true;
+}
diff --git a/js/src/wasm/WasmModule.h b/js/src/wasm/WasmModule.h
new file mode 100644
index 0000000000..40c920708d
--- /dev/null
+++ b/js/src/wasm/WasmModule.h
@@ -0,0 +1,242 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_module_h
+#define wasm_module_h
+
+#include "js/TypeDecls.h"
+
+#include "wasm/WasmCode.h"
+#include "wasm/WasmTable.h"
+
+namespace js {
+namespace wasm {
+
+// LinkData contains all the metadata necessary to patch all the locations
+// that depend on the absolute address of a CodeSegment.
+//
+// LinkData is built incrementally by ModuleGenerator and then stored immutably
+// in Module.
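+//
+// Informally: each InternalLink records a patchAtOffset within the code and
+// the targetOffset it must refer to; when a CodeSegment is created for a new
+// instance, the bytes at patchAtOffset are rewritten to point at the final
+// address corresponding to targetOffset in that segment.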
+
+struct LinkDataCacheablePod
+{
+ uint32_t functionCodeLength;
+ uint32_t globalDataLength;
+ uint32_t interruptOffset;
+ uint32_t outOfBoundsOffset;
+ uint32_t unalignedAccessOffset;
+
+ LinkDataCacheablePod() { mozilla::PodZero(this); }
+};
+
+struct LinkData : LinkDataCacheablePod
+{
+ LinkDataCacheablePod& pod() { return *this; }
+ const LinkDataCacheablePod& pod() const { return *this; }
+
+ struct InternalLink {
+ enum Kind {
+ RawPointer,
+ CodeLabel,
+ InstructionImmediate
+ };
+ MOZ_INIT_OUTSIDE_CTOR uint32_t patchAtOffset;
+ MOZ_INIT_OUTSIDE_CTOR uint32_t targetOffset;
+
+ InternalLink() = default;
+ explicit InternalLink(Kind kind);
+ bool isRawPointerPatch();
+ };
+ typedef Vector<InternalLink, 0, SystemAllocPolicy> InternalLinkVector;
+
+ struct SymbolicLinkArray : EnumeratedArray<SymbolicAddress, SymbolicAddress::Limit, Uint32Vector> {
+ WASM_DECLARE_SERIALIZABLE(SymbolicLinkArray)
+ };
+
+ InternalLinkVector internalLinks;
+ SymbolicLinkArray symbolicLinks;
+
+ WASM_DECLARE_SERIALIZABLE(LinkData)
+};
+
+typedef UniquePtr<LinkData> UniqueLinkData;
+typedef UniquePtr<const LinkData> UniqueConstLinkData;
+
+// Export describes the export of a definition in a Module to a field in the
+// export object. For functions, Export stores an index into the
+// FuncExportVector in Metadata. For memory and table exports, there is
+// at most one (default) memory/table so no index is needed. Note: a single
+// definition can be exported by multiple Exports in the ExportVector.
+//
+// ExportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+class Export
+{
+ CacheableChars fieldName_;
+ struct CacheablePod {
+ DefinitionKind kind_;
+ uint32_t index_;
+ } pod;
+
+ public:
+ Export() = default;
+ explicit Export(UniqueChars fieldName, uint32_t index, DefinitionKind kind);
+ explicit Export(UniqueChars fieldName, DefinitionKind kind);
+
+ const char* fieldName() const { return fieldName_.get(); }
+
+ DefinitionKind kind() const { return pod.kind_; }
+ uint32_t funcIndex() const;
+ uint32_t globalIndex() const;
+
+ WASM_DECLARE_SERIALIZABLE(Export)
+};
+
+typedef Vector<Export, 0, SystemAllocPolicy> ExportVector;
+
+// ElemSegment represents an element segment in the module where each element
+// describes both its function index and its code range.
+
+struct ElemSegment
+{
+ uint32_t tableIndex;
+ InitExpr offset;
+ Uint32Vector elemFuncIndices;
+ Uint32Vector elemCodeRangeIndices;
+
+ ElemSegment() = default;
+ ElemSegment(uint32_t tableIndex, InitExpr offset, Uint32Vector&& elemFuncIndices)
+ : tableIndex(tableIndex), offset(offset), elemFuncIndices(Move(elemFuncIndices))
+ {}
+
+ WASM_DECLARE_SERIALIZABLE(ElemSegment)
+};
+
+typedef Vector<ElemSegment, 0, SystemAllocPolicy> ElemSegmentVector;
+
+// Module represents a compiled wasm module and primarily provides two
+// operations: instantiation and serialization. A Module can be instantiated any
+// number of times to produce new Instance objects. A Module can be serialized
+// any number of times such that the serialized bytes can be deserialized later
+// to produce a new, equivalent Module.
+//
+// Since fully linked-and-instantiated code (represented by CodeSegment) cannot
+// be shared between instances, Module stores an unlinked, uninstantiated copy
+// of the code (represented by the Bytes) and creates a new CodeSegment each
+// time it is instantiated. In the future, Module will store a shareable,
+// immutable CodeSegment that can be shared by all its instances.
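+//
+// A minimal instantiation sketch (informal; assumes the caller has already
+// gathered and type-checked the imports):
+//
+//   Rooted<FunctionVector> funcImports(cx, FunctionVector(cx));
+//   ValVector globalImports;
+//   RootedObject proto(cx, &cx->global()->getPrototype(JSProto_WasmInstance).toObject());
+//   RootedWasmInstanceObject instanceObj(cx);
+//   if (!module->instantiate(cx, funcImports, /* table */ nullptr,
+//                            /* memory */ nullptr, globalImports, proto,
+//                            &instanceObj))
+//       return false;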
+
+class Module : public JS::WasmModule
+{
+ const Assumptions assumptions_;
+ const Bytes code_;
+ const LinkData linkData_;
+ const ImportVector imports_;
+ const ExportVector exports_;
+ const DataSegmentVector dataSegments_;
+ const ElemSegmentVector elemSegments_;
+ const SharedMetadata metadata_;
+ const SharedBytes bytecode_;
+
+ bool instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports) const;
+ bool instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) const;
+ bool instantiateTable(JSContext* cx,
+ MutableHandleWasmTableObject table,
+ SharedTableVector* tables) const;
+ bool initSegments(JSContext* cx,
+ HandleWasmInstanceObject instance,
+ Handle<FunctionVector> funcImports,
+ HandleWasmMemoryObject memory,
+ const ValVector& globalImports) const;
+
+ public:
+ Module(Assumptions&& assumptions,
+ Bytes&& code,
+ LinkData&& linkData,
+ ImportVector&& imports,
+ ExportVector&& exports,
+ DataSegmentVector&& dataSegments,
+ ElemSegmentVector&& elemSegments,
+ const Metadata& metadata,
+ const ShareableBytes& bytecode)
+ : assumptions_(Move(assumptions)),
+ code_(Move(code)),
+ linkData_(Move(linkData)),
+ imports_(Move(imports)),
+ exports_(Move(exports)),
+ dataSegments_(Move(dataSegments)),
+ elemSegments_(Move(elemSegments)),
+ metadata_(&metadata),
+ bytecode_(&bytecode)
+ {}
+ ~Module() override { /* Note: can be called on any thread */ }
+
+ const Metadata& metadata() const { return *metadata_; }
+ const ImportVector& imports() const { return imports_; }
+ const ExportVector& exports() const { return exports_; }
+
+ // Instantiate this module with the given imports:
+
+ bool instantiate(JSContext* cx,
+ Handle<FunctionVector> funcImports,
+ HandleWasmTableObject tableImport,
+ HandleWasmMemoryObject memoryImport,
+ const ValVector& globalImports,
+ HandleObject instanceProto,
+ MutableHandleWasmInstanceObject instanceObj) const;
+
+ // Structured clone support:
+
+ void serializedSize(size_t* maybeBytecodeSize, size_t* maybeCompiledSize) const override;
+ void serialize(uint8_t* maybeBytecodeBegin, size_t maybeBytecodeSize,
+ uint8_t* maybeCompiledBegin, size_t maybeCompiledSize) const override;
+ static bool assumptionsMatch(const Assumptions& current, const uint8_t* compiledBegin,
+ size_t compiledSize);
+ static RefPtr<Module> deserialize(const uint8_t* bytecodeBegin, size_t bytecodeSize,
+ const uint8_t* compiledBegin, size_t compiledSize,
+ Metadata* maybeMetadata = nullptr);
+ JSObject* createObject(JSContext* cx) override;
+
+ // about:memory reporting:
+
+ void addSizeOfMisc(MallocSizeOf mallocSizeOf,
+ Metadata::SeenSet* seenMetadata,
+ ShareableBytes::SeenSet* seenBytes,
+ size_t* code, size_t* data) const;
+
+ // Generated code analysis support:
+
+ bool extractCode(JSContext* cx, MutableHandleValue vp);
+};
+
+typedef RefPtr<Module> SharedModule;
+
+// JS API implementations:
+
+bool
+CompiledModuleAssumptionsMatch(PRFileDesc* compiled, JS::BuildIdCharVector&& buildId);
+
+SharedModule
+DeserializeModule(PRFileDesc* bytecode, PRFileDesc* maybeCompiled, JS::BuildIdCharVector&& buildId,
+ UniqueChars filename, unsigned line, unsigned column);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_module_h
diff --git a/js/src/wasm/WasmSerialize.h b/js/src/wasm/WasmSerialize.h
new file mode 100644
index 0000000000..79d759b27c
--- /dev/null
+++ b/js/src/wasm/WasmSerialize.h
@@ -0,0 +1,174 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_serialize_h
+#define wasm_serialize_h
+
+#include "js/Vector.h"
+
+namespace js {
+namespace wasm {
+
+// Factor out common serialization, cloning and about:memory size-computation
+// functions for reuse when serializing wasm and asm.js modules.
+
+static inline uint8_t*
+WriteBytes(uint8_t* dst, const void* src, size_t nbytes)
+{
+ memcpy(dst, src, nbytes);
+ return dst + nbytes;
+}
+
+static inline const uint8_t*
+ReadBytes(const uint8_t* src, void* dst, size_t nbytes)
+{
+ memcpy(dst, src, nbytes);
+ return src + nbytes;
+}
+
+static inline const uint8_t*
+ReadBytesChecked(const uint8_t* src, size_t* remain, void* dst, size_t nbytes)
+{
+ if (*remain < nbytes)
+ return nullptr;
+ memcpy(dst, src, nbytes);
+ *remain -= nbytes;
+ return src + nbytes;
+}
+
+template <class T>
+static inline uint8_t*
+WriteScalar(uint8_t* dst, T t)
+{
+ memcpy(dst, &t, sizeof(t));
+ return dst + sizeof(t);
+}
+
+template <class T>
+static inline const uint8_t*
+ReadScalar(const uint8_t* src, T* dst)
+{
+ memcpy(dst, src, sizeof(*dst));
+ return src + sizeof(*dst);
+}
+
+template <class T>
+static inline const uint8_t*
+ReadScalarChecked(const uint8_t* src, size_t* remain, T* dst)
+{
+ if (*remain < sizeof(*dst))
+ return nullptr;
+ memcpy(dst, src, sizeof(*dst));
+ *remain -= sizeof(*dst);
+ return src + sizeof(*dst);
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ size_t size = sizeof(uint32_t);
+ for (size_t i = 0; i < vec.length(); i++)
+ size += vec[i].serializedSize();
+ return size;
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializeVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ for (size_t i = 0; i < vec.length(); i++)
+ cursor = vec[i].serialize(cursor);
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializeVector(const uint8_t* cursor, mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->resize(length))
+ return nullptr;
+ for (size_t i = 0; i < vec->length(); i++) {
+ if (!(cursor = (*vec)[i].deserialize(cursor)))
+ return nullptr;
+ }
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline size_t
+SizeOfVectorExcludingThis(const mozilla::Vector<T, N, SystemAllocPolicy>& vec,
+ MallocSizeOf mallocSizeOf)
+{
+ size_t size = vec.sizeOfExcludingThis(mallocSizeOf);
+ for (const T& t : vec)
+ size += t.sizeOfExcludingThis(mallocSizeOf);
+ return size;
+}
+
+template <class T, size_t N>
+static inline size_t
+SerializedPodVectorSize(const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ return sizeof(uint32_t) +
+ vec.length() * sizeof(T);
+}
+
+template <class T, size_t N>
+static inline uint8_t*
+SerializePodVector(uint8_t* cursor, const mozilla::Vector<T, N, SystemAllocPolicy>& vec)
+{
+ // This binary format must not change without taking into consideration the
+ // constraints in Assumptions::serialize.
+
+ cursor = WriteScalar<uint32_t>(cursor, vec.length());
+ cursor = WriteBytes(cursor, vec.begin(), vec.length() * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializePodVector(const uint8_t* cursor, mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+ uint32_t length;
+ cursor = ReadScalar<uint32_t>(cursor, &length);
+ if (!vec->initLengthUninitialized(length))
+ return nullptr;
+ cursor = ReadBytes(cursor, vec->begin(), length * sizeof(T));
+ return cursor;
+}
+
+template <class T, size_t N>
+static inline const uint8_t*
+DeserializePodVectorChecked(const uint8_t* cursor, size_t* remain, mozilla::Vector<T, N, SystemAllocPolicy>* vec)
+{
+ uint32_t length;
+ cursor = ReadScalarChecked<uint32_t>(cursor, remain, &length);
+ if (!cursor || !vec->initLengthUninitialized(length))
+ return nullptr;
+ cursor = ReadBytesChecked(cursor, remain, vec->begin(), length * sizeof(T));
+ return cursor;
+}
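+
+// Round-trip sketch for the pod-vector helpers above (illustrative only;
+// Bytes/Uint32Vector are the usual Vector<uint8_t/uint32_t, 0,
+// SystemAllocPolicy> typedefs):
+//
+//   Bytes buf;
+//   if (!buf.resize(SerializedPodVectorSize(vec)))
+//       return false;
+//   uint8_t* end = SerializePodVector(buf.begin(), vec);
+//   MOZ_ASSERT(end == buf.end());
+//
+//   Uint32Vector copy;
+//   const uint8_t* cursor = DeserializePodVector(buf.begin(), &copy);
+//   MOZ_ASSERT(cursor == buf.end());
+//
+// The serialized layout is simply a uint32_t length followed by
+// length * sizeof(T) raw element bytes.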
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_serialize_h
diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
new file mode 100644
index 0000000000..78d21369d7
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -0,0 +1,1499 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmSignalHandlers.h"
+
+#include "mozilla/DebugOnly.h"
+#include "mozilla/PodOperations.h"
+
+#include "jit/AtomicOperations.h"
+#include "jit/Disassembler.h"
+#include "vm/Runtime.h"
+#include "wasm/WasmInstance.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using JS::GenericNaN;
+using mozilla::DebugOnly;
+using mozilla::PodArrayZero;
+
+#if defined(ANDROID)
+# include <sys/system_properties.h>
+# if defined(MOZ_LINKER)
+extern "C" MFBT_API bool IsSignalHandlingBroken();
+# endif
+#endif
+
+// For platforms where the signal/exception handler runs on the same
+// thread/stack as the victim (Unix and Windows), we can use TLS to find any
+// currently executing wasm code.
+static JSRuntime*
+RuntimeForCurrentThread()
+{
+ PerThreadData* threadData = TlsPerThreadData.get();
+ if (!threadData)
+ return nullptr;
+
+ return threadData->runtimeIfOnOwnerThread();
+}
+
+// Crashing inside the signal handler can cause the handler to be recursively
+// invoked, eventually blowing the stack without actually showing a crash
+// report dialog via Breakpad. To guard against this we watch for such
+// recursion and fall through to the next handler immediately rather than
+// trying to handle it.
+class AutoSetHandlingSegFault
+{
+ JSRuntime* rt;
+
+ public:
+ explicit AutoSetHandlingSegFault(JSRuntime* rt)
+ : rt(rt)
+ {
+ MOZ_ASSERT(!rt->handlingSegFault);
+ rt->handlingSegFault = true;
+ }
+
+ ~AutoSetHandlingSegFault()
+ {
+ MOZ_ASSERT(rt->handlingSegFault);
+ rt->handlingSegFault = false;
+ }
+};
+
+#if defined(XP_WIN)
+# define XMM_sig(p,i) ((p)->Xmm##i)
+# define EIP_sig(p) ((p)->Eip)
+# define RIP_sig(p) ((p)->Rip)
+# define RAX_sig(p) ((p)->Rax)
+# define RCX_sig(p) ((p)->Rcx)
+# define RDX_sig(p) ((p)->Rdx)
+# define RBX_sig(p) ((p)->Rbx)
+# define RSP_sig(p) ((p)->Rsp)
+# define RBP_sig(p) ((p)->Rbp)
+# define RSI_sig(p) ((p)->Rsi)
+# define RDI_sig(p) ((p)->Rdi)
+# define R8_sig(p) ((p)->R8)
+# define R9_sig(p) ((p)->R9)
+# define R10_sig(p) ((p)->R10)
+# define R11_sig(p) ((p)->R11)
+# define R12_sig(p) ((p)->R12)
+# define R13_sig(p) ((p)->R13)
+# define R14_sig(p) ((p)->R14)
+# define R15_sig(p) ((p)->R15)
+#elif defined(__OpenBSD__)
+# define XMM_sig(p,i) ((p)->sc_fpstate->fx_xmm[i])
+# define EIP_sig(p) ((p)->sc_eip)
+# define RIP_sig(p) ((p)->sc_rip)
+# define RAX_sig(p) ((p)->sc_rax)
+# define RCX_sig(p) ((p)->sc_rcx)
+# define RDX_sig(p) ((p)->sc_rdx)
+# define RBX_sig(p) ((p)->sc_rbx)
+# define RSP_sig(p) ((p)->sc_rsp)
+# define RBP_sig(p) ((p)->sc_rbp)
+# define RSI_sig(p) ((p)->sc_rsi)
+# define RDI_sig(p) ((p)->sc_rdi)
+# define R8_sig(p) ((p)->sc_r8)
+# define R9_sig(p) ((p)->sc_r9)
+# define R10_sig(p) ((p)->sc_r10)
+# define R11_sig(p) ((p)->sc_r11)
+# define R12_sig(p) ((p)->sc_r12)
+# define R13_sig(p) ((p)->sc_r13)
+# define R14_sig(p) ((p)->sc_r14)
+# if defined(__arm__)
+# define R15_sig(p) ((p)->sc_pc)
+# else
+# define R15_sig(p) ((p)->sc_r15)
+# endif
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->sc_elr)
+# define RFP_sig(p) ((p)->sc_x[29])
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->sc_pc)
+# define RFP_sig(p) ((p)->sc_regs[30])
+# endif
+#elif defined(__linux__) || defined(SOLARIS)
+# if defined(__linux__)
+# define XMM_sig(p,i) ((p)->uc_mcontext.fpregs->_xmm[i])
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
+# else
+# define XMM_sig(p,i) ((p)->uc_mcontext.fpregs.fp_reg_set.fpchip_state.xmm[i])
+# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_PC])
+# endif
+# define RIP_sig(p) ((p)->uc_mcontext.gregs[REG_RIP])
+# define RAX_sig(p) ((p)->uc_mcontext.gregs[REG_RAX])
+# define RCX_sig(p) ((p)->uc_mcontext.gregs[REG_RCX])
+# define RDX_sig(p) ((p)->uc_mcontext.gregs[REG_RDX])
+# define RBX_sig(p) ((p)->uc_mcontext.gregs[REG_RBX])
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.gregs[REG_RBP])
+# define RSI_sig(p) ((p)->uc_mcontext.gregs[REG_RSI])
+# define RDI_sig(p) ((p)->uc_mcontext.gregs[REG_RDI])
+# define R8_sig(p) ((p)->uc_mcontext.gregs[REG_R8])
+# define R9_sig(p) ((p)->uc_mcontext.gregs[REG_R9])
+# define R10_sig(p) ((p)->uc_mcontext.gregs[REG_R10])
+# define R11_sig(p) ((p)->uc_mcontext.gregs[REG_R11])
+# define R12_sig(p) ((p)->uc_mcontext.gregs[REG_R12])
+# define R13_sig(p) ((p)->uc_mcontext.gregs[REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.gregs[REG_R14])
+# if defined(__linux__) && defined(__arm__)
+# define R15_sig(p) ((p)->uc_mcontext.arm_pc)
+# else
+# define R15_sig(p) ((p)->uc_mcontext.gregs[REG_R15])
+# endif
+# if defined(__linux__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# endif
+# if defined(__linux__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.pc)
+# define RSP_sig(p) ((p)->uc_mcontext.gregs[29])
+# define RFP_sig(p) ((p)->uc_mcontext.gregs[30])
+# endif
+#elif defined(__NetBSD__)
+# define XMM_sig(p,i) (((struct fxsave64*)(p)->uc_mcontext.__fpregs)->fx_xmm[i])
+# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
+# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
+# define RAX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RAX])
+# define RCX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RCX])
+# define RDX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDX])
+# define RBX_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBX])
+# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
+# define RBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RBP])
+# define RSI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSI])
+# define RDI_sig(p) ((p)->uc_mcontext.__gregs[_REG_RDI])
+# define R8_sig(p) ((p)->uc_mcontext.__gregs[_REG_R8])
+# define R9_sig(p) ((p)->uc_mcontext.__gregs[_REG_R9])
+# define R10_sig(p) ((p)->uc_mcontext.__gregs[_REG_R10])
+# define R11_sig(p) ((p)->uc_mcontext.__gregs[_REG_R11])
+# define R12_sig(p) ((p)->uc_mcontext.__gregs[_REG_R12])
+# define R13_sig(p) ((p)->uc_mcontext.__gregs[_REG_R13])
+# define R14_sig(p) ((p)->uc_mcontext.__gregs[_REG_R14])
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# if defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_X29])
+# endif
+# if defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
+# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
+# endif
+#elif defined(__DragonFly__) || defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# if defined(__DragonFly__)
+# define XMM_sig(p,i) (((union savefpu*)(p)->uc_mcontext.mc_fpregs)->sv_xmm.sv_xmm[i])
+# else
+# define XMM_sig(p,i) (((struct savefpu*)(p)->uc_mcontext.mc_fpstate)->sv_xmm[i])
+# endif
+# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
+# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
+# define RAX_sig(p) ((p)->uc_mcontext.mc_rax)
+# define RCX_sig(p) ((p)->uc_mcontext.mc_rcx)
+# define RDX_sig(p) ((p)->uc_mcontext.mc_rdx)
+# define RBX_sig(p) ((p)->uc_mcontext.mc_rbx)
+# define RSP_sig(p) ((p)->uc_mcontext.mc_rsp)
+# define RBP_sig(p) ((p)->uc_mcontext.mc_rbp)
+# define RSI_sig(p) ((p)->uc_mcontext.mc_rsi)
+# define RDI_sig(p) ((p)->uc_mcontext.mc_rdi)
+# define R8_sig(p) ((p)->uc_mcontext.mc_r8)
+# define R9_sig(p) ((p)->uc_mcontext.mc_r9)
+# define R10_sig(p) ((p)->uc_mcontext.mc_r10)
+# define R11_sig(p) ((p)->uc_mcontext.mc_r11)
+# define R12_sig(p) ((p)->uc_mcontext.mc_r12)
+# define R13_sig(p) ((p)->uc_mcontext.mc_r13)
+# define R14_sig(p) ((p)->uc_mcontext.mc_r14)
+# if defined(__FreeBSD__) && defined(__arm__)
+# define R15_sig(p) ((p)->uc_mcontext.__gregs[_REG_R15])
+# else
+# define R15_sig(p) ((p)->uc_mcontext.mc_r15)
+# endif
+# if defined(__FreeBSD__) && defined(__aarch64__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_elr)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_gpregs.gp_x[29])
+# endif
+# if defined(__FreeBSD__) && defined(__mips__)
+# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
+# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
+# endif
+#elif defined(XP_DARWIN)
+# define EIP_sig(p) ((p)->uc_mcontext->__ss.__eip)
+# define RIP_sig(p) ((p)->uc_mcontext->__ss.__rip)
+# define R15_sig(p) ((p)->uc_mcontext->__ss.__pc)
+#else
+# error "Don't know how to read/write to the thread state via the mcontext_t."
+#endif
+
+#if defined(XP_WIN)
+# include "jswin.h"
+#else
+# include <signal.h>
+# include <sys/mman.h>
+#endif
+
+#if defined(__FreeBSD__) || defined(__FreeBSD_kernel__)
+# include <sys/ucontext.h> // for ucontext_t, mcontext_t
+#endif
+
+#if defined(JS_CPU_X64)
+# if defined(__DragonFly__)
+# include <machine/npx.h> // for union savefpu
+# elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__)
+# include <machine/fpu.h> // for struct savefpu/fxsave64
+# endif
+#endif
+
+#if defined(ANDROID)
+// Not all versions of the Android NDK define ucontext_t or mcontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See: https://chromiumcodereview.appspot.com/10829122/
+// See: http://code.google.com/p/android/issues/detail?id=34784
+# if !defined(__BIONIC_HAVE_UCONTEXT_T)
+# if defined(__arm__)
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+# if !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+# include <asm/sigcontext.h>
+# endif
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__mips__)
+
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used so don't define them here.
+} ucontext_t;
+
+# elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+    // Other fields are not used so don't define them here.
+} ucontext_t;
+enum { REG_EIP = 14 };
+# endif // defined(__i386__)
+# endif // !defined(__BIONIC_HAVE_UCONTEXT_T)
+#endif // defined(ANDROID)
+
+#if !defined(XP_WIN)
+# define CONTEXT ucontext_t
+#endif
+
+// Define a context type for use in the emulator code. This is usually just
+// the same as CONTEXT, but on Mac we use a different structure since we call
+// into the emulator code from a Mach exception handler rather than a
+// sigaction-style signal handler.
+#if defined(XP_DARWIN)
+# if defined(JS_CPU_X64)
+struct macos_x64_context {
+ x86_thread_state64_t thread;
+ x86_float_state64_t float_;
+};
+# define EMULATOR_CONTEXT macos_x64_context
+# elif defined(JS_CPU_X86)
+struct macos_x86_context {
+ x86_thread_state_t thread;
+ x86_float_state_t float_;
+};
+# define EMULATOR_CONTEXT macos_x86_context
+# elif defined(JS_CPU_ARM)
+struct macos_arm_context {
+ arm_thread_state_t thread;
+ arm_neon_state_t float_;
+};
+# define EMULATOR_CONTEXT macos_arm_context
+# else
+# error Unsupported architecture
+# endif
+#else
+# define EMULATOR_CONTEXT CONTEXT
+#endif
+
+#if defined(JS_CPU_X64)
+# define PC_sig(p) RIP_sig(p)
+#elif defined(JS_CPU_X86)
+# define PC_sig(p) EIP_sig(p)
+#elif defined(JS_CPU_ARM)
+# define PC_sig(p) R15_sig(p)
+#elif defined(__aarch64__)
+# define PC_sig(p) EPC_sig(p)
+#elif defined(JS_CPU_MIPS)
+# define PC_sig(p) EPC_sig(p)
+#endif
+
+static uint8_t**
+ContextToPC(CONTEXT* context)
+{
+#ifdef JS_CODEGEN_NONE
+ MOZ_CRASH();
+#else
+ return reinterpret_cast<uint8_t**>(&PC_sig(context));
+#endif
+}
+
+#if defined(WASM_HUGE_MEMORY)
+MOZ_COLD static void
+SetFPRegToNaN(size_t size, void* fp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
+ memset(fp_reg, 0, Simd128DataSize);
+ switch (size) {
+ case 4: *static_cast<float*>(fp_reg) = GenericNaN(); break;
+ case 8: *static_cast<double*>(fp_reg) = GenericNaN(); break;
+ default:
+ // All SIMD accesses throw on OOB.
+ MOZ_CRASH("unexpected size in SetFPRegToNaN");
+ }
+}
+
+MOZ_COLD static void
+SetGPRegToZero(void* gp_reg)
+{
+ memset(gp_reg, 0, sizeof(intptr_t));
+}
+
+MOZ_COLD static void
+SetFPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* fp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
+ memset(fp_reg, 0, Simd128DataSize);
+ AtomicOperations::memcpySafeWhenRacy(fp_reg, addr, size);
+}
+
+MOZ_COLD static void
+SetGPRegToLoadedValue(SharedMem<void*> addr, size_t size, void* gp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= sizeof(void*));
+ memset(gp_reg, 0, sizeof(void*));
+ AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
+}
+
+MOZ_COLD static void
+SetGPRegToLoadedValueSext32(SharedMem<void*> addr, size_t size, void* gp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= sizeof(int32_t));
+ int8_t msb = AtomicOperations::loadSafeWhenRacy(addr.cast<uint8_t*>() + (size - 1));
+ memset(gp_reg, 0, sizeof(void*));
+ memset(gp_reg, msb >> 7, sizeof(int32_t));
+ AtomicOperations::memcpySafeWhenRacy(gp_reg, addr, size);
+}
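+
+// For example (illustrative): a 1-byte load of 0x80 through the function above
+// leaves the low 32 bits of the register as 0xFFFFFF80 (sign-extended from the
+// loaded byte) and the upper 32 bits as zero.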
+
+MOZ_COLD static void
+StoreValueFromFPReg(SharedMem<void*> addr, size_t size, const void* fp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= Simd128DataSize);
+ AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(fp_reg), size);
+}
+
+MOZ_COLD static void
+StoreValueFromGPReg(SharedMem<void*> addr, size_t size, const void* gp_reg)
+{
+ MOZ_RELEASE_ASSERT(size <= sizeof(void*));
+ AtomicOperations::memcpySafeWhenRacy(addr, const_cast<void*>(gp_reg), size);
+}
+
+MOZ_COLD static void
+StoreValueFromGPImm(SharedMem<void*> addr, size_t size, int32_t imm)
+{
+ MOZ_RELEASE_ASSERT(size <= sizeof(imm));
+ AtomicOperations::memcpySafeWhenRacy(addr, static_cast<void*>(&imm), size);
+}
+
+# if !defined(XP_DARWIN)
+MOZ_COLD static void*
+AddressOfFPRegisterSlot(CONTEXT* context, FloatRegisters::Encoding encoding)
+{
+ switch (encoding) {
+ case X86Encoding::xmm0: return &XMM_sig(context, 0);
+ case X86Encoding::xmm1: return &XMM_sig(context, 1);
+ case X86Encoding::xmm2: return &XMM_sig(context, 2);
+ case X86Encoding::xmm3: return &XMM_sig(context, 3);
+ case X86Encoding::xmm4: return &XMM_sig(context, 4);
+ case X86Encoding::xmm5: return &XMM_sig(context, 5);
+ case X86Encoding::xmm6: return &XMM_sig(context, 6);
+ case X86Encoding::xmm7: return &XMM_sig(context, 7);
+ case X86Encoding::xmm8: return &XMM_sig(context, 8);
+ case X86Encoding::xmm9: return &XMM_sig(context, 9);
+ case X86Encoding::xmm10: return &XMM_sig(context, 10);
+ case X86Encoding::xmm11: return &XMM_sig(context, 11);
+ case X86Encoding::xmm12: return &XMM_sig(context, 12);
+ case X86Encoding::xmm13: return &XMM_sig(context, 13);
+ case X86Encoding::xmm14: return &XMM_sig(context, 14);
+ case X86Encoding::xmm15: return &XMM_sig(context, 15);
+ default: break;
+ }
+ MOZ_CRASH();
+}
+
+MOZ_COLD static void*
+AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
+{
+ switch (code) {
+ case X86Encoding::rax: return &RAX_sig(context);
+ case X86Encoding::rcx: return &RCX_sig(context);
+ case X86Encoding::rdx: return &RDX_sig(context);
+ case X86Encoding::rbx: return &RBX_sig(context);
+ case X86Encoding::rsp: return &RSP_sig(context);
+ case X86Encoding::rbp: return &RBP_sig(context);
+ case X86Encoding::rsi: return &RSI_sig(context);
+ case X86Encoding::rdi: return &RDI_sig(context);
+ case X86Encoding::r8: return &R8_sig(context);
+ case X86Encoding::r9: return &R9_sig(context);
+ case X86Encoding::r10: return &R10_sig(context);
+ case X86Encoding::r11: return &R11_sig(context);
+ case X86Encoding::r12: return &R12_sig(context);
+ case X86Encoding::r13: return &R13_sig(context);
+ case X86Encoding::r14: return &R14_sig(context);
+ case X86Encoding::r15: return &R15_sig(context);
+ default: break;
+ }
+ MOZ_CRASH();
+}
+# else
+MOZ_COLD static void*
+AddressOfFPRegisterSlot(EMULATOR_CONTEXT* context, FloatRegisters::Encoding encoding)
+{
+ switch (encoding) {
+ case X86Encoding::xmm0: return &context->float_.__fpu_xmm0;
+ case X86Encoding::xmm1: return &context->float_.__fpu_xmm1;
+ case X86Encoding::xmm2: return &context->float_.__fpu_xmm2;
+ case X86Encoding::xmm3: return &context->float_.__fpu_xmm3;
+ case X86Encoding::xmm4: return &context->float_.__fpu_xmm4;
+ case X86Encoding::xmm5: return &context->float_.__fpu_xmm5;
+ case X86Encoding::xmm6: return &context->float_.__fpu_xmm6;
+ case X86Encoding::xmm7: return &context->float_.__fpu_xmm7;
+ case X86Encoding::xmm8: return &context->float_.__fpu_xmm8;
+ case X86Encoding::xmm9: return &context->float_.__fpu_xmm9;
+ case X86Encoding::xmm10: return &context->float_.__fpu_xmm10;
+ case X86Encoding::xmm11: return &context->float_.__fpu_xmm11;
+ case X86Encoding::xmm12: return &context->float_.__fpu_xmm12;
+ case X86Encoding::xmm13: return &context->float_.__fpu_xmm13;
+ case X86Encoding::xmm14: return &context->float_.__fpu_xmm14;
+ case X86Encoding::xmm15: return &context->float_.__fpu_xmm15;
+ default: break;
+ }
+ MOZ_CRASH();
+}
+
+MOZ_COLD static void*
+AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
+{
+ switch (code) {
+ case X86Encoding::rax: return &context->thread.__rax;
+ case X86Encoding::rcx: return &context->thread.__rcx;
+ case X86Encoding::rdx: return &context->thread.__rdx;
+ case X86Encoding::rbx: return &context->thread.__rbx;
+ case X86Encoding::rsp: return &context->thread.__rsp;
+ case X86Encoding::rbp: return &context->thread.__rbp;
+ case X86Encoding::rsi: return &context->thread.__rsi;
+ case X86Encoding::rdi: return &context->thread.__rdi;
+ case X86Encoding::r8: return &context->thread.__r8;
+ case X86Encoding::r9: return &context->thread.__r9;
+ case X86Encoding::r10: return &context->thread.__r10;
+ case X86Encoding::r11: return &context->thread.__r11;
+ case X86Encoding::r12: return &context->thread.__r12;
+ case X86Encoding::r13: return &context->thread.__r13;
+ case X86Encoding::r14: return &context->thread.__r14;
+ case X86Encoding::r15: return &context->thread.__r15;
+ default: break;
+ }
+ MOZ_CRASH();
+}
+# endif // !XP_DARWIN
+
+MOZ_COLD static void
+SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
+ const Disassembler::OtherOperand& value)
+{
+ if (value.kind() == Disassembler::OtherOperand::FPR)
+ SetFPRegToNaN(size, AddressOfFPRegisterSlot(context, value.fpr()));
+ else
+ SetGPRegToZero(AddressOfGPRegisterSlot(context, value.gpr()));
+}
+
+MOZ_COLD static void
+SetRegisterToLoadedValue(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
+ const Disassembler::OtherOperand& value)
+{
+ if (value.kind() == Disassembler::OtherOperand::FPR)
+ SetFPRegToLoadedValue(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
+ else
+ SetGPRegToLoadedValue(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
+}
+
+MOZ_COLD static void
+SetRegisterToLoadedValueSext32(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
+ const Disassembler::OtherOperand& value)
+{
+ SetGPRegToLoadedValueSext32(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
+}
+
+MOZ_COLD static void
+StoreValueFromRegister(EMULATOR_CONTEXT* context, SharedMem<void*> addr, size_t size,
+ const Disassembler::OtherOperand& value)
+{
+ if (value.kind() == Disassembler::OtherOperand::FPR)
+ StoreValueFromFPReg(addr, size, AddressOfFPRegisterSlot(context, value.fpr()));
+ else if (value.kind() == Disassembler::OtherOperand::GPR)
+ StoreValueFromGPReg(addr, size, AddressOfGPRegisterSlot(context, value.gpr()));
+ else
+ StoreValueFromGPImm(addr, size, value.imm());
+}
+
+MOZ_COLD static uint8_t*
+ComputeAccessAddress(EMULATOR_CONTEXT* context, const Disassembler::ComplexAddress& address)
+{
+ MOZ_RELEASE_ASSERT(!address.isPCRelative(), "PC-relative addresses not supported yet");
+
+ uintptr_t result = address.disp();
+
+ if (address.hasBase()) {
+ uintptr_t base;
+ StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
+ AddressOfGPRegisterSlot(context, address.base()));
+ result += base;
+ }
+
+ if (address.hasIndex()) {
+ uintptr_t index;
+ StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
+ AddressOfGPRegisterSlot(context, address.index()));
+ MOZ_ASSERT(address.scale() < 32, "address shift overflow");
+ result += index * (uintptr_t(1) << address.scale());
+ }
+
+ return reinterpret_cast<uint8_t*>(result);
+}
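+
+// Concrete (illustrative) example of the computation above: for an access like
+// [base + index*8 + 0x10] with base = 0x1000 and index = 0x2, the effective
+// address is 0x10 + 0x1000 + (0x2 << 3) = 0x1020.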
+
+MOZ_COLD static void
+HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
+ const Instance& instance, uint8_t** ppc)
+{
+ MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
+
+ const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
+ if (!memoryAccess) {
+ // If there is no associated MemoryAccess for the faulting PC, this must be
+ // experimental SIMD.js or Atomics. When these are converted to
+ // non-experimental wasm features, this case, as well as outOfBoundsCode,
+ // can be removed.
+ *ppc = instance.codeSegment().outOfBoundsCode();
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase()));
+
+ // On WASM_HUGE_MEMORY platforms, asm.js code may fault. asm.js does not
+ // trap on fault and so has no trap out-of-line path. Instead, stores are
+ // silently ignored (by advancing the pc past the store and resuming) and
+ // loads silently succeed with a JS-semantics-determined value.
+
+ if (memoryAccess->hasTrapOutOfLineCode()) {
+ *ppc = memoryAccess->trapOutOfLineCode(instance.codeBase());
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(instance.isAsmJS());
+
+ // Disassemble the instruction which caused the trap so that we can extract
+ // information about it and decide what to do.
+ Disassembler::HeapAccess access;
+ uint8_t* end = Disassembler::DisassembleHeapAccess(pc, &access);
+ const Disassembler::ComplexAddress& address = access.address();
+ MOZ_RELEASE_ASSERT(end > pc);
+ MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(end));
+
+ // Check x64 asm.js heap access invariants.
+ MOZ_RELEASE_ASSERT(address.disp() >= 0);
+ MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
+ MOZ_RELEASE_ASSERT(!address.hasIndex() || address.index() != HeapReg.code());
+ MOZ_RELEASE_ASSERT(address.scale() == 0);
+ if (address.hasBase()) {
+ uintptr_t base;
+ StoreValueFromGPReg(SharedMem<void*>::unshared(&base), sizeof(uintptr_t),
+ AddressOfGPRegisterSlot(context, address.base()));
+ MOZ_RELEASE_ASSERT(reinterpret_cast<uint8_t*>(base) == instance.memoryBase());
+ }
+ if (address.hasIndex()) {
+ uintptr_t index;
+ StoreValueFromGPReg(SharedMem<void*>::unshared(&index), sizeof(uintptr_t),
+ AddressOfGPRegisterSlot(context, address.index()));
+ MOZ_RELEASE_ASSERT(uint32_t(index) == index);
+ }
+
+ // Determine the actual effective address of the faulting access. We can't
+ // rely on the faultingAddress given to us by the OS, because we need the
+ // address of the start of the access, and the OS may sometimes give us an
+ // address somewhere in the middle of the heap access.
+ uint8_t* accessAddress = ComputeAccessAddress(context, address);
+ MOZ_RELEASE_ASSERT(size_t(faultingAddress - accessAddress) < access.size(),
+ "Given faulting address does not appear to be within computed "
+ "faulting address range");
+ MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
+ "Access begins outside the asm.js heap");
+ MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
+ instance.memoryMappedSize(),
+ "Access extends beyond the asm.js heap guard region");
+ MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
+ instance.memoryLength(),
+ "Computed access address is not actually out of bounds");
+
+ // The basic sandbox model is that all heap accesses are a heap base
+ // register plus an index, and the index is always computed with 32-bit
+ // operations, so we know it can only be 4 GiB off of the heap base.
+ //
+ // However, we wish to support the optimization of folding immediates
+ // and scaled indices into addresses, and any address arithmetic we fold
+ // gets done at full pointer width, so it doesn't get properly wrapped.
+ // We support this by extending HugeMappedSize to the greatest size that
+ // could be reached by such an unwrapped address, and then when we arrive
+ // here in the signal handler for such an access, we compute the fully
+ // wrapped address, and perform the load or store on it.
+ //
+ // Taking a signal is really slow, but in theory programs really shouldn't
+ // be hitting this anyway.
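+    //
+    // For example (illustrative): if folding pushed the computed address to
+    // memoryBase + 0x100000010, then unwrappedOffset is 0x100000010 and
+    // wrappedOffset is uint32_t(0x100000010) = 0x10, which may well be in
+    // bounds and is then used to complete the access below.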
+ intptr_t unwrappedOffset = accessAddress - instance.memoryBase().unwrap(/* for value */);
+ uint32_t wrappedOffset = uint32_t(unwrappedOffset);
+ size_t size = access.size();
+ MOZ_RELEASE_ASSERT(wrappedOffset + size > wrappedOffset);
+ bool inBounds = wrappedOffset + size < instance.memoryLength();
+
+ if (inBounds) {
+ // We now know that this is an access that is actually in bounds when
+ // properly wrapped. Complete the load or store with the wrapped
+ // address.
+ SharedMem<uint8_t*> wrappedAddress = instance.memoryBase() + wrappedOffset;
+ MOZ_RELEASE_ASSERT(wrappedAddress >= instance.memoryBase());
+ MOZ_RELEASE_ASSERT(wrappedAddress + size > wrappedAddress);
+ MOZ_RELEASE_ASSERT(wrappedAddress + size <= instance.memoryBase() + instance.memoryLength());
+ switch (access.kind()) {
+ case Disassembler::HeapAccess::Load:
+ SetRegisterToLoadedValue(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
+ break;
+ case Disassembler::HeapAccess::LoadSext32:
+ SetRegisterToLoadedValueSext32(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
+ break;
+ case Disassembler::HeapAccess::Store:
+ StoreValueFromRegister(context, wrappedAddress.cast<void*>(), size, access.otherOperand());
+ break;
+ case Disassembler::HeapAccess::LoadSext64:
+ MOZ_CRASH("no int64 accesses in asm.js");
+ case Disassembler::HeapAccess::Unknown:
+ MOZ_CRASH("Failed to disassemble instruction");
+ }
+ } else {
+ // We now know that this is an out-of-bounds access made by an asm.js
+ // load/store that we should handle.
+ switch (access.kind()) {
+ case Disassembler::HeapAccess::Load:
+ case Disassembler::HeapAccess::LoadSext32:
+ // Assign the JS-defined result value to the destination register
+ // (ToInt32(undefined) or ToNumber(undefined), determined by the
+ // type of the destination register). Very conveniently, we can
+ // infer the type from the register class, since all SIMD accesses
+ // throw on out of bounds (see above), so the only types using FP
+ // registers are float32 and double.
+ SetRegisterToCoercedUndefined(context, access.size(), access.otherOperand());
+ break;
+ case Disassembler::HeapAccess::Store:
+ // Do nothing.
+ break;
+ case Disassembler::HeapAccess::LoadSext64:
+ MOZ_CRASH("no int64 accesses in asm.js");
+ case Disassembler::HeapAccess::Unknown:
+ MOZ_CRASH("Failed to disassemble instruction");
+ }
+ }
+
+ *ppc = end;
+}
+
+#else // WASM_HUGE_MEMORY
+
+MOZ_COLD static void
+HandleMemoryAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
+ const Instance& instance, uint8_t** ppc)
+{
+ MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
+
+ const MemoryAccess* memoryAccess = instance.code().lookupMemoryAccess(pc);
+ if (!memoryAccess) {
+ // See explanation in the WASM_HUGE_MEMORY HandleMemoryAccess.
+ *ppc = instance.codeSegment().outOfBoundsCode();
+ return;
+ }
+
+ MOZ_RELEASE_ASSERT(memoryAccess->hasTrapOutOfLineCode());
+ *ppc = memoryAccess->trapOutOfLineCode(instance.codeBase());
+}
+
+#endif // WASM_HUGE_MEMORY
+
+MOZ_COLD static bool
+IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
+{
+ size_t accessLimit = instance.memoryMappedSize();
+
+ return instance.metadata().usesMemory() &&
+ faultingAddress >= instance.memoryBase() &&
+ faultingAddress < instance.memoryBase() + accessLimit;
+}
+
+#if defined(XP_WIN)
+
+static bool
+HandleFault(PEXCEPTION_POINTERS exception)
+{
+ EXCEPTION_RECORD* record = exception->ExceptionRecord;
+ CONTEXT* context = exception->ContextRecord;
+
+ if (record->ExceptionCode != EXCEPTION_ACCESS_VIOLATION)
+ return false;
+
+ uint8_t** ppc = ContextToPC(context);
+ uint8_t* pc = *ppc;
+
+ if (record->NumberParameters < 2)
+ return false;
+
+ // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
+ JSRuntime* rt = RuntimeForCurrentThread();
+ if (!rt || rt->handlingSegFault)
+ return false;
+ AutoSetHandlingSegFault handling(rt);
+
+ WasmActivation* activation = rt->wasmActivationStack();
+ if (!activation)
+ return false;
+
+ const Instance* instance = activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (!instance)
+ return false;
+
+ uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(record->ExceptionInformation[1]);
+
+ // This check isn't necessary, but, since we can, check anyway to make
+ // sure we aren't covering up a real bug.
+ if (!IsHeapAccessAddress(*instance, faultingAddress))
+ return false;
+
+ if (!instance->codeSegment().containsFunctionPC(pc)) {
+ // On Windows, it is possible for InterruptRunningJitCode to execute
+ // between a faulting heap access and the handling of the fault due
+ // to InterruptRunningJitCode's use of SuspendThread. When this happens,
+ // after ResumeThread, the exception handler is called with pc equal to
+ // instance.interrupt, which is logically wrong. The Right Thing would
+ // be for the OS to make fault-handling atomic (so that CONTEXT.pc was
+ // always the logically-faulting pc). Fortunately, we can detect this
+ // case and silence the exception ourselves (the exception will
+ // retrigger after the interrupt jumps back to resumePC).
+ return pc == instance->codeSegment().interruptCode() &&
+ instance->codeSegment().containsFunctionPC(activation->resumePC());
+ }
+
+ HandleMemoryAccess(context, pc, faultingAddress, *instance, ppc);
+ return true;
+}
+
+static LONG WINAPI
+WasmFaultHandler(LPEXCEPTION_POINTERS exception)
+{
+ if (HandleFault(exception))
+ return EXCEPTION_CONTINUE_EXECUTION;
+
+ // No need to worry about calling other handlers, the OS does this for us.
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+#elif defined(XP_DARWIN)
+# include <mach/exc.h>
+
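+// Return the address of the instruction-pointer slot in the saved Mach thread
+// state, so the handler can both read and rewrite the faulting pc.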
+static uint8_t**
+ContextToPC(EMULATOR_CONTEXT* context)
+{
+# if defined(JS_CPU_X64)
+ static_assert(sizeof(context->thread.__rip) == sizeof(void*),
+ "stored IP should be compile-time pointer-sized");
+ return reinterpret_cast<uint8_t**>(&context->thread.__rip);
+# elif defined(JS_CPU_X86)
+ static_assert(sizeof(context->thread.uts.ts32.__eip) == sizeof(void*),
+ "stored IP should be compile-time pointer-sized");
+ return reinterpret_cast<uint8_t**>(&context->thread.uts.ts32.__eip);
+# elif defined(JS_CPU_ARM)
+ static_assert(sizeof(context->thread.__pc) == sizeof(void*),
+ "stored IP should be compile-time pointer-sized");
+ return reinterpret_cast<uint8_t**>(&context->thread.__pc);
+# else
+# error Unsupported architecture
+# endif
+}
+
+// This definition was generated by mig (the Mach Interface Generator) for the
+// routine 'exception_raise' (exc.defs).
+#pragma pack(4)
+typedef struct {
+ mach_msg_header_t Head;
+ /* start of the kernel processed data */
+ mach_msg_body_t msgh_body;
+ mach_msg_port_descriptor_t thread;
+ mach_msg_port_descriptor_t task;
+ /* end of the kernel processed data */
+ NDR_record_t NDR;
+ exception_type_t exception;
+ mach_msg_type_number_t codeCnt;
+ int64_t code[2];
+} Request__mach_exception_raise_t;
+#pragma pack()
+
+// The full Mach message also includes a trailer.
+struct ExceptionRequest
+{
+ Request__mach_exception_raise_t body;
+ mach_msg_trailer_t trailer;
+};
+
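+// Handle an EXC_BAD_ACCESS raised on the runtime's main thread: read that
+// thread's register state, redirect the pc if the fault was a wasm heap
+// access, and write the updated state back before the thread resumes.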
+static bool
+HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
+{
+ // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
+ if (rt->handlingSegFault)
+ return false;
+ AutoSetHandlingSegFault handling(rt);
+
+ // Get the port of the JSRuntime's thread from the message.
+ mach_port_t rtThread = request.body.thread.name;
+
+ // Read out the JSRuntime thread's register state.
+ EMULATOR_CONTEXT context;
+# if defined(JS_CPU_X64)
+ unsigned int thread_state_count = x86_THREAD_STATE64_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE64_COUNT;
+ int thread_state = x86_THREAD_STATE64;
+ int float_state = x86_FLOAT_STATE64;
+# elif defined(JS_CPU_X86)
+ unsigned int thread_state_count = x86_THREAD_STATE_COUNT;
+ unsigned int float_state_count = x86_FLOAT_STATE_COUNT;
+ int thread_state = x86_THREAD_STATE;
+ int float_state = x86_FLOAT_STATE;
+# elif defined(JS_CPU_ARM)
+ unsigned int thread_state_count = ARM_THREAD_STATE_COUNT;
+ unsigned int float_state_count = ARM_NEON_STATE_COUNT;
+ int thread_state = ARM_THREAD_STATE;
+ int float_state = ARM_NEON_STATE;
+# else
+# error Unsupported architecture
+# endif
+ kern_return_t kret;
+ kret = thread_get_state(rtThread, thread_state,
+ (thread_state_t)&context.thread, &thread_state_count);
+ if (kret != KERN_SUCCESS)
+ return false;
+ kret = thread_get_state(rtThread, float_state,
+ (thread_state_t)&context.float_, &float_state_count);
+ if (kret != KERN_SUCCESS)
+ return false;
+
+ uint8_t** ppc = ContextToPC(&context);
+ uint8_t* pc = *ppc;
+
+ if (request.body.exception != EXC_BAD_ACCESS || request.body.codeCnt != 2)
+ return false;
+
+ WasmActivation* activation = rt->wasmActivationStack();
+ if (!activation)
+ return false;
+
+ const Instance* instance = activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (!instance || !instance->codeSegment().containsFunctionPC(pc))
+ return false;
+
+ uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(request.body.code[1]);
+
+ // This check isn't necessary, but, since we can, check anyway to make
+ // sure we aren't covering up a real bug.
+ if (!IsHeapAccessAddress(*instance, faultingAddress))
+ return false;
+
+ HandleMemoryAccess(&context, pc, faultingAddress, *instance, ppc);
+
+ // Update the thread state with the new pc and register values.
+ kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
+ if (kret != KERN_SUCCESS)
+ return false;
+ kret = thread_set_state(rtThread, thread_state, (thread_state_t)&context.thread, thread_state_count);
+ if (kret != KERN_SUCCESS)
+ return false;
+
+ return true;
+}
+
+// Taken from mach_exc in /usr/include/mach/mach_exc.defs.
+static const mach_msg_id_t sExceptionId = 2405;
+
+// The choice of id here is arbitrary, the only constraint is that sQuitId != sExceptionId.
+static const mach_msg_id_t sQuitId = 42;
+
+static void
+MachExceptionHandlerThread(JSRuntime* rt)
+{
+ mach_port_t port = rt->wasmMachExceptionHandler.port();
+ kern_return_t kret;
+
+ while(true) {
+ ExceptionRequest request;
+ kret = mach_msg(&request.body.Head, MACH_RCV_MSG, 0, sizeof(request),
+ port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+
+ // If we fail even to receive the message, we can't send a reply!
+ // Rather than hanging the faulting thread (hanging the browser), crash.
+ if (kret != KERN_SUCCESS) {
+ fprintf(stderr, "MachExceptionHandlerThread: mach_msg failed with %d\n", (int)kret);
+ MOZ_CRASH();
+ }
+
+ // There are only two messages we should be receiving: an exception
+ // message that occurs when the runtime's thread faults and the quit
+ // message sent when the runtime is shutting down.
+ if (request.body.Head.msgh_id == sQuitId)
+ break;
+ if (request.body.Head.msgh_id != sExceptionId) {
+ fprintf(stderr, "Unexpected msg header id %d\n", (int)request.body.Head.msgh_bits);
+ MOZ_CRASH();
+ }
+
+ // Some thread just committed an EXC_BAD_ACCESS and has been suspended by
+ // the kernel. The kernel is waiting for us to reply with instructions.
+ // Our default is the "not handled" reply (by setting the RetCode field
+ // of the reply to KERN_FAILURE) which tells the kernel to continue
+ // searching at the process and system level. If this is an exception we
+ // expect from asm.js, we handle it and return KERN_SUCCESS.
+ bool handled = HandleMachException(rt, request);
+ kern_return_t replyCode = handled ? KERN_SUCCESS : KERN_FAILURE;
+
+ // This magic incantation to send a reply back to the kernel was derived
+ // from the exc_server generated by 'mig -v /usr/include/mach/mach_exc.defs'.
+ __Reply__exception_raise_t reply;
+ reply.Head.msgh_bits = MACH_MSGH_BITS(MACH_MSGH_BITS_REMOTE(request.body.Head.msgh_bits), 0);
+ reply.Head.msgh_size = sizeof(reply);
+ reply.Head.msgh_remote_port = request.body.Head.msgh_remote_port;
+ reply.Head.msgh_local_port = MACH_PORT_NULL;
+ reply.Head.msgh_id = request.body.Head.msgh_id + 100;
+ reply.NDR = NDR_record;
+ reply.RetCode = replyCode;
+ mach_msg(&reply.Head, MACH_SEND_MSG, sizeof(reply), 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ }
+}
+
+MachExceptionHandler::MachExceptionHandler()
+ : installed_(false),
+ thread_(),
+ port_(MACH_PORT_NULL)
+{}
+
+void
+MachExceptionHandler::uninstall()
+{
+ if (installed_) {
+ thread_port_t thread = mach_thread_self();
+ kern_return_t kret = thread_set_exception_ports(thread,
+ EXC_MASK_BAD_ACCESS,
+ MACH_PORT_NULL,
+ EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
+ THREAD_STATE_NONE);
+ mach_port_deallocate(mach_task_self(), thread);
+ if (kret != KERN_SUCCESS)
+ MOZ_CRASH();
+ installed_ = false;
+ }
+ if (thread_.joinable()) {
+ // Break the handler thread out of the mach_msg loop.
+ mach_msg_header_t msg;
+ msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
+ msg.msgh_size = sizeof(msg);
+ msg.msgh_remote_port = port_;
+ msg.msgh_local_port = MACH_PORT_NULL;
+ msg.msgh_reserved = 0;
+ msg.msgh_id = sQuitId;
+ kern_return_t kret = mach_msg(&msg, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
+ MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
+ if (kret != KERN_SUCCESS) {
+ fprintf(stderr, "MachExceptionHandler: failed to send quit message: %d\n", (int)kret);
+ MOZ_CRASH();
+ }
+
+ // Wait for the handler thread to complete before deallocating the port.
+ thread_.join();
+ }
+ if (port_ != MACH_PORT_NULL) {
+ DebugOnly<kern_return_t> kret = mach_port_destroy(mach_task_self(), port_);
+ MOZ_ASSERT(kret == KERN_SUCCESS);
+ port_ = MACH_PORT_NULL;
+ }
+}
+
+bool
+MachExceptionHandler::install(JSRuntime* rt)
+{
+ MOZ_ASSERT(!installed());
+ kern_return_t kret;
+ mach_port_t thread;
+
+ // Get a port which can send and receive data.
+ kret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port_);
+ if (kret != KERN_SUCCESS)
+ goto error;
+ kret = mach_port_insert_right(mach_task_self(), port_, port_, MACH_MSG_TYPE_MAKE_SEND);
+ if (kret != KERN_SUCCESS)
+ goto error;
+
+ // Create a thread to block on reading port_.
+ if (!thread_.init(MachExceptionHandlerThread, rt))
+ goto error;
+
+ // Direct exceptions on this thread to port_ (and thus our handler thread).
+ // Note: we are totally clobbering any existing *thread* exception ports and
+ // not even attempting to forward. Breakpad and gdb both use the *process*
+ // exception ports which are only called if the thread doesn't handle the
+ // exception, so we should be fine.
+ thread = mach_thread_self();
+ kret = thread_set_exception_ports(thread,
+ EXC_MASK_BAD_ACCESS,
+ port_,
+ EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
+ THREAD_STATE_NONE);
+ mach_port_deallocate(mach_task_self(), thread);
+ if (kret != KERN_SUCCESS)
+ goto error;
+
+ installed_ = true;
+ return true;
+
+ error:
+ uninstall();
+ return false;
+}
+
+#else // If not Windows or Mac, assume Unix
+
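+// The two signals of interest on Unix: SIGSEGV for out-of-bounds heap
+// accesses and SIGBUS for unaligned accesses (on ARM).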
+enum class Signal {
+ SegFault,
+ BusError
+};
+
+// Be very cautious and default to not handling; we don't want to accidentally
+// silence real crashes from real bugs.
+template<Signal signal>
+static bool
+HandleFault(int signum, siginfo_t* info, void* ctx)
+{
+ // The signals we're expecting come from access violations, accessing
+ // mprotected memory. If the signal originates anywhere else, don't try
+ // to handle it.
+ if (signal == Signal::SegFault)
+ MOZ_RELEASE_ASSERT(signum == SIGSEGV);
+ else
+ MOZ_RELEASE_ASSERT(signum == SIGBUS);
+
+ CONTEXT* context = (CONTEXT*)ctx;
+ uint8_t** ppc = ContextToPC(context);
+ uint8_t* pc = *ppc;
+
+ // Don't allow recursive handling of signals, see AutoSetHandlingSegFault.
+ JSRuntime* rt = RuntimeForCurrentThread();
+ if (!rt || rt->handlingSegFault)
+ return false;
+ AutoSetHandlingSegFault handling(rt);
+
+ WasmActivation* activation = rt->wasmActivationStack();
+ if (!activation)
+ return false;
+
+ const Instance* instance = activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (!instance || !instance->codeSegment().containsFunctionPC(pc))
+ return false;
+
+ uint8_t* faultingAddress = reinterpret_cast<uint8_t*>(info->si_addr);
+
+ // Although it's not strictly necessary, to make sure we're not covering up
+ // any real bugs, check that the faulting address is indeed in the
+ // instance's memory.
+ if (!faultingAddress) {
+ // On some Linux systems, the kernel apparently sometimes "gives up" and
+ // passes a null faultingAddress with si_code set to SI_KERNEL.
+ // This is observed on some automation machines for some out-of-bounds
+ // atomic accesses on x86/64.
+#ifdef SI_KERNEL
+ if (info->si_code != SI_KERNEL)
+ return false;
+#else
+ return false;
+#endif
+ } else {
+ if (!IsHeapAccessAddress(*instance, faultingAddress))
+ return false;
+ }
+
+#ifdef JS_CODEGEN_ARM
+ if (signal == Signal::BusError) {
+ *ppc = instance->codeSegment().unalignedAccessCode();
+ return true;
+ }
+#endif
+
+ HandleMemoryAccess(context, pc, faultingAddress, *instance, ppc);
+ return true;
+}
+
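+// The signal handlers that were installed before ours; faults that are not
+// ours are forwarded to them (see WasmFaultHandler below).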
+static struct sigaction sPrevSEGVHandler;
+static struct sigaction sPrevSIGBUSHandler;
+
+template<Signal signal>
+static void
+WasmFaultHandler(int signum, siginfo_t* info, void* context)
+{
+ if (HandleFault<signal>(signum, info, context))
+ return;
+
+ struct sigaction* previousSignal = signum == SIGSEGV
+ ? &sPrevSEGVHandler
+ : &sPrevSIGBUSHandler;
+
+ // This signal is not for any asm.js code we expect, so we need to forward
+ // the signal to the next handler. If there is no next handler (SIG_IGN or
+ // SIG_DFL), then it's time to crash. To do this, we set the signal back to
+ // its original disposition and return. This will cause the faulting op to
+ // be re-executed, which will crash in the normal way. The advantage of
+ // doing this over calling _exit() is that we remove ourselves from the crash
+ // stack which improves crash reports. If there is a next handler, call it.
+ // It will either crash synchronously, fix up the instruction so that
+ // execution can continue and return, or trigger a crash by returning the
+ // signal to its original disposition and returning.
+ //
+ // Note: the order of these tests matters.
+ if (previousSignal->sa_flags & SA_SIGINFO)
+ previousSignal->sa_sigaction(signum, info, context);
+ else if (previousSignal->sa_handler == SIG_DFL || previousSignal->sa_handler == SIG_IGN)
+ sigaction(signum, previousSignal, nullptr);
+ else
+ previousSignal->sa_handler(signum);
+}
+# endif // XP_WIN || XP_DARWIN || assume unix
+
+static void
+RedirectIonBackedgesToInterruptCheck(JSRuntime* rt)
+{
+ if (jit::JitRuntime* jitRuntime = rt->jitRuntime()) {
+ // If the backedge list is being mutated, the pc must be in C++ code and
+ // thus not in a JIT iloop. We assume that the interrupt flag will be
+ // checked at least once before entering JIT code (if not, no big deal;
+ // the browser will just request another interrupt in a second).
+ if (!jitRuntime->preventBackedgePatching())
+ jitRuntime->patchIonBackedges(rt, jit::JitRuntime::BackedgeInterruptCheck);
+ }
+}
+
+// The return value indicates whether the PC was changed, not whether there was
+// a failure.
+static bool
+RedirectJitCodeToInterruptCheck(JSRuntime* rt, CONTEXT* context)
+{
+ RedirectIonBackedgesToInterruptCheck(rt);
+
+ if (WasmActivation* activation = rt->wasmActivationStack()) {
+#ifdef JS_SIMULATOR
+ (void)ContextToPC(context); // silence static 'unused' errors
+
+ void* pc = rt->simulator()->get_pc_as<void*>();
+
+ const Instance* instance = activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (instance && instance->codeSegment().containsFunctionPC(pc))
+ rt->simulator()->set_resume_pc(instance->codeSegment().interruptCode());
+#else
+ uint8_t** ppc = ContextToPC(context);
+ uint8_t* pc = *ppc;
+
+ const Instance* instance = activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+ if (instance && instance->codeSegment().containsFunctionPC(pc)) {
+ activation->setResumePC(pc);
+ *ppc = instance->codeSegment().interruptCode();
+ return true;
+ }
+#endif
+ }
+
+ return false;
+}
+
+#if !defined(XP_WIN)
+// For the interrupt signal, pick a signal number that:
+// - is not otherwise used by Mozilla or standard libraries
+// - defaults to nostop and noprint on gdb/lldb so that no one is bothered
+// SIGVTALRM is a relative of SIGALRM, so it is intended for user code but,
+// unlike SIGALRM, it is not used anywhere else in Mozilla.
+static const int sInterruptSignal = SIGVTALRM;
+
+static void
+JitInterruptHandler(int signum, siginfo_t* info, void* context)
+{
+ if (JSRuntime* rt = RuntimeForCurrentThread()) {
+ RedirectJitCodeToInterruptCheck(rt, (CONTEXT*)context);
+ rt->finishHandlingJitInterrupt();
+ }
+}
+#endif
+
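+// Process-wide flags recording whether installation of the signal handlers
+// has been attempted and whether it succeeded.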
+static bool sTriedInstallSignalHandlers = false;
+static bool sHaveSignalHandlers = false;
+
+static bool
+ProcessHasSignalHandlers()
+{
+ // We assume that there are no races creating the first JSRuntime of the process.
+ if (sTriedInstallSignalHandlers)
+ return sHaveSignalHandlers;
+ sTriedInstallSignalHandlers = true;
+
+ // Developers might want to forcibly disable signals to avoid seeing
+ // spurious SIGSEGVs in the debugger.
+ if (getenv("JS_DISABLE_SLOW_SCRIPT_SIGNALS") || getenv("JS_NO_SIGNALS"))
+ return false;
+
+#if defined(ANDROID)
+ // Before Android 4.4 (SDK version 19), there is a bug
+ // https://android-review.googlesource.com/#/c/52333
+ // in Bionic's pthread_join which causes pthread_join to return early when
+ // pthread_kill is used (on any thread). Nobody expects the pthread_cond_wait
+ // EINTRquisition.
+ char version_string[PROP_VALUE_MAX];
+ PodArrayZero(version_string);
+ if (__system_property_get("ro.build.version.sdk", version_string) > 0) {
+ if (atol(version_string) < 19)
+ return false;
+ }
+# if defined(MOZ_LINKER)
+ // Signal handling is broken on some Android systems.
+ if (IsSignalHandlingBroken())
+ return false;
+# endif
+#endif
+
+ // The interrupt handler allows the main thread to be paused from another
+ // thread (see InterruptRunningJitCode).
+#if defined(XP_WIN)
+ // Windows uses SuspendThread to stop the main thread from another thread.
+#else
+ struct sigaction interruptHandler;
+ interruptHandler.sa_flags = SA_SIGINFO;
+ interruptHandler.sa_sigaction = &JitInterruptHandler;
+ sigemptyset(&interruptHandler.sa_mask);
+ struct sigaction prev;
+ if (sigaction(sInterruptSignal, &interruptHandler, &prev))
+ MOZ_CRASH("unable to install interrupt handler");
+
+ // There shouldn't be any other handlers installed for sInterruptSignal. If
+ // there are, we could always forward, but we need to understand what we're
+ // doing to avoid problematic interference.
+ if ((prev.sa_flags & SA_SIGINFO && prev.sa_sigaction) ||
+ (prev.sa_handler != SIG_DFL && prev.sa_handler != SIG_IGN))
+ {
+ MOZ_CRASH("contention for interrupt signal");
+ }
+#endif // defined(XP_WIN)
+
+ // Install a SIGSEGV handler to safely handle out-of-bounds asm.js heap
+ // accesses and/or unaligned accesses.
+# if defined(XP_WIN)
+ if (!AddVectoredExceptionHandler(/* FirstHandler = */ true, WasmFaultHandler))
+ return false;
+# elif defined(XP_DARWIN)
+ // OSX handles seg faults via the Mach exception handler above, so don't
+ // install WasmFaultHandler.
+# else
+ // SA_NODEFER allows us to reenter the signal handler if we crash while
+ // handling the signal, and fall through to the Breakpad handler by testing
+ // handlingSegFault.
+
+ // Allow handling OOB with signals on all architectures
+ struct sigaction faultHandler;
+ faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
+ faultHandler.sa_sigaction = WasmFaultHandler<Signal::SegFault>;
+ sigemptyset(&faultHandler.sa_mask);
+ if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
+ MOZ_CRASH("unable to install segv handler");
+
+# if defined(JS_CODEGEN_ARM)
+ // On ARM, handle unaligned accesses.
+ struct sigaction busHandler;
+ busHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
+ busHandler.sa_sigaction = WasmFaultHandler<Signal::BusError>;
+ sigemptyset(&busHandler.sa_mask);
+ if (sigaction(SIGBUS, &busHandler, &sPrevSIGBUSHandler))
+ MOZ_CRASH("unable to install sigbus handler");
+# endif
+# endif
+
+ sHaveSignalHandlers = true;
+ return true;
+}
+
+bool
+wasm::EnsureSignalHandlers(JSRuntime* rt)
+{
+ // Nothing to do if the platform doesn't support it.
+ if (!ProcessHasSignalHandlers())
+ return true;
+
+#if defined(XP_DARWIN)
+ // On OSX, each JSRuntime gets its own handler thread.
+ if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt))
+ return false;
+#endif
+
+ return true;
+}
+
+bool
+wasm::HaveSignalHandlers()
+{
+ MOZ_ASSERT(sTriedInstallSignalHandlers);
+ return sHaveSignalHandlers;
+}
+
+// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by
+// C++ code at every Baseline JIT loop backedge) and jitStackLimit_ (which is
+// checked at every Baseline and Ion JIT function prologue). The remaining
+// sources of potential iloops (Ion loop backedges and all wasm code) are
+// handled by this function:
+// 1. Ion loop backedges are patched to instead point to a stub that handles
+// the interrupt;
+// 2. if the main thread's pc is inside wasm code, the pc is updated to point
+// to a stub that handles the interrupt.
+void
+js::InterruptRunningJitCode(JSRuntime* rt)
+{
+ // If signal handlers weren't installed, then Ion and wasm emit normal
+ // interrupt checks and don't need asynchronous interruption.
+ if (!HaveSignalHandlers())
+ return;
+
+ // Do nothing if we're already handling an interrupt here, to avoid races
+ // below and in JitRuntime::patchIonBackedges.
+ if (!rt->startHandlingJitInterrupt())
+ return;
+
+ // If we are on the runtime's main thread, then: pc is not in wasm code (so
+ // nothing to do for wasm) and we can patch Ion backedges without any
+ // special synchronization.
+ if (rt == RuntimeForCurrentThread()) {
+ RedirectIonBackedgesToInterruptCheck(rt);
+ rt->finishHandlingJitInterrupt();
+ return;
+ }
+
+ // We are not on the runtime's main thread, so to do 1 and 2 above, we need
+ // to halt the runtime's main thread first.
+#if defined(XP_WIN)
+ // On Windows, we can simply suspend the main thread and work directly on
+ // its context from this thread. SuspendThread can sporadically fail if the
+ // thread is in the middle of a syscall. Rather than retrying in a loop,
+ // just wait for the next request for interrupt.
+ HANDLE thread = (HANDLE)rt->ownerThreadNative();
+ if (SuspendThread(thread) != -1) {
+ CONTEXT context;
+ context.ContextFlags = CONTEXT_CONTROL;
+ if (GetThreadContext(thread, &context)) {
+ if (RedirectJitCodeToInterruptCheck(rt, &context))
+ SetThreadContext(thread, &context);
+ }
+ ResumeThread(thread);
+ }
+ rt->finishHandlingJitInterrupt();
+#else
+ // On Unix, we instead deliver an async signal to the main thread, which
+ // halts the thread and calls our JitInterruptHandler (which has already
+ // been installed by ProcessHasSignalHandlers).
+ pthread_t thread = (pthread_t)rt->ownerThreadNative();
+ pthread_kill(thread, sInterruptSignal);
+#endif
+}
+
+MOZ_COLD bool
+js::wasm::IsPCInWasmCode(void *pc)
+{
+ JSRuntime* rt = RuntimeForCurrentThread();
+ if (!rt)
+ return false;
+
+ MOZ_RELEASE_ASSERT(!rt->handlingSegFault);
+
+ WasmActivation* activation = rt->wasmActivationStack();
+ if (!activation)
+ return false;
+
+ return !!activation->compartment()->wasm.lookupInstanceDeprecated(pc);
+}
diff --git a/js/src/wasm/WasmSignalHandlers.h b/js/src/wasm/WasmSignalHandlers.h
new file mode 100644
index 0000000000..c9ca808f5c
--- /dev/null
+++ b/js/src/wasm/WasmSignalHandlers.h
@@ -0,0 +1,81 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2014 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_signal_handlers_h
+#define wasm_signal_handlers_h
+
+#include "mozilla/Attributes.h"
+
+#if defined(XP_DARWIN)
+# include <mach/mach.h>
+#endif
+#include "threading/Thread.h"
+
+struct JSRuntime;
+
+namespace js {
+
+// Force any currently-executing asm.js/Ion code to call HandleExecutionInterrupt.
+extern void
+InterruptRunningJitCode(JSRuntime* rt);
+
+namespace wasm {
+
+// Ensure the given JSRuntime is set up to use signals. Failure to enable signal
+// handlers indicates some catastrophic failure and creation of the runtime must
+// fail.
+MOZ_MUST_USE bool
+EnsureSignalHandlers(JSRuntime* rt);
+
+// Return whether signals can be used in this process for interrupts or
+// asm.js/wasm out-of-bounds.
+bool
+HaveSignalHandlers();
+
+#if defined(XP_DARWIN)
+// On OSX we are forced to use the lower-level Mach exception mechanism instead
+// of Unix signals. Mach exceptions are not handled on the victim's stack but
+// rather require an extra thread. For simplicity, we create one such thread
+// per JSRuntime (upon the first use of asm.js in the JSRuntime). This thread
+// and related resources are owned by MachExceptionHandler, which is owned
+// by JSRuntime.
+class MachExceptionHandler
+{
+ bool installed_;
+ js::Thread thread_;
+ mach_port_t port_;
+
+ void uninstall();
+
+ public:
+ MachExceptionHandler();
+ ~MachExceptionHandler() { uninstall(); }
+ mach_port_t port() const { return port_; }
+ bool installed() const { return installed_; }
+ bool install(JSRuntime* rt);
+};
+#endif
+
+// Test whether the given PC is within the innermost wasm activation. Return
+// false if it is not, or if it cannot be determined.
+bool IsPCInWasmCode(void *pc);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_signal_handlers_h
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
new file mode 100644
index 0000000000..4f56430b9c
--- /dev/null
+++ b/js/src/wasm/WasmStubs.cpp
@@ -0,0 +1,1151 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmStubs.h"
+
+#include "mozilla/ArrayUtils.h"
+
+#include "wasm/WasmCode.h"
+#include "wasm/WasmIonCompile.h"
+
+#include "jit/MacroAssembler-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::ArrayLength;
+
+static void
+AssertStackAlignment(MacroAssembler& masm, uint32_t alignment, uint32_t addBeforeAssert = 0)
+{
+ MOZ_ASSERT((sizeof(Frame) + masm.framePushed() + addBeforeAssert) % alignment == 0);
+ masm.assertStackAlignment(alignment, addBeforeAssert);
+}
+
+static unsigned
+StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, unsigned bytesToPush)
+{
+ return StackDecrementForCall(alignment, sizeof(Frame) + masm.framePushed(), bytesToPush);
+}
+
+template <class VectorT>
+static unsigned
+StackArgBytes(const VectorT& args)
+{
+ ABIArgIter<VectorT> iter(args);
+ while (!iter.done())
+ iter++;
+ return iter.stackBytesConsumedSoFar();
+}
+
+template <class VectorT>
+static unsigned
+StackDecrementForCall(MacroAssembler& masm, uint32_t alignment, const VectorT& args,
+ unsigned extraBytes = 0)
+{
+ return StackDecrementForCall(masm, alignment, StackArgBytes(args) + extraBytes);
+}
+
+#if defined(JS_CODEGEN_ARM)
+// The ARM system ABI also includes d15 & s31 in the non-volatile float registers.
+// Also exclude lr (a.k.a. r14), as we preserve it manually.
+static const LiveRegisterSet NonVolatileRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask&
+ ~(uint32_t(1) << Registers::lr)),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask
+ | (1ULL << FloatRegisters::d15)
+ | (1ULL << FloatRegisters::s31)));
+#else
+static const LiveRegisterSet NonVolatileRegs =
+ LiveRegisterSet(GeneralRegisterSet(Registers::NonVolatileMask),
+ FloatRegisterSet(FloatRegisters::NonVolatileMask));
+#endif
+
+#if defined(JS_CODEGEN_MIPS32)
+// MIPS uses one more double slot due to stack alignment for double values.
+// See MacroAssembler::PushRegsInMask(RegisterSet set).
+static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t) +
+ NonVolatileRegs.fpus().getPushSizeInBytes() +
+ sizeof(double);
+#elif defined(JS_CODEGEN_NONE)
+static const unsigned FramePushedAfterSave = 0;
+#else
+static const unsigned FramePushedAfterSave = NonVolatileRegs.gprs().size() * sizeof(intptr_t)
+ + NonVolatileRegs.fpus().getPushSizeInBytes();
+#endif
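+// GenerateEntry pushes one extra word (the saved 'argv' pointer) after saving
+// the non-volatile registers and before recording the entry stack pointer.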
+static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void*);
+
+// Generate a stub that enters wasm from a C++ caller via the native ABI. The
+// signature of the entry point is Module::ExportFuncPtr. The exported wasm
+// function has an ABI derived from its specific signature, so this function
+// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
+Offsets
+wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
+{
+ masm.haltingAlign(CodeAlignment);
+
+ Offsets offsets;
+ offsets.begin = masm.currentOffset();
+
+ // Save the return address if it wasn't already saved by the call insn.
+#if defined(JS_CODEGEN_ARM)
+ masm.push(lr);
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ masm.push(ra);
+#endif
+
+ // Save all caller non-volatile registers before we clobber them here and in
+ // the asm.js callee (which does not preserve non-volatile registers).
+ masm.setFramePushed(0);
+ masm.PushRegsInMask(NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == FramePushedAfterSave);
+
+ // Put the 'argv' argument into a non-argument/return/TLS register so that
+ // we can use 'argv' while we fill in the arguments for the asm.js callee.
+ Register argv = ABINonArgReturnReg0;
+ Register scratch = ABINonArgReturnReg1;
+
+ // Read the arguments of wasm::ExportFuncPtr according to the native ABI.
+ // The entry stub's frame is only 1 word, not the usual 2 for wasm::Frame.
+ const unsigned argBase = sizeof(void*) + masm.framePushed();
+ ABIArgGenerator abi;
+ ABIArg arg;
+
+ // arg 1: ExportArg*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR)
+ masm.movePtr(arg.gpr(), argv);
+ else
+ masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), argv);
+
+ // Arg 2: TlsData*
+ arg = abi.next(MIRType::Pointer);
+ if (arg.kind() == ABIArg::GPR)
+ masm.movePtr(arg.gpr(), WasmTlsReg);
+ else
+ masm.loadPtr(Address(masm.getStackPointer(), argBase + arg.offsetFromArgBase()), WasmTlsReg);
+
+ // Setup pinned registers that are assumed throughout wasm code.
+ masm.loadWasmPinnedRegsFromTls();
+
+ // Save 'argv' on the stack so that we can recover it after the call. Use
+ // a second non-argument/return register as temporary scratch.
+ masm.Push(argv);
+
+ // Save the stack pointer in the WasmActivation right before dynamically
+ // aligning the stack so that it may be recovered on return or throw.
+ MOZ_ASSERT(masm.framePushed() == FramePushedForEntrySP);
+ masm.loadWasmActivationFromTls(scratch);
+ masm.storeStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
+
+ // Dynamically align the stack since ABIStackAlignment is not necessarily
+ // WasmStackAlignment. We'll use entrySP to recover the original stack
+ // pointer on return.
+ masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
+
+ // Bump the stack for the call.
+ masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), WasmStackAlignment));
+
+ // Copy parameters out of argv and into the registers/stack-slots specified by
+ // the system ABI.
+ for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
+ unsigned argOffset = iter.index() * sizeof(ExportArg);
+ Address src(argv, argOffset);
+ MIRType type = iter.mirType();
+ switch (iter->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32)
+ masm.load32(src, iter->gpr());
+ else if (type == MIRType::Int64)
+ masm.load64(src, iter->gpr64());
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64)
+ masm.load64(src, iter->gpr64());
+ else
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ break;
+#endif
+ case ABIArg::FPU: {
+ static_assert(sizeof(ExportArg) >= jit::Simd128DataSize,
+ "ExportArg must be big enough to store SIMD values");
+ switch (type) {
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ masm.loadUnalignedSimd128Int(src, iter->fpu());
+ break;
+ case MIRType::Float32x4:
+ masm.loadUnalignedSimd128Float(src, iter->fpu());
+ break;
+ case MIRType::Double:
+ masm.loadDouble(src, iter->fpu());
+ break;
+ case MIRType::Float32:
+ masm.loadFloat32(src, iter->fpu());
+ break;
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected FPU type");
+ break;
+ }
+ break;
+ }
+ case ABIArg::Stack:
+ switch (type) {
+ case MIRType::Int32:
+ masm.load32(src, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+ case MIRType::Int64: {
+ Register sp = masm.getStackPointer();
+#if JS_BITS_PER_WORD == 32
+ masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
+ masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64LOW_OFFSET));
+ masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
+ masm.store32(scratch, Address(sp, iter->offsetFromArgBase() + INT64HIGH_OFFSET));
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, Address(sp, iter->offsetFromArgBase()));
+#endif
+ break;
+ }
+ case MIRType::Double:
+ masm.loadDouble(src, ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg,
+ Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+ case MIRType::Float32:
+ masm.loadFloat32(src, ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg,
+ Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+ case MIRType::Int8x16:
+ case MIRType::Int16x8:
+ case MIRType::Int32x4:
+ case MIRType::Bool8x16:
+ case MIRType::Bool16x8:
+ case MIRType::Bool32x4:
+ masm.loadUnalignedSimd128Int(src, ScratchSimd128Reg);
+ masm.storeAlignedSimd128Int(
+ ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+ case MIRType::Float32x4:
+ masm.loadUnalignedSimd128Float(src, ScratchSimd128Reg);
+ masm.storeAlignedSimd128Float(
+ ScratchSimd128Reg, Address(masm.getStackPointer(), iter->offsetFromArgBase()));
+ break;
+ default:
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unexpected stack arg type");
+ }
+ break;
+ }
+ }
+
+ // Call into the real function.
+ masm.assertStackAlignment(WasmStackAlignment);
+ masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
+
+ // Recover the stack pointer value before dynamic alignment.
+ masm.loadWasmActivationFromTls(scratch);
+ masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
+ masm.setFramePushed(FramePushedForEntrySP);
+
+ // Recover the 'argv' pointer which was saved before aligning the stack.
+ masm.Pop(argv);
+
+ // Store the return value in argv[0]
+ switch (fe.sig().ret()) {
+ case ExprType::Void:
+ break;
+ case ExprType::I32:
+ masm.store32(ReturnReg, Address(argv, 0));
+ break;
+ case ExprType::I64:
+ masm.store64(ReturnReg64, Address(argv, 0));
+ break;
+ case ExprType::F32:
+ if (!JitOptions.wasmTestMode)
+ masm.canonicalizeFloat(ReturnFloat32Reg);
+ masm.storeFloat32(ReturnFloat32Reg, Address(argv, 0));
+ break;
+ case ExprType::F64:
+ if (!JitOptions.wasmTestMode)
+ masm.canonicalizeDouble(ReturnDoubleReg);
+ masm.storeDouble(ReturnDoubleReg, Address(argv, 0));
+ break;
+ case ExprType::I8x16:
+ case ExprType::I16x8:
+ case ExprType::I32x4:
+ case ExprType::B8x16:
+ case ExprType::B16x8:
+ case ExprType::B32x4:
+ // We don't have control on argv alignment, do an unaligned access.
+ masm.storeUnalignedSimd128Int(ReturnSimd128Reg, Address(argv, 0));
+ break;
+ case ExprType::F32x4:
+ // We don't have control on argv alignment, do an unaligned access.
+ masm.storeUnalignedSimd128Float(ReturnSimd128Reg, Address(argv, 0));
+ break;
+ case ExprType::Limit:
+ MOZ_CRASH("Limit");
+ }
+
+ // Restore clobbered non-volatile registers of the caller.
+ masm.PopRegsInMask(NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ masm.move32(Imm32(true), ReturnReg);
+ masm.ret();
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+static void
+StackCopy(MacroAssembler& masm, MIRType type, Register scratch, Address src, Address dst)
+{
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ masm.store32(scratch, dst);
+ } else if (type == MIRType::Int64) {
+#if JS_BITS_PER_WORD == 32
+ masm.load32(Address(src.base, src.offset + INT64LOW_OFFSET), scratch);
+ masm.store32(scratch, Address(dst.base, dst.offset + INT64LOW_OFFSET));
+ masm.load32(Address(src.base, src.offset + INT64HIGH_OFFSET), scratch);
+ masm.store32(scratch, Address(dst.base, dst.offset + INT64HIGH_OFFSET));
+#else
+ Register64 scratch64(scratch);
+ masm.load64(src, scratch64);
+ masm.store64(scratch64, dst);
+#endif
+ } else if (type == MIRType::Float32) {
+ masm.loadFloat32(src, ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, dst);
+ } else {
+ MOZ_ASSERT(type == MIRType::Double);
+ masm.loadDouble(src, ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, dst);
+ }
+}
+
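+// Flag for FillArgumentArray: when true, arguments are boxed as JS::Values
+// (used by the JIT exit); when false, raw argument bits are copied into the
+// sizeof(Value)-sized slots (used by the interpreter exit's argv array).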
+typedef bool ToValue;
+
+static void
+FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argOffset,
+ unsigned offsetToCallerStackArgs, Register scratch, ToValue toValue)
+{
+ for (ABIArgValTypeIter i(args); !i.done(); i++) {
+ Address dst(masm.getStackPointer(), argOffset + i.index() * sizeof(Value));
+
+ MIRType type = i.mirType();
+ switch (i->kind()) {
+ case ABIArg::GPR:
+ if (type == MIRType::Int32) {
+ if (toValue)
+ masm.storeValue(JSVAL_TYPE_INT32, i->gpr(), dst);
+ else
+ masm.store32(i->gpr(), dst);
+ } else if (type == MIRType::Int64) {
+ // We can't box int64 into Values (yet).
+ if (toValue)
+ masm.breakpoint();
+ else
+ masm.store64(i->gpr64(), dst);
+ } else {
+ MOZ_CRASH("unexpected input type?");
+ }
+ break;
+#ifdef JS_CODEGEN_REGISTER_PAIR
+ case ABIArg::GPR_PAIR:
+ if (type == MIRType::Int64)
+ masm.store64(i->gpr64(), dst);
+ else
+ MOZ_CRASH("wasm uses hardfp for function calls.");
+ break;
+#endif
+ case ABIArg::FPU: {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ FloatRegister srcReg = i->fpu();
+ if (type == MIRType::Double) {
+ if (toValue) {
+ // Preserve the NaN pattern in the input.
+ masm.moveDouble(srcReg, ScratchDoubleReg);
+ srcReg = ScratchDoubleReg;
+ masm.canonicalizeDouble(srcReg);
+ }
+ masm.storeDouble(srcReg, dst);
+ } else {
+ MOZ_ASSERT(type == MIRType::Float32);
+ if (toValue) {
+ // JS::Values can't store Float32, so convert to a Double.
+ masm.convertFloat32ToDouble(srcReg, ScratchDoubleReg);
+ masm.canonicalizeDouble(ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, dst);
+ } else {
+ // Preserve the NaN pattern in the input.
+ masm.moveFloat32(srcReg, ScratchFloat32Reg);
+ masm.canonicalizeFloat(ScratchFloat32Reg);
+ masm.storeFloat32(ScratchFloat32Reg, dst);
+ }
+ }
+ break;
+ }
+ case ABIArg::Stack: {
+ Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
+ if (toValue) {
+ if (type == MIRType::Int32) {
+ masm.load32(src, scratch);
+ masm.storeValue(JSVAL_TYPE_INT32, scratch, dst);
+ } else if (type == MIRType::Int64) {
+ // We can't box int64 into Values (yet).
+ masm.breakpoint();
+ } else {
+ MOZ_ASSERT(IsFloatingPointType(type));
+ if (type == MIRType::Float32) {
+ masm.loadFloat32(src, ScratchFloat32Reg);
+ masm.convertFloat32ToDouble(ScratchFloat32Reg, ScratchDoubleReg);
+ } else {
+ masm.loadDouble(src, ScratchDoubleReg);
+ }
+ masm.canonicalizeDouble(ScratchDoubleReg);
+ masm.storeDouble(ScratchDoubleReg, dst);
+ }
+ } else {
+ StackCopy(masm, type, scratch, src, dst);
+ }
+ break;
+ }
+ }
+ }
+}
+
+// Generate a wrapper function with the standard intra-wasm call ABI which simply
+// calls an import. This wrapper function allows any import to be treated like a
+// normal wasm function for the purposes of exports and table calls. In
+// particular, the wrapper function provides:
+// - a table entry, so JS imports can be put into tables
+// - normal (non-)profiling entries, so that, if the import is re-exported,
+// an entry stub can be generated and called without any special cases
+FuncOffsets
+wasm::GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId)
+{
+ masm.setFramePushed(0);
+
+ unsigned tlsBytes = sizeof(void*);
+ unsigned framePushed = StackDecrementForCall(masm, WasmStackAlignment, fi.sig().args(), tlsBytes);
+
+ FuncOffsets offsets;
+ GenerateFunctionPrologue(masm, framePushed, sigId, &offsets);
+
+ // The argument register state is already setup by our caller. We just need
+ // to be sure not to clobber it before the call.
+ Register scratch = ABINonArgReg0;
+
+ // Copy our frame's stack arguments to the callee frame's stack arguments.
+ unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
+ ABIArgValTypeIter i(fi.sig().args());
+ for (; !i.done(); i++) {
+ if (i->kind() != ABIArg::Stack)
+ continue;
+
+ Address src(masm.getStackPointer(), offsetToCallerStackArgs + i->offsetFromArgBase());
+ Address dst(masm.getStackPointer(), i->offsetFromArgBase());
+ StackCopy(masm, i.mirType(), scratch, src, dst);
+ }
+
+ // Save the TLS register so it can be restored later.
+ uint32_t tlsStackOffset = i.stackBytesConsumedSoFar();
+ masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), tlsStackOffset));
+
+ // Call the import exit stub.
+ CallSiteDesc desc(CallSiteDesc::Dynamic);
+ masm.wasmCallImport(desc, CalleeDesc::import(fi.tlsDataOffset()));
+
+ // Restore the TLS register and pinned regs, per wasm function ABI.
+ masm.loadPtr(Address(masm.getStackPointer(), tlsStackOffset), WasmTlsReg);
+ masm.loadWasmPinnedRegsFromTls();
+
+ GenerateFunctionEpilogue(masm, framePushed, &offsets);
+
+ masm.wasmEmitTrapOutOfLineCode();
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into an appropriate callImport C++
+// function, having boxed all the ABI arguments into a homogeneous Value array.
+ProfilingOffsets
+wasm::GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
+ Label* throwLabel)
+{
+ masm.setFramePushed(0);
+
+ // Argument types for Module::callImport_*:
+ static const MIRType typeArray[] = { MIRType::Pointer, // Instance*
+ MIRType::Pointer, // funcImportIndex
+ MIRType::Int32, // argc
+ MIRType::Pointer }; // argv
+ MIRTypeVector invokeArgTypes;
+ MOZ_ALWAYS_TRUE(invokeArgTypes.append(typeArray, ArrayLength(typeArray)));
+
+ // At the point of the call, the stack layout shall be (sp grows to the left):
+ // | stack args | padding | Value argv[] | padding | retaddr | caller stack args |
+ // The padding between stack args and argv ensures that argv is aligned. The
+ // padding between argv and retaddr ensures that sp is aligned.
+ unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
+ unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
+ unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
+
+ ProfilingOffsets offsets;
+ GenerateExitPrologue(masm, framePushed, ExitReason::ImportInterp, &offsets);
+
+ // Fill the argument array.
+ unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
+ Register scratch = ABINonArgReturnReg0;
+ FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
+
+ // Prepare the arguments for the call to Module::callImport_*.
+ ABIArgMIRTypeIter i(invokeArgTypes);
+
+ // argument 0: Instance*
+ Address instancePtr(WasmTlsReg, offsetof(TlsData, instance));
+ if (i->kind() == ABIArg::GPR) {
+ masm.loadPtr(instancePtr, i->gpr());
+ } else {
+ masm.loadPtr(instancePtr, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+
+ // argument 1: funcImportIndex
+ if (i->kind() == ABIArg::GPR)
+ masm.mov(ImmWord(funcImportIndex), i->gpr());
+ else
+ masm.store32(Imm32(funcImportIndex), Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ i++;
+
+ // argument 2: argc
+ unsigned argc = fi.sig().args().length();
+ if (i->kind() == ABIArg::GPR)
+ masm.mov(ImmWord(argc), i->gpr());
+ else
+ masm.store32(Imm32(argc), Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ i++;
+
+ // argument 3: argv
+ Address argv(masm.getStackPointer(), argOffset);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Make the call, test whether it succeeded, and extract the return value.
+ AssertStackAlignment(masm, ABIStackAlignment);
+ switch (fi.sig().ret()) {
+ case ExprType::Void:
+ masm.call(SymbolicAddress::CallImport_Void);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ break;
+ case ExprType::I32:
+ masm.call(SymbolicAddress::CallImport_I32);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.load32(argv, ReturnReg);
+ break;
+ case ExprType::I64:
+ masm.call(SymbolicAddress::CallImport_I64);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.load64(argv, ReturnReg64);
+ break;
+ case ExprType::F32:
+ masm.call(SymbolicAddress::CallImport_F64);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.loadDouble(argv, ReturnDoubleReg);
+ masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
+ break;
+ case ExprType::F64:
+ masm.call(SymbolicAddress::CallImport_F64);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.loadDouble(argv, ReturnDoubleReg);
+ break;
+ case ExprType::I8x16:
+ case ExprType::I16x8:
+ case ExprType::I32x4:
+ case ExprType::F32x4:
+ case ExprType::B8x16:
+ case ExprType::B16x8:
+ case ExprType::B32x4:
+ MOZ_CRASH("SIMD types shouldn't be returned from a FFI");
+ case ExprType::Limit:
+ MOZ_CRASH("Limit");
+ }
+
+ // The native ABI preserves the TLS, heap and global registers since they
+ // are non-volatile.
+ MOZ_ASSERT(NonVolatileRegs.has(WasmTlsReg));
+#if defined(JS_CODEGEN_X64) || \
+ defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(NonVolatileRegs.has(HeapReg));
+#endif
+#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(NonVolatileRegs.has(GlobalReg));
+#endif
+
+ GenerateExitEpilogue(masm, framePushed, ExitReason::ImportInterp, &offsets);
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
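+// Extra stack space reserved beyond the JIT frame so that WasmTlsReg can be
+// saved across the call, since JIT code clobbers all registers.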
+static const unsigned SavedTlsReg = sizeof(void*);
+
+// Generate a stub that is called via the internal ABI derived from the
+// signature of the import and calls into a compatible JIT function,
+// having boxed all the ABI arguments into the JIT stack frame layout.
+ProfilingOffsets
+wasm::GenerateImportJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
+{
+ masm.setFramePushed(0);
+
+ // JIT calls use the following stack layout (sp grows to the left):
+ // | retaddr | descriptor | callee | argc | this | arg1..N |
+ // After the JIT frame, the global register (if present) is saved since the
+ // JIT's ABI does not preserve non-volatile regs. Also, unlike most ABIs,
+ // the JIT ABI requires that sp be JitStackAlignment-aligned *after* pushing
+ // the return address.
+ static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
+ unsigned sizeOfRetAddr = sizeof(void*);
+ unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
+ unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
+ unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
+ sizeOfRetAddr;
+
+ ProfilingOffsets offsets;
+ GenerateExitPrologue(masm, jitFramePushed, ExitReason::ImportJit, &offsets);
+
+ // 1. Descriptor
+ size_t argOffset = 0;
+ uint32_t descriptor = MakeFrameDescriptor(jitFramePushed, JitFrame_Entry,
+ JitFrameLayout::Size());
+ masm.storePtr(ImmWord(uintptr_t(descriptor)), Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+
+ // 2. Callee
+ Register callee = ABINonArgReturnReg0; // live until call
+ Register scratch = ABINonArgReturnReg1; // repeatedly clobbered
+
+ // 2.1. Get callee
+ masm.loadWasmGlobalPtr(fi.tlsDataOffset() + offsetof(FuncImportTls, obj), callee);
+
+ // 2.2. Save callee
+ masm.storePtr(callee, Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+
+ // 2.3. Load callee executable entry point
+ masm.loadPtr(Address(callee, JSFunction::offsetOfNativeOrScript()), callee);
+ masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
+
+ // 3. Argc
+ unsigned argc = fi.sig().args().length();
+ masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(size_t);
+
+ // 4. |this| value
+ masm.storeValue(UndefinedValue(), Address(masm.getStackPointer(), argOffset));
+ argOffset += sizeof(Value);
+
+ // 5. Fill the arguments
+ unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
+ FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
+ argOffset += fi.sig().args().length() * sizeof(Value);
+ MOZ_ASSERT(argOffset == jitFrameBytes);
+
+ // 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
+ // must be kept live for the benefit of the epilogue, so push it on the
+ // stack so that it can be restored before the epilogue.
+ static_assert(SavedTlsReg == sizeof(void*), "stack frame accounting");
+ masm.storePtr(WasmTlsReg, Address(masm.getStackPointer(), jitFrameBytes));
+
+ {
+ // Enable Activation.
+ //
+ // This sequence requires two registers, and needs to preserve the
+ // 'callee' register, so there are three live registers.
+ MOZ_ASSERT(callee == WasmIonExitRegCallee);
+ Register cx = WasmIonExitRegE0;
+ Register act = WasmIonExitRegE1;
+
+ // JitActivation* act = cx->activation();
+ masm.movePtr(SymbolicAddress::Context, cx);
+ masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
+
+ // act.active_ = true;
+ masm.store8(Imm32(1), Address(act, JitActivation::offsetOfActiveUint8()));
+
+ // cx->jitActivation = act;
+ masm.storePtr(act, Address(cx, offsetof(JSContext, jitActivation)));
+
+ // cx->profilingActivation_ = act;
+ masm.storePtr(act, Address(cx, JSContext::offsetOfProfilingActivation()));
+ }
+
+ AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
+ masm.callJitNoProfiler(callee);
+ AssertStackAlignment(masm, JitStackAlignment, sizeOfRetAddr);
+
+ {
+ // Disable Activation.
+ //
+ // This sequence needs three registers, and must preserve the JSReturnReg_Data and
+ // JSReturnReg_Type, so there are five live registers.
+ MOZ_ASSERT(JSReturnReg_Data == WasmIonExitRegReturnData);
+ MOZ_ASSERT(JSReturnReg_Type == WasmIonExitRegReturnType);
+ Register cx = WasmIonExitRegD0;
+ Register act = WasmIonExitRegD1;
+ Register tmp = WasmIonExitRegD2;
+
+ // JitActivation* act = cx->activation();
+ masm.movePtr(SymbolicAddress::Context, cx);
+ masm.loadPtr(Address(cx, JSContext::offsetOfActivation()), act);
+
+ // cx->jitTop = act->prevJitTop_;
+ masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitTop()), tmp);
+ masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitTop)));
+
+ // cx->jitActivation = act->prevJitActivation_;
+ masm.loadPtr(Address(act, JitActivation::offsetOfPrevJitActivation()), tmp);
+ masm.storePtr(tmp, Address(cx, offsetof(JSContext, jitActivation)));
+
+ // cx->profilingActivation = act->prevProfilingActivation_;
+ masm.loadPtr(Address(act, Activation::offsetOfPrevProfiling()), tmp);
+ masm.storePtr(tmp, Address(cx, JSContext::offsetOfProfilingActivation()));
+
+ // act->active_ = false;
+ masm.store8(Imm32(0), Address(act, JitActivation::offsetOfActiveUint8()));
+ }
+
+ // As explained above, the frame was aligned for the JIT ABI such that
+ // (sp + sizeof(void*)) % JitStackAlignment == 0
+ // But now we possibly want to call one of several different C++ functions,
+ // so subtract the sizeof(void*) so that sp is aligned for an ABI call.
+ static_assert(ABIStackAlignment <= JitStackAlignment, "subsumes");
+ masm.reserveStack(sizeOfRetAddr);
+ unsigned nativeFramePushed = masm.framePushed();
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+ masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
+
+ Label oolConvert;
+ switch (fi.sig().ret()) {
+ case ExprType::Void:
+ break;
+ case ExprType::I32:
+ masm.convertValueToInt32(JSReturnOperand, ReturnDoubleReg, ReturnReg, &oolConvert,
+ /* -0 check */ false);
+ break;
+ case ExprType::I64:
+ // We don't expect int64 to be returned from Ion yet, because of a
+ // guard in callImport.
+ masm.breakpoint();
+ break;
+ case ExprType::F32:
+ masm.convertValueToFloat(JSReturnOperand, ReturnFloat32Reg, &oolConvert);
+ break;
+ case ExprType::F64:
+ masm.convertValueToDouble(JSReturnOperand, ReturnDoubleReg, &oolConvert);
+ break;
+ case ExprType::I8x16:
+ case ExprType::I16x8:
+ case ExprType::I32x4:
+ case ExprType::F32x4:
+ case ExprType::B8x16:
+ case ExprType::B16x8:
+ case ExprType::B32x4:
+ MOZ_CRASH("SIMD types shouldn't be returned from an import");
+ case ExprType::Limit:
+ MOZ_CRASH("Limit");
+ }
+
+ Label done;
+ masm.bind(&done);
+
+ // Ion code does not respect the system ABI's callee-saved register
+ // conventions so reload any assumed-non-volatile registers. Note that the
+ // reserveStack(sizeOfRetAddr) above means that the stack pointer is at a
+ // different offset than when WasmTlsReg was stored.
+ masm.loadPtr(Address(masm.getStackPointer(), jitFrameBytes + sizeOfRetAddr), WasmTlsReg);
+
+ GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, &offsets);
+
+ if (oolConvert.used()) {
+ masm.bind(&oolConvert);
+ masm.setFramePushed(nativeFramePushed);
+
+ // Coercion calls use the following stack layout (sp grows to the left):
+ // | args | padding | Value argv[1] | padding | exit Frame |
+ MIRTypeVector coerceArgTypes;
+ JS_ALWAYS_TRUE(coerceArgTypes.append(MIRType::Pointer));
+ unsigned offsetToCoerceArgv = AlignBytes(StackArgBytes(coerceArgTypes), sizeof(Value));
+ MOZ_ASSERT(nativeFramePushed >= offsetToCoerceArgv + sizeof(Value));
+ AssertStackAlignment(masm, ABIStackAlignment);
+
+ // Store return value into argv[0]
+ masm.storeValue(JSReturnOperand, Address(masm.getStackPointer(), offsetToCoerceArgv));
+
+ // argument 0: argv
+ ABIArgMIRTypeIter i(coerceArgTypes);
+ Address argv(masm.getStackPointer(), offsetToCoerceArgv);
+ if (i->kind() == ABIArg::GPR) {
+ masm.computeEffectiveAddress(argv, i->gpr());
+ } else {
+ masm.computeEffectiveAddress(argv, scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ }
+ i++;
+ MOZ_ASSERT(i.done());
+
+ // Call coercion function
+ AssertStackAlignment(masm, ABIStackAlignment);
+ switch (fi.sig().ret()) {
+ case ExprType::I32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
+ break;
+ case ExprType::F64:
+ masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
+ break;
+ case ExprType::F32:
+ masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
+ masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
+ masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
+ masm.convertDoubleToFloat32(ReturnDoubleReg, ReturnFloat32Reg);
+ break;
+ default:
+ MOZ_CRASH("Unsupported convert type");
+ }
+
+ masm.jump(&done);
+ masm.setFramePushed(0);
+ }
+
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+// Generate a stub that calls into ReportTrap with the right trap reason.
+// This stub is called with ABIStackAlignment by a trap out-of-line path. A
+// profiling prologue/epilogue is used so that stack unwinding picks up the
+// current WasmActivation. Unwinding will begin at the caller of this trap exit.
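+// The stub passes the trap reason to SymbolicAddress::ReportTrap as a single
+// int32 argument and then jumps to the common throw stub rather than
+// returning to the trapping code.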
+ProfilingOffsets
+wasm::GenerateTrapExit(MacroAssembler& masm, Trap trap, Label* throwLabel)
+{
+ masm.haltingAlign(CodeAlignment);
+
+ masm.setFramePushed(0);
+
+ MIRTypeVector args;
+ MOZ_ALWAYS_TRUE(args.append(MIRType::Int32));
+
+ uint32_t framePushed = StackDecrementForCall(masm, ABIStackAlignment, args);
+
+ ProfilingOffsets offsets;
+ GenerateExitPrologue(masm, framePushed, ExitReason::Trap, &offsets);
+
+ ABIArgMIRTypeIter i(args);
+ if (i->kind() == ABIArg::GPR)
+ masm.move32(Imm32(int32_t(trap)), i->gpr());
+ else
+ masm.store32(Imm32(int32_t(trap)), Address(masm.getStackPointer(), i->offsetFromArgBase()));
+ i++;
+ MOZ_ASSERT(i.done());
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::ReportTrap);
+
+ masm.jump(throwLabel);
+
+ GenerateExitEpilogue(masm, framePushed, ExitReason::Trap, &offsets);
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+// Generate a stub which is only used by the signal handlers to handle out of
+// bounds access by experimental SIMD.js and Atomics and unaligned accesses on
+// ARM. This stub is executed by direct PC transfer from the faulting memory
+// access and thus the stack depth is unknown. Since WasmActivation::fp is not
+// set before calling the error reporter, the current wasm activation will be
+// lost. This stub should be removed when SIMD.js and Atomics are moved to wasm
+// and given proper traps and when we use a non-faulting strategy for unaligned
+// ARM access.
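+// Since no exit frame is constructed, the only fixup performed here is
+// re-aligning sp before calling into C++; the stub then jumps straight to the
+// throw stub.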
+static Offsets
+GenerateGenericMemoryAccessTrap(MacroAssembler& masm, SymbolicAddress reporter, Label* throwLabel)
+{
+ masm.haltingAlign(CodeAlignment);
+
+ Offsets offsets;
+ offsets.begin = masm.currentOffset();
+
+ // sp can be anything at this point, so ensure it is aligned when calling
+ // into C++. We unconditionally jump to throw so don't worry about
+ // restoring sp.
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace)
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+
+ masm.call(reporter);
+ masm.jump(throwLabel);
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+Offsets
+wasm::GenerateOutOfBoundsExit(MacroAssembler& masm, Label* throwLabel)
+{
+ return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportOutOfBounds, throwLabel);
+}
+
+Offsets
+wasm::GenerateUnalignedExit(MacroAssembler& masm, Label* throwLabel)
+{
+ return GenerateGenericMemoryAccessTrap(masm, SymbolicAddress::ReportUnalignedAccess, throwLabel);
+}
+
+static const LiveRegisterSet AllRegsExceptSP(
+ GeneralRegisterSet(Registers::AllMask & ~(uint32_t(1) << Registers::StackPointer)),
+ FloatRegisterSet(FloatRegisters::AllMask));
+
+// The async interrupt-callback exit is called from arbitrarily-interrupted wasm
+// code. That means we must first save *all* registers and restore *all*
+// registers (except the stack pointer) when we resume. The address to resume to
+// (assuming that js::HandleExecutionInterrupt doesn't indicate that the
+// execution should be aborted) is stored in WasmActivation::resumePC_.
+// Unfortunately, loading this requires a scratch register which we don't have
+// after restoring all registers. To hack around this, push the resumePC on the
+// stack so that it can be popped directly into PC.
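+// On x86/x64 this works out as follows: the stub pushes a slot for resumePC,
+// then the flags, then every register except SP; on the way out it restores
+// the registers and flags in reverse order, leaving resumePC on top of the
+// stack so that the final `ret` pops it directly into PC.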
+Offsets
+wasm::GenerateInterruptExit(MacroAssembler& masm, Label* throwLabel)
+{
+ masm.haltingAlign(CodeAlignment);
+
+ Offsets offsets;
+ offsets.begin = masm.currentOffset();
+
+#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
+ // Be very careful here not to perturb the machine state before saving it
+ // to the stack. In particular, add/sub instructions may set conditions in
+ // the flags register.
+ masm.push(Imm32(0)); // space for resumePC
+ masm.pushFlags(); // after this we are safe to use sub
+ masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
+ masm.PushRegsInMask(AllRegsExceptSP); // save all GP/FP registers (except SP)
+
+ Register scratch = ABINonArgReturnReg0;
+
+ // Store resumePC into the reserved space.
+ masm.loadWasmActivationFromSymbolicAddress(scratch);
+ masm.loadPtr(Address(scratch, WasmActivation::offsetOfResumePC()), scratch);
+ masm.storePtr(scratch, Address(masm.getStackPointer(), masm.framePushed() + sizeof(void*)));
+
+ // We know that StackPointer is word-aligned, but not necessarily
+ // stack-aligned, so we need to align it dynamically.
+ masm.moveStackPtrTo(ABINonVolatileReg);
+ masm.andToStackPtr(Imm32(~(ABIStackAlignment - 1)));
+ if (ShadowStackSpace)
+ masm.subFromStackPtr(Imm32(ShadowStackSpace));
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+ masm.branchIfFalseBool(ReturnReg, throwLabel);
+
+ // Restore the StackPointer to its position before the call.
+ masm.moveToStackPtr(ABINonVolatileReg);
+
+ // Restore the machine state to before the interrupt.
+ masm.PopRegsInMask(AllRegsExceptSP); // restore all GP/FP registers (except SP)
+ masm.popFlags(); // after this, nothing that sets conditions
+ masm.ret(); // pop resumePC into PC
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ // Reserve space to store resumePC and HeapReg.
+ masm.subFromStackPtr(Imm32(2 * sizeof(intptr_t)));
+    // Set to zero so we can use masm.framePushed() below.
+ masm.setFramePushed(0);
+ static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+    // Save all registers except sp. After this, the stack is aligned.
+ masm.PushRegsInMask(AllRegsExceptSP);
+
+ // Save the stack pointer in a non-volatile register.
+ masm.moveStackPtrTo(s0);
+ // Align the stack.
+ masm.ma_and(StackPointer, StackPointer, Imm32(~(ABIStackAlignment - 1)));
+
+ // Store resumePC into the reserved space.
+ masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
+ masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
+ masm.storePtr(IntArgReg1, Address(s0, masm.framePushed()));
+ // Store HeapReg into the reserved space.
+ masm.storePtr(HeapReg, Address(s0, masm.framePushed() + sizeof(intptr_t)));
+
+# ifdef USES_O32_ABI
+    // The MIPS ABI requires reserving stack space for registers $a0 to $a3.
+ masm.subFromStackPtr(Imm32(4 * sizeof(intptr_t)));
+# endif
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+# ifdef USES_O32_ABI
+ masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));
+# endif
+
+ masm.branchIfFalseBool(ReturnReg, throwLabel);
+
+    // This will restore the stack to the address before the call.
+ masm.moveToStackPtr(s0);
+ masm.PopRegsInMask(AllRegsExceptSP);
+
+    // Pop resumePC into PC. Clobber HeapReg to make the jump, then restore it
+    // in the jump delay slot.
+ masm.loadPtr(Address(StackPointer, 0), HeapReg);
+    // Reclaim the reserved space.
+ masm.addToStackPtr(Imm32(2 * sizeof(intptr_t)));
+ masm.as_jr(HeapReg);
+ masm.loadPtr(Address(StackPointer, -sizeof(intptr_t)), HeapReg);
+#elif defined(JS_CODEGEN_ARM)
+ masm.setFramePushed(0); // set to zero so we can use masm.framePushed() below
+
+    // Save all GPRs except the stack pointer.
+ masm.PushRegsInMask(LiveRegisterSet(
+ GeneralRegisterSet(Registers::AllMask & ~(1<<Registers::sp)),
+ FloatRegisterSet(uint32_t(0))));
+
+ // Save both the APSR and FPSCR in non-volatile registers.
+ masm.as_mrs(r4);
+ masm.as_vmrs(r5);
+ // Save the stack pointer in a non-volatile register.
+ masm.mov(sp,r6);
+ // Align the stack.
+ masm.as_bic(sp, sp, Imm8(7));
+
+ // Store resumePC into the return PC stack slot.
+ masm.loadWasmActivationFromSymbolicAddress(IntArgReg0);
+ masm.loadPtr(Address(IntArgReg0, WasmActivation::offsetOfResumePC()), IntArgReg1);
+ masm.storePtr(IntArgReg1, Address(r6, 14 * sizeof(uint32_t*)));
+
+ // Save all FP registers
+ static_assert(!SupportsSimd, "high lanes of SIMD registers need to be saved too.");
+ masm.PushRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask)));
+
+ masm.assertStackAlignment(ABIStackAlignment);
+ masm.call(SymbolicAddress::HandleExecutionInterrupt);
+
+ masm.branchIfFalseBool(ReturnReg, throwLabel);
+
+    // Restore the machine state to before the interrupt. This will set the PC!
+
+ // Restore all FP registers
+ masm.PopRegsInMask(LiveRegisterSet(GeneralRegisterSet(0),
+ FloatRegisterSet(FloatRegisters::AllDoubleMask)));
+ masm.mov(r6,sp);
+ masm.as_vmsr(r5);
+ masm.as_msr(r4);
+ // Restore all GP registers
+ masm.startDataTransferM(IsLoad, sp, IA, WriteBack);
+ masm.transferReg(r0);
+ masm.transferReg(r1);
+ masm.transferReg(r2);
+ masm.transferReg(r3);
+ masm.transferReg(r4);
+ masm.transferReg(r5);
+ masm.transferReg(r6);
+ masm.transferReg(r7);
+ masm.transferReg(r8);
+ masm.transferReg(r9);
+ masm.transferReg(r10);
+ masm.transferReg(r11);
+ masm.transferReg(r12);
+ masm.transferReg(lr);
+ masm.finishDataTransfer();
+ masm.ret();
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_CRASH();
+#elif defined (JS_CODEGEN_NONE)
+ MOZ_CRASH();
+#else
+# error "Unknown architecture!"
+#endif
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
+
+// Generate a stub that restores the stack pointer to what it was on entry to
+// the wasm activation, sets the return register to 'false' and then executes a
+// return which will return from this wasm activation to the caller. This stub
+// should only be called after the caller has reported an error (or, in the case
+// of the interrupt stub, intends to interrupt execution).
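+// The stub binds `throwLabel` at its start, so every stub above that jumps to
+// that label funnels through this code.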
+Offsets
+wasm::GenerateThrowStub(MacroAssembler& masm, Label* throwLabel)
+{
+ masm.haltingAlign(CodeAlignment);
+
+ masm.bind(throwLabel);
+
+ Offsets offsets;
+ offsets.begin = masm.currentOffset();
+
+ // We are about to pop all frames in this WasmActivation. Set fp to null to
+ // maintain the invariant that fp is either null or pointing to a valid
+ // frame.
+ Register scratch = ABINonArgReturnReg0;
+ masm.loadWasmActivationFromSymbolicAddress(scratch);
+ masm.storePtr(ImmWord(0), Address(scratch, WasmActivation::offsetOfFP()));
+
+ masm.setFramePushed(FramePushedForEntrySP);
+ masm.loadStackPtr(Address(scratch, WasmActivation::offsetOfEntrySP()));
+ masm.Pop(scratch);
+ masm.PopRegsInMask(NonVolatileRegs);
+ MOZ_ASSERT(masm.framePushed() == 0);
+
+ masm.mov(ImmWord(0), ReturnReg);
+ masm.ret();
+
+ offsets.end = masm.currentOffset();
+ return offsets;
+}
diff --git a/js/src/wasm/WasmStubs.h b/js/src/wasm/WasmStubs.h
new file mode 100644
index 0000000000..d644aa83d9
--- /dev/null
+++ b/js/src/wasm/WasmStubs.h
@@ -0,0 +1,64 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_stubs_h
+#define wasm_stubs_h
+
+#include "wasm/WasmTypes.h"
+
+namespace js {
+
+namespace jit { class MacroAssembler; class Label; }
+
+namespace wasm {
+
+class FuncExport;
+class FuncImport;
+
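+// The functions below generate the assembly stubs that bridge between wasm
+// code and its embedding: entries (JS -> wasm), import exits (wasm -> JS/C++),
+// and the trap, out-of-bounds, unaligned-access, interrupt and throw exits
+// used for error handling.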
+extern Offsets
+GenerateEntry(jit::MacroAssembler& masm, const FuncExport& fe);
+
+extern FuncOffsets
+GenerateImportFunction(jit::MacroAssembler& masm, const FuncImport& fi, SigIdDesc sigId);
+
+extern ProfilingOffsets
+GenerateImportInterpExit(jit::MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
+ jit::Label* throwLabel);
+
+extern ProfilingOffsets
+GenerateImportJitExit(jit::MacroAssembler& masm, const FuncImport& fi, jit::Label* throwLabel);
+
+extern ProfilingOffsets
+GenerateTrapExit(jit::MacroAssembler& masm, Trap trap, jit::Label* throwLabel);
+
+extern Offsets
+GenerateOutOfBoundsExit(jit::MacroAssembler& masm, jit::Label* throwLabel);
+
+extern Offsets
+GenerateUnalignedExit(jit::MacroAssembler& masm, jit::Label* throwLabel);
+
+extern Offsets
+GenerateInterruptExit(jit::MacroAssembler& masm, jit::Label* throwLabel);
+
+extern Offsets
+GenerateThrowStub(jit::MacroAssembler& masm, jit::Label* throwLabel);
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_stubs_h
diff --git a/js/src/wasm/WasmTable.cpp b/js/src/wasm/WasmTable.cpp
new file mode 100644
index 0000000000..ed0ad44583
--- /dev/null
+++ b/js/src/wasm/WasmTable.cpp
@@ -0,0 +1,211 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTable.h"
+
+#include "mozilla/CheckedInt.h"
+
+#include "jscntxt.h"
+
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmJS.h"
+
+using namespace js;
+using namespace js::wasm;
+using mozilla::CheckedInt;
+
+Table::Table(JSContext* cx, const TableDesc& desc, HandleWasmTableObject maybeObject,
+ UniqueByteArray array)
+ : maybeObject_(maybeObject),
+ observers_(cx->zone(), InstanceSet()),
+ array_(Move(array)),
+ kind_(desc.kind),
+ length_(desc.limits.initial),
+ maximum_(desc.limits.maximum),
+ external_(desc.external)
+{}
+
+/* static */ SharedTable
+Table::create(JSContext* cx, const TableDesc& desc, HandleWasmTableObject maybeObject)
+{
+ // The raw element type of a Table depends on whether it is external: an
+ // external table can contain functions from multiple instances and thus
+ // must store an additional instance pointer in each element.
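+    // Concretely: an internal table stores bare code pointers (void*), while
+    // an external table stores ExternalTableElem entries that pair the code
+    // pointer with the owning instance's tls pointer.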
+ UniqueByteArray array;
+ if (desc.external)
+ array.reset((uint8_t*)cx->pod_calloc<ExternalTableElem>(desc.limits.initial));
+ else
+ array.reset((uint8_t*)cx->pod_calloc<void*>(desc.limits.initial));
+ if (!array)
+ return nullptr;
+
+ return SharedTable(cx->new_<Table>(cx, desc, maybeObject, Move(array)));
+}
+
+void
+Table::tracePrivate(JSTracer* trc)
+{
+ // If this table has a WasmTableObject, then this method is only called by
+ // WasmTableObject's trace hook so maybeObject_ must already be marked.
+ // TraceEdge is called so that the pointer can be updated during a moving
+ // GC. TraceWeakEdge may sound better, but it is less efficient given that
+ // we know object_ is already marked.
+ if (maybeObject_) {
+ MOZ_ASSERT(!gc::IsAboutToBeFinalized(&maybeObject_));
+ TraceEdge(trc, &maybeObject_, "wasm table object");
+ }
+
+ if (external_) {
+ ExternalTableElem* array = externalArray();
+ for (uint32_t i = 0; i < length_; i++) {
+ if (array[i].tls)
+ array[i].tls->instance->trace(trc);
+ else
+ MOZ_ASSERT(!array[i].code);
+ }
+ }
+}
+
+void
+Table::trace(JSTracer* trc)
+{
+ // The trace hook of WasmTableObject will call Table::tracePrivate at
+ // which point we can mark the rest of the children. If there is no
+ // WasmTableObject, call Table::tracePrivate directly. Redirecting through
+ // the WasmTableObject avoids marking the entire Table on each incoming
+ // edge (once per dependent Instance).
+ if (maybeObject_)
+ TraceEdge(trc, &maybeObject_, "wasm table object");
+ else
+ tracePrivate(trc);
+}
+
+void**
+Table::internalArray() const
+{
+ MOZ_ASSERT(!external_);
+ return (void**)array_.get();
+}
+
+ExternalTableElem*
+Table::externalArray() const
+{
+ MOZ_ASSERT(external_);
+ return (ExternalTableElem*)array_.get();
+}
+
+void
+Table::set(uint32_t index, void* code, Instance& instance)
+{
+ if (external_) {
+ ExternalTableElem& elem = externalArray()[index];
+ if (elem.tls)
+ JSObject::writeBarrierPre(elem.tls->instance->objectUnbarriered());
+
+ elem.code = code;
+ elem.tls = &instance.tlsData();
+
+ MOZ_ASSERT(elem.tls->instance->objectUnbarriered()->isTenured(), "no writeBarrierPost");
+ } else {
+ internalArray()[index] = code;
+ }
+}
+
+void
+Table::setNull(uint32_t index)
+{
+ // Only external tables can set elements to null after initialization.
+ ExternalTableElem& elem = externalArray()[index];
+ if (elem.tls)
+ JSObject::writeBarrierPre(elem.tls->instance->objectUnbarriered());
+
+ elem.code = nullptr;
+ elem.tls = nullptr;
+}
+
+uint32_t
+Table::grow(uint32_t delta, JSContext* cx)
+{
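+    // Returns uint32_t(-1) on failure and the previous length on success.
+    // Note that only external tables are grown here: the realloc below goes
+    // through externalArray(), which asserts external_.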
+ // This isn't just an optimization: movingGrowable() assumes that
+ // onMovingGrowTable does not fire when length == maximum.
+ if (!delta)
+ return length_;
+
+ uint32_t oldLength = length_;
+
+ CheckedInt<uint32_t> newLength = oldLength;
+ newLength += delta;
+ if (!newLength.isValid())
+ return -1;
+
+ if (maximum_ && newLength.value() > maximum_.value())
+ return -1;
+
+ MOZ_ASSERT(movingGrowable());
+
+ JSRuntime* rt = cx; // Use JSRuntime's MallocProvider to avoid throwing.
+
+ // Note that realloc does not release array_'s pointee (which is returned by
+ // externalArray()) on failure which is exactly what we need here.
+ ExternalTableElem* newArray = rt->pod_realloc(externalArray(), length_, newLength.value());
+ if (!newArray)
+ return -1;
+ Unused << array_.release();
+ array_.reset((uint8_t*)newArray);
+
+ // Realloc does not zero the delta for us.
+ PodZero(newArray + length_, delta);
+ length_ = newLength.value();
+
+ if (observers_.initialized()) {
+ for (InstanceSet::Range r = observers_.all(); !r.empty(); r.popFront())
+ r.front()->instance().onMovingGrowTable();
+ }
+
+ return oldLength;
+}
+
+bool
+Table::movingGrowable() const
+{
+ return !maximum_ || length_ < maximum_.value();
+}
+
+bool
+Table::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance)
+{
+ MOZ_ASSERT(movingGrowable());
+
+ if (!observers_.initialized() && !observers_.init()) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ if (!observers_.putNew(instance)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+
+ return true;
+}
+
+size_t
+Table::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return mallocSizeOf(array_.get());
+}
diff --git a/js/src/wasm/WasmTable.h b/js/src/wasm/WasmTable.h
new file mode 100644
index 0000000000..22c01411ad
--- /dev/null
+++ b/js/src/wasm/WasmTable.h
@@ -0,0 +1,89 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_table_h
+#define wasm_table_h
+
+#include "gc/Policy.h"
+#include "wasm/WasmCode.h"
+
+namespace js {
+namespace wasm {
+
+// A Table is an indexable array of opaque values. Tables are first-class
+// stateful objects exposed to WebAssembly. asm.js also uses Tables to represent
+// its homogeneous function-pointer tables.
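+// A Table is either "external", in which case each element is an
+// ExternalTableElem carrying both a code pointer and the owning instance's
+// tls pointer, or internal, in which case each element is a bare code pointer.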
+
+class Table : public ShareableBase<Table>
+{
+ using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject,
+ MovableCellHasher<ReadBarrieredWasmInstanceObject>,
+ SystemAllocPolicy>;
+ typedef UniquePtr<uint8_t[], JS::FreePolicy> UniqueByteArray;
+
+ ReadBarrieredWasmTableObject maybeObject_;
+ JS::WeakCache<InstanceSet> observers_;
+ UniqueByteArray array_;
+ const TableKind kind_;
+ uint32_t length_;
+ const Maybe<uint32_t> maximum_;
+ const bool external_;
+
+ template <class> friend struct js::MallocProvider;
+ Table(JSContext* cx, const TableDesc& td, HandleWasmTableObject maybeObject,
+ UniqueByteArray array);
+
+ void tracePrivate(JSTracer* trc);
+ friend class js::WasmTableObject;
+
+ public:
+ static RefPtr<Table> create(JSContext* cx, const TableDesc& desc,
+ HandleWasmTableObject maybeObject);
+ void trace(JSTracer* trc);
+
+ bool external() const { return external_; }
+ bool isTypedFunction() const { return kind_ == TableKind::TypedFunction; }
+ uint32_t length() const { return length_; }
+ Maybe<uint32_t> maximum() const { return maximum_; }
+ uint8_t* base() const { return array_.get(); }
+
+ // All updates must go through a set() function with the exception of
+ // (profiling) updates to the callee pointer that do not change which
+ // logical function is being called.
+
+ void** internalArray() const;
+ ExternalTableElem* externalArray() const;
+ void set(uint32_t index, void* code, Instance& instance);
+ void setNull(uint32_t index);
+
+ uint32_t grow(uint32_t delta, JSContext* cx);
+ bool movingGrowable() const;
+ bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
+
+ // about:memory reporting:
+
+ size_t sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const;
+};
+
+typedef RefPtr<Table> SharedTable;
+typedef Vector<SharedTable, 0, SystemAllocPolicy> SharedTableVector;
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_table_h
diff --git a/js/src/wasm/WasmTextToBinary.cpp b/js/src/wasm/WasmTextToBinary.cpp
new file mode 100644
index 0000000000..e61a826503
--- /dev/null
+++ b/js/src/wasm/WasmTextToBinary.cpp
@@ -0,0 +1,4843 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTextToBinary.h"
+
+#include "mozilla/CheckedInt.h"
+#include "mozilla/MathAlgorithms.h"
+#include "mozilla/Maybe.h"
+
+#include "jsdtoa.h"
+#include "jsnum.h"
+#include "jsprf.h"
+#include "jsstr.h"
+
+#include "ds/LifoAlloc.h"
+#include "js/CharacterEncoding.h"
+#include "js/HashTable.h"
+#include "wasm/WasmAST.h"
+#include "wasm/WasmBinaryFormat.h"
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace js::wasm;
+
+using mozilla::BitwiseCast;
+using mozilla::CeilingLog2;
+using mozilla::CountLeadingZeroes32;
+using mozilla::CheckedInt;
+using mozilla::FloatingPoint;
+using mozilla::IsPowerOfTwo;
+using mozilla::Maybe;
+using mozilla::PositiveInfinity;
+using mozilla::SpecificNaN;
+
+/*****************************************************************************/
+// wasm text token stream
+
+namespace {
+
+class WasmToken
+{
+ public:
+ enum FloatLiteralKind
+ {
+ HexNumber,
+ DecNumber,
+ Infinity,
+ NaN
+ };
+
+ enum Kind
+ {
+ Align,
+ AnyFunc,
+ BinaryOpcode,
+ Block,
+ Br,
+ BrIf,
+ BrTable,
+ Call,
+ CallIndirect,
+ CloseParen,
+ ComparisonOpcode,
+ Const,
+ ConversionOpcode,
+ CurrentMemory,
+ Data,
+ Drop,
+ Elem,
+ Else,
+ End,
+ EndOfFile,
+ Equal,
+ Error,
+ Export,
+ Float,
+ Func,
+ GetGlobal,
+ GetLocal,
+ Global,
+ GrowMemory,
+ If,
+ Import,
+ Index,
+ Memory,
+ NegativeZero,
+ Load,
+ Local,
+ Loop,
+ Module,
+ Mutable,
+ Name,
+ Nop,
+ Offset,
+ OpenParen,
+ Param,
+ Result,
+ Return,
+ SetGlobal,
+ SetLocal,
+ SignedInteger,
+ Start,
+ Store,
+ Table,
+ TeeLocal,
+ TernaryOpcode,
+ Text,
+ Then,
+ Type,
+ UnaryOpcode,
+ Unreachable,
+ UnsignedInteger,
+ ValueType
+ };
+ private:
+ Kind kind_;
+ const char16_t* begin_;
+ const char16_t* end_;
+ union {
+ uint32_t index_;
+ uint64_t uint_;
+ int64_t sint_;
+ FloatLiteralKind floatLiteralKind_;
+ ValType valueType_;
+ Op op_;
+ } u;
+ public:
+ WasmToken()
+ : kind_(Kind(-1)),
+ begin_(nullptr),
+ end_(nullptr),
+ u()
+ { }
+ WasmToken(Kind kind, const char16_t* begin, const char16_t* end)
+ : kind_(kind),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(kind_ != Error);
+ MOZ_ASSERT((kind == EndOfFile) == (begin == end));
+ }
+ explicit WasmToken(uint32_t index, const char16_t* begin, const char16_t* end)
+ : kind_(Index),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ u.index_ = index;
+ }
+ explicit WasmToken(uint64_t uint, const char16_t* begin, const char16_t* end)
+ : kind_(UnsignedInteger),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ u.uint_ = uint;
+ }
+ explicit WasmToken(int64_t sint, const char16_t* begin, const char16_t* end)
+ : kind_(SignedInteger),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ u.sint_ = sint;
+ }
+ explicit WasmToken(FloatLiteralKind floatLiteralKind,
+ const char16_t* begin, const char16_t* end)
+ : kind_(Float),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ u.floatLiteralKind_ = floatLiteralKind;
+ }
+ explicit WasmToken(Kind kind, ValType valueType, const char16_t* begin, const char16_t* end)
+ : kind_(kind),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ MOZ_ASSERT(kind_ == ValueType || kind_ == Const);
+ u.valueType_ = valueType;
+ }
+ explicit WasmToken(Kind kind, Op op, const char16_t* begin, const char16_t* end)
+ : kind_(kind),
+ begin_(begin),
+ end_(end)
+ {
+ MOZ_ASSERT(begin != end);
+ MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
+ kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
+ kind_ == Load || kind_ == Store);
+ u.op_ = op;
+ }
+ explicit WasmToken(const char16_t* begin)
+ : kind_(Error),
+ begin_(begin),
+ end_(begin)
+ {}
+ Kind kind() const {
+ MOZ_ASSERT(kind_ != Kind(-1));
+ return kind_;
+ }
+ const char16_t* begin() const {
+ return begin_;
+ }
+ const char16_t* end() const {
+ return end_;
+ }
+ AstName text() const {
+ MOZ_ASSERT(kind_ == Text);
+ MOZ_ASSERT(begin_[0] == '"');
+ MOZ_ASSERT(end_[-1] == '"');
+ MOZ_ASSERT(end_ - begin_ >= 2);
+ return AstName(begin_ + 1, end_ - begin_ - 2);
+ }
+ AstName name() const {
+ return AstName(begin_, end_ - begin_);
+ }
+ uint32_t index() const {
+ MOZ_ASSERT(kind_ == Index);
+ return u.index_;
+ }
+ uint64_t uint() const {
+ MOZ_ASSERT(kind_ == UnsignedInteger);
+ return u.uint_;
+ }
+ int64_t sint() const {
+ MOZ_ASSERT(kind_ == SignedInteger);
+ return u.sint_;
+ }
+ FloatLiteralKind floatLiteralKind() const {
+ MOZ_ASSERT(kind_ == Float);
+ return u.floatLiteralKind_;
+ }
+ ValType valueType() const {
+ MOZ_ASSERT(kind_ == ValueType || kind_ == Const);
+ return u.valueType_;
+ }
+ Op op() const {
+ MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
+ kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
+ kind_ == Load || kind_ == Store);
+ return u.op_;
+ }
+ bool isOpcode() const {
+ switch (kind_) {
+ case BinaryOpcode:
+ case Block:
+ case Br:
+ case BrIf:
+ case BrTable:
+ case Call:
+ case CallIndirect:
+ case ComparisonOpcode:
+ case Const:
+ case ConversionOpcode:
+ case CurrentMemory:
+ case Drop:
+ case GetGlobal:
+ case GetLocal:
+ case GrowMemory:
+ case If:
+ case Load:
+ case Loop:
+ case Nop:
+ case Return:
+ case SetGlobal:
+ case SetLocal:
+ case Store:
+ case TeeLocal:
+ case TernaryOpcode:
+ case UnaryOpcode:
+ case Unreachable:
+ return true;
+ case Align:
+ case AnyFunc:
+ case CloseParen:
+ case Data:
+ case Elem:
+ case Else:
+ case EndOfFile:
+ case Equal:
+ case End:
+ case Error:
+ case Export:
+ case Float:
+ case Func:
+ case Global:
+ case Mutable:
+ case Import:
+ case Index:
+ case Memory:
+ case NegativeZero:
+ case Local:
+ case Module:
+ case Name:
+ case Offset:
+ case OpenParen:
+ case Param:
+ case Result:
+ case SignedInteger:
+ case Start:
+ case Table:
+ case Text:
+ case Then:
+ case Type:
+ case UnsignedInteger:
+ case ValueType:
+ return false;
+ }
+ MOZ_CRASH("unexpected token kind");
+ }
+};
+
+struct InlineImport
+{
+ WasmToken module;
+ WasmToken field;
+};
+
+} // end anonymous namespace
+
+static bool
+IsWasmNewLine(char16_t c)
+{
+ return c == '\n';
+}
+
+static bool
+IsWasmSpace(char16_t c)
+{
+ switch (c) {
+ case ' ':
+ case '\n':
+ case '\r':
+ case '\t':
+ case '\v':
+ case '\f':
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool
+IsWasmDigit(char16_t c)
+{
+ return c >= '0' && c <= '9';
+}
+
+static bool
+IsWasmLetter(char16_t c)
+{
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static bool
+IsNameAfterDollar(char16_t c)
+{
+ return IsWasmLetter(c) || IsWasmDigit(c) || c == '_' || c == '$' || c == '-' || c == '.';
+}
+
+static bool
+IsHexDigit(char c, uint8_t* value)
+{
+ if (c >= '0' && c <= '9') {
+ *value = c - '0';
+ return true;
+ }
+
+ if (c >= 'a' && c <= 'f') {
+ *value = 10 + (c - 'a');
+ return true;
+ }
+
+ if (c >= 'A' && c <= 'F') {
+ *value = 10 + (c - 'A');
+ return true;
+ }
+
+ return false;
+}
+
+static WasmToken
+LexHexFloatLiteral(const char16_t* begin, const char16_t* end, const char16_t** curp)
+{
+ const char16_t* cur = begin;
+
+ if (cur != end && (*cur == '-' || *cur == '+'))
+ cur++;
+
+ MOZ_ASSERT(cur != end && *cur == '0');
+ cur++;
+ MOZ_ASSERT(cur != end && *cur == 'x');
+ cur++;
+
+ uint8_t digit;
+ while (cur != end && IsHexDigit(*cur, &digit))
+ cur++;
+
+ if (cur != end && *cur == '.')
+ cur++;
+
+ while (cur != end && IsHexDigit(*cur, &digit))
+ cur++;
+
+ if (cur != end && *cur == 'p') {
+ cur++;
+
+ if (cur != end && (*cur == '-' || *cur == '+'))
+ cur++;
+
+ while (cur != end && IsWasmDigit(*cur))
+ cur++;
+ }
+
+ *curp = cur;
+ return WasmToken(WasmToken::HexNumber, begin, cur);
+}
+
+static WasmToken
+LexDecFloatLiteral(const char16_t* begin, const char16_t* end, const char16_t** curp)
+{
+ const char16_t* cur = begin;
+
+ if (cur != end && (*cur == '-' || *cur == '+'))
+ cur++;
+
+ while (cur != end && IsWasmDigit(*cur))
+ cur++;
+
+ if (cur != end && *cur == '.')
+ cur++;
+
+ while (cur != end && IsWasmDigit(*cur))
+ cur++;
+
+ if (cur != end && *cur == 'e') {
+ cur++;
+
+ if (cur != end && (*cur == '-' || *cur == '+'))
+ cur++;
+
+ while (cur != end && IsWasmDigit(*cur))
+ cur++;
+ }
+
+ *curp = cur;
+ return WasmToken(WasmToken::DecNumber, begin, cur);
+}
+
+static bool
+ConsumeTextByte(const char16_t** curp, const char16_t* end, uint8_t* byte = nullptr)
+{
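+    // Consume one byte of a quoted string, decoding the escapes \n, \t, \\,
+    // \", \' and two-hex-digit byte escapes; returns false on a malformed
+    // escape sequence.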
+ const char16_t*& cur = *curp;
+ MOZ_ASSERT(cur != end);
+
+ if (*cur != '\\') {
+ if (byte)
+ *byte = *cur;
+ cur++;
+ return true;
+ }
+
+ if (++cur == end)
+ return false;
+
+ uint8_t u8;
+ switch (*cur) {
+ case 'n': u8 = '\n'; break;
+ case 't': u8 = '\t'; break;
+ case '\\': u8 = '\\'; break;
+ case '\"': u8 = '\"'; break;
+ case '\'': u8 = '\''; break;
+ default: {
+ uint8_t highNibble;
+ if (!IsHexDigit(*cur, &highNibble))
+ return false;
+
+ if (++cur == end)
+ return false;
+
+ uint8_t lowNibble;
+ if (!IsHexDigit(*cur, &lowNibble))
+ return false;
+
+ u8 = lowNibble | (highNibble << 4);
+ break;
+ }
+ }
+
+ if (byte)
+ *byte = u8;
+ cur++;
+ return true;
+}
+
+namespace {
+
+class WasmTokenStream
+{
+ static const uint32_t LookaheadSize = 2;
+
+ const char16_t* cur_;
+ const char16_t* const end_;
+ const char16_t* lineStart_;
+ unsigned line_;
+ uint32_t lookaheadIndex_;
+ uint32_t lookaheadDepth_;
+ WasmToken lookahead_[LookaheadSize];
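+    // lookahead_ acts as a two-entry ring buffer: peek() fills the current
+    // slot, get() consumes it, and unget() pushes a token back by flipping
+    // lookaheadIndex_ (see the static_asserts in get() and unget()).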
+
+ bool consume(const char16_t* match) {
+ const char16_t* p = cur_;
+ for (; *match; p++, match++) {
+ if (p == end_ || *p != *match)
+ return false;
+ }
+ cur_ = p;
+ return true;
+ }
+ WasmToken fail(const char16_t* begin) const {
+ return WasmToken(begin);
+ }
+
+ WasmToken nan(const char16_t* begin);
+ WasmToken literal(const char16_t* begin);
+ WasmToken next();
+ void skipSpaces();
+
+ public:
+ WasmTokenStream(const char16_t* text, UniqueChars* error)
+ : cur_(text),
+ end_(text + js_strlen(text)),
+ lineStart_(text),
+ line_(1),
+ lookaheadIndex_(0),
+ lookaheadDepth_(0)
+ {}
+ void generateError(WasmToken token, UniqueChars* error) {
+ unsigned column = token.begin() - lineStart_ + 1;
+ error->reset(JS_smprintf("parsing wasm text at %u:%u", line_, column));
+ }
+ void generateError(WasmToken token, const char* msg, UniqueChars* error) {
+ unsigned column = token.begin() - lineStart_ + 1;
+ error->reset(JS_smprintf("parsing wasm text at %u:%u: %s", line_, column, msg));
+ }
+ WasmToken peek() {
+ if (!lookaheadDepth_) {
+ lookahead_[lookaheadIndex_] = next();
+ lookaheadDepth_ = 1;
+ }
+ return lookahead_[lookaheadIndex_];
+ }
+ WasmToken get() {
+ static_assert(LookaheadSize == 2, "can just flip");
+ if (lookaheadDepth_) {
+ lookaheadDepth_--;
+ WasmToken ret = lookahead_[lookaheadIndex_];
+ lookaheadIndex_ ^= 1;
+ return ret;
+ }
+ return next();
+ }
+ void unget(WasmToken token) {
+ static_assert(LookaheadSize == 2, "can just flip");
+ lookaheadDepth_++;
+ lookaheadIndex_ ^= 1;
+ lookahead_[lookaheadIndex_] = token;
+ }
+
+ // Helpers:
+ bool getIf(WasmToken::Kind kind, WasmToken* token) {
+ if (peek().kind() == kind) {
+ *token = get();
+ return true;
+ }
+ return false;
+ }
+ bool getIf(WasmToken::Kind kind) {
+ WasmToken token;
+ if (getIf(kind, &token))
+ return true;
+ return false;
+ }
+ AstName getIfName() {
+ WasmToken token;
+ if (getIf(WasmToken::Name, &token))
+ return token.name();
+ return AstName();
+ }
+ AstName getIfText() {
+ WasmToken token;
+ if (getIf(WasmToken::Text, &token))
+ return token.text();
+ return AstName();
+ }
+ bool getIfRef(AstRef* ref) {
+ WasmToken token = peek();
+ if (token.kind() == WasmToken::Name || token.kind() == WasmToken::Index)
+ return matchRef(ref, nullptr);
+ return false;
+ }
+ bool getIfOpcode(WasmToken* token) {
+ *token = peek();
+ if (token->isOpcode()) {
+ (void)get();
+ return true;
+ }
+ return false;
+ }
+ bool match(WasmToken::Kind expect, WasmToken* token, UniqueChars* error) {
+ *token = get();
+ if (token->kind() == expect)
+ return true;
+ generateError(*token, error);
+ return false;
+ }
+ bool match(WasmToken::Kind expect, UniqueChars* error) {
+ WasmToken token;
+ return match(expect, &token, error);
+ }
+ bool matchRef(AstRef* ref, UniqueChars* error) {
+ WasmToken token = get();
+ switch (token.kind()) {
+ case WasmToken::Name:
+ *ref = AstRef(token.name());
+ break;
+ case WasmToken::Index:
+ *ref = AstRef(token.index());
+ break;
+ default:
+ generateError(token, error);
+ return false;
+ }
+ return true;
+ }
+};
+
+} // end anonymous namespace
+
+WasmToken
+WasmTokenStream::nan(const char16_t* begin)
+{
+ if (consume(u":")) {
+ if (!consume(u"0x"))
+ return fail(begin);
+
+ uint8_t digit;
+ while (cur_ != end_ && IsHexDigit(*cur_, &digit))
+ cur_++;
+ }
+
+ return WasmToken(WasmToken::NaN, begin, cur_);
+}
+
+WasmToken
+WasmTokenStream::literal(const char16_t* begin)
+{
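+    // Accumulate an unsigned magnitude in `u`. If the literal contains a
+    // fraction or exponent, or its magnitude overflows what an integer token
+    // can hold (uint64 for positive literals, |INT64_MIN| for negative ones),
+    // re-lex it as a float literal instead.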
+ CheckedInt<uint64_t> u = 0;
+ if (consume(u"0x")) {
+ if (cur_ == end_)
+ return fail(begin);
+
+ do {
+ if (*cur_ == '.' || *cur_ == 'p')
+ return LexHexFloatLiteral(begin, end_, &cur_);
+
+ uint8_t digit;
+ if (!IsHexDigit(*cur_, &digit))
+ break;
+
+ u *= 16;
+ u += digit;
+ if (!u.isValid())
+ return LexHexFloatLiteral(begin, end_, &cur_);
+
+ cur_++;
+ } while (cur_ != end_);
+
+ if (*begin == '-') {
+ uint64_t value = u.value();
+ if (value == 0)
+ return WasmToken(WasmToken::NegativeZero, begin, cur_);
+ if (value > uint64_t(INT64_MIN))
+ return LexHexFloatLiteral(begin, end_, &cur_);
+
+ value = -value;
+ return WasmToken(int64_t(value), begin, cur_);
+ }
+ } else {
+ while (cur_ != end_) {
+ if (*cur_ == '.' || *cur_ == 'e')
+ return LexDecFloatLiteral(begin, end_, &cur_);
+
+ if (!IsWasmDigit(*cur_))
+ break;
+
+ u *= 10;
+ u += *cur_ - '0';
+ if (!u.isValid())
+ return LexDecFloatLiteral(begin, end_, &cur_);
+
+ cur_++;
+ }
+
+ if (*begin == '-') {
+ uint64_t value = u.value();
+ if (value == 0)
+ return WasmToken(WasmToken::NegativeZero, begin, cur_);
+ if (value > uint64_t(INT64_MIN))
+ return LexDecFloatLiteral(begin, end_, &cur_);
+
+ value = -value;
+ return WasmToken(int64_t(value), begin, cur_);
+ }
+ }
+
+ CheckedInt<uint32_t> index = u.value();
+ if (index.isValid())
+ return WasmToken(index.value(), begin, cur_);
+
+ return WasmToken(u.value(), begin, cur_);
+}
+
+void
+WasmTokenStream::skipSpaces()
+{
+ while (cur_ != end_) {
+ char16_t ch = *cur_;
+ if (ch == ';' && consume(u";;")) {
+            // Skip a single-line comment.
+ while (cur_ != end_ && !IsWasmNewLine(*cur_))
+ cur_++;
+ } else if (ch == '(' && consume(u"(;")) {
+            // Skip a multi-line and possibly nested comment.
+ size_t level = 1;
+ while (cur_ != end_) {
+ char16_t ch = *cur_;
+ if (ch == '(' && consume(u"(;")) {
+ level++;
+ } else if (ch == ';' && consume(u";)")) {
+ if (--level == 0)
+ break;
+ } else {
+ cur_++;
+ if (IsWasmNewLine(ch)) {
+ lineStart_ = cur_;
+ line_++;
+ }
+ }
+ }
+ } else if (IsWasmSpace(ch)) {
+ cur_++;
+ if (IsWasmNewLine(ch)) {
+ lineStart_ = cur_;
+ line_++;
+ }
+ } else
+ break; // non-whitespace found
+ }
+}
+
+WasmToken
+WasmTokenStream::next()
+{
+ skipSpaces();
+
+ if (cur_ == end_)
+ return WasmToken(WasmToken::EndOfFile, cur_, cur_);
+
+ const char16_t* begin = cur_;
+ switch (*begin) {
+ case '"':
+ cur_++;
+ while (true) {
+ if (cur_ == end_)
+ return fail(begin);
+ if (*cur_ == '"')
+ break;
+ if (!ConsumeTextByte(&cur_, end_))
+ return fail(begin);
+ }
+ cur_++;
+ return WasmToken(WasmToken::Text, begin, cur_);
+
+ case '$':
+ cur_++;
+ while (cur_ != end_ && IsNameAfterDollar(*cur_))
+ cur_++;
+ return WasmToken(WasmToken::Name, begin, cur_);
+
+ case '(':
+ cur_++;
+ return WasmToken(WasmToken::OpenParen, begin, cur_);
+
+ case ')':
+ cur_++;
+ return WasmToken(WasmToken::CloseParen, begin, cur_);
+
+ case '=':
+ cur_++;
+ return WasmToken(WasmToken::Equal, begin, cur_);
+
+ case '+': case '-':
+ cur_++;
+ if (consume(u"infinity"))
+ return WasmToken(WasmToken::Infinity, begin, cur_);
+ if (consume(u"nan"))
+ return nan(begin);
+ if (!IsWasmDigit(*cur_))
+ break;
+ MOZ_FALLTHROUGH;
+ case '0': case '1': case '2': case '3': case '4':
+ case '5': case '6': case '7': case '8': case '9':
+ return literal(begin);
+
+ case 'a':
+ if (consume(u"align"))
+ return WasmToken(WasmToken::Align, begin, cur_);
+ if (consume(u"anyfunc"))
+ return WasmToken(WasmToken::AnyFunc, begin, cur_);
+ break;
+
+ case 'b':
+ if (consume(u"block"))
+ return WasmToken(WasmToken::Block, begin, cur_);
+ if (consume(u"br")) {
+ if (consume(u"_table"))
+ return WasmToken(WasmToken::BrTable, begin, cur_);
+ if (consume(u"_if"))
+ return WasmToken(WasmToken::BrIf, begin, cur_);
+ return WasmToken(WasmToken::Br, begin, cur_);
+ }
+ break;
+
+ case 'c':
+ if (consume(u"call")) {
+ if (consume(u"_indirect"))
+ return WasmToken(WasmToken::CallIndirect, begin, cur_);
+ return WasmToken(WasmToken::Call, begin, cur_);
+ }
+ if (consume(u"current_memory"))
+ return WasmToken(WasmToken::CurrentMemory, begin, cur_);
+ break;
+
+ case 'd':
+ if (consume(u"data"))
+ return WasmToken(WasmToken::Data, begin, cur_);
+ if (consume(u"drop"))
+ return WasmToken(WasmToken::Drop, begin, cur_);
+ break;
+
+ case 'e':
+ if (consume(u"elem"))
+ return WasmToken(WasmToken::Elem, begin, cur_);
+ if (consume(u"else"))
+ return WasmToken(WasmToken::Else, begin, cur_);
+ if (consume(u"end"))
+ return WasmToken(WasmToken::End, begin, cur_);
+ if (consume(u"export"))
+ return WasmToken(WasmToken::Export, begin, cur_);
+ break;
+
+ case 'f':
+ if (consume(u"func"))
+ return WasmToken(WasmToken::Func, begin, cur_);
+
+ if (consume(u"f32")) {
+ if (!consume(u"."))
+ return WasmToken(WasmToken::ValueType, ValType::F32, begin, cur_);
+
+ switch (*cur_) {
+ case 'a':
+ if (consume(u"abs"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Abs, begin, cur_);
+ if (consume(u"add"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Add, begin, cur_);
+ break;
+ case 'c':
+ if (consume(u"ceil"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Ceil, begin, cur_);
+ if (consume(u"const"))
+ return WasmToken(WasmToken::Const, ValType::F32, begin, cur_);
+ if (consume(u"convert_s/i32")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32ConvertSI32,
+ begin, cur_);
+ }
+ if (consume(u"convert_u/i32")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32ConvertUI32,
+ begin, cur_);
+ }
+ if (consume(u"convert_s/i64")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32ConvertSI64,
+ begin, cur_);
+ }
+ if (consume(u"convert_u/i64")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32ConvertUI64,
+ begin, cur_);
+ }
+ if (consume(u"copysign"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32CopySign, begin, cur_);
+ break;
+ case 'd':
+ if (consume(u"demote/f64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32DemoteF64,
+ begin, cur_);
+ if (consume(u"div"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Div, begin, cur_);
+ break;
+ case 'e':
+ if (consume(u"eq"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Eq, begin, cur_);
+ break;
+ case 'f':
+ if (consume(u"floor"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Floor, begin, cur_);
+ break;
+ case 'g':
+ if (consume(u"ge"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Ge, begin, cur_);
+ if (consume(u"gt"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Gt, begin, cur_);
+ break;
+ case 'l':
+ if (consume(u"le"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Le, begin, cur_);
+ if (consume(u"lt"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Lt, begin, cur_);
+ if (consume(u"load"))
+ return WasmToken(WasmToken::Load, Op::F32Load, begin, cur_);
+ break;
+ case 'm':
+ if (consume(u"max"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Max, begin, cur_);
+ if (consume(u"min"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Min, begin, cur_);
+ if (consume(u"mul"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Mul, begin, cur_);
+ break;
+ case 'n':
+ if (consume(u"nearest"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Nearest, begin, cur_);
+ if (consume(u"neg"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Neg, begin, cur_);
+ if (consume(u"ne"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F32Ne, begin, cur_);
+ break;
+ case 'r':
+ if (consume(u"reinterpret/i32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::F32ReinterpretI32,
+ begin, cur_);
+ break;
+ case 's':
+ if (consume(u"sqrt"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Sqrt, begin, cur_);
+ if (consume(u"sub"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F32Sub, begin, cur_);
+ if (consume(u"store"))
+ return WasmToken(WasmToken::Store, Op::F32Store, begin, cur_);
+ break;
+ case 't':
+ if (consume(u"trunc"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F32Trunc, begin, cur_);
+ break;
+ }
+ break;
+ }
+ if (consume(u"f64")) {
+ if (!consume(u"."))
+ return WasmToken(WasmToken::ValueType, ValType::F64, begin, cur_);
+
+ switch (*cur_) {
+ case 'a':
+ if (consume(u"abs"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Abs, begin, cur_);
+ if (consume(u"add"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Add, begin, cur_);
+ break;
+ case 'c':
+ if (consume(u"ceil"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Ceil, begin, cur_);
+ if (consume(u"const"))
+ return WasmToken(WasmToken::Const, ValType::F64, begin, cur_);
+ if (consume(u"convert_s/i32")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F64ConvertSI32,
+ begin, cur_);
+ }
+ if (consume(u"convert_u/i32")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F64ConvertUI32,
+ begin, cur_);
+ }
+ if (consume(u"convert_s/i64")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F64ConvertSI64,
+ begin, cur_);
+ }
+ if (consume(u"convert_u/i64")) {
+ return WasmToken(WasmToken::ConversionOpcode, Op::F64ConvertUI64,
+ begin, cur_);
+ }
+ if (consume(u"copysign"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64CopySign, begin, cur_);
+ break;
+ case 'd':
+ if (consume(u"div"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Div, begin, cur_);
+ break;
+ case 'e':
+ if (consume(u"eq"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Eq, begin, cur_);
+ break;
+ case 'f':
+ if (consume(u"floor"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Floor, begin, cur_);
+ break;
+ case 'g':
+ if (consume(u"ge"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Ge, begin, cur_);
+ if (consume(u"gt"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Gt, begin, cur_);
+ break;
+ case 'l':
+ if (consume(u"le"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Le, begin, cur_);
+ if (consume(u"lt"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Lt, begin, cur_);
+ if (consume(u"load"))
+ return WasmToken(WasmToken::Load, Op::F64Load, begin, cur_);
+ break;
+ case 'm':
+ if (consume(u"max"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Max, begin, cur_);
+ if (consume(u"min"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Min, begin, cur_);
+ if (consume(u"mul"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Mul, begin, cur_);
+ break;
+ case 'n':
+ if (consume(u"nearest"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Nearest, begin, cur_);
+ if (consume(u"neg"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Neg, begin, cur_);
+ if (consume(u"ne"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::F64Ne, begin, cur_);
+ break;
+ case 'p':
+ if (consume(u"promote/f32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::F64PromoteF32,
+ begin, cur_);
+ break;
+ case 'r':
+ if (consume(u"reinterpret/i64"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64ReinterpretI64,
+ begin, cur_);
+ break;
+ case 's':
+ if (consume(u"sqrt"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Sqrt, begin, cur_);
+ if (consume(u"sub"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::F64Sub, begin, cur_);
+ if (consume(u"store"))
+ return WasmToken(WasmToken::Store, Op::F64Store, begin, cur_);
+ break;
+ case 't':
+ if (consume(u"trunc"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::F64Trunc, begin, cur_);
+ break;
+ }
+ break;
+ }
+ break;
+
+ case 'g':
+ if (consume(u"get_global"))
+ return WasmToken(WasmToken::GetGlobal, begin, cur_);
+ if (consume(u"get_local"))
+ return WasmToken(WasmToken::GetLocal, begin, cur_);
+ if (consume(u"global"))
+ return WasmToken(WasmToken::Global, begin, cur_);
+ if (consume(u"grow_memory"))
+ return WasmToken(WasmToken::GrowMemory, begin, cur_);
+ break;
+
+ case 'i':
+ if (consume(u"i32")) {
+ if (!consume(u"."))
+ return WasmToken(WasmToken::ValueType, ValType::I32, begin, cur_);
+
+ switch (*cur_) {
+ case 'a':
+ if (consume(u"add"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Add, begin, cur_);
+ if (consume(u"and"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32And, begin, cur_);
+ break;
+ case 'c':
+ if (consume(u"const"))
+ return WasmToken(WasmToken::Const, ValType::I32, begin, cur_);
+ if (consume(u"clz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I32Clz, begin, cur_);
+ if (consume(u"ctz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I32Ctz, begin, cur_);
+ break;
+ case 'd':
+ if (consume(u"div_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32DivS, begin, cur_);
+ if (consume(u"div_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32DivU, begin, cur_);
+ break;
+ case 'e':
+ if (consume(u"eqz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I32Eqz, begin, cur_);
+ if (consume(u"eq"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32Eq, begin, cur_);
+ break;
+ case 'g':
+ if (consume(u"ge_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32GeS, begin, cur_);
+ if (consume(u"ge_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32GeU, begin, cur_);
+ if (consume(u"gt_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32GtS, begin, cur_);
+ if (consume(u"gt_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32GtU, begin, cur_);
+ break;
+ case 'l':
+ if (consume(u"le_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32LeS, begin, cur_);
+ if (consume(u"le_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32LeU, begin, cur_);
+ if (consume(u"lt_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32LtS, begin, cur_);
+ if (consume(u"lt_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32LtU, begin, cur_);
+ if (consume(u"load")) {
+ if (IsWasmSpace(*cur_))
+ return WasmToken(WasmToken::Load, Op::I32Load, begin, cur_);
+ if (consume(u"8_s"))
+ return WasmToken(WasmToken::Load, Op::I32Load8S, begin, cur_);
+ if (consume(u"8_u"))
+ return WasmToken(WasmToken::Load, Op::I32Load8U, begin, cur_);
+ if (consume(u"16_s"))
+ return WasmToken(WasmToken::Load, Op::I32Load16S, begin, cur_);
+ if (consume(u"16_u"))
+ return WasmToken(WasmToken::Load, Op::I32Load16U, begin, cur_);
+ break;
+ }
+ break;
+ case 'm':
+ if (consume(u"mul"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Mul, begin, cur_);
+ break;
+ case 'n':
+ if (consume(u"ne"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I32Ne, begin, cur_);
+ break;
+ case 'o':
+ if (consume(u"or"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Or, begin, cur_);
+ break;
+ case 'p':
+ if (consume(u"popcnt"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I32Popcnt, begin, cur_);
+ break;
+ case 'r':
+ if (consume(u"reinterpret/f32"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I32ReinterpretF32,
+ begin, cur_);
+ if (consume(u"rem_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32RemS, begin, cur_);
+ if (consume(u"rem_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32RemU, begin, cur_);
+ if (consume(u"rotr"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Rotr, begin, cur_);
+ if (consume(u"rotl"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Rotl, begin, cur_);
+ break;
+ case 's':
+ if (consume(u"sub"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Sub, begin, cur_);
+ if (consume(u"shl"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Shl, begin, cur_);
+ if (consume(u"shr_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32ShrS, begin, cur_);
+ if (consume(u"shr_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32ShrU, begin, cur_);
+ if (consume(u"store")) {
+ if (IsWasmSpace(*cur_))
+ return WasmToken(WasmToken::Store, Op::I32Store, begin, cur_);
+ if (consume(u"8"))
+ return WasmToken(WasmToken::Store, Op::I32Store8, begin, cur_);
+ if (consume(u"16"))
+ return WasmToken(WasmToken::Store, Op::I32Store16, begin, cur_);
+ break;
+ }
+ break;
+ case 't':
+ if (consume(u"trunc_s/f32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I32TruncSF32,
+ begin, cur_);
+ if (consume(u"trunc_s/f64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I32TruncSF64,
+ begin, cur_);
+ if (consume(u"trunc_u/f32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I32TruncUF32,
+ begin, cur_);
+ if (consume(u"trunc_u/f64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I32TruncUF64,
+ begin, cur_);
+ break;
+ case 'w':
+ if (consume(u"wrap/i64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I32WrapI64,
+ begin, cur_);
+ break;
+ case 'x':
+ if (consume(u"xor"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I32Xor, begin, cur_);
+ break;
+ }
+ break;
+ }
+ if (consume(u"i64")) {
+ if (!consume(u"."))
+ return WasmToken(WasmToken::ValueType, ValType::I64, begin, cur_);
+
+ switch (*cur_) {
+ case 'a':
+ if (consume(u"add"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Add, begin, cur_);
+ if (consume(u"and"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64And, begin, cur_);
+ break;
+ case 'c':
+ if (consume(u"const"))
+ return WasmToken(WasmToken::Const, ValType::I64, begin, cur_);
+ if (consume(u"clz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I64Clz, begin, cur_);
+ if (consume(u"ctz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I64Ctz, begin, cur_);
+ break;
+ case 'd':
+ if (consume(u"div_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64DivS, begin, cur_);
+ if (consume(u"div_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64DivU, begin, cur_);
+ break;
+ case 'e':
+ if (consume(u"eqz"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I64Eqz, begin, cur_);
+ if (consume(u"eq"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64Eq, begin, cur_);
+ if (consume(u"extend_s/i32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64ExtendSI32,
+ begin, cur_);
+ if (consume(u"extend_u/i32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64ExtendUI32,
+ begin, cur_);
+ break;
+ case 'g':
+ if (consume(u"ge_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64GeS, begin, cur_);
+ if (consume(u"ge_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64GeU, begin, cur_);
+ if (consume(u"gt_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64GtS, begin, cur_);
+ if (consume(u"gt_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64GtU, begin, cur_);
+ break;
+ case 'l':
+ if (consume(u"le_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64LeS, begin, cur_);
+ if (consume(u"le_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64LeU, begin, cur_);
+ if (consume(u"lt_s"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64LtS, begin, cur_);
+ if (consume(u"lt_u"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64LtU, begin, cur_);
+ if (consume(u"load")) {
+ if (IsWasmSpace(*cur_))
+ return WasmToken(WasmToken::Load, Op::I64Load, begin, cur_);
+ if (consume(u"8_s"))
+ return WasmToken(WasmToken::Load, Op::I64Load8S, begin, cur_);
+ if (consume(u"8_u"))
+ return WasmToken(WasmToken::Load, Op::I64Load8U, begin, cur_);
+ if (consume(u"16_s"))
+ return WasmToken(WasmToken::Load, Op::I64Load16S, begin, cur_);
+ if (consume(u"16_u"))
+ return WasmToken(WasmToken::Load, Op::I64Load16U, begin, cur_);
+ if (consume(u"32_s"))
+ return WasmToken(WasmToken::Load, Op::I64Load32S, begin, cur_);
+ if (consume(u"32_u"))
+ return WasmToken(WasmToken::Load, Op::I64Load32U, begin, cur_);
+ break;
+ }
+ break;
+ case 'm':
+ if (consume(u"mul"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Mul, begin, cur_);
+ break;
+ case 'n':
+ if (consume(u"ne"))
+ return WasmToken(WasmToken::ComparisonOpcode, Op::I64Ne, begin, cur_);
+ break;
+ case 'o':
+ if (consume(u"or"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Or, begin, cur_);
+ break;
+ case 'p':
+ if (consume(u"popcnt"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I64Popcnt, begin, cur_);
+ break;
+ case 'r':
+ if (consume(u"reinterpret/f64"))
+ return WasmToken(WasmToken::UnaryOpcode, Op::I64ReinterpretF64,
+ begin, cur_);
+ if (consume(u"rem_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64RemS, begin, cur_);
+ if (consume(u"rem_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64RemU, begin, cur_);
+ if (consume(u"rotr"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Rotr, begin, cur_);
+ if (consume(u"rotl"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Rotl, begin, cur_);
+ break;
+ case 's':
+ if (consume(u"sub"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Sub, begin, cur_);
+ if (consume(u"shl"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Shl, begin, cur_);
+ if (consume(u"shr_s"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64ShrS, begin, cur_);
+ if (consume(u"shr_u"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64ShrU, begin, cur_);
+ if (consume(u"store")) {
+ if (IsWasmSpace(*cur_))
+ return WasmToken(WasmToken::Store, Op::I64Store, begin, cur_);
+ if (consume(u"8"))
+ return WasmToken(WasmToken::Store, Op::I64Store8, begin, cur_);
+ if (consume(u"16"))
+ return WasmToken(WasmToken::Store, Op::I64Store16, begin, cur_);
+ if (consume(u"32"))
+ return WasmToken(WasmToken::Store, Op::I64Store32, begin, cur_);
+ break;
+ }
+ break;
+ case 't':
+ if (consume(u"trunc_s/f32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncSF32,
+ begin, cur_);
+ if (consume(u"trunc_s/f64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncSF64,
+ begin, cur_);
+ if (consume(u"trunc_u/f32"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncUF32,
+ begin, cur_);
+ if (consume(u"trunc_u/f64"))
+ return WasmToken(WasmToken::ConversionOpcode, Op::I64TruncUF64,
+ begin, cur_);
+ break;
+ case 'x':
+ if (consume(u"xor"))
+ return WasmToken(WasmToken::BinaryOpcode, Op::I64Xor, begin, cur_);
+ break;
+ }
+ break;
+ }
+ if (consume(u"import"))
+ return WasmToken(WasmToken::Import, begin, cur_);
+ if (consume(u"infinity"))
+ return WasmToken(WasmToken::Infinity, begin, cur_);
+ if (consume(u"if"))
+ return WasmToken(WasmToken::If, begin, cur_);
+ break;
+
+ case 'l':
+ if (consume(u"local"))
+ return WasmToken(WasmToken::Local, begin, cur_);
+ if (consume(u"loop"))
+ return WasmToken(WasmToken::Loop, begin, cur_);
+ break;
+
+ case 'm':
+ if (consume(u"module"))
+ return WasmToken(WasmToken::Module, begin, cur_);
+ if (consume(u"memory"))
+ return WasmToken(WasmToken::Memory, begin, cur_);
+ if (consume(u"mut"))
+ return WasmToken(WasmToken::Mutable, begin, cur_);
+ break;
+
+ case 'n':
+ if (consume(u"nan"))
+ return nan(begin);
+ if (consume(u"nop"))
+ return WasmToken(WasmToken::Nop, begin, cur_);
+ break;
+
+ case 'o':
+ if (consume(u"offset"))
+ return WasmToken(WasmToken::Offset, begin, cur_);
+ break;
+
+ case 'p':
+ if (consume(u"param"))
+ return WasmToken(WasmToken::Param, begin, cur_);
+ break;
+
+ case 'r':
+ if (consume(u"result"))
+ return WasmToken(WasmToken::Result, begin, cur_);
+ if (consume(u"return"))
+ return WasmToken(WasmToken::Return, begin, cur_);
+ break;
+
+ case 's':
+ if (consume(u"select"))
+ return WasmToken(WasmToken::TernaryOpcode, Op::Select, begin, cur_);
+ if (consume(u"set_global"))
+ return WasmToken(WasmToken::SetGlobal, begin, cur_);
+ if (consume(u"set_local"))
+ return WasmToken(WasmToken::SetLocal, begin, cur_);
+ if (consume(u"start"))
+ return WasmToken(WasmToken::Start, begin, cur_);
+ break;
+
+ case 't':
+ if (consume(u"table"))
+ return WasmToken(WasmToken::Table, begin, cur_);
+ if (consume(u"tee_local"))
+ return WasmToken(WasmToken::TeeLocal, begin, cur_);
+ if (consume(u"then"))
+ return WasmToken(WasmToken::Then, begin, cur_);
+ if (consume(u"type"))
+ return WasmToken(WasmToken::Type, begin, cur_);
+ break;
+
+ case 'u':
+ if (consume(u"unreachable"))
+ return WasmToken(WasmToken::Unreachable, begin, cur_);
+ break;
+
+ default:
+ break;
+ }
+
+ return fail(begin);
+}
+
+/*****************************************************************************/
+// wasm text format parser
+
+namespace {
+
+struct WasmParseContext
+{
+ WasmTokenStream ts;
+ LifoAlloc& lifo;
+ UniqueChars* error;
+ DtoaState* dtoaState;
+
+ WasmParseContext(const char16_t* text, LifoAlloc& lifo, UniqueChars* error)
+ : ts(text, error),
+ lifo(lifo),
+ error(error),
+ dtoaState(NewDtoaState())
+ {}
+
+ bool fail(const char* message) {
+ error->reset(js_strdup(message));
+ return false;
+ }
+ ~WasmParseContext() {
+ DestroyDtoaState(dtoaState);
+ }
+};
+
+} // end anonymous namespace
+
+static AstExpr*
+ParseExprInsideParens(WasmParseContext& c);
+
+static AstExpr*
+ParseExprBody(WasmParseContext& c, WasmToken token, bool inParens);
+
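+// Parse a single operand expression. In the folded (s-expression) syntax an
+// operand is written as a nested parenthesized expression, e.g.
+// (i32.add (i32.const 1) (i32.const 2)); when the operand is omitted, or in the
+// flat syntax, it is taken from the implicit value stack, modeled here as AstPop.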
+static AstExpr*
+ParseExpr(WasmParseContext& c, bool inParens)
+{
+ WasmToken openParen;
+ if (!inParens || !c.ts.getIf(WasmToken::OpenParen, &openParen))
+ return new(c.lifo) AstPop();
+
+ // Special case: If we have an open paren, but it's a "(then ...", then
+    // we don't have an expression following us, so we pop here too. This
+ // handles "(if (then ...))" which pops the condition.
+ if (c.ts.peek().kind() == WasmToken::Then) {
+ c.ts.unget(openParen);
+ return new(c.lifo) AstPop();
+ }
+
+ AstExpr* expr = ParseExprInsideParens(c);
+ if (!expr)
+ return nullptr;
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return expr;
+}
+
+static bool
+ParseExprList(WasmParseContext& c, AstExprVector* exprs, bool inParens)
+{
+ for (;;) {
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ AstExpr* expr = ParseExprInsideParens(c);
+ if (!expr || !exprs->append(expr))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ continue;
+ }
+
+ WasmToken token;
+ if (c.ts.getIfOpcode(&token)) {
+ AstExpr* expr = ParseExprBody(c, token, false);
+ if (!expr || !exprs->append(expr))
+ return false;
+ continue;
+ }
+
+ break;
+ }
+
+ return true;
+}
+
+static bool
+ParseBlockSignature(WasmParseContext& c, ExprType* type)
+{
+ WasmToken token;
+ if (c.ts.getIf(WasmToken::ValueType, &token))
+ *type = ToExprType(token.valueType());
+ else
+ *type = ExprType::Void;
+
+ return true;
+}
+
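+// Parse a block or loop. A loop may carry two labels as compatibility syntax
+// sugar, e.g. (loop $break $continue ...), which is parsed as
+// (block $break (loop $continue ...)): the first label is the break target,
+// the second the continue target.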
+static AstBlock*
+ParseBlock(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExprVector exprs(c.lifo);
+
+ AstName name = c.ts.getIfName();
+
+ // Compatibility syntax sugar: If a second label is present, we'll wrap
+ // this loop in a block.
+ AstName otherName;
+ if (op == Op::Loop) {
+ AstName maybeName = c.ts.getIfName();
+ if (!maybeName.empty()) {
+ otherName = name;
+ name = maybeName;
+ }
+ }
+
+ ExprType type;
+ if (!ParseBlockSignature(c, &type))
+ return nullptr;
+
+ if (!ParseExprList(c, &exprs, inParens))
+ return nullptr;
+
+ if (!inParens) {
+ if (!c.ts.match(WasmToken::End, c.error))
+ return nullptr;
+ }
+
+ AstBlock* result = new(c.lifo) AstBlock(op, type, name, Move(exprs));
+
+ if (op == Op::Loop && !otherName.empty()) {
+ if (!exprs.append(result))
+ return nullptr;
+ result = new(c.lifo) AstBlock(Op::Block, type, otherName, Move(exprs));
+ }
+
+ return result;
+}
+
+static AstBranch*
+ParseBranch(WasmParseContext& c, Op op, bool inParens)
+{
+ MOZ_ASSERT(op == Op::Br || op == Op::BrIf);
+
+ AstRef target;
+ if (!c.ts.matchRef(&target, c.error))
+ return nullptr;
+
+ AstExpr* value = nullptr;
+ if (inParens) {
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ value = ParseExprInsideParens(c);
+ if (!value)
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ }
+ }
+
+ AstExpr* cond = nullptr;
+ if (op == Op::BrIf) {
+ if (inParens && c.ts.getIf(WasmToken::OpenParen)) {
+ cond = ParseExprInsideParens(c);
+ if (!cond)
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ } else {
+ cond = new(c.lifo) AstPop();
+ if (!cond)
+ return nullptr;
+ }
+ }
+
+ return new(c.lifo) AstBranch(op, ExprType::Void, cond, target, value);
+}
+
+static bool
+ParseArgs(WasmParseContext& c, AstExprVector* args)
+{
+ while (c.ts.getIf(WasmToken::OpenParen)) {
+ AstExpr* arg = ParseExprInsideParens(c);
+ if (!arg || !args->append(arg))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ }
+
+ return true;
+}
+
+static AstCall*
+ParseCall(WasmParseContext& c, bool inParens)
+{
+ AstRef func;
+ if (!c.ts.matchRef(&func, c.error))
+ return nullptr;
+
+ AstExprVector args(c.lifo);
+ if (inParens) {
+ if (!ParseArgs(c, &args))
+ return nullptr;
+ }
+
+ return new(c.lifo) AstCall(Op::Call, ExprType::Void, func, Move(args));
+}
+
+static AstCallIndirect*
+ParseCallIndirect(WasmParseContext& c, bool inParens)
+{
+ AstRef sig;
+ if (!c.ts.matchRef(&sig, c.error))
+ return nullptr;
+
+ AstExprVector args(c.lifo);
+ AstExpr* index;
+ if (inParens) {
+ if (!ParseArgs(c, &args))
+ return nullptr;
+
+ if (args.empty())
+ index = new(c.lifo) AstPop();
+ else
+ index = args.popCopy();
+ } else {
+ index = new(c.lifo) AstPop();
+ }
+
+ return new(c.lifo) AstCallIndirect(sig, ExprType::Void, Move(args), index);
+}
+
+static uint_fast8_t
+CountLeadingZeroes4(uint8_t x)
+{
+ MOZ_ASSERT((x & -0x10) == 0);
+ return CountLeadingZeroes32(x) - 28;
+}
+
+template <typename T>
+static T
+ushl(T lhs, unsigned rhs)
+{
+ return rhs < sizeof(T) * CHAR_BIT ? (lhs << rhs) : 0;
+}
+
+template <typename T>
+static T
+ushr(T lhs, unsigned rhs)
+{
+ return rhs < sizeof(T) * CHAR_BIT ? (lhs >> rhs) : 0;
+}
+
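+// Parse a NaN literal: either a bare "nan", which yields the spec's default NaN
+// (only the most significant significand bit set, e.g. f32 nan:0x400000), or
+// "nan:0x<payload>", whose payload must be nonzero and fit in the significand bits.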
+template<typename Float>
+static AstConst*
+ParseNaNLiteral(WasmParseContext& c, WasmToken token, const char16_t* cur, bool isNegated)
+{
+ const char16_t* end = token.end();
+
+ MOZ_ALWAYS_TRUE(*cur++ == 'n' && *cur++ == 'a' && *cur++ == 'n');
+
+ typedef FloatingPoint<Float> Traits;
+ typedef typename Traits::Bits Bits;
+
+ Bits value;
+ if (cur != end) {
+ MOZ_ALWAYS_TRUE(*cur++ == ':' && *cur++ == '0' && *cur++ == 'x');
+ if (cur == end)
+ goto error;
+ CheckedInt<Bits> u = 0;
+ do {
+ uint8_t digit = 0;
+ MOZ_ALWAYS_TRUE(IsHexDigit(*cur, &digit));
+ u *= 16;
+ u += digit;
+ cur++;
+ } while (cur != end);
+ if (!u.isValid())
+ goto error;
+ value = u.value();
+ if ((value & ~Traits::kSignificandBits) != 0)
+ goto error;
+ // NaN payloads must contain at least one set bit.
+ if (value == 0)
+ goto error;
+ } else {
+ // Produce the spec's default NaN.
+ value = (Traits::kSignificandBits + 1) >> 1;
+ }
+
+ value = (isNegated ? Traits::kSignBit : 0) | Traits::kExponentBits | value;
+ return new (c.lifo) AstConst(Val(Raw<Float>::fromBits(value)));
+
+ error:
+ c.ts.generateError(token, c.error);
+ return nullptr;
+}
+
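+// Parse a hexadecimal float of the form 0x<hex>[.<hex>][p[+|-]<decimal exponent>],
+// where the exponent is a power of two written in decimal; e.g. 0x1.8p3 denotes
+// 1.5 * 2^3 = 12.0. The result is rounded to nearest, ties to even.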
+template <typename Float>
+static bool
+ParseHexFloatLiteral(const char16_t* cur, const char16_t* end, Float* result)
+{
+ MOZ_ALWAYS_TRUE(*cur++ == '0' && *cur++ == 'x');
+ typedef FloatingPoint<Float> Traits;
+ typedef typename Traits::Bits Bits;
+ static const unsigned numBits = sizeof(Float) * CHAR_BIT;
+ static const Bits allOnes = ~Bits(0);
+ static const Bits mostSignificantBit = ~(allOnes >> 1);
+
+ // Significand part.
+ Bits significand = 0;
+ CheckedInt<int32_t> exponent = 0;
+ bool sawFirstNonZero = false;
+ bool discardedExtraNonZero = false;
+ const char16_t* dot = nullptr;
+ int significandPos;
+ for (; cur != end; cur++) {
+ if (*cur == '.') {
+ MOZ_ASSERT(!dot);
+ dot = cur;
+ continue;
+ }
+
+ uint8_t digit;
+ if (!IsHexDigit(*cur, &digit))
+ break;
+ if (!sawFirstNonZero) {
+ if (digit == 0)
+ continue;
+ // We've located the first non-zero digit; we can now determine the
+ // initial exponent. If we're after the dot, count the number of
+ // zeros from the dot to here, and adjust for the number of leading
+ // zero bits in the digit. Set up significandPos to put the first
+ // nonzero at the most significant bit.
+ int_fast8_t lz = CountLeadingZeroes4(digit);
+ ptrdiff_t zeroAdjustValue = !dot ? 1 : dot + 1 - cur;
+ CheckedInt<ptrdiff_t> zeroAdjust = zeroAdjustValue;
+ zeroAdjust *= 4;
+ zeroAdjust -= lz + 1;
+ if (!zeroAdjust.isValid())
+ return false;
+ exponent = zeroAdjust.value();
+ significandPos = numBits - (4 - lz);
+ sawFirstNonZero = true;
+ } else {
+ // We've already seen a non-zero; just take 4 more bits.
+ if (!dot)
+ exponent += 4;
+ if (significandPos > -4)
+ significandPos -= 4;
+ }
+
+        // Or the newly parsed digit into significand at significandPos.
+ if (significandPos >= 0) {
+ significand |= ushl(Bits(digit), significandPos);
+ } else if (significandPos > -4) {
+ significand |= ushr(digit, 4 - significandPos);
+ discardedExtraNonZero = (digit & ~ushl(allOnes, 4 - significandPos)) != 0;
+ } else if (digit != 0) {
+ discardedExtraNonZero = true;
+ }
+ }
+
+ // Exponent part.
+ if (cur != end) {
+ MOZ_ALWAYS_TRUE(*cur++ == 'p');
+ bool isNegated = false;
+ if (cur != end && (*cur == '-' || *cur == '+'))
+ isNegated = *cur++ == '-';
+ CheckedInt<int32_t> parsedExponent = 0;
+ while (cur != end && IsWasmDigit(*cur))
+ parsedExponent = parsedExponent * 10 + (*cur++ - '0');
+ if (isNegated)
+ parsedExponent = -parsedExponent;
+ exponent += parsedExponent;
+ }
+
+ MOZ_ASSERT(cur == end);
+ if (!exponent.isValid())
+ return false;
+
+ // Create preliminary exponent and significand encodings of the results.
+ Bits encodedExponent, encodedSignificand, discardedSignificandBits;
+ if (significand == 0) {
+ // Zero. The exponent is encoded non-biased.
+ encodedExponent = 0;
+ encodedSignificand = 0;
+ discardedSignificandBits = 0;
+ } else if (MOZ_UNLIKELY(exponent.value() <= int32_t(-Traits::kExponentBias))) {
+ // Underflow to subnormal or zero.
+ encodedExponent = 0;
+ encodedSignificand = ushr(significand,
+ numBits - Traits::kExponentShift -
+ exponent.value() - Traits::kExponentBias);
+ discardedSignificandBits =
+ ushl(significand,
+ Traits::kExponentShift + exponent.value() + Traits::kExponentBias);
+ } else if (MOZ_LIKELY(exponent.value() <= int32_t(Traits::kExponentBias))) {
+ // Normal (non-zero). The significand's leading 1 is encoded implicitly.
+ encodedExponent = (Bits(exponent.value()) + Traits::kExponentBias) <<
+ Traits::kExponentShift;
+ MOZ_ASSERT(significand & mostSignificantBit);
+ encodedSignificand = ushr(significand, numBits - Traits::kExponentShift - 1) &
+ Traits::kSignificandBits;
+ discardedSignificandBits = ushl(significand, Traits::kExponentShift + 1);
+ } else {
+ // Overflow to infinity.
+ encodedExponent = Traits::kExponentBits;
+ encodedSignificand = 0;
+ discardedSignificandBits = 0;
+ }
+ MOZ_ASSERT((encodedExponent & ~Traits::kExponentBits) == 0);
+ MOZ_ASSERT((encodedSignificand & ~Traits::kSignificandBits) == 0);
+ MOZ_ASSERT(encodedExponent != Traits::kExponentBits || encodedSignificand == 0);
+ Bits bits = encodedExponent | encodedSignificand;
+
+ // Apply rounding. If this overflows the significand, it carries into the
+ // exponent bit according to the magic of the IEEE 754 encoding.
+ bits += (discardedSignificandBits & mostSignificantBit) &&
+ ((discardedSignificandBits & ~mostSignificantBit) ||
+ discardedExtraNonZero ||
+ // ties to even
+ (encodedSignificand & 1));
+
+ *result = BitwiseCast<Float>(bits);
+ return true;
+}
+
+template <typename Float>
+static AstConst*
+ParseFloatLiteral(WasmParseContext& c, WasmToken token)
+{
+ Float result;
+ switch (token.kind()) {
+ case WasmToken::Index: result = token.index(); break;
+ case WasmToken::UnsignedInteger: result = token.uint(); break;
+ case WasmToken::SignedInteger: result = token.sint(); break;
+ case WasmToken::NegativeZero: result = -0.; break;
+ case WasmToken::Float: break;
+ default: c.ts.generateError(token, c.error); return nullptr;
+ }
+
+ if (token.kind() != WasmToken::Float)
+ return new (c.lifo) AstConst(Val(Raw<Float>(result)));
+
+ const char16_t* begin = token.begin();
+ const char16_t* end = token.end();
+ const char16_t* cur = begin;
+
+ bool isNegated = false;
+ if (*cur == '-' || *cur == '+')
+ isNegated = *cur++ == '-';
+
+ switch (token.floatLiteralKind()) {
+ case WasmToken::Infinity: {
+ result = PositiveInfinity<Float>();
+ break;
+ }
+ case WasmToken::NaN: {
+ return ParseNaNLiteral<Float>(c, token, cur, isNegated);
+ }
+ case WasmToken::HexNumber: {
+ if (!ParseHexFloatLiteral(cur, end, &result)) {
+ c.ts.generateError(token, c.error);
+ return nullptr;
+ }
+ break;
+ }
+ case WasmToken::DecNumber: {
+ // Call into JS' strtod. Tokenization has already required that the
+ // string is well-behaved.
+ LifoAlloc::Mark mark = c.lifo.mark();
+ char* buffer = c.lifo.newArray<char>(end - cur + 1);
+ if (!buffer)
+ return nullptr;
+ for (ptrdiff_t i = 0; i < end - cur; ++i)
+ buffer[i] = char(cur[i]);
+ buffer[end - cur] = '\0';
+ char* strtod_end;
+ int err;
+ result = (Float)js_strtod_harder(c.dtoaState, buffer, &strtod_end, &err);
+ if (err != 0 || strtod_end == buffer) {
+ c.lifo.release(mark);
+ c.ts.generateError(token, c.error);
+ return nullptr;
+ }
+ c.lifo.release(mark);
+ break;
+ }
+ }
+
+ if (isNegated)
+ result = -result;
+
+ return new (c.lifo) AstConst(Val(Raw<Float>(result)));
+}
+
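+// Parse the immediate of a <type>.const expression. Integer immediates may be
+// written signed or unsigned and are stored as unsigned bit patterns, so
+// (i32.const -1) stores 0xffffffff.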
+static AstConst*
+ParseConst(WasmParseContext& c, WasmToken constToken)
+{
+ WasmToken val = c.ts.get();
+ switch (constToken.valueType()) {
+ case ValType::I32: {
+ switch (val.kind()) {
+ case WasmToken::Index:
+ return new(c.lifo) AstConst(Val(val.index()));
+ case WasmToken::SignedInteger: {
+ CheckedInt<int32_t> sint = val.sint();
+ if (!sint.isValid())
+ break;
+ return new(c.lifo) AstConst(Val(uint32_t(sint.value())));
+ }
+ case WasmToken::NegativeZero:
+ return new(c.lifo) AstConst(Val(uint32_t(0)));
+ default:
+ break;
+ }
+ break;
+ }
+ case ValType::I64: {
+ switch (val.kind()) {
+ case WasmToken::Index:
+ return new(c.lifo) AstConst(Val(uint64_t(val.index())));
+ case WasmToken::UnsignedInteger:
+ return new(c.lifo) AstConst(Val(val.uint()));
+ case WasmToken::SignedInteger:
+ return new(c.lifo) AstConst(Val(uint64_t(val.sint())));
+ case WasmToken::NegativeZero:
+ return new(c.lifo) AstConst(Val(uint64_t(0)));
+ default:
+ break;
+ }
+ break;
+ }
+ case ValType::F32: {
+ return ParseFloatLiteral<float>(c, val);
+ }
+ case ValType::F64: {
+ return ParseFloatLiteral<double>(c, val);
+ }
+ default:
+ break;
+ }
+ c.ts.generateError(constToken, c.error);
+ return nullptr;
+}
+
+static AstGetLocal*
+ParseGetLocal(WasmParseContext& c)
+{
+ AstRef local;
+ if (!c.ts.matchRef(&local, c.error))
+ return nullptr;
+
+ return new(c.lifo) AstGetLocal(local);
+}
+
+static AstGetGlobal*
+ParseGetGlobal(WasmParseContext& c)
+{
+ AstRef local;
+ if (!c.ts.matchRef(&local, c.error))
+ return nullptr;
+ return new(c.lifo) AstGetGlobal(local);
+}
+
+static AstSetGlobal*
+ParseSetGlobal(WasmParseContext& c, bool inParens)
+{
+ AstRef global;
+ if (!c.ts.matchRef(&global, c.error))
+ return nullptr;
+
+ AstExpr* value = ParseExpr(c, inParens);
+ if (!value)
+ return nullptr;
+
+ return new(c.lifo) AstSetGlobal(global, *value);
+}
+
+static AstSetLocal*
+ParseSetLocal(WasmParseContext& c, bool inParens)
+{
+ AstRef local;
+ if (!c.ts.matchRef(&local, c.error))
+ return nullptr;
+
+ AstExpr* value = ParseExpr(c, inParens);
+ if (!value)
+ return nullptr;
+
+ return new(c.lifo) AstSetLocal(local, *value);
+}
+
+static AstTeeLocal*
+ParseTeeLocal(WasmParseContext& c, bool inParens)
+{
+ AstRef local;
+ if (!c.ts.matchRef(&local, c.error))
+ return nullptr;
+
+ AstExpr* value = ParseExpr(c, inParens);
+ if (!value)
+ return nullptr;
+
+ return new(c.lifo) AstTeeLocal(local, *value);
+}
+
+static AstReturn*
+ParseReturn(WasmParseContext& c, bool inParens)
+{
+ AstExpr* maybeExpr = nullptr;
+
+ if (c.ts.peek().kind() != WasmToken::CloseParen) {
+ maybeExpr = ParseExpr(c, inParens);
+ if (!maybeExpr)
+ return nullptr;
+ }
+
+ return new(c.lifo) AstReturn(maybeExpr);
+}
+
+static AstUnaryOperator*
+ParseUnaryOperator(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExpr* operand = ParseExpr(c, inParens);
+ if (!operand)
+ return nullptr;
+
+ return new(c.lifo) AstUnaryOperator(op, operand);
+}
+
+static AstBinaryOperator*
+ParseBinaryOperator(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExpr* lhs = ParseExpr(c, inParens);
+ if (!lhs)
+ return nullptr;
+
+ AstExpr* rhs = ParseExpr(c, inParens);
+ if (!rhs)
+ return nullptr;
+
+ return new(c.lifo) AstBinaryOperator(op, lhs, rhs);
+}
+
+static AstComparisonOperator*
+ParseComparisonOperator(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExpr* lhs = ParseExpr(c, inParens);
+ if (!lhs)
+ return nullptr;
+
+ AstExpr* rhs = ParseExpr(c, inParens);
+ if (!rhs)
+ return nullptr;
+
+ return new(c.lifo) AstComparisonOperator(op, lhs, rhs);
+}
+
+static AstTernaryOperator*
+ParseTernaryOperator(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExpr* op0 = ParseExpr(c, inParens);
+ if (!op0)
+ return nullptr;
+
+ AstExpr* op1 = ParseExpr(c, inParens);
+ if (!op1)
+ return nullptr;
+
+ AstExpr* op2 = ParseExpr(c, inParens);
+ if (!op2)
+ return nullptr;
+
+ return new(c.lifo) AstTernaryOperator(op, op0, op1, op2);
+}
+
+static AstConversionOperator*
+ParseConversionOperator(WasmParseContext& c, Op op, bool inParens)
+{
+ AstExpr* operand = ParseExpr(c, inParens);
+ if (!operand)
+ return nullptr;
+
+ return new(c.lifo) AstConversionOperator(op, operand);
+}
+
+static AstDrop*
+ParseDrop(WasmParseContext& c, bool inParens)
+{
+ AstExpr* value = ParseExpr(c, inParens);
+ if (!value)
+ return nullptr;
+
+ return new(c.lifo) AstDrop(*value);
+}
+
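+// Parse an if expression. In the folded form the condition precedes an explicit
+// (then ...) clause and an optional (else ...) clause, e.g.
+//   (if i32 (get_local 0) (then (i32.const 1)) (else (i32.const 0)))
+// A bare parenthesized consequent without "then" is also accepted.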
+static AstIf*
+ParseIf(WasmParseContext& c, bool inParens)
+{
+ AstName name = c.ts.getIfName();
+
+ ExprType type;
+ if (!ParseBlockSignature(c, &type))
+ return nullptr;
+
+ AstExpr* cond = ParseExpr(c, inParens);
+ if (!cond)
+ return nullptr;
+
+ if (inParens) {
+ if (!c.ts.match(WasmToken::OpenParen, c.error))
+ return nullptr;
+ }
+
+ AstExprVector thenExprs(c.lifo);
+ if (!inParens || c.ts.getIf(WasmToken::Then)) {
+ if (!ParseExprList(c, &thenExprs, inParens))
+ return nullptr;
+ } else {
+ AstExpr* thenBranch = ParseExprInsideParens(c);
+ if (!thenBranch || !thenExprs.append(thenBranch))
+ return nullptr;
+ }
+ if (inParens) {
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ }
+
+ AstExprVector elseExprs(c.lifo);
+ if (!inParens || c.ts.getIf(WasmToken::OpenParen)) {
+ if (c.ts.getIf(WasmToken::Else)) {
+ if (!ParseExprList(c, &elseExprs, inParens))
+ return nullptr;
+ } else if (inParens) {
+ AstExpr* elseBranch = ParseExprInsideParens(c);
+ if (!elseBranch || !elseExprs.append(elseBranch))
+ return nullptr;
+ }
+ if (inParens) {
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ } else {
+ if (!c.ts.match(WasmToken::End, c.error))
+ return nullptr;
+ }
+ }
+
+ return new(c.lifo) AstIf(type, cond, name, Move(thenExprs), Move(elseExprs));
+}
+
+static bool
+ParseLoadStoreAddress(WasmParseContext& c, int32_t* offset, uint32_t* alignLog2, AstExpr** base,
+ bool inParens)
+{
+ *offset = 0;
+ if (c.ts.getIf(WasmToken::Offset)) {
+ if (!c.ts.match(WasmToken::Equal, c.error))
+ return false;
+ WasmToken val = c.ts.get();
+ switch (val.kind()) {
+ case WasmToken::Index:
+ *offset = val.index();
+ break;
+ default:
+ c.ts.generateError(val, c.error);
+ return false;
+ }
+ }
+
+ *alignLog2 = UINT32_MAX;
+ if (c.ts.getIf(WasmToken::Align)) {
+ if (!c.ts.match(WasmToken::Equal, c.error))
+ return false;
+ WasmToken val = c.ts.get();
+ switch (val.kind()) {
+ case WasmToken::Index:
+ if (!IsPowerOfTwo(val.index())) {
+ c.ts.generateError(val, "non-power-of-two alignment", c.error);
+ return false;
+ }
+ *alignLog2 = CeilingLog2(val.index());
+ break;
+ default:
+ c.ts.generateError(val, c.error);
+ return false;
+ }
+ }
+
+ *base = ParseExpr(c, inParens);
+ if (!*base)
+ return false;
+
+ return true;
+}
+
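+// Parse a load, e.g. (i32.load offset=4 align=4 (get_local 0)). The offset and
+// align attributes are optional; when align is omitted, the natural alignment of
+// the access size is used (4 bytes for i32.load, 8 bytes for i64.load, etc.).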
+static AstLoad*
+ParseLoad(WasmParseContext& c, Op op, bool inParens)
+{
+ int32_t offset;
+ uint32_t alignLog2;
+ AstExpr* base;
+ if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+ return nullptr;
+
+ if (alignLog2 == UINT32_MAX) {
+ switch (op) {
+ case Op::I32Load8S:
+ case Op::I32Load8U:
+ case Op::I64Load8S:
+ case Op::I64Load8U:
+ alignLog2 = 0;
+ break;
+ case Op::I32Load16S:
+ case Op::I32Load16U:
+ case Op::I64Load16S:
+ case Op::I64Load16U:
+ alignLog2 = 1;
+ break;
+ case Op::I32Load:
+ case Op::F32Load:
+ case Op::I64Load32S:
+ case Op::I64Load32U:
+ alignLog2 = 2;
+ break;
+ case Op::I64Load:
+ case Op::F64Load:
+ alignLog2 = 3;
+ break;
+ default:
+ MOZ_CRASH("Bad load op");
+ }
+ }
+
+ uint32_t flags = alignLog2;
+
+ return new(c.lifo) AstLoad(op, AstLoadStoreAddress(base, flags, offset));
+}
+
+static AstStore*
+ParseStore(WasmParseContext& c, Op op, bool inParens)
+{
+ int32_t offset;
+ uint32_t alignLog2;
+ AstExpr* base;
+ if (!ParseLoadStoreAddress(c, &offset, &alignLog2, &base, inParens))
+ return nullptr;
+
+ if (alignLog2 == UINT32_MAX) {
+ switch (op) {
+ case Op::I32Store8:
+ case Op::I64Store8:
+ alignLog2 = 0;
+ break;
+ case Op::I32Store16:
+ case Op::I64Store16:
+ alignLog2 = 1;
+ break;
+ case Op::I32Store:
+ case Op::F32Store:
+ case Op::I64Store32:
+ alignLog2 = 2;
+ break;
+ case Op::I64Store:
+ case Op::F64Store:
+ alignLog2 = 3;
+ break;
+ default:
+ MOZ_CRASH("Bad load op");
+ }
+ }
+
+ AstExpr* value = ParseExpr(c, inParens);
+ if (!value)
+ return nullptr;
+
+ uint32_t flags = alignLog2;
+
+ return new(c.lifo) AstStore(op, AstLoadStoreAddress(base, flags, offset), value);
+}
+
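+// Parse (br_table $l0 $l1 ... $default <index>): the last label is the default
+// target and the preceding labels form the jump table.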
+static AstBranchTable*
+ParseBranchTable(WasmParseContext& c, WasmToken brTable, bool inParens)
+{
+ AstRefVector table(c.lifo);
+
+ AstRef target;
+ while (c.ts.getIfRef(&target)) {
+ if (!table.append(target))
+ return nullptr;
+ }
+
+ if (table.empty()) {
+ c.ts.generateError(c.ts.get(), c.error);
+ return nullptr;
+ }
+
+ AstRef def = table.popCopy();
+
+ AstExpr* index = ParseExpr(c, inParens);
+ if (!index)
+ return nullptr;
+
+ AstExpr* value = nullptr;
+ if (inParens) {
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ value = index;
+ index = ParseExprInsideParens(c);
+ if (!index)
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ }
+ }
+
+ return new(c.lifo) AstBranchTable(*index, def, Move(table), value);
+}
+
+static AstGrowMemory*
+ParseGrowMemory(WasmParseContext& c, bool inParens)
+{
+ AstExpr* operand = ParseExpr(c, inParens);
+ if (!operand)
+ return nullptr;
+
+ return new(c.lifo) AstGrowMemory(operand);
+}
+
+static AstExpr*
+ParseExprBody(WasmParseContext& c, WasmToken token, bool inParens)
+{
+ switch (token.kind()) {
+ case WasmToken::Unreachable:
+ return new(c.lifo) AstUnreachable;
+ case WasmToken::BinaryOpcode:
+ return ParseBinaryOperator(c, token.op(), inParens);
+ case WasmToken::Block:
+ return ParseBlock(c, Op::Block, inParens);
+ case WasmToken::Br:
+ return ParseBranch(c, Op::Br, inParens);
+ case WasmToken::BrIf:
+ return ParseBranch(c, Op::BrIf, inParens);
+ case WasmToken::BrTable:
+ return ParseBranchTable(c, token, inParens);
+ case WasmToken::Call:
+ return ParseCall(c, inParens);
+ case WasmToken::CallIndirect:
+ return ParseCallIndirect(c, inParens);
+ case WasmToken::ComparisonOpcode:
+ return ParseComparisonOperator(c, token.op(), inParens);
+ case WasmToken::Const:
+ return ParseConst(c, token);
+ case WasmToken::ConversionOpcode:
+ return ParseConversionOperator(c, token.op(), inParens);
+ case WasmToken::Drop:
+ return ParseDrop(c, inParens);
+ case WasmToken::If:
+ return ParseIf(c, inParens);
+ case WasmToken::GetGlobal:
+ return ParseGetGlobal(c);
+ case WasmToken::GetLocal:
+ return ParseGetLocal(c);
+ case WasmToken::Load:
+ return ParseLoad(c, token.op(), inParens);
+ case WasmToken::Loop:
+ return ParseBlock(c, Op::Loop, inParens);
+ case WasmToken::Return:
+ return ParseReturn(c, inParens);
+ case WasmToken::SetGlobal:
+ return ParseSetGlobal(c, inParens);
+ case WasmToken::SetLocal:
+ return ParseSetLocal(c, inParens);
+ case WasmToken::Store:
+ return ParseStore(c, token.op(), inParens);
+ case WasmToken::TeeLocal:
+ return ParseTeeLocal(c, inParens);
+ case WasmToken::TernaryOpcode:
+ return ParseTernaryOperator(c, token.op(), inParens);
+ case WasmToken::UnaryOpcode:
+ return ParseUnaryOperator(c, token.op(), inParens);
+ case WasmToken::Nop:
+ return new(c.lifo) AstNop();
+ case WasmToken::CurrentMemory:
+ return new(c.lifo) AstCurrentMemory();
+ case WasmToken::GrowMemory:
+ return ParseGrowMemory(c, inParens);
+ default:
+ c.ts.generateError(token, c.error);
+ return nullptr;
+ }
+}
+
+static AstExpr*
+ParseExprInsideParens(WasmParseContext& c)
+{
+ WasmToken token = c.ts.get();
+
+ return ParseExprBody(c, token, true);
+}
+
+static bool
+ParseValueTypeList(WasmParseContext& c, AstValTypeVector* vec)
+{
+ WasmToken token;
+ while (c.ts.getIf(WasmToken::ValueType, &token)) {
+ if (!vec->append(token.valueType()))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ParseResult(WasmParseContext& c, ExprType* result)
+{
+ if (*result != ExprType::Void) {
+ c.ts.generateError(c.ts.peek(), c.error);
+ return false;
+ }
+
+ WasmToken token;
+ if (!c.ts.match(WasmToken::ValueType, &token, c.error))
+ return false;
+
+ *result = ToExprType(token.valueType());
+ return true;
+}
+
+static bool
+ParseLocalOrParam(WasmParseContext& c, AstNameVector* locals, AstValTypeVector* localTypes)
+{
+ if (c.ts.peek().kind() != WasmToken::Name)
+ return locals->append(AstName()) && ParseValueTypeList(c, localTypes);
+
+ WasmToken token;
+ return locals->append(c.ts.get().name()) &&
+ c.ts.match(WasmToken::ValueType, &token, c.error) &&
+ localTypes->append(token.valueType());
+}
+
+static bool
+ParseInlineImport(WasmParseContext& c, InlineImport* import)
+{
+ return c.ts.match(WasmToken::Text, &import->module, c.error) &&
+ c.ts.match(WasmToken::Text, &import->field, c.error);
+}
+
+static bool
+ParseInlineExport(WasmParseContext& c, DefinitionKind kind, AstModule* module, AstRef ref)
+{
+ WasmToken name;
+ if (!c.ts.match(WasmToken::Text, &name, c.error))
+ return false;
+
+ AstExport* exp = new(c.lifo) AstExport(name.text(), kind, ref);
+ return exp && module->append(exp);
+}
+
+static bool
+MaybeParseTypeUse(WasmParseContext& c, AstRef* sig)
+{
+ WasmToken openParen;
+ if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+ if (c.ts.getIf(WasmToken::Type)) {
+ if (!c.ts.matchRef(sig, c.error))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ } else {
+ c.ts.unget(openParen);
+ }
+ }
+ return true;
+}
+
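+// Parse a function signature as a sequence of (param ...) and (result ...)
+// clauses, e.g. (param i32 i32) (result i32); at most one result is allowed.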
+static bool
+ParseFuncSig(WasmParseContext& c, AstSig* sig)
+{
+ AstValTypeVector args(c.lifo);
+ ExprType result = ExprType::Void;
+
+ while (c.ts.getIf(WasmToken::OpenParen)) {
+ WasmToken token = c.ts.get();
+ switch (token.kind()) {
+ case WasmToken::Param:
+ if (!ParseValueTypeList(c, &args))
+ return false;
+ break;
+ case WasmToken::Result:
+ if (!ParseResult(c, &result))
+ return false;
+ break;
+ default:
+ c.ts.generateError(token, c.error);
+ return false;
+ }
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ }
+
+ *sig = AstSig(Move(args), result);
+ return true;
+}
+
+static bool
+ParseFuncType(WasmParseContext& c, AstRef* ref, AstModule* module)
+{
+ if (!MaybeParseTypeUse(c, ref))
+ return false;
+
+ if (ref->isInvalid()) {
+ AstSig sig(c.lifo);
+ if (!ParseFuncSig(c, &sig))
+ return false;
+ uint32_t sigIndex;
+ if (!module->declare(Move(sig), &sigIndex))
+ return false;
+ ref->setIndex(sigIndex);
+ }
+
+ return true;
+}
+
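+// Parse a function definition. Inline (import "m" "f") and (export "name")
+// clauses may follow the optional name, e.g.
+//   (func $add (export "add") (param i32 i32) (result i32)
+//     (i32.add (get_local 0) (get_local 1)))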
+static bool
+ParseFunc(WasmParseContext& c, AstModule* module)
+{
+ AstValTypeVector vars(c.lifo);
+ AstValTypeVector args(c.lifo);
+ AstNameVector locals(c.lifo);
+
+ AstName funcName = c.ts.getIfName();
+
+ // Inline imports and exports.
+ WasmToken openParen;
+ if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+ if (c.ts.getIf(WasmToken::Import)) {
+ if (module->funcs().length()) {
+ c.ts.generateError(openParen, "import after function definition", c.error);
+ return false;
+ }
+
+ InlineImport names;
+ if (!ParseInlineImport(c, &names))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ AstRef sig;
+ if (!ParseFuncType(c, &sig, module))
+ return false;
+
+ auto* imp = new(c.lifo) AstImport(funcName, names.module.text(), names.field.text(), sig);
+ return imp && module->append(imp);
+ }
+
+ if (c.ts.getIf(WasmToken::Export)) {
+ AstRef ref = funcName.empty()
+ ? AstRef(module->funcImportNames().length() + module->funcs().length())
+ : AstRef(funcName);
+ if (!ParseInlineExport(c, DefinitionKind::Function, module, ref))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ } else {
+ c.ts.unget(openParen);
+ }
+ }
+
+ AstRef sigRef;
+ if (!MaybeParseTypeUse(c, &sigRef))
+ return false;
+
+ AstExprVector body(c.lifo);
+
+ ExprType result = ExprType::Void;
+ while (c.ts.getIf(WasmToken::OpenParen)) {
+ WasmToken token = c.ts.get();
+ switch (token.kind()) {
+ case WasmToken::Local:
+ if (!ParseLocalOrParam(c, &locals, &vars))
+ return false;
+ break;
+ case WasmToken::Param:
+ if (!vars.empty()) {
+ c.ts.generateError(token, c.error);
+ return false;
+ }
+ if (!ParseLocalOrParam(c, &locals, &args))
+ return false;
+ break;
+ case WasmToken::Result:
+ if (!ParseResult(c, &result))
+ return false;
+ break;
+ default:
+ c.ts.unget(token);
+ AstExpr* expr = ParseExprInsideParens(c);
+ if (!expr || !body.append(expr))
+ return false;
+ break;
+ }
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ }
+
+ if (!ParseExprList(c, &body, true))
+ return false;
+
+ if (sigRef.isInvalid()) {
+ uint32_t sigIndex;
+ if (!module->declare(AstSig(Move(args), result), &sigIndex))
+ return false;
+ sigRef.setIndex(sigIndex);
+ }
+
+ auto* func = new(c.lifo) AstFunc(funcName, sigRef, Move(vars), Move(locals), Move(body));
+ return func && module->append(func);
+}
+
+static AstSig*
+ParseTypeDef(WasmParseContext& c)
+{
+ AstName name = c.ts.getIfName();
+
+ if (!c.ts.match(WasmToken::OpenParen, c.error))
+ return nullptr;
+ if (!c.ts.match(WasmToken::Func, c.error))
+ return nullptr;
+
+ AstSig sig(c.lifo);
+ if (!ParseFuncSig(c, &sig))
+ return nullptr;
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return new(c.lifo) AstSig(name, Move(sig));
+}
+
+static bool
+MaybeParseOwnerIndex(WasmParseContext& c)
+{
+ if (c.ts.peek().kind() == WasmToken::Index) {
+ WasmToken elemIndex = c.ts.get();
+ if (elemIndex.index()) {
+ c.ts.generateError(elemIndex, "can't handle non-default memory/table yet", c.error);
+ return false;
+ }
+ }
+ return true;
+}
+
+static AstExpr*
+ParseInitializerExpression(WasmParseContext& c)
+{
+ if (!c.ts.match(WasmToken::OpenParen, c.error))
+ return nullptr;
+
+ AstExpr* initExpr = ParseExprInsideParens(c);
+ if (!initExpr)
+ return nullptr;
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return initExpr;
+}
+
+static AstDataSegment*
+ParseDataSegment(WasmParseContext& c)
+{
+ if (!MaybeParseOwnerIndex(c))
+ return nullptr;
+
+ AstExpr* offset = ParseInitializerExpression(c);
+ if (!offset)
+ return nullptr;
+
+ AstNameVector fragments(c.lifo);
+
+ WasmToken text;
+ while (c.ts.getIf(WasmToken::Text, &text)) {
+ if (!fragments.append(text.text()))
+ return nullptr;
+ }
+
+ return new(c.lifo) AstDataSegment(offset, Move(fragments));
+}
+
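+// Parse resizable limits: an initial size followed by an optional maximum,
+// e.g. "1" or "1 16" (counted in pages for memories, in elements for tables).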
+static bool
+ParseLimits(WasmParseContext& c, Limits* limits)
+{
+ WasmToken initial;
+ if (!c.ts.match(WasmToken::Index, &initial, c.error))
+ return false;
+
+ Maybe<uint32_t> maximum;
+ WasmToken token;
+ if (c.ts.getIf(WasmToken::Index, &token))
+ maximum.emplace(token.index());
+
+ Limits r = { initial.index(), maximum };
+ *limits = r;
+ return true;
+}
+
+static bool
+ParseMemory(WasmParseContext& c, WasmToken token, AstModule* module)
+{
+ AstName name = c.ts.getIfName();
+
+ WasmToken openParen;
+ if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+ if (c.ts.getIf(WasmToken::Import)) {
+ InlineImport names;
+ if (!ParseInlineImport(c, &names))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ Limits memory;
+ if (!ParseLimits(c, &memory))
+ return false;
+
+ auto* imp = new(c.lifo) AstImport(name, names.module.text(), names.field.text(),
+ DefinitionKind::Memory, memory);
+ return imp && module->append(imp);
+ }
+
+ if (c.ts.getIf(WasmToken::Export)) {
+ AstRef ref = name.empty() ? AstRef(module->memories().length()) : AstRef(name);
+ if (!ParseInlineExport(c, DefinitionKind::Memory, module, ref))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ } else {
+ c.ts.unget(openParen);
+ }
+ }
+
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ if (!c.ts.match(WasmToken::Data, c.error))
+ return false;
+
+ AstNameVector fragments(c.lifo);
+
+ WasmToken data;
+ size_t pages = 0;
+ size_t totalLength = 0;
+ while (c.ts.getIf(WasmToken::Text, &data)) {
+ if (!fragments.append(data.text()))
+ return false;
+ totalLength += data.text().length();
+ }
+
+ if (fragments.length()) {
+ AstExpr* offset = new(c.lifo) AstConst(Val(uint32_t(0)));
+ if (!offset)
+ return false;
+
+ AstDataSegment* segment = new(c.lifo) AstDataSegment(offset, Move(fragments));
+ if (!segment || !module->append(segment))
+ return false;
+
+ pages = AlignBytes<size_t>(totalLength, PageSize) / PageSize;
+ if (pages != uint32_t(pages))
+ return false;
+ }
+
+ Limits memory = { uint32_t(pages), Some(uint32_t(pages)) };
+ if (!module->addMemory(name, memory))
+ return false;
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ return true;
+ }
+
+ Limits memory;
+ if (!ParseLimits(c, &memory))
+ return false;
+
+ return module->addMemory(name, memory);
+}
+
+static bool
+ParseStartFunc(WasmParseContext& c, WasmToken token, AstModule* module)
+{
+ AstRef func;
+ if (!c.ts.matchRef(&func, c.error))
+ return false;
+
+ if (!module->setStartFunc(AstStartFunc(func))) {
+ c.ts.generateError(token, c.error);
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ParseGlobalType(WasmParseContext& c, WasmToken* typeToken, bool* isMutable)
+{
+ *isMutable = false;
+
+ // Either (mut i32) or i32.
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ // Immutable by default.
+ *isMutable = c.ts.getIf(WasmToken::Mutable);
+ if (!c.ts.match(WasmToken::ValueType, typeToken, c.error))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ return true;
+ }
+
+ return c.ts.match(WasmToken::ValueType, typeToken, c.error);
+}
+
+static bool
+ParseElemType(WasmParseContext& c)
+{
+ // Only AnyFunc is allowed at the moment.
+ return c.ts.match(WasmToken::AnyFunc, c.error);
+}
+
+static bool
+ParseTableSig(WasmParseContext& c, Limits* table)
+{
+ return ParseLimits(c, table) &&
+ ParseElemType(c);
+}
+
+static AstImport*
+ParseImport(WasmParseContext& c, AstModule* module)
+{
+ AstName name = c.ts.getIfName();
+
+ WasmToken moduleName;
+ if (!c.ts.match(WasmToken::Text, &moduleName, c.error))
+ return nullptr;
+
+ WasmToken fieldName;
+ if (!c.ts.match(WasmToken::Text, &fieldName, c.error))
+ return nullptr;
+
+ AstRef sigRef;
+ WasmToken openParen;
+ if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+ if (c.ts.getIf(WasmToken::Memory)) {
+ if (name.empty())
+ name = c.ts.getIfName();
+
+ Limits memory;
+ if (!ParseLimits(c, &memory))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
+ DefinitionKind::Memory, memory);
+ }
+ if (c.ts.getIf(WasmToken::Table)) {
+ if (name.empty())
+ name = c.ts.getIfName();
+
+ Limits table;
+ if (!ParseTableSig(c, &table))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
+ DefinitionKind::Table, table);
+ }
+ if (c.ts.getIf(WasmToken::Global)) {
+ if (name.empty())
+ name = c.ts.getIfName();
+
+ WasmToken typeToken;
+ bool isMutable;
+ if (!ParseGlobalType(c, &typeToken, &isMutable))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(),
+ AstGlobal(AstName(), typeToken.valueType(), isMutable));
+ }
+ if (c.ts.getIf(WasmToken::Func)) {
+ if (name.empty())
+ name = c.ts.getIfName();
+
+ AstRef sigRef;
+ if (!ParseFuncType(c, &sigRef, module))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(), sigRef);
+ }
+
+ if (c.ts.getIf(WasmToken::Type)) {
+ if (!c.ts.matchRef(&sigRef, c.error))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ } else {
+ c.ts.unget(openParen);
+ }
+ }
+
+ if (sigRef.isInvalid()) {
+ AstSig sig(c.lifo);
+ if (!ParseFuncSig(c, &sig))
+ return nullptr;
+
+ uint32_t sigIndex;
+ if (!module->declare(Move(sig), &sigIndex))
+ return nullptr;
+ sigRef.setIndex(sigIndex);
+ }
+
+ return new(c.lifo) AstImport(name, moduleName.text(), fieldName.text(), sigRef);
+}
+
+static AstExport*
+ParseExport(WasmParseContext& c)
+{
+ WasmToken name;
+ if (!c.ts.match(WasmToken::Text, &name, c.error))
+ return nullptr;
+
+ WasmToken exportee = c.ts.get();
+ switch (exportee.kind()) {
+ case WasmToken::Index:
+ return new(c.lifo) AstExport(name.text(), DefinitionKind::Function, AstRef(exportee.index()));
+ case WasmToken::Name:
+ return new(c.lifo) AstExport(name.text(), DefinitionKind::Function, AstRef(exportee.name()));
+ case WasmToken::Table: {
+ AstRef ref;
+ if (!c.ts.getIfRef(&ref))
+ ref = AstRef(0);
+ return new(c.lifo) AstExport(name.text(), DefinitionKind::Table, ref);
+ }
+ case WasmToken::Memory: {
+ AstRef ref;
+ if (!c.ts.getIfRef(&ref))
+ ref = AstRef(0);
+ return new(c.lifo) AstExport(name.text(), DefinitionKind::Memory, ref);
+ }
+ case WasmToken::Global: {
+ AstRef ref;
+ if (!c.ts.matchRef(&ref, c.error))
+ return nullptr;
+ return new(c.lifo) AstExport(name.text(), DefinitionKind::Global, ref);
+ }
+ case WasmToken::OpenParen: {
+ exportee = c.ts.get();
+
+ DefinitionKind kind;
+ switch (exportee.kind()) {
+ case WasmToken::Func:
+ kind = DefinitionKind::Function;
+ break;
+ case WasmToken::Table:
+ kind = DefinitionKind::Table;
+ break;
+ case WasmToken::Memory:
+ kind = DefinitionKind::Memory;
+ break;
+ case WasmToken::Global:
+ kind = DefinitionKind::Global;
+ break;
+ default:
+ c.ts.generateError(exportee, c.error);
+ return nullptr;
+ }
+
+ AstRef ref;
+ if (!c.ts.matchRef(&ref, c.error))
+ return nullptr;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+
+ return new(c.lifo) AstExport(name.text(), kind, ref);
+ }
+ default:
+ break;
+ }
+
+ c.ts.generateError(exportee, c.error);
+ return nullptr;
+}
+
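+// Parse a table definition. Two forms are accepted: explicit limits, e.g.
+// (table 2 10 anyfunc), or an inline element list, e.g. (table anyfunc (elem $f $g)),
+// which sizes the table to the listed functions and adds an element segment at
+// offset 0.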
+static bool
+ParseTable(WasmParseContext& c, WasmToken token, AstModule* module)
+{
+ AstName name = c.ts.getIfName();
+
+ if (c.ts.getIf(WasmToken::OpenParen)) {
+ // Either an import and we're done, or an export and continue.
+ if (c.ts.getIf(WasmToken::Import)) {
+ InlineImport names;
+ if (!ParseInlineImport(c, &names))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ Limits table;
+ if (!ParseTableSig(c, &table))
+ return false;
+
+ auto* import = new(c.lifo) AstImport(name, names.module.text(), names.field.text(),
+ DefinitionKind::Table, table);
+
+ return import && module->append(import);
+ }
+
+ if (!c.ts.match(WasmToken::Export, c.error)) {
+ c.ts.generateError(token, c.error);
+ return false;
+ }
+
+ AstRef ref = name.empty() ? AstRef(module->tables().length()) : AstRef(name);
+ if (!ParseInlineExport(c, DefinitionKind::Table, module, ref))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ }
+
+ // Either: min max? anyfunc
+ if (c.ts.peek().kind() == WasmToken::Index) {
+ Limits table;
+ if (!ParseTableSig(c, &table))
+ return false;
+ return module->addTable(name, table);
+ }
+
+ // Or: anyfunc (elem 1 2 ...)
+ if (!ParseElemType(c))
+ return false;
+
+ if (!c.ts.match(WasmToken::OpenParen, c.error))
+ return false;
+ if (!c.ts.match(WasmToken::Elem, c.error))
+ return false;
+
+ AstRefVector elems(c.lifo);
+
+ AstRef elem;
+ while (c.ts.getIfRef(&elem)) {
+ if (!elems.append(elem))
+ return false;
+ }
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ uint32_t numElements = uint32_t(elems.length());
+ if (numElements != elems.length())
+ return false;
+
+ Limits r = { numElements, Some(numElements) };
+ if (!module->addTable(name, r))
+ return false;
+
+ auto* zero = new(c.lifo) AstConst(Val(uint32_t(0)));
+ if (!zero)
+ return false;
+
+ AstElemSegment* segment = new(c.lifo) AstElemSegment(zero, Move(elems));
+ return segment && module->append(segment);
+}
+
+static AstElemSegment*
+ParseElemSegment(WasmParseContext& c)
+{
+ if (!MaybeParseOwnerIndex(c))
+ return nullptr;
+
+ AstExpr* offset = ParseInitializerExpression(c);
+ if (!offset)
+ return nullptr;
+
+ AstRefVector elems(c.lifo);
+
+ AstRef elem;
+ while (c.ts.getIfRef(&elem)) {
+ if (!elems.append(elem))
+ return nullptr;
+ }
+
+ return new(c.lifo) AstElemSegment(offset, Move(elems));
+}
+
+static bool
+ParseGlobal(WasmParseContext& c, AstModule* module)
+{
+ AstName name = c.ts.getIfName();
+
+ WasmToken typeToken;
+ bool isMutable;
+
+ WasmToken openParen;
+ if (c.ts.getIf(WasmToken::OpenParen, &openParen)) {
+ if (c.ts.getIf(WasmToken::Import)) {
+ if (module->globals().length()) {
+ c.ts.generateError(openParen, "import after global definition", c.error);
+ return false;
+ }
+
+ InlineImport names;
+ if (!ParseInlineImport(c, &names))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+
+ if (!ParseGlobalType(c, &typeToken, &isMutable))
+ return false;
+
+ auto* imp = new(c.lifo) AstImport(name, names.module.text(), names.field.text(),
+ AstGlobal(AstName(), typeToken.valueType(),
+ isMutable));
+ return imp && module->append(imp);
+ }
+
+ if (c.ts.getIf(WasmToken::Export)) {
+ AstRef ref = name.empty() ? AstRef(module->globals().length()) : AstRef(name);
+ if (!ParseInlineExport(c, DefinitionKind::Global, module, ref))
+ return false;
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return false;
+ } else {
+ c.ts.unget(openParen);
+ }
+ }
+
+ if (!ParseGlobalType(c, &typeToken, &isMutable))
+ return false;
+
+ AstExpr* init = ParseInitializerExpression(c);
+ if (!init)
+ return false;
+
+ auto* glob = new(c.lifo) AstGlobal(name, typeToken.valueType(), isMutable, Some(init));
+ return glob && module->append(glob);
+}
+
+static AstModule*
+ParseBinaryModule(WasmParseContext& c, AstModule* module)
+{
+    // By convention with EncodeBinaryModule, a binary module consists solely of
+    // a data section holding the module's raw bytes.
+ AstNameVector fragments(c.lifo);
+
+ WasmToken text;
+ while (c.ts.getIf(WasmToken::Text, &text)) {
+ if (!fragments.append(text.text()))
+ return nullptr;
+ }
+
+ auto* data = new(c.lifo) AstDataSegment(nullptr, Move(fragments));
+ if (!data || !module->append(data))
+ return nullptr;
+
+ return module;
+}
+
+static AstModule*
+ParseModule(const char16_t* text, LifoAlloc& lifo, UniqueChars* error, bool* binary)
+{
+ WasmParseContext c(text, lifo, error);
+
+ *binary = false;
+
+ if (!c.ts.match(WasmToken::OpenParen, c.error))
+ return nullptr;
+ if (!c.ts.match(WasmToken::Module, c.error))
+ return nullptr;
+
+ auto* module = new(c.lifo) AstModule(c.lifo);
+ if (!module || !module->init())
+ return nullptr;
+
+ if (c.ts.peek().kind() == WasmToken::Text) {
+ *binary = true;
+ return ParseBinaryModule(c, module);
+ }
+
+ while (c.ts.getIf(WasmToken::OpenParen)) {
+ WasmToken section = c.ts.get();
+
+ switch (section.kind()) {
+ case WasmToken::Type: {
+ AstSig* sig = ParseTypeDef(c);
+ if (!sig || !module->append(sig))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Start: {
+ if (!ParseStartFunc(c, section, module))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Memory: {
+ if (!ParseMemory(c, section, module))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Global: {
+ if (!ParseGlobal(c, module))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Data: {
+ AstDataSegment* segment = ParseDataSegment(c);
+ if (!segment || !module->append(segment))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Import: {
+ AstImport* imp = ParseImport(c, module);
+ if (!imp || !module->append(imp))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Export: {
+ AstExport* exp = ParseExport(c);
+ if (!exp || !module->append(exp))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Table: {
+ if (!ParseTable(c, section, module))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Elem: {
+ AstElemSegment* segment = ParseElemSegment(c);
+ if (!segment || !module->append(segment))
+ return nullptr;
+ break;
+ }
+ case WasmToken::Func: {
+ if (!ParseFunc(c, module))
+ return nullptr;
+ break;
+ }
+ default:
+ c.ts.generateError(section, c.error);
+ return nullptr;
+ }
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ }
+
+ if (!c.ts.match(WasmToken::CloseParen, c.error))
+ return nullptr;
+ if (!c.ts.match(WasmToken::EndOfFile, c.error))
+ return nullptr;
+
+ return module;
+}
+
+/*****************************************************************************/
+// wasm name resolution
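+//
+// Resolution replaces symbolic names ($foo) in AstRefs with numeric indices.
+// Branch targets resolve to relative depths: the innermost enclosing block,
+// loop, or if with a matching label gets depth 0.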
+
+namespace {
+
+class Resolver
+{
+ UniqueChars* error_;
+ AstNameMap varMap_;
+ AstNameMap globalMap_;
+ AstNameMap sigMap_;
+ AstNameMap funcMap_;
+ AstNameMap importMap_;
+ AstNameMap tableMap_;
+ AstNameMap memoryMap_;
+ AstNameVector targetStack_;
+
+ bool registerName(AstNameMap& map, AstName name, size_t index) {
+ AstNameMap::AddPtr p = map.lookupForAdd(name);
+ if (!p) {
+ if (!map.add(p, name, index))
+ return false;
+ } else {
+ return false;
+ }
+ return true;
+ }
+ bool resolveName(AstNameMap& map, AstName name, size_t* index) {
+ AstNameMap::Ptr p = map.lookup(name);
+ if (p) {
+ *index = p->value();
+ return true;
+ }
+ return false;
+ }
+ bool resolveRef(AstNameMap& map, AstRef& ref) {
+ AstNameMap::Ptr p = map.lookup(ref.name());
+ if (p) {
+ ref.setIndex(p->value());
+ return true;
+ }
+ return false;
+ }
+ bool failResolveLabel(const char* kind, AstName name) {
+ TwoByteChars chars(name.begin(), name.length());
+ UniqueChars utf8Chars(CharsToNewUTF8CharsZ(nullptr, chars).c_str());
+ error_->reset(JS_smprintf("%s label '%s' not found", kind, utf8Chars.get()));
+ return false;
+ }
+
+ public:
+ explicit Resolver(LifoAlloc& lifo, UniqueChars* error)
+ : error_(error),
+ varMap_(lifo),
+ globalMap_(lifo),
+ sigMap_(lifo),
+ funcMap_(lifo),
+ importMap_(lifo),
+ tableMap_(lifo),
+ memoryMap_(lifo),
+ targetStack_(lifo)
+ {}
+ bool init() {
+ return sigMap_.init() &&
+ funcMap_.init() &&
+ importMap_.init() &&
+ tableMap_.init() &&
+ memoryMap_.init() &&
+ varMap_.init() &&
+ globalMap_.init();
+ }
+ void beginFunc() {
+ varMap_.clear();
+ MOZ_ASSERT(targetStack_.empty());
+ }
+
+#define REGISTER(what, map) \
+ bool register##what##Name(AstName name, size_t index) { \
+ return name.empty() || registerName(map, name, index); \
+ }
+
+ REGISTER(Sig, sigMap_)
+ REGISTER(Func, funcMap_)
+ REGISTER(Import, importMap_)
+ REGISTER(Var, varMap_)
+ REGISTER(Global, globalMap_)
+ REGISTER(Table, tableMap_)
+ REGISTER(Memory, memoryMap_)
+
+#undef REGISTER
+
+ bool pushTarget(AstName name) {
+ return targetStack_.append(name);
+ }
+ void popTarget(AstName name) {
+ MOZ_ASSERT(targetStack_.back() == name);
+ targetStack_.popBack();
+ }
+
+#define RESOLVE(map, label) \
+ bool resolve##label(AstRef& ref) { \
+ MOZ_ASSERT(!ref.isInvalid()); \
+ if (!ref.name().empty() && !resolveRef(map, ref)) \
+ return failResolveLabel(#label, ref.name()); \
+ return true; \
+ }
+
+ RESOLVE(sigMap_, Signature)
+ RESOLVE(funcMap_, Function)
+ RESOLVE(importMap_, Import)
+ RESOLVE(varMap_, Local)
+ RESOLVE(globalMap_, Global)
+ RESOLVE(tableMap_, Table)
+ RESOLVE(memoryMap_, Memory)
+
+#undef RESOLVE
+
+ bool resolveBranchTarget(AstRef& ref) {
+ if (ref.name().empty())
+ return true;
+ for (size_t i = 0, e = targetStack_.length(); i < e; i++) {
+ if (targetStack_[e - i - 1] == ref.name()) {
+ ref.setIndex(i);
+ return true;
+ }
+ }
+ return failResolveLabel("branch target", ref.name());
+ }
+
+ bool fail(const char* message) {
+ error_->reset(JS_smprintf("%s", message));
+ return false;
+ }
+};
+
+} // end anonymous namespace
+
+static bool
+ResolveExpr(Resolver& r, AstExpr& expr);
+
+static bool
+ResolveExprList(Resolver& r, const AstExprVector& v)
+{
+ for (size_t i = 0; i < v.length(); i++) {
+ if (!ResolveExpr(r, *v[i]))
+ return false;
+ }
+ return true;
+}
+
+static bool
+ResolveBlock(Resolver& r, AstBlock& b)
+{
+ if (!r.pushTarget(b.name()))
+ return false;
+
+ if (!ResolveExprList(r, b.exprs()))
+ return false;
+
+ r.popTarget(b.name());
+ return true;
+}
+
+static bool
+ResolveDropOperator(Resolver& r, AstDrop& drop)
+{
+ return ResolveExpr(r, drop.value());
+}
+
+static bool
+ResolveBranch(Resolver& r, AstBranch& br)
+{
+ if (!r.resolveBranchTarget(br.target()))
+ return false;
+
+ if (br.maybeValue() && !ResolveExpr(r, *br.maybeValue()))
+ return false;
+
+ if (br.op() == Op::BrIf) {
+ if (!ResolveExpr(r, br.cond()))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ResolveArgs(Resolver& r, const AstExprVector& args)
+{
+ for (AstExpr* arg : args) {
+ if (!ResolveExpr(r, *arg))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+ResolveCall(Resolver& r, AstCall& c)
+{
+ MOZ_ASSERT(c.op() == Op::Call);
+
+ if (!ResolveArgs(r, c.args()))
+ return false;
+
+ if (!r.resolveFunction(c.func()))
+ return false;
+
+ return true;
+}
+
+static bool
+ResolveCallIndirect(Resolver& r, AstCallIndirect& c)
+{
+ if (!ResolveArgs(r, c.args()))
+ return false;
+
+ if (!ResolveExpr(r, *c.index()))
+ return false;
+
+ if (!r.resolveSignature(c.sig()))
+ return false;
+
+ return true;
+}
+
+static bool
+ResolveFirst(Resolver& r, AstFirst& f)
+{
+ return ResolveExprList(r, f.exprs());
+}
+
+static bool
+ResolveGetLocal(Resolver& r, AstGetLocal& gl)
+{
+ return r.resolveLocal(gl.local());
+}
+
+static bool
+ResolveSetLocal(Resolver& r, AstSetLocal& sl)
+{
+ if (!ResolveExpr(r, sl.value()))
+ return false;
+
+ if (!r.resolveLocal(sl.local()))
+ return false;
+
+ return true;
+}
+
+static bool
+ResolveGetGlobal(Resolver& r, AstGetGlobal& gl)
+{
+ return r.resolveGlobal(gl.global());
+}
+
+static bool
+ResolveSetGlobal(Resolver& r, AstSetGlobal& sl)
+{
+ if (!ResolveExpr(r, sl.value()))
+ return false;
+
+ if (!r.resolveGlobal(sl.global()))
+ return false;
+
+ return true;
+}
+
+static bool
+ResolveTeeLocal(Resolver& r, AstTeeLocal& sl)
+{
+ if (!ResolveExpr(r, sl.value()))
+ return false;
+
+ if (!r.resolveLocal(sl.local()))
+ return false;
+
+ return true;
+}
+
+static bool
+ResolveUnaryOperator(Resolver& r, AstUnaryOperator& b)
+{
+ return ResolveExpr(r, *b.operand());
+}
+
+static bool
+ResolveGrowMemory(Resolver& r, AstGrowMemory& gm)
+{
+ return ResolveExpr(r, *gm.operand());
+}
+
+static bool
+ResolveBinaryOperator(Resolver& r, AstBinaryOperator& b)
+{
+ return ResolveExpr(r, *b.lhs()) &&
+ ResolveExpr(r, *b.rhs());
+}
+
+static bool
+ResolveTernaryOperator(Resolver& r, AstTernaryOperator& b)
+{
+ return ResolveExpr(r, *b.op0()) &&
+ ResolveExpr(r, *b.op1()) &&
+ ResolveExpr(r, *b.op2());
+}
+
+static bool
+ResolveComparisonOperator(Resolver& r, AstComparisonOperator& b)
+{
+ return ResolveExpr(r, *b.lhs()) &&
+ ResolveExpr(r, *b.rhs());
+}
+
+static bool
+ResolveConversionOperator(Resolver& r, AstConversionOperator& b)
+{
+ return ResolveExpr(r, *b.operand());
+}
+
+static bool
+ResolveIfElse(Resolver& r, AstIf& i)
+{
+ if (!ResolveExpr(r, i.cond()))
+ return false;
+ if (!r.pushTarget(i.name()))
+ return false;
+ if (!ResolveExprList(r, i.thenExprs()))
+ return false;
+ if (i.hasElse()) {
+ if (!ResolveExprList(r, i.elseExprs()))
+ return false;
+ }
+ r.popTarget(i.name());
+ return true;
+}
+
+static bool
+ResolveLoadStoreAddress(Resolver& r, const AstLoadStoreAddress &address)
+{
+ return ResolveExpr(r, address.base());
+}
+
+static bool
+ResolveLoad(Resolver& r, AstLoad& l)
+{
+ return ResolveLoadStoreAddress(r, l.address());
+}
+
+static bool
+ResolveStore(Resolver& r, AstStore& s)
+{
+ return ResolveLoadStoreAddress(r, s.address()) &&
+ ResolveExpr(r, s.value());
+}
+
+static bool
+ResolveReturn(Resolver& r, AstReturn& ret)
+{
+ return !ret.maybeExpr() || ResolveExpr(r, *ret.maybeExpr());
+}
+
+static bool
+ResolveBranchTable(Resolver& r, AstBranchTable& bt)
+{
+ if (!r.resolveBranchTarget(bt.def()))
+ return false;
+
+ for (AstRef& elem : bt.table()) {
+ if (!r.resolveBranchTarget(elem))
+ return false;
+ }
+
+ if (bt.maybeValue() && !ResolveExpr(r, *bt.maybeValue()))
+ return false;
+
+ return ResolveExpr(r, bt.index());
+}
+
+static bool
+ResolveExpr(Resolver& r, AstExpr& expr)
+{
+ switch (expr.kind()) {
+ case AstExprKind::Nop:
+ case AstExprKind::Pop:
+ case AstExprKind::Unreachable:
+ case AstExprKind::CurrentMemory:
+ return true;
+ case AstExprKind::Drop:
+ return ResolveDropOperator(r, expr.as<AstDrop>());
+ case AstExprKind::BinaryOperator:
+ return ResolveBinaryOperator(r, expr.as<AstBinaryOperator>());
+ case AstExprKind::Block:
+ return ResolveBlock(r, expr.as<AstBlock>());
+ case AstExprKind::Branch:
+ return ResolveBranch(r, expr.as<AstBranch>());
+ case AstExprKind::Call:
+ return ResolveCall(r, expr.as<AstCall>());
+ case AstExprKind::CallIndirect:
+ return ResolveCallIndirect(r, expr.as<AstCallIndirect>());
+ case AstExprKind::ComparisonOperator:
+ return ResolveComparisonOperator(r, expr.as<AstComparisonOperator>());
+ case AstExprKind::Const:
+ return true;
+ case AstExprKind::ConversionOperator:
+ return ResolveConversionOperator(r, expr.as<AstConversionOperator>());
+ case AstExprKind::First:
+ return ResolveFirst(r, expr.as<AstFirst>());
+ case AstExprKind::GetGlobal:
+ return ResolveGetGlobal(r, expr.as<AstGetGlobal>());
+ case AstExprKind::GetLocal:
+ return ResolveGetLocal(r, expr.as<AstGetLocal>());
+ case AstExprKind::If:
+ return ResolveIfElse(r, expr.as<AstIf>());
+ case AstExprKind::Load:
+ return ResolveLoad(r, expr.as<AstLoad>());
+ case AstExprKind::Return:
+ return ResolveReturn(r, expr.as<AstReturn>());
+ case AstExprKind::SetGlobal:
+ return ResolveSetGlobal(r, expr.as<AstSetGlobal>());
+ case AstExprKind::SetLocal:
+ return ResolveSetLocal(r, expr.as<AstSetLocal>());
+ case AstExprKind::Store:
+ return ResolveStore(r, expr.as<AstStore>());
+ case AstExprKind::BranchTable:
+ return ResolveBranchTable(r, expr.as<AstBranchTable>());
+ case AstExprKind::TeeLocal:
+ return ResolveTeeLocal(r, expr.as<AstTeeLocal>());
+ case AstExprKind::TernaryOperator:
+ return ResolveTernaryOperator(r, expr.as<AstTernaryOperator>());
+ case AstExprKind::UnaryOperator:
+ return ResolveUnaryOperator(r, expr.as<AstUnaryOperator>());
+ case AstExprKind::GrowMemory:
+ return ResolveGrowMemory(r, expr.as<AstGrowMemory>());
+ }
+ MOZ_CRASH("Bad expr kind");
+}
+
+static bool
+ResolveFunc(Resolver& r, AstFunc& func)
+{
+ r.beginFunc();
+
+ for (size_t i = 0; i < func.locals().length(); i++) {
+ if (!r.registerVarName(func.locals()[i], i))
+ return r.fail("duplicate var");
+ }
+
+ for (AstExpr* expr : func.body()) {
+ if (!ResolveExpr(r, *expr))
+ return false;
+ }
+ return true;
+}
+
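+// Imports are registered before locally defined items so that, within each
+// index space (functions, globals, tables, memories), the imported entries
+// occupy the leading indices, matching the binary format's index assignment.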
+static bool
+ResolveModule(LifoAlloc& lifo, AstModule* module, UniqueChars* error)
+{
+ Resolver r(lifo, error);
+
+ if (!r.init())
+ return false;
+
+ size_t numSigs = module->sigs().length();
+ for (size_t i = 0; i < numSigs; i++) {
+ AstSig* sig = module->sigs()[i];
+ if (!r.registerSigName(sig->name(), i))
+ return r.fail("duplicate signature");
+ }
+
+ size_t lastFuncIndex = 0;
+ size_t lastGlobalIndex = 0;
+ size_t lastMemoryIndex = 0;
+ size_t lastTableIndex = 0;
+ for (AstImport* imp : module->imports()) {
+ switch (imp->kind()) {
+ case DefinitionKind::Function:
+ if (!r.registerFuncName(imp->name(), lastFuncIndex++))
+ return r.fail("duplicate import");
+ if (!r.resolveSignature(imp->funcSig()))
+ return false;
+ break;
+ case DefinitionKind::Global:
+ if (!r.registerGlobalName(imp->name(), lastGlobalIndex++))
+ return r.fail("duplicate import");
+ break;
+ case DefinitionKind::Memory:
+ if (!r.registerMemoryName(imp->name(), lastMemoryIndex++))
+ return r.fail("duplicate import");
+ break;
+ case DefinitionKind::Table:
+ if (!r.registerTableName(imp->name(), lastTableIndex++))
+ return r.fail("duplicate import");
+ break;
+ }
+ }
+
+ for (AstFunc* func : module->funcs()) {
+ if (!r.resolveSignature(func->sig()))
+ return false;
+ if (!r.registerFuncName(func->name(), lastFuncIndex++))
+ return r.fail("duplicate function");
+ }
+
+ for (const AstGlobal* global : module->globals()) {
+ if (!r.registerGlobalName(global->name(), lastGlobalIndex++))
+ return r.fail("duplicate import");
+ if (global->hasInit() && !ResolveExpr(r, global->init()))
+ return false;
+ }
+
+ for (const AstResizable& table : module->tables()) {
+ if (table.imported)
+ continue;
+ if (!r.registerTableName(table.name, lastTableIndex++))
+ return r.fail("duplicate import");
+ }
+
+ for (const AstResizable& memory : module->memories()) {
+ if (memory.imported)
+ continue;
+ if (!r.registerMemoryName(memory.name, lastMemoryIndex++))
+ return r.fail("duplicate import");
+ }
+
+ for (AstExport* export_ : module->exports()) {
+ switch (export_->kind()) {
+ case DefinitionKind::Function:
+ if (!r.resolveFunction(export_->ref()))
+ return false;
+ break;
+ case DefinitionKind::Global:
+ if (!r.resolveGlobal(export_->ref()))
+ return false;
+ break;
+ case DefinitionKind::Table:
+ if (!r.resolveTable(export_->ref()))
+ return false;
+ break;
+ case DefinitionKind::Memory:
+ if (!r.resolveMemory(export_->ref()))
+ return false;
+ break;
+ }
+ }
+
+ for (AstFunc* func : module->funcs()) {
+ if (!ResolveFunc(r, *func))
+ return false;
+ }
+
+ if (module->hasStartFunc()) {
+ if (!r.resolveFunction(module->startFunc().func()))
+ return false;
+ }
+
+ for (AstDataSegment* segment : module->dataSegments()) {
+ if (!ResolveExpr(r, *segment->offset()))
+ return false;
+ }
+
+ for (AstElemSegment* segment : module->elemSegments()) {
+ if (!ResolveExpr(r, *segment->offset()))
+ return false;
+ for (AstRef& ref : segment->elems()) {
+ if (!r.resolveFunction(ref))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*****************************************************************************/
+// wasm function body serialization
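+//
+// The AST is expression-shaped while the wasm binary format is a stack
+// machine, so each encoder below emits a node's operands first and the node's
+// own opcode last; a binary operator, for instance, encodes its lhs, then its
+// rhs, then the operator opcode.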
+
+static bool
+EncodeExpr(Encoder& e, AstExpr& expr);
+
+static bool
+EncodeExprList(Encoder& e, const AstExprVector& v)
+{
+ for (size_t i = 0; i < v.length(); i++) {
+ if (!EncodeExpr(e, *v[i]))
+ return false;
+ }
+ return true;
+}
+
+static bool
+EncodeBlock(Encoder& e, AstBlock& b)
+{
+ if (!e.writeOp(b.op()))
+ return false;
+
+ if (!e.writeBlockType(b.type()))
+ return false;
+
+ if (!EncodeExprList(e, b.exprs()))
+ return false;
+
+ if (!e.writeOp(Op::End))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeBranch(Encoder& e, AstBranch& br)
+{
+ MOZ_ASSERT(br.op() == Op::Br || br.op() == Op::BrIf);
+
+ if (br.maybeValue()) {
+ if (!EncodeExpr(e, *br.maybeValue()))
+ return false;
+ }
+
+ if (br.op() == Op::BrIf) {
+ if (!EncodeExpr(e, br.cond()))
+ return false;
+ }
+
+ if (!e.writeOp(br.op()))
+ return false;
+
+ if (!e.writeVarU32(br.target().index()))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeFirst(Encoder& e, AstFirst& f)
+{
+ return EncodeExprList(e, f.exprs());
+}
+
+static bool
+EncodeArgs(Encoder& e, const AstExprVector& args)
+{
+ for (AstExpr* arg : args) {
+ if (!EncodeExpr(e, *arg))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+EncodeCall(Encoder& e, AstCall& c)
+{
+ if (!EncodeArgs(e, c.args()))
+ return false;
+
+ if (!e.writeOp(c.op()))
+ return false;
+
+ if (!e.writeVarU32(c.func().index()))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeCallIndirect(Encoder& e, AstCallIndirect& c)
+{
+ if (!EncodeArgs(e, c.args()))
+ return false;
+
+ if (!EncodeExpr(e, *c.index()))
+ return false;
+
+ if (!e.writeOp(Op::CallIndirect))
+ return false;
+
+ if (!e.writeVarU32(c.sig().index()))
+ return false;
+
+ if (!e.writeVarU32(uint32_t(MemoryTableFlags::Default)))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeConst(Encoder& e, AstConst& c)
+{
+ switch (c.val().type()) {
+ case ValType::I32:
+ return e.writeOp(Op::I32Const) &&
+ e.writeVarS32(c.val().i32());
+ case ValType::I64:
+ return e.writeOp(Op::I64Const) &&
+ e.writeVarS64(c.val().i64());
+ case ValType::F32:
+ return e.writeOp(Op::F32Const) &&
+ e.writeFixedF32(c.val().f32());
+ case ValType::F64:
+ return e.writeOp(Op::F64Const) &&
+ e.writeFixedF64(c.val().f64());
+ default:
+ break;
+ }
+ MOZ_CRASH("Bad value type");
+}
+
+static bool
+EncodeDrop(Encoder& e, AstDrop &drop)
+{
+ return EncodeExpr(e, drop.value()) &&
+ e.writeOp(Op::Drop);
+}
+
+static bool
+EncodeGetLocal(Encoder& e, AstGetLocal& gl)
+{
+ return e.writeOp(Op::GetLocal) &&
+ e.writeVarU32(gl.local().index());
+}
+
+static bool
+EncodeSetLocal(Encoder& e, AstSetLocal& sl)
+{
+ return EncodeExpr(e, sl.value()) &&
+ e.writeOp(Op::SetLocal) &&
+ e.writeVarU32(sl.local().index());
+}
+
+static bool
+EncodeTeeLocal(Encoder& e, AstTeeLocal& sl)
+{
+ return EncodeExpr(e, sl.value()) &&
+ e.writeOp(Op::TeeLocal) &&
+ e.writeVarU32(sl.local().index());
+}
+
+static bool
+EncodeGetGlobal(Encoder& e, AstGetGlobal& gg)
+{
+ return e.writeOp(Op::GetGlobal) &&
+ e.writeVarU32(gg.global().index());
+}
+
+static bool
+EncodeSetGlobal(Encoder& e, AstSetGlobal& sg)
+{
+ return EncodeExpr(e, sg.value()) &&
+ e.writeOp(Op::SetGlobal) &&
+ e.writeVarU32(sg.global().index());
+}
+
+static bool
+EncodeUnaryOperator(Encoder& e, AstUnaryOperator& b)
+{
+ return EncodeExpr(e, *b.operand()) &&
+ e.writeOp(b.op());
+}
+
+static bool
+EncodeBinaryOperator(Encoder& e, AstBinaryOperator& b)
+{
+ return EncodeExpr(e, *b.lhs()) &&
+ EncodeExpr(e, *b.rhs()) &&
+ e.writeOp(b.op());
+}
+
+static bool
+EncodeTernaryOperator(Encoder& e, AstTernaryOperator& b)
+{
+ return EncodeExpr(e, *b.op0()) &&
+ EncodeExpr(e, *b.op1()) &&
+ EncodeExpr(e, *b.op2()) &&
+ e.writeOp(b.op());
+}
+
+static bool
+EncodeComparisonOperator(Encoder& e, AstComparisonOperator& b)
+{
+ return EncodeExpr(e, *b.lhs()) &&
+ EncodeExpr(e, *b.rhs()) &&
+ e.writeOp(b.op());
+}
+
+static bool
+EncodeConversionOperator(Encoder& e, AstConversionOperator& b)
+{
+ return EncodeExpr(e, *b.operand()) &&
+ e.writeOp(b.op());
+}
+
+static bool
+EncodeIf(Encoder& e, AstIf& i)
+{
+ if (!EncodeExpr(e, i.cond()) || !e.writeOp(Op::If))
+ return false;
+
+ if (!e.writeBlockType(i.type()))
+ return false;
+
+ if (!EncodeExprList(e, i.thenExprs()))
+ return false;
+
+ if (i.hasElse()) {
+ if (!e.writeOp(Op::Else))
+ return false;
+ if (!EncodeExprList(e, i.elseExprs()))
+ return false;
+ }
+
+ return e.writeOp(Op::End);
+}
+
+static bool
+EncodeLoadStoreAddress(Encoder &e, const AstLoadStoreAddress &address)
+{
+ return EncodeExpr(e, address.base());
+}
+
+static bool
+EncodeLoadStoreFlags(Encoder &e, const AstLoadStoreAddress &address)
+{
+ return e.writeVarU32(address.flags()) &&
+ e.writeVarU32(address.offset());
+}
+
+static bool
+EncodeLoad(Encoder& e, AstLoad& l)
+{
+ return EncodeLoadStoreAddress(e, l.address()) &&
+ e.writeOp(l.op()) &&
+ EncodeLoadStoreFlags(e, l.address());
+}
+
+static bool
+EncodeStore(Encoder& e, AstStore& s)
+{
+ return EncodeLoadStoreAddress(e, s.address()) &&
+ EncodeExpr(e, s.value()) &&
+ e.writeOp(s.op()) &&
+ EncodeLoadStoreFlags(e, s.address());
+}
+
+static bool
+EncodeReturn(Encoder& e, AstReturn& r)
+{
+ if (r.maybeExpr()) {
+ if (!EncodeExpr(e, *r.maybeExpr()))
+ return false;
+ }
+
+ if (!e.writeOp(Op::Return))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeBranchTable(Encoder& e, AstBranchTable& bt)
+{
+ if (bt.maybeValue()) {
+ if (!EncodeExpr(e, *bt.maybeValue()))
+ return false;
+ }
+
+ if (!EncodeExpr(e, bt.index()))
+ return false;
+
+ if (!e.writeOp(Op::BrTable))
+ return false;
+
+ if (!e.writeVarU32(bt.table().length()))
+ return false;
+
+ for (const AstRef& elem : bt.table()) {
+ if (!e.writeVarU32(elem.index()))
+ return false;
+ }
+
+ if (!e.writeVarU32(bt.def().index()))
+ return false;
+
+ return true;
+}
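+
+// For example, a br_table with targets (1 2) and default 0 is encoded above,
+// after its operands and the BrTable opcode, as the varuint32 sequence
+// 0x02 (table length), 0x01, 0x02 (targets), 0x00 (default target).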
+
+static bool
+EncodeCurrentMemory(Encoder& e, AstCurrentMemory& cm)
+{
+ if (!e.writeOp(Op::CurrentMemory))
+ return false;
+
+ if (!e.writeVarU32(uint32_t(MemoryTableFlags::Default)))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeGrowMemory(Encoder& e, AstGrowMemory& gm)
+{
+ if (!EncodeExpr(e, *gm.operand()))
+ return false;
+
+ if (!e.writeOp(Op::GrowMemory))
+ return false;
+
+ if (!e.writeVarU32(uint32_t(MemoryTableFlags::Default)))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeExpr(Encoder& e, AstExpr& expr)
+{
+ switch (expr.kind()) {
+ case AstExprKind::Pop:
+ return true;
+ case AstExprKind::Nop:
+ return e.writeOp(Op::Nop);
+ case AstExprKind::Unreachable:
+ return e.writeOp(Op::Unreachable);
+ case AstExprKind::BinaryOperator:
+ return EncodeBinaryOperator(e, expr.as<AstBinaryOperator>());
+ case AstExprKind::Block:
+ return EncodeBlock(e, expr.as<AstBlock>());
+ case AstExprKind::Branch:
+ return EncodeBranch(e, expr.as<AstBranch>());
+ case AstExprKind::Call:
+ return EncodeCall(e, expr.as<AstCall>());
+ case AstExprKind::CallIndirect:
+ return EncodeCallIndirect(e, expr.as<AstCallIndirect>());
+ case AstExprKind::ComparisonOperator:
+ return EncodeComparisonOperator(e, expr.as<AstComparisonOperator>());
+ case AstExprKind::Const:
+ return EncodeConst(e, expr.as<AstConst>());
+ case AstExprKind::ConversionOperator:
+ return EncodeConversionOperator(e, expr.as<AstConversionOperator>());
+ case AstExprKind::Drop:
+ return EncodeDrop(e, expr.as<AstDrop>());
+ case AstExprKind::First:
+ return EncodeFirst(e, expr.as<AstFirst>());
+ case AstExprKind::GetLocal:
+ return EncodeGetLocal(e, expr.as<AstGetLocal>());
+ case AstExprKind::GetGlobal:
+ return EncodeGetGlobal(e, expr.as<AstGetGlobal>());
+ case AstExprKind::If:
+ return EncodeIf(e, expr.as<AstIf>());
+ case AstExprKind::Load:
+ return EncodeLoad(e, expr.as<AstLoad>());
+ case AstExprKind::Return:
+ return EncodeReturn(e, expr.as<AstReturn>());
+ case AstExprKind::SetLocal:
+ return EncodeSetLocal(e, expr.as<AstSetLocal>());
+ case AstExprKind::TeeLocal:
+ return EncodeTeeLocal(e, expr.as<AstTeeLocal>());
+ case AstExprKind::SetGlobal:
+ return EncodeSetGlobal(e, expr.as<AstSetGlobal>());
+ case AstExprKind::Store:
+ return EncodeStore(e, expr.as<AstStore>());
+ case AstExprKind::BranchTable:
+ return EncodeBranchTable(e, expr.as<AstBranchTable>());
+ case AstExprKind::TernaryOperator:
+ return EncodeTernaryOperator(e, expr.as<AstTernaryOperator>());
+ case AstExprKind::UnaryOperator:
+ return EncodeUnaryOperator(e, expr.as<AstUnaryOperator>());
+ case AstExprKind::CurrentMemory:
+ return EncodeCurrentMemory(e, expr.as<AstCurrentMemory>());
+ case AstExprKind::GrowMemory:
+ return EncodeGrowMemory(e, expr.as<AstGrowMemory>());
+ }
+ MOZ_CRASH("Bad expr kind");
+}
+
+/*****************************************************************************/
+// wasm AST binary serialization
+
+static bool
+EncodeTypeSection(Encoder& e, AstModule& module)
+{
+ if (module.sigs().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Type, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.sigs().length()))
+ return false;
+
+ for (AstSig* sig : module.sigs()) {
+ if (!e.writeVarU32(uint32_t(TypeCode::Func)))
+ return false;
+
+ if (!e.writeVarU32(sig->args().length()))
+ return false;
+
+ for (ValType t : sig->args()) {
+ if (!e.writeValType(t))
+ return false;
+ }
+
+ if (!e.writeVarU32(!IsVoid(sig->ret())))
+ return false;
+
+ if (!IsVoid(sig->ret())) {
+ if (!e.writeValType(NonVoidToValType(sig->ret())))
+ return false;
+ }
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeFunctionSection(Encoder& e, AstModule& module)
+{
+ if (module.funcs().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Function, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.funcs().length()))
+ return false;
+
+ for (AstFunc* func : module.funcs()) {
+ if (!e.writeVarU32(func->sig().index()))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeBytes(Encoder& e, AstName wasmName)
+{
+ TwoByteChars range(wasmName.begin(), wasmName.length());
+ UniqueChars utf8(JS::CharsToNewUTF8CharsZ(nullptr, range).c_str());
+ return utf8 && e.writeBytes(utf8.get(), strlen(utf8.get()));
+}
+
+static bool
+EncodeLimits(Encoder& e, const Limits& limits)
+{
+ uint32_t flags = limits.maximum ? 1 : 0;
+ if (!e.writeVarU32(flags))
+ return false;
+
+ if (!e.writeVarU32(limits.initial))
+ return false;
+
+ if (limits.maximum) {
+ if (!e.writeVarU32(*limits.maximum))
+ return false;
+ }
+
+ return true;
+}
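+
+// For example, limits with initial 1 and maximum 16 are encoded above as the
+// varuint32 sequence 0x01 (has-maximum flag), 0x01 (initial), 0x10 (maximum);
+// with no maximum, only 0x00 and 0x01 are written.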
+
+static bool
+EncodeTableLimits(Encoder& e, const Limits& limits)
+{
+ if (!e.writeVarU32(uint32_t(TypeCode::AnyFunc)))
+ return false;
+
+ return EncodeLimits(e, limits);
+}
+
+static bool
+EncodeGlobalType(Encoder& e, const AstGlobal* global)
+{
+ return e.writeValType(global->type()) &&
+ e.writeVarU32(global->isMutable() ? uint32_t(GlobalTypeImmediate::IsMutable) : 0);
+}
+
+static bool
+EncodeImport(Encoder& e, AstImport& imp)
+{
+ if (!EncodeBytes(e, imp.module()))
+ return false;
+
+ if (!EncodeBytes(e, imp.field()))
+ return false;
+
+ if (!e.writeVarU32(uint32_t(imp.kind())))
+ return false;
+
+ switch (imp.kind()) {
+ case DefinitionKind::Function:
+ if (!e.writeVarU32(imp.funcSig().index()))
+ return false;
+ break;
+ case DefinitionKind::Global:
+ MOZ_ASSERT(!imp.global().hasInit());
+ if (!EncodeGlobalType(e, &imp.global()))
+ return false;
+ break;
+ case DefinitionKind::Table:
+ if (!EncodeTableLimits(e, imp.limits()))
+ return false;
+ break;
+ case DefinitionKind::Memory:
+ if (!EncodeLimits(e, imp.limits()))
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static bool
+EncodeImportSection(Encoder& e, AstModule& module)
+{
+ if (module.imports().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Import, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.imports().length()))
+ return false;
+
+ for (AstImport* imp : module.imports()) {
+ if (!EncodeImport(e, *imp))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeMemorySection(Encoder& e, AstModule& module)
+{
+ size_t numOwnMemories = 0;
+ for (const AstResizable& memory : module.memories()) {
+ if (!memory.imported)
+ numOwnMemories++;
+ }
+
+ if (!numOwnMemories)
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Memory, &offset))
+ return false;
+
+ if (!e.writeVarU32(numOwnMemories))
+ return false;
+
+ for (const AstResizable& memory : module.memories()) {
+ if (memory.imported)
+ continue;
+ if (!EncodeLimits(e, memory.limits))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeGlobalSection(Encoder& e, AstModule& module)
+{
+ size_t offset;
+ if (!e.startSection(SectionId::Global, &offset))
+ return false;
+
+ const AstGlobalVector& globals = module.globals();
+
+ if (!e.writeVarU32(globals.length()))
+ return false;
+
+ for (const AstGlobal* global : globals) {
+ MOZ_ASSERT(global->hasInit());
+ if (!EncodeGlobalType(e, global))
+ return false;
+ if (!EncodeExpr(e, global->init()))
+ return false;
+ if (!e.writeOp(Op::End))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeExport(Encoder& e, AstExport& exp)
+{
+ if (!EncodeBytes(e, exp.name()))
+ return false;
+
+ if (!e.writeVarU32(uint32_t(exp.kind())))
+ return false;
+
+ if (!e.writeVarU32(exp.ref().index()))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeExportSection(Encoder& e, AstModule& module)
+{
+ uint32_t numExports = module.exports().length();
+ if (!numExports)
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Export, &offset))
+ return false;
+
+ if (!e.writeVarU32(numExports))
+ return false;
+
+ for (AstExport* exp : module.exports()) {
+ if (!EncodeExport(e, *exp))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeTableSection(Encoder& e, AstModule& module)
+{
+ size_t numOwnTables = 0;
+ for (const AstResizable& table : module.tables()) {
+ if (!table.imported)
+ numOwnTables++;
+ }
+
+ if (!numOwnTables)
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Table, &offset))
+ return false;
+
+ if (!e.writeVarU32(numOwnTables))
+ return false;
+
+ for (const AstResizable& table : module.tables()) {
+ if (table.imported)
+ continue;
+ if (!EncodeTableLimits(e, table.limits))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
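+// A function body is length-prefixed in the binary format, but its length is
+// only known after the locals and expressions have been written, so a
+// patchable varuint32 is reserved up front and back-patched with the number
+// of bytes emitted between it and the final End opcode.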
+static bool
+EncodeFunctionBody(Encoder& e, AstFunc& func)
+{
+ size_t bodySizeAt;
+ if (!e.writePatchableVarU32(&bodySizeAt))
+ return false;
+
+ size_t beforeBody = e.currentOffset();
+
+ ValTypeVector varTypes;
+ if (!varTypes.appendAll(func.vars()))
+ return false;
+ if (!EncodeLocalEntries(e, varTypes))
+ return false;
+
+ for (AstExpr* expr : func.body()) {
+ if (!EncodeExpr(e, *expr))
+ return false;
+ }
+
+ if (!e.writeOp(Op::End))
+ return false;
+
+ e.patchVarU32(bodySizeAt, e.currentOffset() - beforeBody);
+ return true;
+}
+
+static bool
+EncodeStartSection(Encoder& e, AstModule& module)
+{
+ if (!module.hasStartFunc())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Start, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.startFunc().func().index()))
+ return false;
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeCodeSection(Encoder& e, AstModule& module)
+{
+ if (module.funcs().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Code, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.funcs().length()))
+ return false;
+
+ for (AstFunc* func : module.funcs()) {
+ if (!EncodeFunctionBody(e, *func))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeDataSegment(Encoder& e, const AstDataSegment& segment)
+{
+ if (!e.writeVarU32(0)) // linear memory index
+ return false;
+
+ if (!EncodeExpr(e, *segment.offset()))
+ return false;
+ if (!e.writeOp(Op::End))
+ return false;
+
+ size_t totalLength = 0;
+ for (const AstName& fragment : segment.fragments())
+ totalLength += fragment.length();
+
+ Vector<uint8_t, 0, SystemAllocPolicy> bytes;
+ if (!bytes.reserve(totalLength))
+ return false;
+
+ for (const AstName& fragment : segment.fragments()) {
+ const char16_t* cur = fragment.begin();
+ const char16_t* end = fragment.end();
+ while (cur != end) {
+ uint8_t byte;
+ MOZ_ALWAYS_TRUE(ConsumeTextByte(&cur, end, &byte));
+ bytes.infallibleAppend(byte);
+ }
+ }
+
+ return e.writeBytes(bytes.begin(), bytes.length());
+}
+
+static bool
+EncodeDataSection(Encoder& e, AstModule& module)
+{
+ if (module.dataSegments().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Data, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.dataSegments().length()))
+ return false;
+
+ for (AstDataSegment* segment : module.dataSegments()) {
+ if (!EncodeDataSegment(e, *segment))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeElemSegment(Encoder& e, AstElemSegment& segment)
+{
+ if (!e.writeVarU32(0)) // table index
+ return false;
+
+ if (!EncodeExpr(e, *segment.offset()))
+ return false;
+ if (!e.writeOp(Op::End))
+ return false;
+
+ if (!e.writeVarU32(segment.elems().length()))
+ return false;
+
+ for (const AstRef& elem : segment.elems()) {
+ if (!e.writeVarU32(elem.index()))
+ return false;
+ }
+
+ return true;
+}
+
+static bool
+EncodeElemSection(Encoder& e, AstModule& module)
+{
+ if (module.elemSegments().empty())
+ return true;
+
+ size_t offset;
+ if (!e.startSection(SectionId::Elem, &offset))
+ return false;
+
+ if (!e.writeVarU32(module.elemSegments().length()))
+ return false;
+
+ for (AstElemSegment* segment : module.elemSegments()) {
+ if (!EncodeElemSegment(e, *segment))
+ return false;
+ }
+
+ e.finishSection(offset);
+ return true;
+}
+
+static bool
+EncodeModule(AstModule& module, Bytes* bytes)
+{
+ Encoder e(*bytes);
+
+ if (!e.writeFixedU32(MagicNumber))
+ return false;
+
+ if (!e.writeFixedU32(EncodingVersion))
+ return false;
+
+ if (!EncodeTypeSection(e, module))
+ return false;
+
+ if (!EncodeImportSection(e, module))
+ return false;
+
+ if (!EncodeFunctionSection(e, module))
+ return false;
+
+ if (!EncodeTableSection(e, module))
+ return false;
+
+ if (!EncodeMemorySection(e, module))
+ return false;
+
+ if (!EncodeGlobalSection(e, module))
+ return false;
+
+ if (!EncodeExportSection(e, module))
+ return false;
+
+ if (!EncodeStartSection(e, module))
+ return false;
+
+ if (!EncodeElemSection(e, module))
+ return false;
+
+ if (!EncodeCodeSection(e, module))
+ return false;
+
+ if (!EncodeDataSection(e, module))
+ return false;
+
+ return true;
+}
+
+static bool
+EncodeBinaryModule(const AstModule& module, Bytes* bytes)
+{
+ Encoder e(*bytes);
+
+ const AstDataSegmentVector& dataSegments = module.dataSegments();
+ MOZ_ASSERT(dataSegments.length() == 1);
+
+ for (const AstName& fragment : dataSegments[0]->fragments()) {
+ const char16_t* cur = fragment.begin();
+ const char16_t* end = fragment.end();
+ while (cur != end) {
+ uint8_t byte;
+ MOZ_ALWAYS_TRUE(ConsumeTextByte(&cur, end, &byte));
+ if (!e.writeFixedU8(byte))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*****************************************************************************/
+
+bool
+wasm::TextToBinary(const char16_t* text, Bytes* bytes, UniqueChars* error)
+{
+ LifoAlloc lifo(AST_LIFO_DEFAULT_CHUNK_SIZE);
+
+ bool binary = false;
+ AstModule* module = ParseModule(text, lifo, error, &binary);
+ if (!module)
+ return false;
+
+ if (binary)
+ return EncodeBinaryModule(*module, bytes);
+
+ if (!ResolveModule(lifo, module, error))
+ return false;
+
+ return EncodeModule(*module, bytes);
+}
diff --git a/js/src/wasm/WasmTextToBinary.h b/js/src/wasm/WasmTextToBinary.h
new file mode 100644
index 0000000000..e2f1e719a3
--- /dev/null
+++ b/js/src/wasm/WasmTextToBinary.h
@@ -0,0 +1,37 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_text_to_binary_h
+#define wasm_text_to_binary_h
+
+#include "wasm/WasmTypes.h"
+
+namespace js {
+namespace wasm {
+
+// Translate the textual representation of a wasm module (given by a
+// null-terminated char16_t array) into serialized bytes. If there is an error
+// other than out-of-memory, an error message string will be stored in 'error'.
+
+extern MOZ_MUST_USE bool
+TextToBinary(const char16_t* text, Bytes* bytes, UniqueChars* error);
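+//
+// A minimal usage sketch (the caller-side names here are hypothetical):
+//
+//   Bytes bytes;
+//   UniqueChars error;
+//   if (!TextToBinary(moduleText, &bytes, &error))
+//     return false;  // on failure, 'error' holds a message unless OOM hit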
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_text_to_binary_h
diff --git a/js/src/wasm/WasmTextUtils.cpp b/js/src/wasm/WasmTextUtils.cpp
new file mode 100644
index 0000000000..22dc3a3d88
--- /dev/null
+++ b/js/src/wasm/WasmTextUtils.cpp
@@ -0,0 +1,77 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTextUtils.h"
+
+#include "vm/StringBuffer.h"
+#include "wasm/WasmTypes.h"
+
+using namespace js;
+using namespace js::jit;
+
+using mozilla::IsNaN;
+
+template<size_t base>
+bool
+js::wasm::RenderInBase(StringBuffer& sb, uint64_t num)
+{
+ uint64_t n = num;
+ uint64_t pow = 1;
+ while (n) {
+ pow *= base;
+ n /= base;
+ }
+ pow /= base;
+
+ n = num;
+ while (pow) {
+ if (!sb.append("0123456789abcdef"[n / pow]))
+ return false;
+ n -= (n / pow) * pow;
+ pow /= base;
+ }
+
+ return true;
+}
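+
+// For illustration: RenderInBase<16>(sb, 42) first computes pow = 16 (the
+// largest power of 16 not exceeding 42), then appends '2' (42 / 16) followed
+// by 'a' (the remainder 10), producing "2a"; an input of 0 appends nothing.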
+
+template bool js::wasm::RenderInBase<10>(StringBuffer& sb, uint64_t num);
+
+template<class T>
+bool
+js::wasm::RenderNaN(StringBuffer& sb, Raw<T> num)
+{
+ typedef typename mozilla::SelectTrait<T> Traits;
+
+ MOZ_ASSERT(IsNaN(num.fp()));
+
+ if ((num.bits() & Traits::kSignBit) && !sb.append("-"))
+ return false;
+ if (!sb.append("nan"))
+ return false;
+
+ typename Traits::Bits payload = num.bits() & Traits::kSignificandBits;
+ // Only render the payload if it's not the spec's default NaN.
+ if (payload == ((Traits::kSignificandBits + 1) >> 1))
+ return true;
+
+ return sb.append(":0x") &&
+ RenderInBase<16>(sb, payload);
+}
+
+template MOZ_MUST_USE bool js::wasm::RenderNaN(StringBuffer& b, Raw<float> num);
+template MOZ_MUST_USE bool js::wasm::RenderNaN(StringBuffer& b, Raw<double> num);
diff --git a/js/src/wasm/WasmTextUtils.h b/js/src/wasm/WasmTextUtils.h
new file mode 100644
index 0000000000..acb744db25
--- /dev/null
+++ b/js/src/wasm/WasmTextUtils.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2016 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_text_utils
+#define wasm_text_utils
+
+#include "NamespaceImports.h"
+
+namespace js {
+
+class StringBuffer;
+
+namespace wasm {
+
+template<size_t base>
+MOZ_MUST_USE bool
+RenderInBase(StringBuffer& sb, uint64_t num);
+
+template<class T>
+class Raw;
+
+template<class T>
+MOZ_MUST_USE bool
+RenderNaN(StringBuffer& sb, Raw<T> num);
+
+} // namespace wasm
+
+} // namespace js
+
+#endif // wasm_text_utils
diff --git a/js/src/wasm/WasmTypes.cpp b/js/src/wasm/WasmTypes.cpp
new file mode 100644
index 0000000000..5add17d06e
--- /dev/null
+++ b/js/src/wasm/WasmTypes.cpp
@@ -0,0 +1,727 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "wasm/WasmTypes.h"
+
+#include "mozilla/MathAlgorithms.h"
+
+#include "fdlibm.h"
+
+#include "jslibmath.h"
+#include "jsmath.h"
+
+#include "jit/MacroAssembler.h"
+#include "js/Conversions.h"
+#include "vm/Interpreter.h"
+#include "wasm/WasmInstance.h"
+#include "wasm/WasmSerialize.h"
+#include "wasm/WasmSignalHandlers.h"
+
+#include "vm/Stack-inl.h"
+
+using namespace js;
+using namespace js::jit;
+using namespace js::wasm;
+
+using mozilla::IsNaN;
+using mozilla::IsPowerOfTwo;
+
+void
+Val::writePayload(uint8_t* dst) const
+{
+ switch (type_) {
+ case ValType::I32:
+ case ValType::F32:
+ memcpy(dst, &u.i32_, sizeof(u.i32_));
+ return;
+ case ValType::I64:
+ case ValType::F64:
+ memcpy(dst, &u.i64_, sizeof(u.i64_));
+ return;
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ memcpy(dst, &u, jit::Simd128DataSize);
+ return;
+ }
+}
+
+#if defined(JS_CODEGEN_ARM)
+extern "C" {
+
+extern MOZ_EXPORT int64_t
+__aeabi_idivmod(int, int);
+
+extern MOZ_EXPORT int64_t
+__aeabi_uidivmod(int, int);
+
+}
+#endif
+
+static void
+WasmReportOverRecursed()
+{
+ ReportOverRecursed(JSRuntime::innermostWasmActivation()->cx());
+}
+
+static bool
+WasmHandleExecutionInterrupt()
+{
+ WasmActivation* activation = JSRuntime::innermostWasmActivation();
+ bool success = CheckForInterrupt(activation->cx());
+
+ // Preserve the invariant that having a non-null resumePC means that we are
+ // handling an interrupt. Note that resumePC has already been copied onto
+ // the stack by the interrupt stub, so we can clear it before returning
+ // to the stub.
+ activation->setResumePC(nullptr);
+
+ return success;
+}
+
+static void
+WasmReportTrap(int32_t trapIndex)
+{
+ JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
+
+ MOZ_ASSERT(trapIndex < int32_t(Trap::Limit) && trapIndex >= 0);
+ Trap trap = Trap(trapIndex);
+
+ unsigned errorNumber;
+ switch (trap) {
+ case Trap::Unreachable:
+ errorNumber = JSMSG_WASM_UNREACHABLE;
+ break;
+ case Trap::IntegerOverflow:
+ errorNumber = JSMSG_WASM_INTEGER_OVERFLOW;
+ break;
+ case Trap::InvalidConversionToInteger:
+ errorNumber = JSMSG_WASM_INVALID_CONVERSION;
+ break;
+ case Trap::IntegerDivideByZero:
+ errorNumber = JSMSG_WASM_INT_DIVIDE_BY_ZERO;
+ break;
+ case Trap::IndirectCallToNull:
+ errorNumber = JSMSG_WASM_IND_CALL_TO_NULL;
+ break;
+ case Trap::IndirectCallBadSig:
+ errorNumber = JSMSG_WASM_IND_CALL_BAD_SIG;
+ break;
+ case Trap::ImpreciseSimdConversion:
+ errorNumber = JSMSG_SIMD_FAILED_CONVERSION;
+ break;
+ case Trap::OutOfBounds:
+ errorNumber = JSMSG_WASM_OUT_OF_BOUNDS;
+ break;
+ case Trap::StackOverflow:
+ errorNumber = JSMSG_OVER_RECURSED;
+ break;
+ default:
+ MOZ_CRASH("unexpected trap");
+ }
+
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, errorNumber);
+}
+
+static void
+WasmReportOutOfBounds()
+{
+ JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_OUT_OF_BOUNDS);
+}
+
+static void
+WasmReportUnalignedAccess()
+{
+ JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
+ JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_UNALIGNED_ACCESS);
+}
+
+static int32_t
+CoerceInPlace_ToInt32(MutableHandleValue val)
+{
+ JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
+
+ int32_t i32;
+ if (!ToInt32(cx, val, &i32))
+ return false;
+ val.set(Int32Value(i32));
+
+ return true;
+}
+
+static int32_t
+CoerceInPlace_ToNumber(MutableHandleValue val)
+{
+ JSContext* cx = JSRuntime::innermostWasmActivation()->cx();
+
+ double dbl;
+ if (!ToNumber(cx, val, &dbl))
+ return false;
+ val.set(DoubleValue(dbl));
+
+ return true;
+}
+
+static int64_t
+DivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi, uint32_t y_lo)
+{
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x / y;
+}
+
+static int64_t
+UDivI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi, uint32_t y_lo)
+{
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return x / y;
+}
+
+static int64_t
+ModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi, uint32_t y_lo)
+{
+ int64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ int64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(x != INT64_MIN || y != -1);
+ MOZ_ASSERT(y != 0);
+ return x % y;
+}
+
+static int64_t
+UModI64(uint32_t x_hi, uint32_t x_lo, uint32_t y_hi, uint32_t y_lo)
+{
+ uint64_t x = ((uint64_t)x_hi << 32) + x_lo;
+ uint64_t y = ((uint64_t)y_hi << 32) + y_lo;
+ MOZ_ASSERT(y != 0);
+ return x % y;
+}
+
+static int64_t
+TruncateDoubleToInt64(double input)
+{
+ // Note: INT64_MAX is not representable as a double; double(INT64_MAX) rounds
+ // up to INT64_MAX + 1, so inputs at or above it also yield the failure value.
+ if (input >= double(INT64_MAX) || input < double(INT64_MIN) || IsNaN(input))
+ return 0x8000000000000000;
+ return int64_t(input);
+}
+
+static uint64_t
+TruncateDoubleToUint64(double input)
+{
+ // Note: UINT64_MAX is not representable as a double; double(UINT64_MAX) rounds
+ // up to UINT64_MAX + 1, so inputs at or above it also yield the failure value.
+ if (input >= double(UINT64_MAX) || input <= -1.0 || IsNaN(input))
+ return 0x8000000000000000;
+ return uint64_t(input);
+}
+
+static double
+Int64ToFloatingPoint(int32_t x_hi, uint32_t x_lo)
+{
+ int64_t x = int64_t((uint64_t(x_hi) << 32)) + int64_t(x_lo);
+ return double(x);
+}
+
+static double
+Uint64ToFloatingPoint(int32_t x_hi, uint32_t x_lo)
+{
+ uint64_t x = (uint64_t(x_hi) << 32) + uint64_t(x_lo);
+ return double(x);
+}
+
+template <class F>
+static inline void*
+FuncCast(F* pf, ABIFunctionType type)
+{
+ void *pv = JS_FUNC_TO_DATA_PTR(void*, pf);
+#ifdef JS_SIMULATOR
+ pv = Simulator::RedirectNativeFunction(pv, type);
+#endif
+ return pv;
+}
+
+void*
+wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
+{
+ switch (imm) {
+ case SymbolicAddress::Context:
+ return cx->contextAddressForJit();
+ case SymbolicAddress::InterruptUint32:
+ return cx->runtimeAddressOfInterruptUint32();
+ case SymbolicAddress::ReportOverRecursed:
+ return FuncCast(WasmReportOverRecursed, Args_General0);
+ case SymbolicAddress::HandleExecutionInterrupt:
+ return FuncCast(WasmHandleExecutionInterrupt, Args_General0);
+ case SymbolicAddress::ReportTrap:
+ return FuncCast(WasmReportTrap, Args_General1);
+ case SymbolicAddress::ReportOutOfBounds:
+ return FuncCast(WasmReportOutOfBounds, Args_General0);
+ case SymbolicAddress::ReportUnalignedAccess:
+ return FuncCast(WasmReportUnalignedAccess, Args_General0);
+ case SymbolicAddress::CallImport_Void:
+ return FuncCast(Instance::callImport_void, Args_General4);
+ case SymbolicAddress::CallImport_I32:
+ return FuncCast(Instance::callImport_i32, Args_General4);
+ case SymbolicAddress::CallImport_I64:
+ return FuncCast(Instance::callImport_i64, Args_General4);
+ case SymbolicAddress::CallImport_F64:
+ return FuncCast(Instance::callImport_f64, Args_General4);
+ case SymbolicAddress::CoerceInPlace_ToInt32:
+ return FuncCast(CoerceInPlace_ToInt32, Args_General1);
+ case SymbolicAddress::CoerceInPlace_ToNumber:
+ return FuncCast(CoerceInPlace_ToNumber, Args_General1);
+ case SymbolicAddress::ToInt32:
+ return FuncCast<int32_t (double)>(JS::ToInt32, Args_Int_Double);
+ case SymbolicAddress::DivI64:
+ return FuncCast(DivI64, Args_General4);
+ case SymbolicAddress::UDivI64:
+ return FuncCast(UDivI64, Args_General4);
+ case SymbolicAddress::ModI64:
+ return FuncCast(ModI64, Args_General4);
+ case SymbolicAddress::UModI64:
+ return FuncCast(UModI64, Args_General4);
+ case SymbolicAddress::TruncateDoubleToUint64:
+ return FuncCast(TruncateDoubleToUint64, Args_Int64_Double);
+ case SymbolicAddress::TruncateDoubleToInt64:
+ return FuncCast(TruncateDoubleToInt64, Args_Int64_Double);
+ case SymbolicAddress::Uint64ToFloatingPoint:
+ return FuncCast(Uint64ToFloatingPoint, Args_Double_IntInt);
+ case SymbolicAddress::Int64ToFloatingPoint:
+ return FuncCast(Int64ToFloatingPoint, Args_Double_IntInt);
+#if defined(JS_CODEGEN_ARM)
+ case SymbolicAddress::aeabi_idivmod:
+ return FuncCast(__aeabi_idivmod, Args_General2);
+ case SymbolicAddress::aeabi_uidivmod:
+ return FuncCast(__aeabi_uidivmod, Args_General2);
+ case SymbolicAddress::AtomicCmpXchg:
+ return FuncCast(atomics_cmpxchg_asm_callout, Args_General5);
+ case SymbolicAddress::AtomicXchg:
+ return FuncCast(atomics_xchg_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicFetchAdd:
+ return FuncCast(atomics_add_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicFetchSub:
+ return FuncCast(atomics_sub_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicFetchAnd:
+ return FuncCast(atomics_and_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicFetchOr:
+ return FuncCast(atomics_or_asm_callout, Args_General4);
+ case SymbolicAddress::AtomicFetchXor:
+ return FuncCast(atomics_xor_asm_callout, Args_General4);
+#endif
+ case SymbolicAddress::ModD:
+ return FuncCast(NumberMod, Args_Double_DoubleDouble);
+ case SymbolicAddress::SinD:
+ return FuncCast<double (double)>(sin, Args_Double_Double);
+ case SymbolicAddress::CosD:
+ return FuncCast<double (double)>(cos, Args_Double_Double);
+ case SymbolicAddress::TanD:
+ return FuncCast<double (double)>(tan, Args_Double_Double);
+ case SymbolicAddress::ASinD:
+ return FuncCast<double (double)>(fdlibm::asin, Args_Double_Double);
+ case SymbolicAddress::ACosD:
+ return FuncCast<double (double)>(fdlibm::acos, Args_Double_Double);
+ case SymbolicAddress::ATanD:
+ return FuncCast<double (double)>(fdlibm::atan, Args_Double_Double);
+ case SymbolicAddress::CeilD:
+ return FuncCast<double (double)>(fdlibm::ceil, Args_Double_Double);
+ case SymbolicAddress::CeilF:
+ return FuncCast<float (float)>(fdlibm::ceilf, Args_Float32_Float32);
+ case SymbolicAddress::FloorD:
+ return FuncCast<double (double)>(fdlibm::floor, Args_Double_Double);
+ case SymbolicAddress::FloorF:
+ return FuncCast<float (float)>(fdlibm::floorf, Args_Float32_Float32);
+ case SymbolicAddress::TruncD:
+ return FuncCast<double (double)>(fdlibm::trunc, Args_Double_Double);
+ case SymbolicAddress::TruncF:
+ return FuncCast<float (float)>(fdlibm::truncf, Args_Float32_Float32);
+ case SymbolicAddress::NearbyIntD:
+ return FuncCast<double (double)>(fdlibm::nearbyint, Args_Double_Double);
+ case SymbolicAddress::NearbyIntF:
+ return FuncCast<float (float)>(fdlibm::nearbyintf, Args_Float32_Float32);
+ case SymbolicAddress::ExpD:
+ return FuncCast<double (double)>(fdlibm::exp, Args_Double_Double);
+ case SymbolicAddress::LogD:
+ return FuncCast<double (double)>(fdlibm::log, Args_Double_Double);
+ case SymbolicAddress::PowD:
+ return FuncCast(ecmaPow, Args_Double_DoubleDouble);
+ case SymbolicAddress::ATan2D:
+ return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
+ case SymbolicAddress::GrowMemory:
+ return FuncCast<uint32_t (Instance*, uint32_t)>(Instance::growMemory_i32, Args_General2);
+ case SymbolicAddress::CurrentMemory:
+ return FuncCast<uint32_t (Instance*)>(Instance::currentMemory_i32, Args_General1);
+ case SymbolicAddress::Limit:
+ break;
+ }
+
+ MOZ_CRASH("Bad SymbolicAddress");
+}
+
+static uint32_t
+GetCPUID()
+{
+ enum Arch {
+ X86 = 0x1,
+ X64 = 0x2,
+ ARM = 0x3,
+ MIPS = 0x4,
+ MIPS64 = 0x5,
+ ARCH_BITS = 3
+ };
+
+#if defined(JS_CODEGEN_X86)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+ return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_X64)
+ MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <= (UINT32_MAX >> ARCH_BITS));
+ return X64 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM)
+ MOZ_ASSERT(jit::GetARMFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return ARM | (jit::GetARMFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_ARM64)
+ MOZ_CRASH("not enabled");
+#elif defined(JS_CODEGEN_MIPS32)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_MIPS64)
+ MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
+ return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_NONE)
+ return 0;
+#else
+# error "unknown architecture"
+#endif
+}
+
+size_t
+Sig::serializedSize() const
+{
+ return sizeof(ret_) +
+ SerializedPodVectorSize(args_);
+}
+
+uint8_t*
+Sig::serialize(uint8_t* cursor) const
+{
+ cursor = WriteScalar<ExprType>(cursor, ret_);
+ cursor = SerializePodVector(cursor, args_);
+ return cursor;
+}
+
+const uint8_t*
+Sig::deserialize(const uint8_t* cursor)
+{
+ (cursor = ReadScalar<ExprType>(cursor, &ret_)) &&
+ (cursor = DeserializePodVector(cursor, &args_));
+ return cursor;
+}
+
+size_t
+Sig::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return args_.sizeOfExcludingThis(mallocSizeOf);
+}
+
+typedef uint32_t ImmediateType; // for 32/64 consistency
+static const unsigned sTotalBits = sizeof(ImmediateType) * 8;
+static const unsigned sTagBits = 1;
+static const unsigned sReturnBit = 1;
+static const unsigned sLengthBits = 4;
+static const unsigned sTypeBits = 2;
+static const unsigned sMaxTypes = (sTotalBits - sTagBits - sReturnBit - sLengthBits) / sTypeBits;
+
+static bool
+IsImmediateType(ValType vt)
+{
+ switch (vt) {
+ case ValType::I32:
+ case ValType::I64:
+ case ValType::F32:
+ case ValType::F64:
+ return true;
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ return false;
+ }
+ MOZ_CRASH("bad ValType");
+}
+
+static unsigned
+EncodeImmediateType(ValType vt)
+{
+ static_assert(3 < (1 << sTypeBits), "fits");
+ switch (vt) {
+ case ValType::I32:
+ return 0;
+ case ValType::I64:
+ return 1;
+ case ValType::F32:
+ return 2;
+ case ValType::F64:
+ return 3;
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ break;
+ }
+ MOZ_CRASH("bad ValType");
+}
+
+/* static */ bool
+SigIdDesc::isGlobal(const Sig& sig)
+{
+ unsigned numTypes = (sig.ret() == ExprType::Void ? 0 : 1) +
+ (sig.args().length());
+ if (numTypes > sMaxTypes)
+ return true;
+
+ if (sig.ret() != ExprType::Void && !IsImmediateType(NonVoidToValType(sig.ret())))
+ return true;
+
+ for (ValType v : sig.args()) {
+ if (!IsImmediateType(v))
+ return true;
+ }
+
+ return false;
+}
+
+/* static */ SigIdDesc
+SigIdDesc::global(const Sig& sig, uint32_t globalDataOffset)
+{
+ MOZ_ASSERT(isGlobal(sig));
+ return SigIdDesc(Kind::Global, globalDataOffset);
+}
+
+static ImmediateType
+LengthToBits(uint32_t length)
+{
+ static_assert(sMaxTypes <= ((1 << sLengthBits) - 1), "fits");
+ MOZ_ASSERT(length <= sMaxTypes);
+ return length;
+}
+
+/* static */ SigIdDesc
+SigIdDesc::immediate(const Sig& sig)
+{
+ ImmediateType immediate = ImmediateBit;
+ uint32_t shift = sTagBits;
+
+ if (sig.ret() != ExprType::Void) {
+ immediate |= (1 << shift);
+ shift += sReturnBit;
+
+ immediate |= EncodeImmediateType(NonVoidToValType(sig.ret())) << shift;
+ shift += sTypeBits;
+ } else {
+ shift += sReturnBit;
+ }
+
+ immediate |= LengthToBits(sig.args().length()) << shift;
+ shift += sLengthBits;
+
+ for (ValType argType : sig.args()) {
+ immediate |= EncodeImmediateType(argType) << shift;
+ shift += sTypeBits;
+ }
+
+ MOZ_ASSERT(shift <= sTotalBits);
+ return SigIdDesc(Kind::Immediate, immediate);
+}
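+
+// For illustration, the signature (i32, i32) -> i32 is packed, from the low
+// bits up, as: the immediate tag bit, a set return bit, the i32 return-type
+// code (0) in two bits, the argument count (2) in four bits, and the two i32
+// argument codes in two bits each, using 12 of the 32 available bits.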
+
+size_t
+SigWithId::serializedSize() const
+{
+ return Sig::serializedSize() +
+ sizeof(id);
+}
+
+uint8_t*
+SigWithId::serialize(uint8_t* cursor) const
+{
+ cursor = Sig::serialize(cursor);
+ cursor = WriteBytes(cursor, &id, sizeof(id));
+ return cursor;
+}
+
+const uint8_t*
+SigWithId::deserialize(const uint8_t* cursor)
+{
+ (cursor = Sig::deserialize(cursor)) &&
+ (cursor = ReadBytes(cursor, &id, sizeof(id)));
+ return cursor;
+}
+
+size_t
+SigWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return Sig::sizeOfExcludingThis(mallocSizeOf);
+}
+
+Assumptions::Assumptions(JS::BuildIdCharVector&& buildId)
+ : cpuId(GetCPUID()),
+ buildId(Move(buildId))
+{}
+
+Assumptions::Assumptions()
+ : cpuId(GetCPUID()),
+ buildId()
+{}
+
+bool
+Assumptions::initBuildIdFromContext(ExclusiveContext* cx)
+{
+ if (!cx->buildIdOp() || !cx->buildIdOp()(&buildId)) {
+ ReportOutOfMemory(cx);
+ return false;
+ }
+ return true;
+}
+
+bool
+Assumptions::clone(const Assumptions& other)
+{
+ cpuId = other.cpuId;
+ return buildId.appendAll(other.buildId);
+}
+
+bool
+Assumptions::operator==(const Assumptions& rhs) const
+{
+ return cpuId == rhs.cpuId &&
+ buildId.length() == rhs.buildId.length() &&
+ PodEqual(buildId.begin(), rhs.buildId.begin(), buildId.length());
+}
+
+size_t
+Assumptions::serializedSize() const
+{
+ return sizeof(uint32_t) +
+ SerializedPodVectorSize(buildId);
+}
+
+uint8_t*
+Assumptions::serialize(uint8_t* cursor) const
+{
+ // The format of serialized Assumptions must never change in a way that
+ // would cause old cache files written by an old build-id to match the
+ // assumptions of a different build-id.
+
+ cursor = WriteScalar<uint32_t>(cursor, cpuId);
+ cursor = SerializePodVector(cursor, buildId);
+ return cursor;
+}
+
+const uint8_t*
+Assumptions::deserialize(const uint8_t* cursor, size_t remain)
+{
+ (cursor = ReadScalarChecked<uint32_t>(cursor, &remain, &cpuId)) &&
+ (cursor = DeserializePodVectorChecked(cursor, &remain, &buildId));
+ return cursor;
+}
+
+size_t
+Assumptions::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
+{
+ return buildId.sizeOfExcludingThis(mallocSizeOf);
+}
+
+// Heap length on ARM should fit in an ARM immediate. We approximate the set
+// of valid ARM immediates with the predicate:
+// 2^n for n in [16, 24)
+// or
+// 2^24 * n for n >= 1.
+bool
+wasm::IsValidARMImmediate(uint32_t i)
+{
+ bool valid = (IsPowerOfTwo(i) ||
+ (i & 0x00ffffff) == 0);
+
+ MOZ_ASSERT_IF(valid, i % PageSize == 0);
+
+ return valid;
+}
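+
+// For example, 64 * 1024 (a power of two) and 3 * (1 << 24) (low 24 bits all
+// zero) are both valid above, whereas 17 * 1024 * 1024 is not: it is neither
+// a power of two nor a multiple of 2^24.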
+
+uint32_t
+wasm::RoundUpToNextValidARMImmediate(uint32_t i)
+{
+ MOZ_ASSERT(i <= 0xff000000);
+
+ if (i <= 16 * 1024 * 1024)
+ i = i ? mozilla::RoundUpPow2(i) : 0;
+ else
+ i = (i + 0x00ffffff) & ~0x00ffffff;
+
+ MOZ_ASSERT(IsValidARMImmediate(i));
+
+ return i;
+}
+
+#ifndef WASM_HUGE_MEMORY
+
+bool
+wasm::IsValidBoundsCheckImmediate(uint32_t i)
+{
+#ifdef JS_CODEGEN_ARM
+ return IsValidARMImmediate(i);
+#else
+ return true;
+#endif
+}
+
+size_t
+wasm::ComputeMappedSize(uint32_t maxSize)
+{
+ MOZ_ASSERT(maxSize % PageSize == 0);
+
+ // It is the bounds-check limit, not the mapped size, that gets baked into
+ // code. Thus round up the maxSize to the next valid immediate value
+ // *before* adding in the guard page.
+
+# ifdef JS_CODEGEN_ARM
+ uint32_t boundsCheckLimit = RoundUpToNextValidARMImmediate(maxSize);
+# else
+ uint32_t boundsCheckLimit = maxSize;
+# endif
+ MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));
+
+ MOZ_ASSERT(boundsCheckLimit % gc::SystemPageSize() == 0);
+ MOZ_ASSERT(GuardSize % gc::SystemPageSize() == 0);
+ return boundsCheckLimit + GuardSize;
+}
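+
+// For example, on ARM a maxSize of 17 * 1024 * 1024 gets a bounds-check limit
+// of 32 * 1024 * 1024 (the next multiple of 2^24), so the mapped size is that
+// limit plus GuardSize; on other platforms it is simply maxSize + GuardSize.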
+
+#endif // WASM_HUGE_MEMORY
diff --git a/js/src/wasm/WasmTypes.h b/js/src/wasm/WasmTypes.h
new file mode 100644
index 0000000000..c79af12e5e
--- /dev/null
+++ b/js/src/wasm/WasmTypes.h
@@ -0,0 +1,1510 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ *
+ * Copyright 2015 Mozilla Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef wasm_types_h
+#define wasm_types_h
+
+#include "mozilla/EnumeratedArray.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Move.h"
+#include "mozilla/RefCounted.h"
+#include "mozilla/RefPtr.h"
+#include "mozilla/Unused.h"
+
+#include "NamespaceImports.h"
+
+#include "ds/LifoAlloc.h"
+#include "jit/IonTypes.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+#include "vm/MallocProvider.h"
+#include "wasm/WasmBinaryConstants.h"
+
+namespace js {
+
+class PropertyName;
+namespace jit { struct BaselineScript; }
+
+// This is a widespread header, so let's keep the core wasm impl types out of it.
+
+class WasmMemoryObject;
+typedef GCPtr<WasmMemoryObject*> GCPtrWasmMemoryObject;
+typedef Rooted<WasmMemoryObject*> RootedWasmMemoryObject;
+typedef Handle<WasmMemoryObject*> HandleWasmMemoryObject;
+typedef MutableHandle<WasmMemoryObject*> MutableHandleWasmMemoryObject;
+
+class WasmModuleObject;
+typedef Rooted<WasmModuleObject*> RootedWasmModuleObject;
+typedef Handle<WasmModuleObject*> HandleWasmModuleObject;
+typedef MutableHandle<WasmModuleObject*> MutableHandleWasmModuleObject;
+
+class WasmInstanceObject;
+typedef GCVector<WasmInstanceObject*> WasmInstanceObjectVector;
+typedef Rooted<WasmInstanceObject*> RootedWasmInstanceObject;
+typedef Handle<WasmInstanceObject*> HandleWasmInstanceObject;
+typedef MutableHandle<WasmInstanceObject*> MutableHandleWasmInstanceObject;
+
+class WasmTableObject;
+typedef Rooted<WasmTableObject*> RootedWasmTableObject;
+typedef Handle<WasmTableObject*> HandleWasmTableObject;
+typedef MutableHandle<WasmTableObject*> MutableHandleWasmTableObject;
+
+namespace wasm {
+
+using mozilla::DebugOnly;
+using mozilla::EnumeratedArray;
+using mozilla::Maybe;
+using mozilla::Move;
+using mozilla::MallocSizeOf;
+using mozilla::Nothing;
+using mozilla::PodZero;
+using mozilla::PodCopy;
+using mozilla::PodEqual;
+using mozilla::RefCounted;
+using mozilla::Some;
+using mozilla::Unused;
+
+typedef Vector<uint32_t, 0, SystemAllocPolicy> Uint32Vector;
+typedef Vector<uint8_t, 0, SystemAllocPolicy> Bytes;
+
+typedef int8_t I8x16[16];
+typedef int16_t I16x8[8];
+typedef int32_t I32x4[4];
+typedef float F32x4[4];
+
+class Code;
+class CodeRange;
+class Memory;
+class Module;
+class Instance;
+class Table;
+
+// To call Vector::podResizeToFit, a type must specialize mozilla::IsPod
+// which is pretty verbose to do within js::wasm, so factor that process out
+// into a macro.
+
+#define WASM_DECLARE_POD_VECTOR(Type, VectorName) \
+} } namespace mozilla { \
+template <> struct IsPod<js::wasm::Type> : TrueType {}; \
+} namespace js { namespace wasm { \
+typedef Vector<Type, 0, SystemAllocPolicy> VectorName;
+
+// A wasm Module and everything it contains must support serialization and
+// deserialization. Some data can be simply copied as raw bytes and,
+// as a convention, is stored in an inline CacheablePod struct. Everything else
+// should implement the methods below, which are called recursively by the
+// containing Module.
+
+#define WASM_DECLARE_SERIALIZABLE(Type) \
+ size_t serializedSize() const; \
+ uint8_t* serialize(uint8_t* cursor) const; \
+ const uint8_t* deserialize(const uint8_t* cursor); \
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+#define WASM_DECLARE_SERIALIZABLE_VIRTUAL(Type) \
+ virtual size_t serializedSize() const; \
+ virtual uint8_t* serialize(uint8_t* cursor) const; \
+ virtual const uint8_t* deserialize(const uint8_t* cursor); \
+ virtual size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+
+#define WASM_DECLARE_SERIALIZABLE_OVERRIDE(Type) \
+ size_t serializedSize() const override; \
+ uint8_t* serialize(uint8_t* cursor) const override; \
+ const uint8_t* deserialize(const uint8_t* cursor) override; \
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+// This reusable base class factors out the logic for a resource that is shared
+// by multiple instances/modules but should only be counted once when computing
+// about:memory stats.
+
+template <class T>
+struct ShareableBase : RefCounted<T>
+{
+ using SeenSet = HashSet<const T*, DefaultHasher<const T*>, SystemAllocPolicy>;
+
+ size_t sizeOfIncludingThisIfNotSeen(MallocSizeOf mallocSizeOf, SeenSet* seen) const {
+ const T* self = static_cast<const T*>(this);
+ typename SeenSet::AddPtr p = seen->lookupForAdd(self);
+ if (p)
+ return 0;
+ bool ok = seen->add(p, self);
+ (void)ok; // best effort; on failure the resource may simply be counted twice
+ return mallocSizeOf(self) + self->sizeOfExcludingThis(mallocSizeOf);
+ }
+};
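+
+// A sketch of the intended use (the names below are hypothetical): a memory
+// reporter walking all modules calls
+//
+//   resource->sizeOfIncludingThisIfNotSeen(mallocSizeOf, &seen);
+//
+// on each shared resource, so a resource reachable from several modules
+// contributes its size to about:memory only once.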
+
+// ValType utilities
+
+static inline bool
+IsSimdType(ValType vt)
+{
+ switch (vt) {
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static inline uint32_t
+NumSimdElements(ValType vt)
+{
+ MOZ_ASSERT(IsSimdType(vt));
+ switch (vt) {
+ case ValType::I8x16:
+ case ValType::B8x16:
+ return 16;
+ case ValType::I16x8:
+ case ValType::B16x8:
+ return 8;
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B32x4:
+ return 4;
+ default:
+ MOZ_CRASH("Unhandled SIMD type");
+ }
+}
+
+static inline ValType
+SimdElementType(ValType vt)
+{
+ MOZ_ASSERT(IsSimdType(vt));
+ switch (vt) {
+ case ValType::I8x16:
+ case ValType::I16x8:
+ case ValType::I32x4:
+ return ValType::I32;
+ case ValType::F32x4:
+ return ValType::F32;
+ case ValType::B8x16:
+ case ValType::B16x8:
+ case ValType::B32x4:
+ return ValType::I32;
+ default:
+ MOZ_CRASH("Unhandled SIMD type");
+ }
+}
+
+static inline ValType
+SimdBoolType(ValType vt)
+{
+ MOZ_ASSERT(IsSimdType(vt));
+ switch (vt) {
+ case ValType::I8x16:
+ case ValType::B8x16:
+ return ValType::B8x16;
+ case ValType::I16x8:
+ case ValType::B16x8:
+ return ValType::B16x8;
+ case ValType::I32x4:
+ case ValType::F32x4:
+ case ValType::B32x4:
+ return ValType::B32x4;
+ default:
+ MOZ_CRASH("Unhandled SIMD type");
+ }
+}
+
+static inline bool
+IsSimdBoolType(ValType vt)
+{
+ return vt == ValType::B8x16 || vt == ValType::B16x8 || vt == ValType::B32x4;
+}
+
+static inline jit::MIRType
+ToMIRType(ValType vt)
+{
+ switch (vt) {
+ case ValType::I32: return jit::MIRType::Int32;
+ case ValType::I64: return jit::MIRType::Int64;
+ case ValType::F32: return jit::MIRType::Float32;
+ case ValType::F64: return jit::MIRType::Double;
+ case ValType::I8x16: return jit::MIRType::Int8x16;
+ case ValType::I16x8: return jit::MIRType::Int16x8;
+ case ValType::I32x4: return jit::MIRType::Int32x4;
+ case ValType::F32x4: return jit::MIRType::Float32x4;
+ case ValType::B8x16: return jit::MIRType::Bool8x16;
+ case ValType::B16x8: return jit::MIRType::Bool16x8;
+ case ValType::B32x4: return jit::MIRType::Bool32x4;
+ }
+ MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("bad type");
+}
+
+// The ExprType enum represents the type of a WebAssembly expression or return
+// value and may either be a value type or void. Soon, expression types will be
+// generalized to a list of ValType and this enum will go away, replaced,
+// wherever it is used, by a varU32 + list of ValType.
+
+enum class ExprType
+{
+ Void = uint8_t(TypeCode::BlockVoid),
+
+ I32 = uint8_t(TypeCode::I32),
+ I64 = uint8_t(TypeCode::I64),
+ F32 = uint8_t(TypeCode::F32),
+ F64 = uint8_t(TypeCode::F64),
+
+ I8x16 = uint8_t(TypeCode::I8x16),
+ I16x8 = uint8_t(TypeCode::I16x8),
+ I32x4 = uint8_t(TypeCode::I32x4),
+ F32x4 = uint8_t(TypeCode::F32x4),
+ B8x16 = uint8_t(TypeCode::B8x16),
+ B16x8 = uint8_t(TypeCode::B16x8),
+ B32x4 = uint8_t(TypeCode::B32x4),
+
+ Limit = uint8_t(TypeCode::Limit)
+};
+
+static inline bool
+IsVoid(ExprType et)
+{
+ return et == ExprType::Void;
+}
+
+static inline ValType
+NonVoidToValType(ExprType et)
+{
+ MOZ_ASSERT(!IsVoid(et));
+ return ValType(et);
+}
+
+static inline ExprType
+ToExprType(ValType vt)
+{
+ return ExprType(vt);
+}
+
+static inline bool
+IsSimdType(ExprType et)
+{
+ return IsVoid(et) ? false : IsSimdType(ValType(et));
+}
+
+static inline jit::MIRType
+ToMIRType(ExprType et)
+{
+ return IsVoid(et) ? jit::MIRType::None : ToMIRType(ValType(et));
+}
+
+static inline const char*
+ToCString(ExprType type)
+{
+ switch (type) {
+ case ExprType::Void: return "void";
+ case ExprType::I32: return "i32";
+ case ExprType::I64: return "i64";
+ case ExprType::F32: return "f32";
+ case ExprType::F64: return "f64";
+ case ExprType::I8x16: return "i8x16";
+ case ExprType::I16x8: return "i16x8";
+ case ExprType::I32x4: return "i32x4";
+ case ExprType::F32x4: return "f32x4";
+ case ExprType::B8x16: return "b8x16";
+ case ExprType::B16x8: return "b16x8";
+ case ExprType::B32x4: return "b32x4";
+ case ExprType::Limit:;
+ }
+ MOZ_CRASH("bad expression type");
+}
+
+static inline const char*
+ToCString(ValType type)
+{
+ return ToCString(ToExprType(type));
+}
+
+// Because WebAssembly allows one to define the payload of a NaN value,
+// including the signal/quiet bit (highest order bit of payload), another
+// representation of floating-point values is required: on some platforms (x86
+// without SSE2), passing a floating-point argument to a function call may use
+// the x87 stack, which has the side-effect of clearing the signal/quiet bit.
+// Because the signal/quiet bit must be preserved (by spec), function calls
+// instead pass the raw punned integer representation of floating-point values.
+//
+// When we leave the WebAssembly sandbox back to JS, NaNs are canonicalized, so
+// this isn't observable from JS.
+
+template<class T>
+class Raw
+{
+ typedef typename mozilla::FloatingPoint<T>::Bits Bits;
+ Bits value_;
+
+ public:
+ Raw() : value_(0) {}
+
+ explicit Raw(T value)
+ : value_(mozilla::BitwiseCast<Bits>(value))
+ {}
+
+ template<class U> MOZ_IMPLICIT Raw(U) = delete;
+
+ static Raw fromBits(Bits bits) { Raw r; r.value_ = bits; return r; }
+
+ Bits bits() const { return value_; }
+ T fp() const { return mozilla::BitwiseCast<T>(value_); }
+};
+
+using RawF64 = Raw<double>;
+using RawF32 = Raw<float>;
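+
+// A minimal usage sketch (illustration only, not part of this header): a
+// float's bit pattern can be captured and restored without the value ever
+// passing through the x87 stack.
+//
+//   RawF32 raw(1.5f);                      // capture the bit pattern
+//   uint32_t bits = raw.bits();            // punned integer form
+//   RawF32 back = RawF32::fromBits(bits);  // reconstruct from the bits
+//   MOZ_ASSERT(back.fp() == 1.5f);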
+
+// The Val class represents a single WebAssembly value of a given value type,
+// mostly for the purpose of numeric literals and initializers. A Val does not
+// directly map to a JS value since there is not (currently) a precise
+// representation of i64 values. A Val may contain non-canonical NaNs since,
+// within WebAssembly, floats are not canonicalized. Canonicalization must
+// happen at the JS boundary.
+
+class Val
+{
+ ValType type_;
+ union U {
+ uint32_t i32_;
+ uint64_t i64_;
+ RawF32 f32_;
+ RawF64 f64_;
+ I8x16 i8x16_;
+ I16x8 i16x8_;
+ I32x4 i32x4_;
+ F32x4 f32x4_;
+ U() {}
+ } u;
+
+ public:
+ Val() = default;
+
+ explicit Val(uint32_t i32) : type_(ValType::I32) { u.i32_ = i32; }
+ explicit Val(uint64_t i64) : type_(ValType::I64) { u.i64_ = i64; }
+
+ explicit Val(RawF32 f32) : type_(ValType::F32) { u.f32_ = f32; }
+ explicit Val(RawF64 f64) : type_(ValType::F64) { u.f64_ = f64; }
+ MOZ_IMPLICIT Val(float) = delete;
+ MOZ_IMPLICIT Val(double) = delete;
+
+ explicit Val(const I8x16& i8x16, ValType type = ValType::I8x16) : type_(type) {
+ MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+ memcpy(u.i8x16_, i8x16, sizeof(u.i8x16_));
+ }
+ explicit Val(const I16x8& i16x8, ValType type = ValType::I16x8) : type_(type) {
+ MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+ memcpy(u.i16x8_, i16x8, sizeof(u.i16x8_));
+ }
+ explicit Val(const I32x4& i32x4, ValType type = ValType::I32x4) : type_(type) {
+ MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+ memcpy(u.i32x4_, i32x4, sizeof(u.i32x4_));
+ }
+ explicit Val(const F32x4& f32x4) : type_(ValType::F32x4) {
+ memcpy(u.f32x4_, f32x4, sizeof(u.f32x4_));
+ }
+
+ ValType type() const { return type_; }
+ bool isSimd() const { return IsSimdType(type()); }
+
+ uint32_t i32() const { MOZ_ASSERT(type_ == ValType::I32); return u.i32_; }
+ uint64_t i64() const { MOZ_ASSERT(type_ == ValType::I64); return u.i64_; }
+ RawF32 f32() const { MOZ_ASSERT(type_ == ValType::F32); return u.f32_; }
+ RawF64 f64() const { MOZ_ASSERT(type_ == ValType::F64); return u.f64_; }
+
+ const I8x16& i8x16() const {
+ MOZ_ASSERT(type_ == ValType::I8x16 || type_ == ValType::B8x16);
+ return u.i8x16_;
+ }
+ const I16x8& i16x8() const {
+ MOZ_ASSERT(type_ == ValType::I16x8 || type_ == ValType::B16x8);
+ return u.i16x8_;
+ }
+ const I32x4& i32x4() const {
+ MOZ_ASSERT(type_ == ValType::I32x4 || type_ == ValType::B32x4);
+ return u.i32x4_;
+ }
+ const F32x4& f32x4() const {
+ MOZ_ASSERT(type_ == ValType::F32x4);
+ return u.f32x4_;
+ }
+
+ void writePayload(uint8_t* dst) const;
+};
+
+typedef Vector<Val, 0, SystemAllocPolicy> ValVector;
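+
+// A brief usage sketch (illustration only): numeric literals are wrapped in a
+// Val tagged with their value type; floating-point values go through the Raw
+// classes so their exact bit pattern is preserved.
+//
+//   Val i(uint32_t(42));    // i.type() == ValType::I32
+//   Val f(RawF32(1.5f));    // f.type() == ValType::F32
+//   MOZ_ASSERT(i.i32() == 42 && f.f32().fp() == 1.5f);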
+
+// The Sig class represents a WebAssembly function signature which takes a list
+// of value types and returns an expression type. The engine uses two in-memory
+// representations of the argument Vector's memory (when elements do not fit
+// inline): normal malloc allocation (via SystemAllocPolicy) and allocation in
+// a LifoAlloc (via LifoAllocPolicy). The former Sig objects can have any
+// lifetime since they own the memory. The latter Sig objects must not outlive
+// the associated LifoAlloc mark/release interval (which is currently the
+// duration of module validation+compilation). Thus, long-lived objects like
+// WasmModule must use malloced allocation.
+
+class Sig
+{
+ ValTypeVector args_;
+ ExprType ret_;
+
+ public:
+ Sig() : args_(), ret_(ExprType::Void) {}
+ Sig(ValTypeVector&& args, ExprType ret) : args_(Move(args)), ret_(ret) {}
+
+ MOZ_MUST_USE bool clone(const Sig& rhs) {
+ ret_ = rhs.ret_;
+ MOZ_ASSERT(args_.empty());
+ return args_.appendAll(rhs.args_);
+ }
+
+ ValType arg(unsigned i) const { return args_[i]; }
+ const ValTypeVector& args() const { return args_; }
+ const ExprType& ret() const { return ret_; }
+
+ HashNumber hash() const {
+ return AddContainerToHash(args_, HashNumber(ret_));
+ }
+ bool operator==(const Sig& rhs) const {
+ return ret() == rhs.ret() && EqualContainers(args(), rhs.args());
+ }
+ bool operator!=(const Sig& rhs) const {
+ return !(*this == rhs);
+ }
+
+ WASM_DECLARE_SERIALIZABLE(Sig)
+};
+
+struct SigHashPolicy
+{
+ typedef const Sig& Lookup;
+ static HashNumber hash(Lookup sig) { return sig.hash(); }
+ static bool match(const Sig* lhs, Lookup rhs) { return *lhs == rhs; }
+};
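+
+// A usage sketch (illustration only): building a malloc-backed signature that
+// takes (i32, f64) and returns i32.
+//
+//   ValTypeVector args;
+//   if (!args.append(ValType::I32) || !args.append(ValType::F64))
+//       return false;
+//   Sig sig(Move(args), ExprType::I32);
+//   MOZ_ASSERT(sig.args().length() == 2 && sig.ret() == ExprType::I32);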
+
+// An InitExpr describes a deferred initializer expression, used to initialize
+// a global or a table element offset. Such expressions are created during
+// decoding and actually executed on module instantiation.
+
+class InitExpr
+{
+ public:
+ enum class Kind {
+ Constant,
+ GetGlobal
+ };
+
+ private:
+ Kind kind_;
+ union U {
+ Val val_;
+ struct {
+ uint32_t index_;
+ ValType type_;
+ } global;
+ U() {}
+ } u;
+
+ public:
+ InitExpr() = default;
+
+ explicit InitExpr(Val val) : kind_(Kind::Constant) {
+ u.val_ = val;
+ }
+
+ explicit InitExpr(uint32_t globalIndex, ValType type) : kind_(Kind::GetGlobal) {
+ u.global.index_ = globalIndex;
+ u.global.type_ = type;
+ }
+
+ Kind kind() const { return kind_; }
+
+ bool isVal() const { return kind() == Kind::Constant; }
+ Val val() const { MOZ_ASSERT(isVal()); return u.val_; }
+
+ uint32_t globalIndex() const { MOZ_ASSERT(kind() == Kind::GetGlobal); return u.global.index_; }
+
+ ValType type() const {
+ switch (kind()) {
+ case Kind::Constant: return u.val_.type();
+ case Kind::GetGlobal: return u.global.type_;
+ }
+ MOZ_CRASH("unexpected initExpr type");
+ }
+};
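+
+// An illustrative sketch (not part of this header): a constant i32 initializer
+// and a get_global initializer that refers to imported global 0.
+//
+//   InitExpr constInit(Val(uint32_t(0)));
+//   InitExpr globalInit(0, ValType::I32);
+//   MOZ_ASSERT(constInit.isVal());
+//   MOZ_ASSERT(globalInit.kind() == InitExpr::Kind::GetGlobal);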
+
+// CacheableChars is used to cacheably store UniqueChars.
+
+struct CacheableChars : UniqueChars
+{
+ CacheableChars() = default;
+ explicit CacheableChars(char* ptr) : UniqueChars(ptr) {}
+ MOZ_IMPLICIT CacheableChars(UniqueChars&& rhs) : UniqueChars(Move(rhs)) {}
+ WASM_DECLARE_SERIALIZABLE(CacheableChars)
+};
+
+typedef Vector<CacheableChars, 0, SystemAllocPolicy> CacheableCharsVector;
+
+// Import describes a single wasm import. An ImportVector describes all
+// of a single module's imports.
+//
+// ImportVector is built incrementally by ModuleGenerator and then stored
+// immutably by Module.
+
+struct Import
+{
+ CacheableChars module;
+ CacheableChars field;
+ DefinitionKind kind;
+
+ Import() = default;
+ Import(UniqueChars&& module, UniqueChars&& field, DefinitionKind kind)
+ : module(Move(module)), field(Move(field)), kind(kind)
+ {}
+
+ WASM_DECLARE_SERIALIZABLE(Import)
+};
+
+typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
+
+// A GlobalDesc describes a single global variable. Currently, asm.js and wasm
+// expose mutable and immutable private globals, but cannot import or export
+// mutable globals.
+
+enum class GlobalKind
+{
+ Import,
+ Constant,
+ Variable
+};
+
+class GlobalDesc
+{
+ union V {
+ struct {
+ union U {
+ InitExpr initial_;
+ struct {
+ ValType type_;
+ uint32_t index_;
+ } import;
+ U() {}
+ } val;
+ unsigned offset_;
+ bool isMutable_;
+ } var;
+ Val cst_;
+ V() {}
+ } u;
+ GlobalKind kind_;
+
+ public:
+ GlobalDesc() = default;
+
+ explicit GlobalDesc(InitExpr initial, bool isMutable)
+ : kind_((isMutable || !initial.isVal()) ? GlobalKind::Variable : GlobalKind::Constant)
+ {
+ if (isVariable()) {
+ u.var.val.initial_ = initial;
+ u.var.isMutable_ = isMutable;
+ u.var.offset_ = UINT32_MAX;
+ } else {
+ u.cst_ = initial.val();
+ }
+ }
+
+ explicit GlobalDesc(ValType type, bool isMutable, uint32_t importIndex)
+ : kind_(GlobalKind::Import)
+ {
+ u.var.val.import.type_ = type;
+ u.var.val.import.index_ = importIndex;
+ u.var.isMutable_ = isMutable;
+ u.var.offset_ = UINT32_MAX;
+ }
+
+ void setOffset(unsigned offset) {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(u.var.offset_ == UINT32_MAX);
+ u.var.offset_ = offset;
+ }
+ unsigned offset() const {
+ MOZ_ASSERT(!isConstant());
+ MOZ_ASSERT(u.var.offset_ != UINT32_MAX);
+ return u.var.offset_;
+ }
+
+ GlobalKind kind() const { return kind_; }
+ bool isVariable() const { return kind_ == GlobalKind::Variable; }
+ bool isConstant() const { return kind_ == GlobalKind::Constant; }
+ bool isImport() const { return kind_ == GlobalKind::Import; }
+
+ bool isMutable() const { return !isConstant() && u.var.isMutable_; }
+ Val constantValue() const { MOZ_ASSERT(isConstant()); return u.cst_; }
+ const InitExpr& initExpr() const { MOZ_ASSERT(isVariable()); return u.var.val.initial_; }
+ uint32_t importIndex() const { MOZ_ASSERT(isImport()); return u.var.val.import.index_; }
+
+ ValType type() const {
+ switch (kind_) {
+ case GlobalKind::Import: return u.var.val.import.type_;
+ case GlobalKind::Variable: return u.var.val.initial_.type();
+ case GlobalKind::Constant: return u.cst_.type();
+ }
+ MOZ_CRASH("unexpected global kind");
+ }
+};
+
+typedef Vector<GlobalDesc, 0, SystemAllocPolicy> GlobalDescVector;
+
+// DataSegment describes the offset of a data segment in the bytecode that is
+// to be copied at a given offset into linear memory upon instantiation.
+
+struct DataSegment
+{
+ InitExpr offset;
+ uint32_t bytecodeOffset;
+ uint32_t length;
+};
+
+typedef Vector<DataSegment, 0, SystemAllocPolicy> DataSegmentVector;
+
+// SigIdDesc describes a signature id that can be used by call_indirect and
+// table-entry prologues to check whether the caller's and callee's signatures
+// *structurally* match. To handle the general case, a Sig is
+// allocated and stored in a process-wide hash table, so that pointer equality
+// implies structural equality. As an optimization for the 99% case where the
+// Sig has a small number of parameters, the Sig is bit-packed into a uint32
+// immediate value so that integer equality implies structural equality. Both
+// cases can be handled with a single comparison by always setting the LSB for
+// the immediates (the LSB is necessarily 0 for allocated Sig pointers due to
+// alignment).
+
+class SigIdDesc
+{
+ public:
+ enum class Kind { None, Immediate, Global };
+ static const uintptr_t ImmediateBit = 0x1;
+
+ private:
+ Kind kind_;
+ size_t bits_;
+
+ SigIdDesc(Kind kind, size_t bits) : kind_(kind), bits_(bits) {}
+
+ public:
+ Kind kind() const { return kind_; }
+ static bool isGlobal(const Sig& sig);
+
+ SigIdDesc() : kind_(Kind::None), bits_(0) {}
+ static SigIdDesc global(const Sig& sig, uint32_t globalDataOffset);
+ static SigIdDesc immediate(const Sig& sig);
+
+ bool isGlobal() const { return kind_ == Kind::Global; }
+
+ size_t immediate() const { MOZ_ASSERT(kind_ == Kind::Immediate); return bits_; }
+ uint32_t globalDataOffset() const { MOZ_ASSERT(kind_ == Kind::Global); return bits_; }
+};
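+
+// An illustrative sketch of the tagging scheme described above (TagImmediateSigId
+// is a hypothetical helper, not part of this header): immediates always have
+// their LSB set, while allocated Sig pointers are word-aligned and so have a
+// clear LSB, letting one integer comparison decide both cases.
+//
+//   uintptr_t TagImmediateSigId(uint32_t packedBits) {
+//       return (uintptr_t(packedBits) << 1) | SigIdDesc::ImmediateBit;
+//   }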
+
+// SigWithId pairs a Sig with SigIdDesc, describing either how to compile code
+// that compares this signature's id or, at instantiation, what signature ids to
+// allocate in the global hash and where to put them.
+
+struct SigWithId : Sig
+{
+ SigIdDesc id;
+
+ SigWithId() = default;
+ explicit SigWithId(Sig&& sig, SigIdDesc id) : Sig(Move(sig)), id(id) {}
+ void operator=(Sig&& rhs) { Sig::operator=(Move(rhs)); }
+
+ WASM_DECLARE_SERIALIZABLE(SigWithId)
+};
+
+typedef Vector<SigWithId, 0, SystemAllocPolicy> SigWithIdVector;
+typedef Vector<const SigWithId*, 0, SystemAllocPolicy> SigWithIdPtrVector;
+
+// The Offsets, ProfilingOffsets, and FuncOffsets structs are used to record the
+// offsets of different key points in a CodeRange during compilation.
+
+struct Offsets
+{
+ explicit Offsets(uint32_t begin = 0, uint32_t end = 0)
+ : begin(begin), end(end)
+ {}
+
+ // These define a [begin, end) contiguous range of instructions compiled
+ // into a CodeRange.
+ uint32_t begin;
+ uint32_t end;
+
+ void offsetBy(uint32_t offset) {
+ begin += offset;
+ end += offset;
+ }
+};
+
+struct ProfilingOffsets : Offsets
+{
+ MOZ_IMPLICIT ProfilingOffsets(uint32_t profilingReturn = 0)
+ : Offsets(), profilingReturn(profilingReturn)
+ {}
+
+ // For CodeRanges with ProfilingOffsets, 'begin' is the offset of the
+ // profiling entry.
+ uint32_t profilingEntry() const { return begin; }
+
+ // The profiling return is the offset of the return instruction, which
+ // precedes the 'end' by a variable number of instructions due to
+ // out-of-line codegen.
+ uint32_t profilingReturn;
+
+ void offsetBy(uint32_t offset) {
+ Offsets::offsetBy(offset);
+ profilingReturn += offset;
+ }
+};
+
+struct FuncOffsets : ProfilingOffsets
+{
+ MOZ_IMPLICIT FuncOffsets()
+ : ProfilingOffsets(),
+ tableEntry(0),
+ tableProfilingJump(0),
+ nonProfilingEntry(0),
+ profilingJump(0),
+ profilingEpilogue(0)
+ {}
+
+ // Function CodeRanges have a table entry that takes an extra signature
+ // argument, which is checked against the callee's signature before falling
+ // through to the normal prologue. When profiling is enabled, a nop on the
+ // fallthrough is patched to instead jump to the profiling epilogue.
+ uint32_t tableEntry;
+ uint32_t tableProfilingJump;
+
+ // Function CodeRanges have an additional non-profiling entry that comes
+ // after the profiling entry and a non-profiling epilogue that comes before
+ // the profiling epilogue.
+ uint32_t nonProfilingEntry;
+
+ // When profiling is enabled, the 'nop' at offset 'profilingJump' is
+ // overwritten to be a jump to 'profilingEpilogue'.
+ uint32_t profilingJump;
+ uint32_t profilingEpilogue;
+
+ void offsetBy(uint32_t offset) {
+ ProfilingOffsets::offsetBy(offset);
+ tableEntry += offset;
+ tableProfilingJump += offset;
+ nonProfilingEntry += offset;
+ profilingJump += offset;
+ profilingEpilogue += offset;
+ }
+};
+
+// A wasm::Trap represents a wasm-defined trap that can occur during execution
+// which triggers a WebAssembly.RuntimeError. Generated code may jump to a Trap
+// symbolically, passing the bytecode offset to report as the trap offset. The
+// generated jump will be bound to a tiny stub which fills the offset and
+// then jumps to a per-Trap shared stub at the end of the module.
+
+enum class Trap
+{
+ // The Unreachable opcode has been executed.
+ Unreachable,
+ // An integer arithmetic operation led to an overflow.
+ IntegerOverflow,
+ // Trying to coerce NaN to an integer.
+ InvalidConversionToInteger,
+ // Integer division by zero.
+ IntegerDivideByZero,
+ // Out of bounds on wasm memory accesses and asm.js SIMD/atomic accesses.
+ OutOfBounds,
+ // call_indirect to null.
+ IndirectCallToNull,
+ // call_indirect signature mismatch.
+ IndirectCallBadSig,
+
+ // (asm.js only) SIMD float to int conversion failed because the input
+ // wasn't in bounds.
+ ImpreciseSimdConversion,
+
+ // The internal stack space was exhausted. For compatibility, this throws
+ // the same over-recursed error as JS.
+ StackOverflow,
+
+ Limit
+};
+
+// A wrapper around the bytecode offset of a wasm instruction within a whole
+// module. Trap offsets should refer to the first byte of the instruction that
+// triggered the trap and should ultimately derive from OpIter::trapOffset.
+
+struct TrapOffset
+{
+ uint32_t bytecodeOffset;
+
+ TrapOffset() = default;
+ explicit TrapOffset(uint32_t bytecodeOffset) : bytecodeOffset(bytecodeOffset) {}
+};
+
+// While the frame-pointer chain allows the stack to be unwound without
+// metadata, Error.stack still needs to know the line/column of every call in
+// the chain. A CallSiteDesc describes a single callsite to which CallSite adds
+// the metadata necessary to walk up to the next frame. Lastly CallSiteAndTarget
+// adds the function index of the callee.
+
+class CallSiteDesc
+{
+ uint32_t lineOrBytecode_ : 30;
+ uint32_t kind_ : 2;
+ public:
+ enum Kind {
+ Func, // pc-relative call to a specific function
+ Dynamic, // dynamic callee called via register
+ Symbolic, // call to a single symbolic callee
+ TrapExit // call to a trap exit
+ };
+ CallSiteDesc() {}
+ explicit CallSiteDesc(Kind kind)
+ : lineOrBytecode_(0), kind_(kind)
+ {
+ MOZ_ASSERT(kind == Kind(kind_));
+ }
+ CallSiteDesc(uint32_t lineOrBytecode, Kind kind)
+ : lineOrBytecode_(lineOrBytecode), kind_(kind)
+ {
+ MOZ_ASSERT(kind == Kind(kind_));
+ MOZ_ASSERT(lineOrBytecode == lineOrBytecode_);
+ }
+ uint32_t lineOrBytecode() const { return lineOrBytecode_; }
+ Kind kind() const { return Kind(kind_); }
+};
+
+class CallSite : public CallSiteDesc
+{
+ uint32_t returnAddressOffset_;
+ uint32_t stackDepth_;
+
+ public:
+ CallSite() {}
+
+ CallSite(CallSiteDesc desc, uint32_t returnAddressOffset, uint32_t stackDepth)
+ : CallSiteDesc(desc),
+ returnAddressOffset_(returnAddressOffset),
+ stackDepth_(stackDepth)
+ { }
+
+ void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
+ void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
+ uint32_t returnAddressOffset() const { return returnAddressOffset_; }
+
+ // The stackDepth measures the amount of stack space pushed since the
+ // function was called. In particular, this includes the pushed return
+ // address on all archs (whether the call instruction pushes the return
+ // address (x86/x64) or the prologue does (ARM/MIPS)).
+ uint32_t stackDepth() const { return stackDepth_; }
+};
+
+WASM_DECLARE_POD_VECTOR(CallSite, CallSiteVector)
+
+class CallSiteAndTarget : public CallSite
+{
+ uint32_t index_;
+
+ public:
+ explicit CallSiteAndTarget(CallSite cs)
+ : CallSite(cs)
+ {
+ MOZ_ASSERT(cs.kind() != Func);
+ }
+ CallSiteAndTarget(CallSite cs, uint32_t funcIndex)
+ : CallSite(cs), index_(funcIndex)
+ {
+ MOZ_ASSERT(cs.kind() == Func);
+ }
+ CallSiteAndTarget(CallSite cs, Trap trap)
+ : CallSite(cs),
+ index_(uint32_t(trap))
+ {
+ MOZ_ASSERT(cs.kind() == TrapExit);
+ }
+
+ uint32_t funcIndex() const { MOZ_ASSERT(kind() == Func); return index_; }
+ Trap trap() const { MOZ_ASSERT(kind() == TrapExit); return Trap(index_); }
+};
+
+typedef Vector<CallSiteAndTarget, 0, SystemAllocPolicy> CallSiteAndTargetVector;
+
+// A wasm::SymbolicAddress represents a pointer to a well-known function or
+// object that is embedded in wasm code. Since wasm code is serialized and
+// later deserialized into a different address space, symbolic addresses must be
+// used for *all* pointers into the address space. The MacroAssembler records a
+// list of all SymbolicAddresses and the offsets of their use in the code for
+// later patching during static linking.
+
+enum class SymbolicAddress
+{
+ ToInt32,
+#if defined(JS_CODEGEN_ARM)
+ aeabi_idivmod,
+ aeabi_uidivmod,
+ AtomicCmpXchg,
+ AtomicXchg,
+ AtomicFetchAdd,
+ AtomicFetchSub,
+ AtomicFetchAnd,
+ AtomicFetchOr,
+ AtomicFetchXor,
+#endif
+ ModD,
+ SinD,
+ CosD,
+ TanD,
+ ASinD,
+ ACosD,
+ ATanD,
+ CeilD,
+ CeilF,
+ FloorD,
+ FloorF,
+ TruncD,
+ TruncF,
+ NearbyIntD,
+ NearbyIntF,
+ ExpD,
+ LogD,
+ PowD,
+ ATan2D,
+ Context,
+ InterruptUint32,
+ ReportOverRecursed,
+ HandleExecutionInterrupt,
+ ReportTrap,
+ ReportOutOfBounds,
+ ReportUnalignedAccess,
+ CallImport_Void,
+ CallImport_I32,
+ CallImport_I64,
+ CallImport_F64,
+ CoerceInPlace_ToInt32,
+ CoerceInPlace_ToNumber,
+ DivI64,
+ UDivI64,
+ ModI64,
+ UModI64,
+ TruncateDoubleToInt64,
+ TruncateDoubleToUint64,
+ Uint64ToFloatingPoint,
+ Int64ToFloatingPoint,
+ GrowMemory,
+ CurrentMemory,
+ Limit
+};
+
+void*
+AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
+
+// Assumptions captures ambient state that must be the same when compiling and
+// deserializing a module for the compiled code to be valid. If it's not, then
+// the module must be recompiled from scratch.
+
+struct Assumptions
+{
+ uint32_t cpuId;
+ JS::BuildIdCharVector buildId;
+
+ explicit Assumptions(JS::BuildIdCharVector&& buildId);
+
+ // If Assumptions is constructed without arguments, initBuildIdFromContext()
+ // must be called to complete initialization.
+ Assumptions();
+ bool initBuildIdFromContext(ExclusiveContext* cx);
+
+ bool clone(const Assumptions& other);
+
+ bool operator==(const Assumptions& rhs) const;
+ bool operator!=(const Assumptions& rhs) const { return !(*this == rhs); }
+
+ size_t serializedSize() const;
+ uint8_t* serialize(uint8_t* cursor) const;
+ const uint8_t* deserialize(const uint8_t* cursor, size_t limit);
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
+};
+
+// A Module can either be asm.js or wasm.
+
+enum ModuleKind
+{
+ Wasm,
+ AsmJS
+};
+
+// Represents the resizable limits of memories and tables.
+
+struct Limits
+{
+ uint32_t initial;
+ Maybe<uint32_t> maximum;
+};
+
+// TableDesc describes a table as well as the offset of the table's base pointer
+// in global memory. Currently, wasm has only "any function" tables and asm.js
+// has only "typed function" tables.
+
+enum class TableKind
+{
+ AnyFunction,
+ TypedFunction
+};
+
+struct TableDesc
+{
+ TableKind kind;
+ bool external;
+ uint32_t globalDataOffset;
+ Limits limits;
+
+ TableDesc() = default;
+ TableDesc(TableKind kind, Limits limits)
+ : kind(kind),
+ external(false),
+ globalDataOffset(UINT32_MAX),
+ limits(limits)
+ {}
+};
+
+typedef Vector<TableDesc, 0, SystemAllocPolicy> TableDescVector;
+
+// ExportArg holds the unboxed operands to the wasm entry trampoline which can
+// be called through an ExportFuncPtr.
+
+struct ExportArg
+{
+ uint64_t lo;
+ uint64_t hi;
+};
+
+// TLS data for a single module instance.
+//
+// Every WebAssembly function expects to be passed a hidden TLS pointer argument
+// in WasmTlsReg. The TLS pointer argument points to a TlsData struct.
+// Compiled functions expect that the TLS pointer does not change for the
+// lifetime of the thread.
+//
+// There is a TlsData per module instance per thread, so inter-module calls need
+// to pass the TLS pointer appropriate for the callee module.
+//
+// After the TlsData struct follows the module's declared TLS variables.
+
+struct TlsData
+{
+ // Pointer to the JSContext that contains this TLS data.
+ JSContext* cx;
+
+ // Pointer to the Instance that contains this TLS data.
+ Instance* instance;
+
+ // Pointer to the global data for this Instance.
+ uint8_t* globalData;
+
+ // Pointer to the base of the default memory (or null if there is none).
+ uint8_t* memoryBase;
+
+ // Stack limit for the current thread. This limit is checked against the
+ // stack pointer in the prologue of functions that allocate stack space. See
+ // `CodeGenerator::generateWasm`.
+ void* stackLimit;
+};
+
+typedef int32_t (*ExportFuncPtr)(ExportArg* args, TlsData* tls);
+
+// FuncImportTls describes the region of wasm global memory allocated in the
+// instance's thread-local storage for a function import. This is accessed
+// directly from JIT code and mutated by Instance as exits become optimized and
+// deoptimized.
+
+struct FuncImportTls
+{
+ // The code to call at an import site: a wasm callee, a thunk into C++, or a
+ // thunk into JIT code.
+ void* code;
+
+ // The callee's TlsData pointer, which must be loaded to WasmTlsReg (along
+ // with any pinned registers) before calling 'code'.
+ TlsData* tls;
+
+ // If 'code' points into a JIT code thunk, the BaselineScript of the callee,
+ // for bidirectional registration purposes.
+ jit::BaselineScript* baselineScript;
+
+ // A GC pointer which keeps the callee alive. For imported wasm functions,
+ // this points to the wasm function's WasmInstanceObject. For all other
+ // imported functions, 'obj' points to the JSFunction.
+ GCPtrObject obj;
+ static_assert(sizeof(GCPtrObject) == sizeof(void*), "for JIT access");
+};
+
+// TableTls describes the region of wasm global memory allocated in the
+// instance's thread-local storage which is accessed directly from JIT code
+// to bounds-check and index the table.
+
+struct TableTls
+{
+ // Length of the table in number of elements (not bytes).
+ uint32_t length;
+
+ // Pointer to the array of elements (of type either ExternalTableElem or
+ // void*).
+ void* base;
+};
+
+// When a table can contain functions from other instances (it is "external"),
+// the internal representation is an array of ExternalTableElem instead of just
+// an array of code pointers.
+
+struct ExternalTableElem
+{
+ // The code to call when calling this element. The table ABI is the system
+ // ABI with the additional ABI requirements that:
+ // - WasmTlsReg and any pinned registers have been loaded appropriately
+ // - if this is a heterogeneous table that requires a signature check,
+ // WasmTableCallSigReg holds the signature id.
+ void* code;
+
+ // The pointer to the callee's instance's TlsData. This must be loaded into
+ // WasmTlsReg before calling 'code'.
+ TlsData* tls;
+};
+
+// CalleeDesc describes how to compile one of the variety of asm.js/wasm calls.
+// This is hoisted into WasmTypes.h for sharing between Ion and Baseline.
+
+class CalleeDesc
+{
+ public:
+ enum Which {
+ // Calls a function defined in the same module by its index.
+ Func,
+
+ // Calls the import identified by the offset of its FuncImportTls in
+ // thread-local data.
+ Import,
+
+ // Calls a WebAssembly table (heterogeneous, index must be bounds
+ // checked, callee instance depends on TableDesc).
+ WasmTable,
+
+ // Calls an asm.js table (homogeneous, masked index, same-instance).
+ AsmJSTable,
+
+ // Call a C++ function identified by SymbolicAddress.
+ Builtin,
+
+ // Like Builtin, but automatically passes Instance* as first argument.
+ BuiltinInstanceMethod
+ };
+
+ private:
+ Which which_;
+ union U {
+ U() {}
+ uint32_t funcIndex_;
+ struct {
+ uint32_t globalDataOffset_;
+ } import;
+ struct {
+ uint32_t globalDataOffset_;
+ bool external_;
+ SigIdDesc sigId_;
+ } table;
+ SymbolicAddress builtin_;
+ } u;
+
+ public:
+ CalleeDesc() {}
+ static CalleeDesc function(uint32_t funcIndex) {
+ CalleeDesc c;
+ c.which_ = Func;
+ c.u.funcIndex_ = funcIndex;
+ return c;
+ }
+ static CalleeDesc import(uint32_t globalDataOffset) {
+ CalleeDesc c;
+ c.which_ = Import;
+ c.u.import.globalDataOffset_ = globalDataOffset;
+ return c;
+ }
+ static CalleeDesc wasmTable(const TableDesc& desc, SigIdDesc sigId) {
+ CalleeDesc c;
+ c.which_ = WasmTable;
+ c.u.table.globalDataOffset_ = desc.globalDataOffset;
+ c.u.table.external_ = desc.external;
+ c.u.table.sigId_ = sigId;
+ return c;
+ }
+ static CalleeDesc asmJSTable(const TableDesc& desc) {
+ CalleeDesc c;
+ c.which_ = AsmJSTable;
+ c.u.table.globalDataOffset_ = desc.globalDataOffset;
+ return c;
+ }
+ static CalleeDesc builtin(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = Builtin;
+ c.u.builtin_ = callee;
+ return c;
+ }
+ static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
+ CalleeDesc c;
+ c.which_ = BuiltinInstanceMethod;
+ c.u.builtin_ = callee;
+ return c;
+ }
+ Which which() const {
+ return which_;
+ }
+ uint32_t funcIndex() const {
+ MOZ_ASSERT(which_ == Func);
+ return u.funcIndex_;
+ }
+ uint32_t importGlobalDataOffset() const {
+ MOZ_ASSERT(which_ == Import);
+ return u.import.globalDataOffset_;
+ }
+ bool isTable() const {
+ return which_ == WasmTable || which_ == AsmJSTable;
+ }
+ uint32_t tableLengthGlobalDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.globalDataOffset_ + offsetof(TableTls, length);
+ }
+ uint32_t tableBaseGlobalDataOffset() const {
+ MOZ_ASSERT(isTable());
+ return u.table.globalDataOffset_ + offsetof(TableTls, base);
+ }
+ bool wasmTableIsExternal() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.external_;
+ }
+ SigIdDesc wasmTableSigId() const {
+ MOZ_ASSERT(which_ == WasmTable);
+ return u.table.sigId_;
+ }
+ SymbolicAddress builtin() const {
+ MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
+ return u.builtin_;
+ }
+};
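+
+// A brief usage sketch (illustration only; funcIndex and emitDirectCall are
+// hypothetical): compilers construct a CalleeDesc through the static factories
+// above and dispatch on which().
+//
+//   CalleeDesc callee = CalleeDesc::function(funcIndex);
+//   if (callee.which() == CalleeDesc::Func)
+//       emitDirectCall(callee.funcIndex());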
+
+// Because ARM has a fixed-width instruction encoding, ARM can only express a
+// limited subset of immediates (in a single instruction).
+
+extern bool
+IsValidARMImmediate(uint32_t i);
+
+extern uint32_t
+RoundUpToNextValidARMImmediate(uint32_t i);
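+
+// A hypothetical usage sketch (illustration only): a limit that is not
+// encodable as an ARM immediate is rounded up before being baked into a
+// bounds-check instruction.
+//
+//   if (!IsValidARMImmediate(limit))
+//       limit = RoundUpToNextValidARMImmediate(limit);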
+
+// The WebAssembly spec hard-codes the virtual page size to be 64KiB and
+// requires the size of linear memory to always be a multiple of 64KiB.
+
+static const unsigned PageSize = 64 * 1024;
+
+// Bounds checks always compare the base of the memory access with the bounds
+// check limit. If the memory access is unaligned, this means that, even if the
+// bounds check succeeds, a few bytes of the access can extend past the end of
+// memory. To guard against this, extra space is included in the guard region to
+// catch the overflow. MaxMemoryAccessSize is a conservative approximation of
+// the maximum guard space needed to catch all unaligned overflows.
+
+static const unsigned MaxMemoryAccessSize = sizeof(Val);
+
+#ifdef JS_CODEGEN_X64
+
+// All other code should use WASM_HUGE_MEMORY instead of JS_CODEGEN_X64 so that
+// it is easy to use the huge-mapping optimization for other 64-bit platforms in
+// the future.
+# define WASM_HUGE_MEMORY
+
+// On WASM_HUGE_MEMORY platforms, every asm.js or WebAssembly memory
+// unconditionally allocates a huge region of virtual memory of size
+// wasm::HugeMappedSize. This allows all memory resizing to work without
+// reallocation and provides enough guard space for all offsets to be folded
+// into memory accesses.
+
+static const uint64_t IndexRange = uint64_t(UINT32_MAX) + 1;
+static const uint64_t OffsetGuardLimit = uint64_t(INT32_MAX) + 1;
+static const uint64_t UnalignedGuardPage = PageSize;
+static const uint64_t HugeMappedSize = IndexRange + OffsetGuardLimit + UnalignedGuardPage;
+
+static_assert(MaxMemoryAccessSize <= UnalignedGuardPage, "rounded up to static page size");
+
+#else // !WASM_HUGE_MEMORY
+
+// On !WASM_HUGE_MEMORY platforms:
+// - To avoid OOM in ArrayBuffer::prepareForAsmJS, asm.js continues to use the
+// original ArrayBuffer allocation which has no guard region at all.
+// - For WebAssembly memories, an additional GuardSize is mapped after the
+// accessible region of the memory to catch folded (base+offset) accesses
+// where `offset < OffsetGuardLimit` as well as the overflow from unaligned
+// accesses, as described above for MaxMemoryAccessSize.
+
+static const size_t OffsetGuardLimit = PageSize - MaxMemoryAccessSize;
+static const size_t GuardSize = PageSize;
+
+// Return whether the given immediate satisfies the constraints of the platform
+// (viz., on ARM, that IsValidARMImmediate(i) holds).
+
+extern bool
+IsValidBoundsCheckImmediate(uint32_t i);
+
+// For a given WebAssembly/asm.js max size, return the number of bytes to
+// map, which will necessarily be a multiple of the system page size and greater
+// than maxSize. For a returned mappedSize:
+// boundsCheckLimit = mappedSize - GuardSize
+// IsValidBoundsCheckImmediate(boundsCheckLimit)
+
+extern size_t
+ComputeMappedSize(uint32_t maxSize);
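+
+// A usage sketch (illustration only) of the contract stated above:
+//
+//   size_t mappedSize = ComputeMappedSize(maxSize);
+//   uint32_t boundsCheckLimit = uint32_t(mappedSize - GuardSize);
+//   MOZ_ASSERT(IsValidBoundsCheckImmediate(boundsCheckLimit));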
+
+#endif // WASM_HUGE_MEMORY
+
+// Metadata for bounds check instructions that are patched at runtime with the
+// appropriate bounds check limit. On WASM_HUGE_MEMORY platforms, no BoundsCheck
+// is created for wasm (and SIMD/Atomic) bounds checks: the signal handler
+// catches everything. On !WASM_HUGE_MEMORY, a BoundsCheck is created for each
+// memory access (except when statically eliminated by optimizations) so that
+// the length can be patched in as an immediate. This requires that the bounds
+// check limit IsValidBoundsCheckImmediate.
+
+class BoundsCheck
+{
+ public:
+ BoundsCheck() = default;
+
+ explicit BoundsCheck(uint32_t cmpOffset)
+ : cmpOffset_(cmpOffset)
+ { }
+
+ uint8_t* patchAt(uint8_t* code) const { return code + cmpOffset_; }
+ void offsetBy(uint32_t offset) { cmpOffset_ += offset; }
+
+ private:
+ uint32_t cmpOffset_;
+};
+
+WASM_DECLARE_POD_VECTOR(BoundsCheck, BoundsCheckVector)
+
+// Metadata for memory accesses. On WASM_HUGE_MEMORY platforms, only
+// (non-SIMD/Atomic) asm.js loads and stores create a MemoryAccess so that the
+// signal handler can implement the semantically-correct wraparound logic; the
+// rest simply redirect to the out-of-bounds stub in the signal handler. On x86,
+// the base address of memory is baked into each memory access instruction so
+// the MemoryAccess records the location of each for patching. On all other
+// platforms, no MemoryAccess is created.
+
+class MemoryAccess
+{
+ uint32_t insnOffset_;
+ uint32_t trapOutOfLineOffset_;
+
+ public:
+ MemoryAccess() = default;
+ explicit MemoryAccess(uint32_t insnOffset, uint32_t trapOutOfLineOffset = UINT32_MAX)
+ : insnOffset_(insnOffset),
+ trapOutOfLineOffset_(trapOutOfLineOffset)
+ {}
+
+ uint32_t insnOffset() const {
+ return insnOffset_;
+ }
+ bool hasTrapOutOfLineCode() const {
+ return trapOutOfLineOffset_ != UINT32_MAX;
+ }
+ uint8_t* trapOutOfLineCode(uint8_t* code) const {
+ MOZ_ASSERT(hasTrapOutOfLineCode());
+ return code + trapOutOfLineOffset_;
+ }
+
+ void offsetBy(uint32_t delta) {
+ insnOffset_ += delta;
+ if (hasTrapOutOfLineCode())
+ trapOutOfLineOffset_ += delta;
+ }
+};
+
+WASM_DECLARE_POD_VECTOR(MemoryAccess, MemoryAccessVector)
+
+// Metadata for the offset of an instruction to patch with the base address of
+// memory. In practice, this is only used for x86 where the offset points to the
+// *end* of the instruction (which is a non-fixed offset from the beginning of
+// the instruction). As part of the move away from code patching, this should be
+// removed.
+
+struct MemoryPatch
+{
+ uint32_t offset;
+
+ MemoryPatch() = default;
+ explicit MemoryPatch(uint32_t offset) : offset(offset) {}
+
+ void offsetBy(uint32_t delta) {
+ offset += delta;
+ }
+};
+
+WASM_DECLARE_POD_VECTOR(MemoryPatch, MemoryPatchVector)
+
+// Constants:
+
+static const unsigned NaN64GlobalDataOffset = 0;
+static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
+static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);
+
+static const unsigned MaxSigs = 4 * 1024;
+static const unsigned MaxFuncs = 512 * 1024;
+static const unsigned MaxGlobals = 4 * 1024;
+static const unsigned MaxLocals = 64 * 1024;
+static const unsigned MaxImports = 64 * 1024;
+static const unsigned MaxExports = 64 * 1024;
+static const unsigned MaxTables = 4 * 1024;
+static const unsigned MaxTableElems = 1024 * 1024;
+static const unsigned MaxDataSegments = 64 * 1024;
+static const unsigned MaxElemSegments = 64 * 1024;
+static const unsigned MaxArgsPerFunc = 4 * 1024;
+static const unsigned MaxBrTableElems = 4 * 1024 * 1024;
+
+// To be able to assign function indices during compilation while the number of
+// imports is still unknown, asm.js sets a maximum number of imports so it can
+// immediately start handing out function indices starting at the maximum + 1.
+// This means that there is a "hole" between the last import and the first
+// definition, but that's fine.
+
+static const unsigned AsmJSMaxImports = 4 * 1024;
+static const unsigned AsmJSFirstDefFuncIndex = AsmJSMaxImports + 1;
+
+static_assert(AsmJSMaxImports <= MaxImports, "conservative");
+static_assert(AsmJSFirstDefFuncIndex < MaxFuncs, "conservative");
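+
+// For example (illustrative arithmetic only): an asm.js module that ends up
+// using 10 imports still gives its first defined function the index
+// AsmJSFirstDefFuncIndex (AsmJSMaxImports + 1 == 4097), leaving indices
+// 10..4096 unused.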
+
+} // namespace wasm
+} // namespace js
+
+#endif // wasm_types_h