author     Jiaxun Yang <jiaxun.yang@flygoat.com>   2020-05-12 12:40:13 +0800
committer  Jiaxun Yang <jiaxun.yang@flygoat.com>   2020-05-14 16:31:59 +0800
commit     77544de5d66a8bf403a9e9db4c0eb7b98c437a65 (patch)
tree       6a96a53bdd73735c95242b866243d36ae7f5fe7c /js/src
parent     9679f714f7c3a1da7af0ddd1c2005f8f7ffd039b (diff)
download   uxp-77544de5d66a8bf403a9e9db4c0eb7b98c437a65.tar.gz
Bug 1330942 - move MemoryAccessDesc::isUnaligned to the ARM/MIPS platform layer
Tag: #1542
Diffstat (limited to 'js/src')
-rw-r--r--  js/src/jit/arm/Assembler-arm.cpp                       | 12
-rw-r--r--  js/src/jit/arm/Assembler-arm.h                         |  2
-rw-r--r--  js/src/jit/arm/Lowering-arm.cpp                        |  4
-rw-r--r--  js/src/jit/mips-shared/Assembler-mips-shared.h         |  6
-rw-r--r--  js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp   |  4
-rw-r--r--  js/src/jit/mips-shared/Lowering-mips-shared.cpp        |  4
-rw-r--r--  js/src/jit/mips32/CodeGenerator-mips32.cpp             |  4
-rw-r--r--  js/src/jit/mips64/CodeGenerator-mips64.cpp             |  4
-rw-r--r--  js/src/jit/shared/Assembler-shared.h                   |  1
-rw-r--r--  js/src/wasm/WasmBaselineCompile.cpp                    | 20
10 files changed, 40 insertions, 21 deletions
diff --git a/js/src/jit/arm/Assembler-arm.cpp b/js/src/jit/arm/Assembler-arm.cpp
index 1e20da1c8c..b231128c54 100644
--- a/js/src/jit/arm/Assembler-arm.cpp
+++ b/js/src/jit/arm/Assembler-arm.cpp
@@ -171,6 +171,18 @@ ABIArgGenerator::next(MIRType type)
return softNext(type);
}
+bool
+js::jit::IsUnaligned(const wasm::MemoryAccessDesc& access)
+{
+ if (!access.align())
+ return false;
+
+ if (access.type() == Scalar::Float64 && access.align() >= 4)
+ return false;
+
+ return access.align() < access.byteSize();
+}
+
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. These should never be called with an InvalidReg.
uint32_t
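The ARM predicate above is the substantive half of the patch: untracked alignment (align() == 0) is treated as aligned, a Float64 access aligned to at least 4 bytes is accepted because ARM's FP loads and stores require only word alignment, and any other access counts as unaligned when its declared alignment is below its width. A minimal standalone sketch of that decision table, using a hypothetical helper over plain integers rather than the real wasm::MemoryAccessDesc:

#include <cassert>
#include <cstdint>

// Standalone model of the ARM predicate added above (hypothetical helper;
// the real IsUnaligned takes a wasm::MemoryAccessDesc).
static bool ArmIsUnaligned(uint32_t align, uint32_t byteSize, bool isFloat64) {
    if (!align)                       // alignment untracked: treat as aligned
        return false;
    if (isFloat64 && align >= 4)      // ARM VLDR/VSTR need only 4-byte
        return false;                 // alignment for doubles
    return align < byteSize;
}

int main() {
    assert(!ArmIsUnaligned(8, 8, true));   // naturally aligned Float64
    assert(!ArmIsUnaligned(4, 8, true));   // 4-aligned Float64: fine on ARM
    assert( ArmIsUnaligned(4, 8, false));  // 4-aligned Int64: unaligned
    assert( ArmIsUnaligned(1, 4, false));  // byte-aligned Int32: unaligned
    assert(!ArmIsUnaligned(4, 4, false));  // naturally aligned Int32
    return 0;
}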
diff --git a/js/src/jit/arm/Assembler-arm.h b/js/src/jit/arm/Assembler-arm.h
index 8bb754a50d..ecfb83b422 100644
--- a/js/src/jit/arm/Assembler-arm.h
+++ b/js/src/jit/arm/Assembler-arm.h
@@ -108,6 +108,8 @@ class ABIArgGenerator
uint32_t stackBytesConsumedSoFar() const { return stackOffset_; }
};
+bool IsUnaligned(const wasm::MemoryAccessDesc& access);
+
static constexpr Register ABINonArgReg0 = r4;
static constexpr Register ABINonArgReg1 = r5;
static constexpr Register ABINonArgReg2 = r6;
diff --git a/js/src/jit/arm/Lowering-arm.cpp b/js/src/jit/arm/Lowering-arm.cpp
index c266801162..b9440816a5 100644
--- a/js/src/jit/arm/Lowering-arm.cpp
+++ b/js/src/jit/arm/Lowering-arm.cpp
@@ -613,7 +613,7 @@ LIRGeneratorARM::visitWasmLoad(MWasmLoad* ins)
LAllocation ptr = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
// Unaligned access expected! Revert to a byte load.
LDefinition ptrCopy = tempCopy(base, 0);
@@ -662,7 +662,7 @@ LIRGeneratorARM::visitWasmStore(MWasmStore* ins)
LAllocation ptr = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
// Unaligned access expected! Revert to a byte store.
LDefinition ptrCopy = tempCopy(base, 0);
diff --git a/js/src/jit/mips-shared/Assembler-mips-shared.h b/js/src/jit/mips-shared/Assembler-mips-shared.h
index 5a47eb1dc4..4cfb30117c 100644
--- a/js/src/jit/mips-shared/Assembler-mips-shared.h
+++ b/js/src/jit/mips-shared/Assembler-mips-shared.h
@@ -1547,6 +1547,12 @@ class InstGS : public Instruction
{ }
};
+inline bool
+IsUnaligned(const wasm::MemoryAccessDesc& access)
+{
+ return access.align() && access.align() < access.byteSize();
+}
+
} // namespace jit
} // namespace js
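The MIPS-shared version keeps the old generic rule verbatim, with no Float64 concession, so the two platforms now diverge exactly on 4-byte-aligned doubles. A matching sketch, again with a hypothetical plain-integer helper:

#include <cassert>
#include <cstdint>

// Standalone model of the MIPS-shared predicate above: the old generic
// rule, unchanged, with no Float64 special case (hypothetical helper).
static bool MipsIsUnaligned(uint32_t align, uint32_t byteSize) {
    return align && align < byteSize;
}

int main() {
    assert( MipsIsUnaligned(4, 8));  // 4-aligned Float64: still unaligned
                                     // here, though aligned enough on ARM
    assert(!MipsIsUnaligned(8, 8));  // naturally aligned access
    assert(!MipsIsUnaligned(0, 4));  // untracked alignment: treated as aligned
    return 0;
}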
diff --git a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
index 1a5a3987bb..2526fc2379 100644
--- a/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
+++ b/js/src/jit/mips-shared/CodeGenerator-mips-shared.cpp
@@ -1915,7 +1915,7 @@ CodeGeneratorMIPSShared::emitWasmLoad(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
@@ -2000,7 +2000,7 @@ CodeGeneratorMIPSShared::emitWasmStore(T* lir)
BaseIndex address(HeapReg, ptr, TimesOne);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (isFloat) {
diff --git a/js/src/jit/mips-shared/Lowering-mips-shared.cpp b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
index f328d16f79..8c78f56b73 100644
--- a/js/src/jit/mips-shared/Lowering-mips-shared.cpp
+++ b/js/src/jit/mips-shared/Lowering-mips-shared.cpp
@@ -324,7 +324,7 @@ LIRGeneratorMIPSShared::visitWasmLoad(MWasmLoad* ins)
LAllocation ptr = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
auto* lir = new(alloc()) LWasmUnalignedLoadI64(ptr, temp());
if (ins->access().offset())
@@ -367,7 +367,7 @@ LIRGeneratorMIPSShared::visitWasmStore(MWasmStore* ins)
MDefinition* value = ins->value();
LAllocation baseAlloc = useRegisterAtStart(base);
- if (ins->access().isUnaligned()) {
+ if (IsUnaligned(ins->access())) {
if (ins->type() == MIRType::Int64) {
LInt64Allocation valueAlloc = useInt64RegisterAtStart(value);
auto* lir = new(alloc()) LWasmUnalignedStoreI64(baseAlloc, valueAlloc, temp());
diff --git a/js/src/jit/mips32/CodeGenerator-mips32.cpp b/js/src/jit/mips32/CodeGenerator-mips32.cpp
index b947c14aac..33fea01ae3 100644
--- a/js/src/jit/mips32/CodeGenerator-mips32.cpp
+++ b/js/src/jit/mips32/CodeGenerator-mips32.cpp
@@ -490,7 +490,7 @@ CodeGeneratorMIPS::emitWasmLoadI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
MOZ_ASSERT(INT64LOW_OFFSET == 0);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
@@ -577,7 +577,7 @@ CodeGeneratorMIPS::emitWasmStoreI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
MOZ_ASSERT(INT64LOW_OFFSET == 0);
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
if (byteSize <= 4) {
diff --git a/js/src/jit/mips64/CodeGenerator-mips64.cpp b/js/src/jit/mips64/CodeGenerator-mips64.cpp
index 45f0e69d73..d1b6a7a32c 100644
--- a/js/src/jit/mips64/CodeGenerator-mips64.cpp
+++ b/js/src/jit/mips64/CodeGenerator-mips64.cpp
@@ -449,7 +449,7 @@ CodeGeneratorMIPS64::emitWasmLoadI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
masm.ma_load_unaligned(ToOutRegister64(lir).reg, BaseIndex(HeapReg, ptr, TimesOne),
@@ -514,7 +514,7 @@ CodeGeneratorMIPS64::emitWasmStoreI64(T* lir)
masm.memoryBarrier(mir->access().barrierBefore());
- if (mir->access().isUnaligned()) {
+ if (IsUnaligned(mir->access())) {
Register temp = ToRegister(lir->getTemp(1));
masm.ma_store_unaligned(ToRegister64(lir->value()).reg, BaseIndex(HeapReg, ptr, TimesOne),
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index 8044e75cb4..258f977e37 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -747,7 +747,6 @@ class MemoryAccessDesc
TrapOffset trapOffset() const { return *trapOffset_; }
bool isAtomic() const { return (barrierBefore_ | barrierAfter_) != jit::MembarNobits; }
bool isSimd() const { return Scalar::isSimdType(type_); }
- bool isUnaligned() const { return align() && align() < byteSize(); }
bool isPlainAsmJS() const { return !hasTrap(); }
void clearOffset() { offset_ = 0; }
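The deletion above is the point of the whole patch: MemoryAccessDesc is shared by every back end, yet ARM and MIPS now need different answers to the same question, so the policy cannot stay in the portable class. A side-by-side sketch of the divergence the split makes possible, using hypothetical stand-in types rather than the real SpiderMonkey ones:

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for wasm::MemoryAccessDesc after this patch: only
// portable facts remain, and the alignment policy question moves out.
struct AccessDesc {
    uint32_t align_, byteSize_;
    bool float64_;                   // stands in for type() == Scalar::Float64
    uint32_t align() const { return align_; }
    uint32_t byteSize() const { return byteSize_; }
};

namespace arm {
inline bool IsUnaligned(const AccessDesc& a) {   // cf. Assembler-arm.cpp
    if (!a.align()) return false;
    if (a.float64_ && a.align() >= 4) return false;
    return a.align() < a.byteSize();
}
}
namespace mips {
inline bool IsUnaligned(const AccessDesc& a) {   // cf. Assembler-mips-shared.h
    return a.align() && a.align() < a.byteSize();
}
}

int main() {
    AccessDesc d{4, 8, true};        // a 4-byte-aligned Float64 access
    assert(!arm::IsUnaligned(d));    // takes the fast aligned path on ARM
    assert(mips::IsUnaligned(d));    // takes the byte-wise path on MIPS
    return 0;
}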
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index 564b81f683..8dc5c104f4 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -3206,7 +3206,7 @@ class BaseCompiler
// This is the temp register passed as the last argument to load()
MOZ_MUST_USE size_t loadStoreTemps(MemoryAccessDesc& access) {
#if defined(JS_CODEGEN_ARM)
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
switch (access.type()) {
case Scalar::Float32:
return 1;
@@ -3391,7 +3391,7 @@ class BaseCompiler
#ifdef JS_CODEGEN_ARM
void
loadI32(MemoryAccessDesc access, bool isSigned, RegI32 ptr, Register rt) {
- if (access.byteSize() > 1 && access.isUnaligned()) {
+ if (access.byteSize() > 1 && IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(isSigned, access.byteSize(), ptr.reg, scratch, rt, 0);
@@ -3405,7 +3405,7 @@ class BaseCompiler
void
storeI32(MemoryAccessDesc access, RegI32 ptr, Register rt) {
- if (access.byteSize() > 1 && access.isUnaligned()) {
+ if (access.byteSize() > 1 && IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
masm.emitUnalignedStore(access.byteSize(), ptr.reg, rt, 0);
} else {
@@ -3419,7 +3419,7 @@ class BaseCompiler
void
loadI64(MemoryAccessDesc access, RegI32 ptr, RegI64 dest) {
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, dest.reg.low,
@@ -3440,7 +3440,7 @@ class BaseCompiler
void
storeI64(MemoryAccessDesc access, RegI32 ptr, RegI64 src) {
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
masm.add32(HeapReg, ptr.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.low, 0);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, src.reg.high, 4);
@@ -3459,7 +3459,7 @@ class BaseCompiler
void
loadF32(MemoryAccessDesc access, RegI32 ptr, RegF32 dest, RegI32 tmp1) {
masm.add32(HeapReg, ptr.reg);
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
masm.ma_vxfer(tmp1.reg, dest.reg);
@@ -3473,7 +3473,7 @@ class BaseCompiler
void
storeF32(MemoryAccessDesc access, RegI32 ptr, RegF32 src, RegI32 tmp1) {
masm.add32(HeapReg, ptr.reg);
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
masm.ma_vxfer(src.reg, tmp1.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
} else {
@@ -3486,7 +3486,7 @@ class BaseCompiler
void
loadF64(MemoryAccessDesc access, RegI32 ptr, RegF64 dest, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr.reg);
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
SecondScratchRegisterScope scratch(*this);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp1.reg, 0);
masm.emitUnalignedLoad(IsSigned(false), ByteSize(4), ptr.reg, scratch, tmp2.reg, 4);
@@ -3501,7 +3501,7 @@ class BaseCompiler
void
storeF64(MemoryAccessDesc access, RegI32 ptr, RegF64 src, RegI32 tmp1, RegI32 tmp2) {
masm.add32(HeapReg, ptr.reg);
- if (access.isUnaligned()) {
+ if (IsUnaligned(access)) {
masm.ma_vxfer(src.reg, tmp1.reg, tmp2.reg);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp1.reg, 0);
masm.emitUnalignedStore(ByteSize(4), ptr.reg, tmp2.reg, 4);
@@ -5991,7 +5991,7 @@ BaseCompiler::emitLoad(ValType type, Scalar::Type viewType)
case ValType::I32: {
RegI32 rp = popI32();
#ifdef JS_CODEGEN_ARM
- RegI32 rv = access.isUnaligned() ? needI32() : rp;
+ RegI32 rv = IsUnaligned(access) ? needI32() : rp;
#else
RegI32 rv = rp;
#endif
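One register-pressure consequence shows up in the baseline compiler hunks above: on ARM an expected-unaligned float access is assembled from integer pieces, so loadStoreTemps() reserves integer temporaries for it, and emitLoad() needs a fresh value register instead of reusing the pointer. The loadStoreTemps hunk is truncated after the Float32 case; the sketch below is a hypothetical standalone rendering whose Float64 count of 2 is inferred from the tmp1/tmp2 parameters of loadF64/storeF64 above:

#include <cassert>
#include <cstddef>

// Hypothetical standalone form of the ARM temp-count policy visible in
// loadStoreTemps(): integer accesses go through the scratch register, while
// float accesses need integer temps to shuttle the bytes into the FPU.
enum class ScalarType { Int32, Int64, Float32, Float64 };

static size_t loadStoreTempsArm(bool expectedUnaligned, ScalarType type) {
    if (!expectedUnaligned)
        return 0;
    switch (type) {
      case ScalarType::Float32:
        return 1;                    // one temp (tmp1 in loadF32/storeF32)
      case ScalarType::Float64:
        return 2;                    // low/high halves (tmp1, tmp2 in loadF64)
      default:
        return 0;                    // integer paths use the scratch register
    }
}

int main() {
    assert(loadStoreTempsArm(true,  ScalarType::Float64) == 2);
    assert(loadStoreTempsArm(true,  ScalarType::Int32)   == 0);
    assert(loadStoreTempsArm(false, ScalarType::Float64) == 0);
    return 0;
}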