path: root/js/src/jit/mips-shared/MacroAssembler-mips-shared.h
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_mips_shared_MacroAssembler_mips_shared_h
#define jit_mips_shared_MacroAssembler_mips_shared_h

#if defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/Assembler-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/Assembler-mips64.h"
#endif

#include "jit/AtomicOp.h"

namespace js {
namespace jit {

enum LoadStoreSize
{
    SizeByte = 8,
    SizeHalfWord = 16,
    SizeWord = 32,
    SizeDouble = 64
};

enum LoadStoreExtension
{
    ZeroExtend = 0,
    SignExtend = 1
};

enum JumpKind
{
    MixedJump = 0,
    ShortJump = 1
};

enum DelaySlotFill
{
    DontFillDelaySlot = 0,
    FillDelaySlot = 1
};

// MIPS calling conventions expect the address of the called function in $t9,
// so calls are made through t9.
static Register CallReg = t9;

class MacroAssemblerMIPSShared : public Assembler
{
  protected:
    // Perform a downcast. Should be removed by Bug 996602.
    MacroAssembler& asMasm();
    const MacroAssembler& asMasm() const;

    Condition ma_cmp(Register rd, Register lhs, Register rhs, Condition c);
    Condition ma_cmp(Register rd, Register lhs, Imm32 imm, Condition c);

    void compareFloatingPoint(FloatFormat fmt, FloatRegister lhs, FloatRegister rhs,
                              DoubleCondition c, FloatTestKind* testKind,
                              FPConditionBit fcc = FCC0);

    void GenerateMixedJumps();

  public:
    void ma_move(Register rd, Register rs);

    void ma_li(Register dest, ImmGCPtr ptr);

    void ma_li(Register dest, Imm32 imm);
    void ma_liPatchable(Register dest, Imm32 imm);

    // Shift operations
    void ma_sll(Register rd, Register rt, Imm32 shift);
    void ma_srl(Register rd, Register rt, Imm32 shift);
    void ma_sra(Register rd, Register rt, Imm32 shift);
    void ma_ror(Register rd, Register rt, Imm32 shift);
    void ma_rol(Register rd, Register rt, Imm32 shift);

    void ma_sll(Register rd, Register rt, Register shift);
    void ma_srl(Register rd, Register rt, Register shift);
    void ma_sra(Register rd, Register rt, Register shift);
    void ma_ror(Register rd, Register rt, Register shift);
    void ma_rol(Register rd, Register rt, Register shift);

    // Negate
    void ma_negu(Register rd, Register rs);

    void ma_not(Register rd, Register rs);

    // and
    void ma_and(Register rd, Register rs);
    void ma_and(Register rd, Imm32 imm);
    void ma_and(Register rd, Register rs, Imm32 imm);

    // or
    void ma_or(Register rd, Register rs);
    void ma_or(Register rd, Imm32 imm);
    void ma_or(Register rd, Register rs, Imm32 imm);

    // xor
    void ma_xor(Register rd, Register rs);
    void ma_xor(Register rd, Imm32 imm);
    void ma_xor(Register rd, Register rs, Imm32 imm);

    void ma_ctz(Register rd, Register rs);

    // load
    void ma_load(Register dest, const BaseIndex& src, LoadStoreSize size = SizeWord,
                 LoadStoreExtension extension = SignExtend);
    void ma_load_unaligned(Register dest, const BaseIndex& src, Register temp,
                           LoadStoreSize size, LoadStoreExtension extension);
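    // A usage sketch (register and addressing-mode choices are illustrative):
    // an unsigned 8-bit load passes SizeByte together with ZeroExtend, e.g.
    //
    //   masm.ma_load(dest, BaseIndex(base, index, TimesOne), SizeByte, ZeroExtend);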

    // store
    void ma_store(Register data, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
    void ma_store(Imm32 imm, const BaseIndex& dest, LoadStoreSize size = SizeWord,
                  LoadStoreExtension extension = SignExtend);
    void ma_store_unaligned(Register data, const BaseIndex& dest, Register temp,
                            LoadStoreSize size, LoadStoreExtension extension);

    // arithmetic-based ops
    // add
    void ma_addu(Register rd, Register rs, Imm32 imm);
    void ma_addu(Register rd, Register rs);
    void ma_addu(Register rd, Imm32 imm);
    template <typename L>
    void ma_addTestCarry(Register rd, Register rs, Register rt, L overflow);
    template <typename L>
    void ma_addTestCarry(Register rd, Register rs, Imm32 imm, L overflow);

    // subtract
    void ma_subu(Register rd, Register rs, Imm32 imm);
    void ma_subu(Register rd, Register rs);
    void ma_subu(Register rd, Imm32 imm);
    void ma_subTestOverflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // multiplies. For now, there are only a few that we care about.
    void ma_mul(Register rd, Register rs, Imm32 imm);
    void ma_mul_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
    void ma_mul_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // divisions
    void ma_div_branch_overflow(Register rd, Register rs, Register rt, Label* overflow);
    void ma_div_branch_overflow(Register rd, Register rs, Imm32 imm, Label* overflow);

    // Fast modulus; uses scratch registers and thus needs to live in the
    // assembler. Implicitly assumes that dest can be overwritten at the
    // beginning of the sequence.
    void ma_mod_mask(Register src, Register dest, Register hold, Register remain,
                     int32_t shift, Label* negZero = nullptr);
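    // A hypothetical call sketch (register names and the shift value are
    // purely illustrative):
    //
    //   Label negativeZero;
    //   masm.ma_mod_mask(src, dest, hold, remain, /* shift = */ 3, &negativeZero);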

    void branchWithCode(InstImm code, Label* label, JumpKind jumpKind);
    // Branch helpers for use from within mips-specific code.
    void ma_b(Register lhs, Register rhs, Label* l, Condition c, JumpKind jumpKind = MixedJump);
    void ma_b(Register lhs, Imm32 imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
    void ma_b(Register lhs, ImmPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump);
    void ma_b(Register lhs, ImmGCPtr imm, Label* l, Condition c, JumpKind jumpKind = MixedJump) {
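        // The immediate is materialized into ScratchRegister first, hence the
        // assertion that lhs does not alias it.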
        MOZ_ASSERT(lhs != ScratchRegister);
        ma_li(ScratchRegister, imm);
        ma_b(lhs, ScratchRegister, l, c, jumpKind);
    }
    template <typename T>
    void ma_b(Register lhs, T rhs, wasm::TrapDesc target, Condition c,
              JumpKind jumpKind = MixedJump);

    void ma_b(Label* l, JumpKind jumpKind = MixedJump);
    void ma_b(wasm::TrapDesc target, JumpKind jumpKind = MixedJump);

    void ma_jal(Label* l);

    // fp instructions
    void ma_lis(FloatRegister dest, float value);
    void ma_lis(FloatRegister dest, wasm::RawF32 value);
    void ma_liNegZero(FloatRegister dest);

    void ma_sd(FloatRegister fd, BaseIndex address);
    void ma_ss(FloatRegister fd, BaseIndex address);

    // FP branches
    void ma_bc1s(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
                 JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);
    void ma_bc1d(FloatRegister lhs, FloatRegister rhs, Label* label, DoubleCondition c,
                 JumpKind jumpKind = MixedJump, FPConditionBit fcc = FCC0);

    void ma_call(ImmPtr dest);

    void ma_jump(ImmPtr dest);

    void ma_cmp_set(Register dst, Register lhs, Register rhs, Condition c);
    void ma_cmp_set(Register dst, Register lhs, Imm32 imm, Condition c);
    void ma_cmp_set_double(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);
    void ma_cmp_set_float32(Register dst, FloatRegister lhs, FloatRegister rhs, DoubleCondition c);

    // Emit a patchable 32-bit immediate load into |bounded| and return the
    // offset of that load so the real bounds-check limit can be patched in
    // afterwards.
    BufferOffset ma_BoundsCheck(Register bounded) {
        BufferOffset bo = m_buffer.nextOffset();
        ma_liPatchable(bounded, Imm32(0));
        return bo;
    }

    void moveToDoubleLo(Register src, FloatRegister dest) {
        as_mtc1(src, dest);
    }
    void moveFromDoubleLo(FloatRegister src, Register dest) {
        as_mfc1(dest, src);
    }

    void moveToFloat32(Register src, FloatRegister dest) {
        as_mtc1(src, dest);
    }
    void moveFromFloat32(FloatRegister src, Register dest) {
        as_mfc1(dest, src);
    }

    // Evaluate srcDest = minmax<isMax>{Float32,Double}(srcDest, other).
    // Handle NaN specially if handleNaN is true.
    void minMaxDouble(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
    void minMaxFloat32(FloatRegister srcDest, FloatRegister other, bool handleNaN, bool isMax);
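    // For example (register choices are illustrative),
    //   masm.minMaxDouble(f0, f2, /* handleNaN = */ true, /* isMax = */ true);
    // leaves max(f0, f2) in f0, treating NaN operands specially.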

  private:
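    // The temps below are presumably used to build sub-word (8- and 16-bit)
    // atomics on top of word-sized LL/SC: valueTemp for the shifted value,
    // offsetTemp for the bit offset within the aligned word and maskTemp for
    // the corresponding mask. This is an assumption based on the parameter
    // names; the definitions live in the .cpp file.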
    void atomicEffectOpMIPSr2(int nbytes, AtomicOp op, const Register& value, const Register& addr,
                              Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
    void atomicFetchOpMIPSr2(int nbytes, bool signExtend, AtomicOp op, const Register& value, const Register& addr,
                             Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp,
                             Register output);
    void compareExchangeMIPSr2(int nbytes, bool signExtend, const Register& addr, Register oldval,
                               Register newval, Register flagTemp, Register valueTemp, Register offsetTemp,
                               Register maskTemp, Register output);

  protected:
    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const Address& address,
                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
    void atomicEffectOp(int nbytes, AtomicOp op, const Imm32& value, const BaseIndex& address,
                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const Address& address,
                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);
    void atomicEffectOp(int nbytes, AtomicOp op, const Register& value, const BaseIndex& address,
                        Register flagTemp, Register valueTemp, Register offsetTemp, Register maskTemp);

    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
                       const Address& address, Register flagTemp, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output);
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Imm32& value,
                       const BaseIndex& address, Register flagTemp, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output);
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
                       const Address& address, Register flagTemp, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output);
    void atomicFetchOp(int nbytes, bool signExtend, AtomicOp op, const Register& value,
                       const BaseIndex& address, Register flagTemp, Register valueTemp,
                       Register offsetTemp, Register maskTemp, Register output);

    void compareExchange(int nbytes, bool signExtend, const Address& address, Register oldval,
                         Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
                         Register output);
    void compareExchange(int nbytes, bool signExtend, const BaseIndex& address, Register oldval,
                         Register newval, Register valueTemp, Register offsetTemp, Register maskTemp,
                         Register output);

    void atomicExchange(int nbytes, bool signExtend, const Address& address, Register value,
                        Register valueTemp, Register offsetTemp, Register maskTemp,
                        Register output);
    void atomicExchange(int nbytes, bool signExtend, const BaseIndex& address, Register value,
                        Register valueTemp, Register offsetTemp, Register maskTemp,
                        Register output);

  public:
    struct AutoPrepareForPatching {
        explicit AutoPrepareForPatching(MacroAssemblerMIPSShared&) {}
    };
};

} // namespace jit
} // namespace js

#endif /* jit_mips_shared_MacroAssembler_mips_shared_h */