/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jit_AtomicOperations_h
#define jit_AtomicOperations_h

#include "mozilla/Types.h"

#include "vm/SharedMem.h"

namespace js {
namespace jit {

class RegionLock;

/*
 * The atomic operations layer defines types and functions for
 * JIT-compatible atomic operations.
 *
 * The fundamental constraints on the functions are:
 *
 * - That their realization here MUST be compatible with code the JIT
 *   generates for its Atomics operations, so that an atomic access
 *   from the interpreter or runtime - from any C++ code - really is
 *   atomic relative to a concurrent, compatible atomic access from
 *   jitted code.  That is, these primitives expose JIT-compatible
 *   atomicity functionality to C++.
 *
 * - That accesses may race without creating C++ undefined behavior:
 *   atomic accesses (marked "SeqCst") may race with non-atomic
 *   accesses (marked "SafeWhenRacy"); overlapping but non-matching,
 *   and hence incompatible, atomic accesses may race; and non-atomic
 *   accesses may race.  The effects of races need not be predictable,
 *   so garbage can be produced by a read or written by a write, but
 *   the effects must be benign: the program must continue to run, and
 *   only the memory in the union of addresses named in the racing
 *   accesses may be affected.
 *
 * The compatibility constraint means that if the JIT makes dynamic
 * decisions about how to implement atomic operations then
 * corresponding dynamic decisions MUST be made in the implementations
 * of the functions below.
 *
 * The safe-for-races constraint means that by and large, it is hard
 * to implement these primitives in C++.  See "Implementation notes"
 * below.
 *
 * The "SeqCst" suffix on operations means "sequentially consistent"
 * and means such a function's operation must have "sequentially
 * consistent" memory ordering.  See mfbt/Atomics.h for an explanation
 * of this memory ordering.
 *
 * Note that a "SafeWhenRacy" access does not provide the atomicity of
 * a "relaxed atomic" access: it can read or write garbage if there's
 * a race.
 *
 *
 * Implementation notes.
 *
 * It's not a requirement that these functions be inlined; performance
 * is not a great concern.  On some platforms these functions may call
 * out to code that's generated at run time.
 *
 * In principle these functions will not be written in C++, thus
 * making races defined behavior if all racy accesses from C++ go via
 * these functions.  (Jitted code will always be safe for races and
 * provides the same guarantees as these functions.)
 *
 * The appropriate implementations will be platform-specific and
 * there are some obvious implementation strategies to choose
 * from, sometimes a combination is appropriate:
 *
 *  - generating the code at run-time with the JIT;
 *  - hand-written assembler (maybe inline); or
 *  - using special compiler intrinsics or directives.
 *
 * Trusting the compiler not to generate code that blows up on a
 * race definitely won't work in the presence of TSan, or even of
 * optimizing compilers in seemingly-"innocuous" conditions.  (See
 * https://www.usenix.org/legacy/event/hotpar11/tech/final_files/Boehm.pdf
 * for details.)
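 *
 * As an illustration only (not necessarily the scheme any platform
 * here actually uses), a platform that trusts the GCC/Clang __atomic
 * builtins to be compatible with its jitted code could realize some
 * of the SeqCst operations roughly as follows:
 *
 *   template<typename T>
 *   inline T
 *   js::jit::AtomicOperations::loadSeqCst(T* addr)
 *   {
 *       T v;
 *       __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
 *       return v;
 *   }
 *
 *   template<typename T>
 *   inline T
 *   js::jit::AtomicOperations::compareExchangeSeqCst(T* addr, T oldval, T newval)
 *   {
 *       __atomic_compare_exchange(addr, &oldval, &newval, false,
 *                                 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
 *       return oldval;   // now holds the value that was observed in *addr
 *   }
 *
 *   inline void
 *   js::jit::AtomicOperations::fenceSeqCst()
 *   {
 *       __atomic_thread_fence(__ATOMIC_SEQ_CST);
 *   }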
 */
class AtomicOperations
{
    friend class RegionLock;

  private:
    // The following functions are defined for T = int8_t, uint8_t,
    // int16_t, uint16_t, int32_t, uint32_t, int64_t, and uint64_t.

    // Atomically read *addr.
    template<typename T>
    static inline T loadSeqCst(T* addr);

    // Atomically store val in *addr.
    template<typename T>
    static inline void storeSeqCst(T* addr, T val);

    // Atomically store val in *addr and return the old value of *addr.
    template<typename T>
    static inline T exchangeSeqCst(T* addr, T val);

    // Atomically check that *addr contains oldval and if so replace it
    // with newval, in any case returning the old contents of *addr.
    template<typename T>
    static inline T compareExchangeSeqCst(T* addr, T oldval, T newval);
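
    // For example (an illustration only, not part of this interface), a
    // fetch-and-add can be built from compareExchangeSeqCst with the
    // usual retry loop; fetchAddViaCas is a hypothetical helper name:
    //
    //   template<typename T>
    //   static T fetchAddViaCas(T* addr, T val) {
    //       T old = loadSeqCst(addr);
    //       for (;;) {
    //           T prev = compareExchangeSeqCst(addr, old, T(old + val));
    //           if (prev == old)
    //               return old;   // our update was installed
    //           old = prev;       // lost a race; retry with the observed value
    //       }
    //   }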

    // The following functions are defined for T = int8_t, uint8_t,
    // int16_t, uint16_t, int32_t, uint32_t only.

    // Atomically add, subtract, bitwise-AND, bitwise-OR, or bitwise-XOR
    // val into *addr and return the old value of *addr.
    template<typename T>
    static inline T fetchAddSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchSubSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchAndSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchOrSeqCst(T* addr, T val);

    template<typename T>
    static inline T fetchXorSeqCst(T* addr, T val);

    // The SafeWhenRacy functions are to be used when C++ code has to access
    // memory without synchronization and can't guarantee that there
    // won't be a race on the access.

    // Defined for all the integral types as well as for float32 and float64.
    template<typename T>
    static inline T loadSafeWhenRacy(T* addr);

    // Defined for all the integral types as well as for float32 and float64.
    template<typename T>
    static inline void storeSafeWhenRacy(T* addr, T val);

    // Replacement for memcpy().
    static inline void memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes);

    // Replacement for memmove().
    static inline void memmoveSafeWhenRacy(void* dest, const void* src, size_t nbytes);
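
    // As an illustration only (not necessarily how any platform does it),
    // the memcpy replacement could be realized byte-by-byte on top of the
    // SafeWhenRacy accesses above; real platforms may use wider accesses,
    // or plain memcpy where that is known to be race-safe:
    //
    //   inline void
    //   js::jit::AtomicOperations::memcpySafeWhenRacy(void* dest, const void* src, size_t nbytes)
    //   {
    //       uint8_t* d = static_cast<uint8_t*>(dest);
    //       const uint8_t* s = static_cast<const uint8_t*>(src);
    //       for (size_t i = 0; i < nbytes; i++)
    //           storeSafeWhenRacy(d + i, loadSafeWhenRacy(const_cast<uint8_t*>(s + i)));
    //   }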

  public:
    // Test lock-freedom for any int32 value.  This implements the
    // Atomics::isLockFree() operation in the Shared Memory and
    // Atomics specification, as follows:
    //
    // 1, 2, and 4 bytes are always lock free (in SpiderMonkey).
    //
    // Lock-freedom for 8 bytes is determined by the platform's
    // isLockfree8().  However, the spec stipulates that isLockFree(8)
    // is true only if there is an integer array that admits atomic
    // operations whose BYTES_PER_ELEMENT=8; at the moment (February
    // 2016) there are no such arrays.
    //
    // There is no lock-freedom for any other values on any platform.
    static inline bool isLockfree(int32_t n);

    // If the return value is true then a call to the 64-bit (8-byte)
    // routines below will work, otherwise those functions will assert in
    // debug builds and may crash in release builds.  (See the code in
    // ../arm for an example.)  The value of this call does not change
    // during execution.
    static inline bool isLockfree8();

    // Execute a full memory barrier (LoadLoad+LoadStore+StoreLoad+StoreStore).
    static inline void fenceSeqCst();

    // All clients should use the APIs that take SharedMem pointers.
    // See above for semantics and acceptable types.

    template<typename T>
    static T loadSeqCst(SharedMem<T*> addr) {
        return loadSeqCst(addr.unwrap());
    }

    template<typename T>
    static void storeSeqCst(SharedMem<T*> addr, T val) {
        return storeSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T exchangeSeqCst(SharedMem<T*> addr, T val) {
        return exchangeSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T compareExchangeSeqCst(SharedMem<T*> addr, T oldval, T newval) {
        return compareExchangeSeqCst(addr.unwrap(), oldval, newval);
    }

    template<typename T>
    static T fetchAddSeqCst(SharedMem<T*> addr, T val) {
        return fetchAddSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchSubSeqCst(SharedMem<T*> addr, T val) {
        return fetchSubSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchAndSeqCst(SharedMem<T*> addr, T val) {
        return fetchAndSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchOrSeqCst(SharedMem<T*> addr, T val) {
        return fetchOrSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T fetchXorSeqCst(SharedMem<T*> addr, T val) {
        return fetchXorSeqCst(addr.unwrap(), val);
    }

    template<typename T>
    static T loadSafeWhenRacy(SharedMem<T*> addr) {
        return loadSafeWhenRacy(addr.unwrap());
    }

    template<typename T>
    static void storeSafeWhenRacy(SharedMem<T*> addr, T val) {
        return storeSafeWhenRacy(addr.unwrap(), val);
    }

    template<typename T>
    static void memcpySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(),
                           src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void memcpySafeWhenRacy(SharedMem<T*> dest, T* src, size_t nbytes) {
        memcpySafeWhenRacy(dest.template cast<void*>().unwrap(), static_cast<void*>(src), nbytes);
    }

    template<typename T>
    static void memcpySafeWhenRacy(T* dest, SharedMem<T*> src, size_t nbytes) {
        memcpySafeWhenRacy(static_cast<void*>(dest), src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void memmoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nbytes) {
        memmoveSafeWhenRacy(dest.template cast<void*>().unwrap(),
                            src.template cast<void*>().unwrap(), nbytes);
    }

    template<typename T>
    static void podCopySafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
        memcpySafeWhenRacy(dest, src, nelem * sizeof(T));
    }

    template<typename T>
    static void podMoveSafeWhenRacy(SharedMem<T*> dest, SharedMem<T*> src, size_t nelem) {
        memmoveSafeWhenRacy(dest, src, nelem * sizeof(T));
    }
};
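
// Typical use from C++ engine code, as an illustrative sketch only; how
// the SharedMem pointer is obtained is elided here:
//
//   SharedMem<int32_t*> addr = ...;   // eg, a cell in a SharedArrayBuffer
//
//   // Atomic read-modify-write, compatible with jitted Atomics.add:
//   int32_t old = AtomicOperations::fetchAddSeqCst(addr, int32_t(1));
//
//   // Unsynchronized but race-safe read, eg for a plain (non-atomic) access:
//   int32_t cur = AtomicOperations::loadSafeWhenRacy(addr);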

/* A data type representing a lock on some region of a
 * SharedArrayRawBuffer's memory, to be used only when the hardware
 * does not provide necessary atomicity (eg, float64 access on ARMv6
 * and some ARMv7 systems).
 */
class RegionLock
{
  public:
    RegionLock() : spinlock(0) {}

    /* Addr is the address to be locked, nbytes the number of bytes we
     * need to lock.  The lock that is taken may cover a larger range
     * of bytes.
     */
    template<size_t nbytes>
    void acquire(void* addr);

    /* Addr is the address to be unlocked, nbytes the number of bytes
     * we need to unlock.  The lock must be held by the calling thread,
     * at the given address and for the number of bytes.
     */
    template<size_t nbytes>
    void release(void* addr);

  private:
    /* For now, a simple spinlock that covers the entire buffer. */
    uint32_t spinlock;
};
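
// As an illustration only, acquire/release could be realized as a simple
// compare-and-swap spinlock over the single lock word (RegionLock is a
// friend of AtomicOperations, so it may use the private primitives; the
// real definitions are platform-specific):
//
//   template<size_t nbytes>
//   inline void
//   RegionLock::acquire(void* addr)
//   {
//       // Spin until the lock word is flipped from 0 (unlocked) to 1 (locked).
//       while (AtomicOperations::compareExchangeSeqCst(&spinlock, uint32_t(0), uint32_t(1)) != 0)
//           ;
//   }
//
//   template<size_t nbytes>
//   inline void
//   RegionLock::release(void* addr)
//   {
//       AtomicOperations::storeSeqCst(&spinlock, uint32_t(0));
//   }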

inline bool
AtomicOperations::isLockfree(int32_t size)
{
    // Keep this in sync with visitAtomicIsLockFree() in jit/CodeGenerator.cpp.

    switch (size) {
      case 1:
        return true;
      case 2:
        return true;
      case 4:
        // The spec requires Atomics.isLockFree(4) to return true.
        return true;
      case 8:
        // The spec requires Atomics.isLockFree(n) to return false
        // unless n is the BYTES_PER_ELEMENT value of some integer
        // TypedArray that admits atomic operations.  At the time of
        // writing (February 2016) there is no such array with n=8.
        // return AtomicOperations::isLockfree8();
        return false;
      default:
        return false;
    }
}

} // namespace jit
} // namespace js

#if defined(JS_CODEGEN_ARM)
# include "jit/arm/AtomicOperations-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/AtomicOperations-arm64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/AtomicOperations-mips-shared.h"
#elif defined(__ppc__) || defined(__PPC__)
# include "jit/none/AtomicOperations-ppc.h"
#elif defined(__sparc__)
# include "jit/none/AtomicOperations-sparc.h"
#elif defined(JS_CODEGEN_NONE)
  // You can disable the JIT with --disable-ion but you must still
  // provide the atomic operations that will be used by the JS engine.
  // When the JIT is disabled the operations are simply safe-for-races
  // C++ realizations of atomics.  These operations cannot be written
  // in portable C++, hence the default here is to crash.  See the
  // top of the file for more guidance.
# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || defined(__PPC64LE__)
#  include "jit/none/AtomicOperations-ppc.h"
# elif defined(__aarch64__)
#  include "jit/arm64/AtomicOperations-arm64.h"
# else
#  include "jit/none/AtomicOperations-none.h" // These MOZ_CRASH() always
# endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# include "jit/x86-shared/AtomicOperations-x86-shared.h"
#else
# error "Atomic operations must be defined for this platform"
#endif

#endif // jit_AtomicOperations_h