|
/* Copyright (c) 2020, MariaDB

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */

#ifdef __cplusplus
#include <atomic>
/**
  A wrapper for std::atomic, defaulting to std::memory_order_relaxed.

  When it comes to atomic loads or stores at std::memory_order_relaxed
  on IA-32 or AMD64, this wrapper is only introducing some constraints
  to the C++ compiler, to prevent some optimizations of loads or
  stores.

  On POWER and ARM, atomic loads and stores involve different instructions
  from normal loads and stores and will thus incur some overhead.

  Because atomic read-modify-write operations will always incur
  overhead, we intentionally do not define
  operator++(), operator--(), operator+=(), operator-=(), or similar,
  to make the overhead stand out in the users of this code.
*/
template <typename Type> class Atomic_relaxed
{
  /** the wrapped value; all default accesses use std::memory_order_relaxed */
  std::atomic<Type> m;
public:
  /** Copy constructor: a relaxed load of rhs, initializing m directly. */
  Atomic_relaxed(const Atomic_relaxed<Type> &rhs)
  : m(rhs.m.load(std::memory_order_relaxed)) {}
  /** Construct from a plain value. */
  Atomic_relaxed(Type val) : m(val) {}
  /** Default constructor: the value is intentionally left uninitialized,
  like a plain Type; callers must assign before reading. */
  Atomic_relaxed() {}

  /** @return the value, via a relaxed atomic load */
  operator Type() const { return m.load(std::memory_order_relaxed); }
  /** Relaxed atomic store.
  @param val  value to store
  @return val, to allow chained assignment */
  Type operator=(const Type val)
  { m.store(val, std::memory_order_relaxed); return val; }
  /** Relaxed load of rhs followed by a relaxed store into *this. */
  Type operator=(const Atomic_relaxed<Type> &rhs) { return *this= Type{rhs}; }
  /** Atomically add i.
  @return the previous value */
  Type fetch_add(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_add(i, o); }
  /** Atomically subtract i.
  @return the previous value */
  Type fetch_sub(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_sub(i, o); }
  /** Atomically OR in i (only instantiable for integral Type).
  @return the previous value */
  Type fetch_or(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_or(i, o); }
  /** Atomically AND in i (only instantiable for integral Type).
  @return the previous value */
  Type fetch_and(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.fetch_and(i, o); }
  /** Atomic compare-and-swap.
  @param i1  expected value; on failure, overwritten with the actual value
  @param i2  desired new value
  @return whether the exchange took place */
  bool compare_exchange_strong(Type& i1, const Type i2,
                               std::memory_order o1= std::memory_order_relaxed,
                               std::memory_order o2= std::memory_order_relaxed)
  { return m.compare_exchange_strong(i1, i2, o1, o2); }
  /** Atomically replace the value with i.
  @return the previous value */
  Type exchange(const Type i, std::memory_order o= std::memory_order_relaxed)
  { return m.exchange(i, o); }
};
#endif /* __cplusplus */
0 commit comments