#if defined(__cpp_exceptions)
    if (ptr == nullptr)
      throw std::bad_alloc();
#endif
    // ...
    // Publish the freshly built buffer table to readers.
    buffers_.store(newBuffers, std::memory_order_release);
    // ...
    T** buffers = buffers_.load(std::memory_order_acquire);
    // ... (grow path: reserve delta slots by advancing pos_ with a CAS loop,
    // then default-construct the newly reserved objects)
      newPos = oldPos + delta;
    } while (!std::atomic_compare_exchange_weak_explicit(
        &pos_, &oldPos, newPos,
        std::memory_order_release, std::memory_order_relaxed));

    constructObjects(oldPos, oldPos + delta);
  inline const T& operator[](const Index index) const {
    // Split the global index into a buffer index and an offset within that buffer.
    const Index bufIndex = index >> kLog2BuffSize;
    const Index i = index & kMask;

    return buffers_.load(std::memory_order_acquire)[bufIndex][i];
  }
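  // Example of the decomposition above: with kLog2BuffSize == 10 (1024 objects
  // per buffer, so kMask == 1023), index 2500 falls in buffer 2500 >> 10 == 2
  // at offset 2500 & 1023 == 452.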
  inline T& operator[](const Index index) {
    // Reuse the const overload and cast away constness.
    return const_cast<T&>(
        const_cast<const ConcurrentObjectArena<T, Index, alignment>&>(*this)[index]);
  }
  Index size() const {  // accessor name assumed; only the body is visible in this excerpt
    return pos_.load(std::memory_order_relaxed);
  }
  Index capacity() const {
    return allocatedSize_.load(std::memory_order_relaxed);
  }
  Index numBuffers() const {
    // ... (returns the number of currently allocated buffers; body omitted here)
  }
  const T* getBuffer(const Index index) const {
    return buffers_.load(std::memory_order_acquire)[index];
  }
  T* getBuffer(const Index index) {
    return buffers_.load(std::memory_order_acquire)[index];
  }
  Index getBufferSize(const Index index) const {
    const Index numBuffs = numBuffers();
    assert(index < numBuffs);

    // Every buffer except the last one is completely full.
    if (index < numBuffs - 1)
      return kBufferSize;

    return pos_.load(std::memory_order_relaxed) - (kBufferSize * (numBuffs - 1));
  }
  // Swaps two arenas. The members are exchanged one at a time, so this is only
  // safe when neither arena is being accessed concurrently.
  friend void swap(
      ConcurrentObjectArena<T, Index, alignment>& lhs,
      ConcurrentObjectArena<T, Index, alignment>& rhs) noexcept {
    using std::swap;

    swap(lhs.kLog2BuffSize, rhs.kLog2BuffSize);
    swap(lhs.kBufferSize, rhs.kBufferSize);
    swap(lhs.kMask, rhs.kMask);

    const Index rhs_pos = rhs.pos_.load(std::memory_order_relaxed);
    rhs.pos_.store(lhs.pos_.load(std::memory_order_relaxed), std::memory_order_relaxed);
    lhs.pos_.store(rhs_pos, std::memory_order_relaxed);

    const Index rhs_allocatedSize = rhs.allocatedSize_.load(std::memory_order_relaxed);
    rhs.allocatedSize_.store(
        lhs.allocatedSize_.load(std::memory_order_relaxed), std::memory_order_relaxed);
    lhs.allocatedSize_.store(rhs_allocatedSize, std::memory_order_relaxed);

    T** const rhs_buffers = rhs.buffers_.load(std::memory_order_acquire);
    rhs.buffers_.store(lhs.buffers_.load(std::memory_order_acquire), std::memory_order_release);
    lhs.buffers_.store(rhs_buffers, std::memory_order_release);

    swap(lhs.buffersSize_, rhs.buffersSize_);
    swap(lhs.buffersPos_, rhs.buffersPos_);
    swap(lhs.deleteLater_, rhs.deleteLater_);
  }
  void allocateBuffer() {
    void* ptr = detail::alignedMalloc(kBufferSize * sizeof(T), alignment);
#if defined(__cpp_exceptions)
    if (ptr == nullptr)
      throw std::bad_alloc();
#endif

    if (buffersPos_ < buffersSize_) {
      // There is still a free slot in the current buffer table.
      buffers_.load(std::memory_order_acquire)[buffersPos_++] = static_cast<T*>(ptr);
    } else {
      // Grow the buffer table. The old table is kept in deleteLater_ because
      // concurrent readers may still hold a pointer to it.
      const Index oldBuffersSize = buffersSize_;
      T** oldBuffers = buffers_.load(std::memory_order_acquire);

      buffersSize_ = oldBuffersSize == 0 ? 2 : oldBuffersSize * 2;
      T** newBuffers = new T*[buffersSize_];

      if (oldBuffers != nullptr) {
        std::memcpy(newBuffers, oldBuffers, sizeof(T*) * oldBuffersSize);
        deleteLater_.push_back(oldBuffers);
      }

      newBuffers[buffersPos_++] = static_cast<T*>(ptr);
      buffers_.store(newBuffers, std::memory_order_release);
    }
  }
  void constructObjects(const Index beginIndex, const Index endIndex) {
    const Index startBuffer = beginIndex >> kLog2BuffSize;
    const Index endBuffer = endIndex >> kLog2BuffSize;

    Index bufStart = beginIndex & kMask;
    for (Index b = startBuffer; b <= endBuffer; ++b) {
      T* buf = buffers_.load(std::memory_order_acquire)[b];
      const Index bufEnd = b == endBuffer ? (endIndex & kMask) : kBufferSize;
      for (Index i = bufStart; i < bufEnd; ++i)
        new (buf + i) T();  // default-construct each new object in place
      bufStart = 0;         // every buffer after the first is filled from offset 0
    }
  }
  std::mutex resizeMutex_;              // guards the resize/grow path

  std::atomic<Index> pos_;              // number of constructed objects
  std::atomic<Index> allocatedSize_;    // total capacity across all buffers

  std::atomic<T**> buffers_;            // table of per-buffer base pointers

  std::vector<T**> deleteLater_;        // retired buffer tables, kept alive for readers
  // ... (kLog2BuffSize, kBufferSize, kMask, buffersSize_ and buffersPos_ are
  // declared in lines omitted from this excerpt)
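  // A minimal usage sketch (assumptions: the constructor takes the log2 of the
  // per-buffer size, and a grow() member wraps the CAS loop shown earlier and
  // returns the index of the first newly constructed element; neither appears
  // in this excerpt, so those names are illustrative only):
  //
  //   ConcurrentObjectArena<int> arena(10);   // 1024 ints per buffer
  //   const auto first = arena.grow(16);      // reserve and construct 16 ints
  //   arena[first] = 42;                      // indexed access, as shown above
  //   assert(arena.capacity() >= arena.size());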