          ::detail::log2i(minBuffSize) +
              ((Index{1} << ::detail::log2i(minBuffSize)) == minBuffSize ? 0 : 1)),
        kBufferSize(Index{1} << kLog2BuffSize),
        kMask((Index{1} << kLog2BuffSize) - 1),
    allocatedSize_.store(kBufferSize, std::memory_order_relaxed);
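    // Worked example of the sizing math above (illustrative values, assuming
    // ::detail::log2i returns floor(log2(x))): minBuffSize = 1000 gives
    // log2i(1000) == 9 and (1 << 9) != 1000, so kLog2BuffSize = 10,
    // kBufferSize = 1024 and kMask = 0x3FF; the arena starts out with room
    // for 1024 elements.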
      : kLog2BuffSize(other.kLog2BuffSize),
        kBufferSize(other.kBufferSize),
        kMask(other.kMask),
        pos_(other.pos_.load(std::memory_order_relaxed)),
        allocatedSize_(other.allocatedSize_.load(std::memory_order_relaxed)),
        buffersSize_(other.buffersSize_),
        buffersPos_(other.buffersPos_) {
    T** otherBuffers = other.buffers_.load(std::memory_order_acquire);
    T** newBuffers = new T*[buffersSize_];
    for (Index i = 0; i < buffersPos_; ++i) {  // copy only the buffers that were actually allocated
      void* ptr = detail::alignedMalloc(kBufferSize * sizeof(T), alignment);
#if defined(__cpp_exceptions)
      if (ptr == nullptr)
        throw std::bad_alloc();
#endif
      std::memcpy(ptr, otherBuffers[i], kBufferSize * sizeof(T));
      newBuffers[i] = static_cast<T*>(ptr);
    }
    buffers_.store(newBuffers, std::memory_order_release);  // publish the copied buffer table
    // Free each allocated element buffer, then the pointer tables retired by
    // allocateBuffer().
    T** buffers = buffers_.load(std::memory_order_acquire);
    for (Index i = 0; i < buffersPos_; i++)
      detail::alignedFree(buffers[i]);
    for (T** p : deleteLater_)
      delete[] p;
  // Atomically reserves `delta` consecutive slots, growing the backing storage
  // if needed, and constructs the new elements.
  Index grow_by(const Index delta) {
    Index oldPos = pos_.load(std::memory_order_relaxed);
    Index newPos;
    do {
      Index curSize = allocatedSize_.load(std::memory_order_acquire);
      if (oldPos + delta >= curSize) {
        // Not enough room: serialize growth and add buffers until the request fits.
        const std::lock_guard<std::mutex> guard(resizeMutex_);
        curSize = allocatedSize_.load(std::memory_order_relaxed);
        while (oldPos + delta >= curSize) {
          allocateBuffer();
          allocatedSize_.store(curSize + kBufferSize, std::memory_order_release);
          curSize = curSize + kBufferSize;
        }
      }
      newPos = oldPos + delta;
    } while (!std::atomic_compare_exchange_weak_explicit(
        &pos_, &oldPos, newPos, std::memory_order_release, std::memory_order_relaxed));
    // The range [oldPos, oldPos + delta) now belongs exclusively to this caller,
    // so the new elements can be constructed outside the lock.
    constructObjects(oldPos, oldPos + delta);
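    // Usage sketch (illustrative, not from the original source; assumes grow_by
    // returns the index of the first newly reserved slot):
    //   const Index first = arena.grow_by(16);  // safe to call from many threads
    //   arena[first] = ...;                     // each caller touches only its own range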
  // Element access: the flat index is split into a buffer number and an offset
  // within that buffer using the power-of-two buffer size.
  inline const T& operator[](const Index index) const {
    const Index bufIndex = index >> kLog2BuffSize;
    const Index i = index & kMask;

    return buffers_.load(std::memory_order_acquire)[bufIndex][i];
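    // Example (illustrative): with kLog2BuffSize == 10 (kBufferSize == 1024,
    // kMask == 0x3FF), index 2050 maps to buffer 2050 >> 10 == 2 and offset
    // 2050 & 0x3FF == 2.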
  inline T& operator[](const Index index) {
    return const_cast<T&>(
        const_cast<const ConcurrentObjectArena<T, Index, alignment>&>(*this)[index]);
    return pos_.load(std::memory_order_relaxed);
  Index capacity() const {
    return allocatedSize_.load(std::memory_order_relaxed);
  Index numBuffers() const {
  const T* getBuffer(const Index index) const {
    return buffers_.load(std::memory_order_acquire)[index];
  T* getBuffer(const Index index) {
    return buffers_.load(std::memory_order_acquire)[index];
  // Number of live elements in buffer `index`; only the last buffer can be
  // partially filled.
  Index getBufferSize(const Index index) const {
    const Index numBuffs = numBuffers();
    assert(index < numBuffs);

    if (index < numBuffs - 1)
      return kBufferSize;

    return pos_.load(std::memory_order_relaxed) - (kBufferSize * (numBuffs - 1));
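  // Example (illustrative): with kBufferSize == 1024, three buffers and a
  // pos_ of 2500, buffers 0 and 1 report 1024 elements each and buffer 2
  // reports 2500 - 2048 = 452.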
      ConcurrentObjectArena<T, Index, alignment>& lhs,
      ConcurrentObjectArena<T, Index, alignment>& rhs) noexcept {
    using std::swap;
    swap(lhs.kLog2BuffSize, rhs.kLog2BuffSize);
    swap(lhs.kBufferSize, rhs.kBufferSize);
    swap(lhs.kMask, rhs.kMask);
    const Index rhs_pos = rhs.pos_.load(std::memory_order_relaxed);
    rhs.pos_.store(lhs.pos_.load(std::memory_order_relaxed), std::memory_order_relaxed);
    lhs.pos_.store(rhs_pos, std::memory_order_relaxed);

    const Index rhs_allocatedSize = rhs.allocatedSize_.load(std::memory_order_relaxed);
    rhs.allocatedSize_.store(
        lhs.allocatedSize_.load(std::memory_order_relaxed), std::memory_order_relaxed);
    lhs.allocatedSize_.store(rhs_allocatedSize, std::memory_order_relaxed);
    T** const rhs_buffers = rhs.buffers_.load(std::memory_order_acquire);
    rhs.buffers_.store(lhs.buffers_.load(std::memory_order_acquire), std::memory_order_release);
    lhs.buffers_.store(rhs_buffers, std::memory_order_release);
    swap(lhs.buffersSize_, rhs.buffersSize_);
    swap(lhs.buffersPos_, rhs.buffersPos_);
    swap(lhs.deleteLater_, rhs.deleteLater_);
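    // Note: the fields are exchanged one at a time, so the swap as a whole is
    // not atomic; callers presumably ensure neither arena is being accessed
    // concurrently while it runs.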
  // Adds one more element buffer, growing the buffer-pointer table if it is full.
  void allocateBuffer() {
    void* ptr = detail::alignedMalloc(kBufferSize * sizeof(T), alignment);
#if defined(__cpp_exceptions)
    if (ptr == nullptr)
      throw std::bad_alloc();
#endif
    if (buffersPos_ < buffersSize_) {
      // There is still a free slot in the current pointer table.
      buffers_.load(std::memory_order_acquire)[buffersPos_++] = static_cast<T*>(ptr);
    } else {
      // The pointer table is full: allocate a larger one (doubling) and copy
      // the existing entries over.
      const Index oldBuffersSize = buffersSize_;
      T** oldBuffers = buffers_.load(std::memory_order_acquire);

      buffersSize_ = oldBuffersSize == 0 ? 2 : oldBuffersSize * 2;
      T** newBuffers = new T*[buffersSize_];
      if (oldBuffers != nullptr) {
        std::memcpy(newBuffers, oldBuffers, sizeof(T*) * oldBuffersSize);
        deleteLater_.push_back(oldBuffers);  // defer freeing the old table
      }
      newBuffers[buffersPos_++] = static_cast<T*>(ptr);
      buffers_.store(newBuffers, std::memory_order_release);  // publish the new table
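      // Design note (inferred from the code, not stated in the original):
      // concurrent readers may still hold a pointer to the old table obtained
      // via buffers_.load(), so it cannot be freed here; deferring deletion to
      // the destructor keeps those readers valid.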
  // Constructs the elements in [beginIndex, endIndex), walking each buffer the
  // range touches.
  void constructObjects(const Index beginIndex, const Index endIndex) {
    const Index startBuffer = beginIndex >> kLog2BuffSize;
    const Index endBuffer = endIndex >> kLog2BuffSize;

    Index bufStart = beginIndex & kMask;
    for (Index b = startBuffer; b <= endBuffer; ++b) {
      T* buf = buffers_.load(std::memory_order_acquire)[b];
      const Index bufEnd = b == endBuffer ? (endIndex & kMask) : kBufferSize;
      for (Index i = bufStart; i < bufEnd; ++i)
        new (buf + i) T();
      bufStart = 0;  // every buffer after the first starts at offset 0
  std::mutex resizeMutex_;            // serializes buffer allocation during grow_by()

  std::atomic<Index> pos_;            // number of element slots handed out so far
  std::atomic<Index> allocatedSize_;  // total capacity across all allocated buffers

  std::atomic<T**> buffers_;          // table of per-buffer element pointers

  std::vector<T**> deleteLater_;      // superseded pointer tables, freed in the destructor
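  // Sketch of bulk iteration over the arena's storage (illustrative only;
  // `process` is a hypothetical callback):
  //   for (Index b = 0; b < arena.numBuffers(); ++b) {
  //     T* buf = arena.getBuffer(b);
  //     for (Index i = 0; i < arena.getBufferSize(b); ++i)
  //       process(buf[i]);
  //   }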