// dispenso 1.5.1 — a library for task parallelism.
// small_vector.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 *
 * This source code is licensed under the MIT license found in the
 * LICENSE file in the root directory of this source tree.
 */

#pragma once

#include <cassert>
#include <cstddef>
#include <initializer_list>
#include <new>
#include <utility>

namespace dispenso {
/**
 * A dynamically sized vector that stores up to N elements inline, avoiding
 * heap allocation until the size exceeds N.
 *
 * The interface is a subset of std::vector.  The high bit of size_ records
 * whether storage is inline or on the heap (see the private section).
 *
 * Unlike std::vector, moving the container moves individual elements while
 * storage is inline, so pointers/iterators into an inline SmallVector are
 * invalidated by moving the container itself.
 **/
template <typename T, size_t N = 4>
class SmallVector {
  static_assert(N > 0, "SmallVector requires at least 1 inline element. Use std::vector for N=0.");
  static_assert(N < 65536, "SmallVector inline capacity is too large. Use std::vector instead.");

 public:
  using value_type = T;
  using size_type = size_t;
  using difference_type = std::ptrdiff_t;
  using reference = T&;
  using const_reference = const T&;
  using pointer = T*;
  using const_pointer = const T*;
  using iterator = T*;
  using const_iterator = const T*;

  /**
   * Construct an empty SmallVector (inline mode; performs no allocation).
   **/
  SmallVector() noexcept : size_(0) {}

  /**
   * Construct with count value-initialized elements.
   *
   * @param count The initial number of elements.
   **/
  explicit SmallVector(size_type count) : size_(0) {
    resize(count);
  }

  /**
   * Construct with count copies of value.
   *
   * @param count The initial number of elements.
   * @param value The value copied into each element.
   **/
  SmallVector(size_type count, const T& value) : size_(0) {
    resize(count, value);
  }

  /**
   * Construct from an initializer list.
   *
   * @param init The elements to copy into the vector.
   **/
  SmallVector(std::initializer_list<T> init) : size_(0) {
    ensureCapacity(init.size());
    for (const auto& v : init) {
      emplace_back(v);
    }
  }

  /** Copy constructor: deep-copies other's elements. **/
  SmallVector(const SmallVector& other) : size_(0) {
    ensureCapacity(other.rawSize());
    for (const auto& v : other) {
      emplace_back(v);
    }
  }

  /**
   * Move constructor.  Heap storage is stolen in O(1); inline elements are
   * moved one at a time.  other is left empty (and, if it was in heap mode,
   * no longer owns the heap buffer).
   **/
  SmallVector(SmallVector&& other) noexcept : size_(0) {
    if (other.isInline()) {
      for (size_type i = 0; i < other.rawSize(); ++i) {
        new (inlineData() + i) T(std::move(other.inlineData()[i]));
        other.inlineData()[i].~T();
      }
    } else {
      storage_.heap_.ptr = other.storage_.heap_.ptr;
      storage_.heap_.capacity = other.storage_.heap_.capacity;
      size_ |= kHeapBit;
    }
    size_ = (size_ & kHeapBit) | other.rawSize();
    // Resetting other's size clears its heap bit, so its destructor will not
    // free the buffer we just took ownership of.
    other.size_ = 0;
  }

  ~SmallVector() {
    destroyAll();
  }

  /** Copy assignment: destroys current contents, then deep-copies other. **/
  SmallVector& operator=(const SmallVector& other) {
    if (this != &other) {
      destroyAll();
      size_ = 0;
      ensureCapacity(other.rawSize());
      for (const auto& v : other) {
        emplace_back(v);
      }
    }
    return *this;
  }

  /** Move assignment: destroys current contents, then takes other's. **/
  SmallVector& operator=(SmallVector&& other) noexcept {
    if (this != &other) {
      destroyAll();
      size_ = 0;

      if (other.isInline()) {
        for (size_type i = 0; i < other.rawSize(); ++i) {
          new (inlineData() + i) T(std::move(other.inlineData()[i]));
          other.inlineData()[i].~T();
        }
      } else {
        storage_.heap_.ptr = other.storage_.heap_.ptr;
        storage_.heap_.capacity = other.storage_.heap_.capacity;
        size_ |= kHeapBit;
      }
      size_ = (size_ & kHeapBit) | other.rawSize();
      other.size_ = 0;
    }
    return *this;
  }

  // --- Element Access ---

  /** Unchecked element access; pos must be < size(). **/
  reference operator[](size_type pos) {
    return data()[pos];
  }
  /** Unchecked element access; pos must be < size(). **/
  const_reference operator[](size_type pos) const {
    return data()[pos];
  }
  /** First element; the vector must be non-empty. **/
  reference front() {
    return data()[0];
  }
  /** First element; the vector must be non-empty. **/
  const_reference front() const {
    return data()[0];
  }
  /** Last element; the vector must be non-empty. **/
  reference back() {
    return data()[rawSize() - 1];
  }
  /** Last element; the vector must be non-empty. **/
  const_reference back() const {
    return data()[rawSize() - 1];
  }

  /** Pointer to the contiguous element storage (inline or heap). **/
  pointer data() noexcept {
    return isInline() ? inlineData() : storage_.heap_.ptr;
  }
  /** Pointer to the contiguous element storage (inline or heap). **/
  const_pointer data() const noexcept {
    return isInline() ? inlineData() : storage_.heap_.ptr;
  }

  // --- Iterators ---

  iterator begin() noexcept {
    return data();
  }
  const_iterator begin() const noexcept {
    return data();
  }
  const_iterator cbegin() const noexcept {
    return data();
  }
  iterator end() noexcept {
    return data() + rawSize();
  }
  const_iterator end() const noexcept {
    return data() + rawSize();
  }
  const_iterator cend() const noexcept {
    return data() + rawSize();
  }

  // --- Capacity ---

  /** True if the vector holds no elements. **/
  bool empty() const noexcept {
    return rawSize() == 0;
  }
  /** Number of elements currently stored. **/
  size_type size() const noexcept {
    return rawSize();
  }

  /**
   * Number of elements the current storage can hold without reallocating
   * (N while inline, the heap capacity otherwise).
   **/
  size_type capacity() const noexcept {
    return isInline() ? N : storage_.heap_.capacity;
  }

  /**
   * Ensure capacity for at least newCap elements.  Never shrinks and never
   * returns to inline mode.
   **/
  void reserve(size_type newCap) {
    ensureCapacity(newCap);
  }

  // --- Modifiers ---

  /**
   * Destroy all elements.  Capacity is unchanged: heap storage, if any, is
   * retained for reuse (the heap bit in size_ is preserved).
   **/
  void clear() noexcept {
    destroyAll();
    size_ = 0;
  }

  /** Append a copy of value, growing storage if needed. **/
  void push_back(const T& value) {
    emplace_back(value);
  }
  /** Append value by move, growing storage if needed. **/
  void push_back(T&& value) {
    emplace_back(std::move(value));
  }

  /**
   * Construct an element in place at the end, growing storage if needed.
   *
   * It is safe for args to reference an element of this vector (e.g.
   * v.push_back(v[0])): when growth is required, the new element is
   * constructed into the fresh buffer before any existing element is moved
   * or destroyed.
   *
   * @param args Arguments forwarded to T's constructor.
   * @return A reference to the newly constructed element.
   **/
  template <typename... Args>
  reference emplace_back(Args&&... args) {
    size_type sz = rawSize();
    if (sz == capacity()) {
      return growAndEmplaceBack(std::forward<Args>(args)...);
    }
    T* ptr = data();
    new (ptr + sz) T(std::forward<Args>(args)...);
    // Increment preserves heap bit naturally
    ++size_;
    assert(rawSize() > 0 && "Size overflow into heap bit");
    return ptr[sz];
  }

  /** Destroy the last element; the vector must be non-empty. **/
  void pop_back() {
    T* ptr = data();
    size_type sz = rawSize();
    ptr[sz - 1].~T();
    // Decrement preserves heap bit naturally
    --size_;
  }

  /**
   * Resize to count elements.  New elements are value-initialized; excess
   * elements are destroyed.  Never reduces capacity.
   **/
  void resize(size_type count) {
    size_type sz = rawSize();
    if (count > sz) {
      ensureCapacity(count);
      T* ptr = data();
      for (size_type i = sz; i < count; ++i) {
        new (ptr + i) T();
      }
      setSize(count);
    } else if (count < sz) {
      T* ptr = data();
      for (size_type i = count; i < sz; ++i) {
        ptr[i].~T();
      }
      setSize(count);
    }
  }

  /**
   * Resize to count elements, copy-constructing new elements from value.
   * Excess elements are destroyed.  Never reduces capacity.
   *
   * @param count The new size.
   * @param value The fill value.  Precondition: value must not refer to an
   *        element of this vector when growth is required, since growth
   *        relocates (and invalidates) existing elements.
   **/
  void resize(size_type count, const T& value) {
    size_type sz = rawSize();
    if (count > sz) {
      ensureCapacity(count);
      T* ptr = data();
      for (size_type i = sz; i < count; ++i) {
        new (ptr + i) T(value);
      }
      setSize(count);
    } else if (count < sz) {
      T* ptr = data();
      for (size_type i = count; i < sz; ++i) {
        ptr[i].~T();
      }
      setSize(count);
    }
  }

  /**
   * Erase the element at pos, shifting later elements left by one.
   *
   * @param pos A valid, dereferenceable iterator into this vector.
   * @return Iterator to the element following the erased one.
   **/
  iterator erase(const_iterator pos) {
    T* ptr = data();
    size_type sz = rawSize();
    size_type index = pos - ptr;

    for (size_type i = index; i + 1 < sz; ++i) {
      ptr[i] = std::move(ptr[i + 1]);
    }
    ptr[sz - 1].~T();
    --size_; // Preserves heap bit
    return data() + index;
  }

 private:
  // High bit of size_ tracks heap vs inline mode.
  // Since kHeapBit >> N, comparisons like size_ <= N and size_ < N
  // are naturally false when the heap bit is set, so isInline() and
  // many internal checks work without masking.
  static constexpr size_type kHeapBit = size_type(1) << (sizeof(size_type) * 8 - 1);
  static constexpr size_type kSizeMask = ~kHeapBit;

  bool isInline() const noexcept {
    return (size_ & kHeapBit) == 0;
  }

  size_type rawSize() const noexcept {
    return size_ & kSizeMask;
  }

  // Set the size portion, preserving the heap bit.
  void setSize(size_type s) noexcept {
    assert((s & kHeapBit) == 0 && "Size overflow into heap bit");
    size_ = (size_ & kHeapBit) | s;
  }

  T* inlineData() noexcept {
    return reinterpret_cast<T*>(&storage_.inline_);
  }
  const T* inlineData() const noexcept {
    return reinterpret_cast<const T*>(&storage_.inline_);
  }

  // Destroy all elements and free heap storage if present.  Does not reset
  // size_; callers that keep the object alive must do that themselves.
  void destroyAll() noexcept {
    T* ptr = data();
    size_type sz = rawSize();
    for (size_type i = 0; i < sz; ++i) {
      ptr[i].~T();
    }
    if (!isInline()) {
      ::operator delete(storage_.heap_.ptr);
    }
  }

  // Allocate raw storage for cap elements.
  // NOTE(review): plain ::operator new guarantees at most
  // alignof(std::max_align_t); over-aligned T would need the
  // std::align_val_t overload — confirm whether such T is intended.
  static T* allocate(size_type cap) {
    return static_cast<T*>(::operator new(cap * sizeof(T)));
  }

  // Grow to heap storage with the specified capacity.
  // Moves existing elements, frees old heap if applicable, sets heap bit.
  void growToHeap(size_type newCap) {
    T* newData = allocate(newCap);
    T* oldData = data();
    size_type sz = rawSize();

    for (size_type i = 0; i < sz; ++i) {
      new (newData + i) T(std::move(oldData[i]));
      oldData[i].~T();
    }

    if (!isInline()) {
      ::operator delete(storage_.heap_.ptr);
    }

    storage_.heap_.ptr = newData;
    storage_.heap_.capacity = newCap;
    size_ = kHeapBit | sz;
  }

  // Double the capacity and append one element in a single step.  The new
  // element is constructed into the new buffer *before* old elements are
  // moved and destroyed, so args may safely alias an existing element
  // (e.g. v.push_back(v[0])) — growing first would read a moved-from or
  // freed element.
  template <typename... Args>
  reference growAndEmplaceBack(Args&&... args) {
    size_type sz = rawSize();
    size_type newCap = capacity() * 2;
    T* newData = allocate(newCap);
    try {
      new (newData + sz) T(std::forward<Args>(args)...);
    } catch (...) {
      ::operator delete(newData);
      throw;
    }
    T* oldData = data();
    for (size_type i = 0; i < sz; ++i) {
      new (newData + i) T(std::move(oldData[i]));
      oldData[i].~T();
    }
    if (!isInline()) {
      ::operator delete(storage_.heap_.ptr);
    }
    storage_.heap_.ptr = newData;
    storage_.heap_.capacity = newCap;
    size_ = kHeapBit | (sz + 1);
    return newData[sz];
  }

  // Ensure room for newCap elements; never shrinks or returns to inline.
  void ensureCapacity(size_type newCap) {
    if (newCap <= N && isInline()) {
      return;
    }
    if (isInline()) {
      growToHeap(newCap);
    } else if (newCap > storage_.heap_.capacity) {
      growToHeap(newCap);
    }
  }

  // Low bits: element count.  High bit: heap-mode flag (kHeapBit).
  size_type size_;

  struct HeapStorage {
    T* ptr;
    size_type capacity;
  };

  // Inline buffer and heap bookkeeping share storage; the active member is
  // determined by the kHeapBit flag in size_.
  union Storage {
    alignas(T) unsigned char inline_[sizeof(T) * N];
    HeapStorage heap_;
    Storage() noexcept {}
    ~Storage() {}
  } storage_;
};

} // namespace dispenso
// (Trailing Doxygen member-index residue removed: it duplicated the member
// signatures of SmallVector declared above and was not valid C++.)