/*
 * (c) Copyright 2014, Hewlett-Packard Development Company, LP
 */
#ifndef __W_GC_POOL_FOREST_H
#define __W_GC_POOL_FOREST_H

#include <cstdint>
#include <memory>
#include "AtomicCounter.hpp"
#include "w_defines.h"
#include "w_debug.h"
#include "lsn.h"

template<class T> class GcPointer;

template<class T>
struct GcSegment;
template<class T>
struct GcGeneration;
template<class T>
struct GcPoolForest;

/** Status bits (mark-for-death flag and ABA counter) in gc_pointer. */
typedef int32_t gc_status;

/** ABA counter to protect lock-free pointer updates against address reuse. */
typedef uint32_t gc_aba;

/** Generation number. Generation 0 is reserved as the invalid (NULL) generation. */
typedef uint8_t gc_generation;

const size_t GC_MAX_GENERATIONS = (1 << (sizeof(gc_generation) * 8));

/** Segment index within a generation. */
typedef uint8_t gc_segment;

const size_t GC_MAX_SEGMENTS = (1 << (sizeof(gc_segment) * 8));

/** Object offset within a segment. */
typedef uint16_t gc_offset;

const size_t GC_MAX_OFFSETS = (1 << (sizeof(gc_offset) * 8));

/** Identifier of a thread, used to mark segment ownership. */
typedef uint64_t gc_thread_id;

/**
 * A portable and logical pointer with mark-for-death, ABA counter, and
 * generation/segment bits, all packed into one 64-bit word.
 */
union gc_pointer_raw {
    // so far 4/1/1/2. Maybe 2/2/2/2 might make sense if we need more generations/segments.
    struct {
        gc_status     status;
        gc_generation generation;
        gc_segment    segment;
        gc_offset     offset;
    } components;

    /** The whole pointer as a single 64-bit integer for atomic operations. */
    uint64_t word;
};

// csauer: gcc 6+ needs this
inline bool operator==(const gc_pointer_raw& a, const gc_pointer_raw& b) {
    return a.word == b.word;
}

inline bool operator!=(const gc_pointer_raw& a, const gc_pointer_raw& b) {
    return a.word != b.word;
}

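// The atomic operations on GcPointer below treat "components" and "word"
// interchangeably, so the 4/1/1/2 packing must fill the 64-bit word exactly,
// with no padding. A compile-time sanity check (a sketch added here; not in
// the original header):
static_assert(sizeof(gc_pointer_raw) == sizeof(uint64_t),
              "gc_pointer_raw components must pack into a single 64-bit word");
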
/**
 * Wrapper class for gc_pointer_raw.
 */
template<class T>
class GcPointer {
public:
    /** Constructs a NULL pointer. */
    GcPointer() {
        _raw.word = 0;
    }

    /** Constructs from the given raw value. */
    explicit GcPointer(gc_pointer_raw raw) : _raw(raw) {}

    /** Copy constructor. */
    GcPointer(const GcPointer& other) {
        operator=(other);
    }

    /** Copy assignment. */
    GcPointer& operator=(const GcPointer& other) {
        // ACCESS_ONCE semantics to make it at least regular.
        _raw.word = static_cast<const volatile gc_pointer_raw&>(other._raw).word;
        return *this;
    }

    /** Full equality, including the mark and ABA status bits. */
    bool operator==(const GcPointer& other) const {
        return _raw == other._raw;
    }

    /** Full inequality, including the mark and ABA status bits. */
    bool operator!=(const GcPointer& other) const {
        return _raw != other._raw;
    }

    /** Address-only equality: compares generation/segment/offset, ignoring status bits. */
    bool is_equal_address(const GcPointer& other) const {
        return _raw.components.generation == other._raw.components.generation
            && _raw.components.segment == other._raw.components.segment
            && _raw.components.offset == other._raw.components.offset;
    }

    /** Returns the object this pointer points to in the given pool. */
    T* dereference(GcPoolForest<T>& pool) const;

    /** Sets or clears the mark-for-death bit, preserving the ABA counter. */
    void set_mark(bool on) {
        _raw.components.status = get_aba() | (on ? 0x80000000 : 0);
    }

    /** Tells whether the mark-for-death bit is on (the sign bit of status). */
    bool is_marked() const {
        return _raw.components.status < 0;
    }

    /** Returns the ABA counter (the lower 31 bits of status). */
    gc_aba get_aba() const {
        return static_cast<gc_aba>(_raw.components.status & 0x7FFFFFFF);
    }

    /** Sets the ABA counter, preserving the mark-for-death bit. */
    void set_aba(gc_aba stamp) {
        _raw.components.status = (_raw.components.status & 0x80000000)
            | static_cast<gc_status>(stamp);
    }

    /** Returns the generation bits of this pointer. */
    gc_generation get_generation() const {
        return _raw.components.generation;
    }

    /** Returns the segment bits of this pointer. */
    gc_segment get_segment() const {
        return _raw.components.segment;
    }

    /** Returns the offset bits of this pointer. */
    gc_offset get_offset() const {
        return _raw.components.offset;
    }

    /** Tells whether this is a NULL pointer (generation 0 is invalid). */
    bool is_null() const {
        return get_generation() == 0;
    }

    /** Returns the raw value. */
    gc_pointer_raw& raw() {
        return _raw;
    }

    /** Returns the raw value. */
    gc_pointer_raw raw() const {
        return _raw;
    }

    /**
     * [Atomic] Compare-and-swap of pointer, mark, and ABA counter altogether.
     */
    bool atomic_cas(const GcPointer& expected, const GcPointer& desired) {
        gc_pointer_raw expected_tmp = expected._raw;
        return lintel::unsafe::atomic_compare_exchange_strong<gc_pointer_raw>(
            &_raw, &expected_tmp, desired._raw);
    }

    /** [Atomic] Atomic swap. */
    GcPointer atomic_swap(const GcPointer& new_ptr) {
        return GcPointer(lintel::unsafe::atomic_exchange<gc_pointer_raw>(&_raw, new_ptr._raw));
    }

protected:
    gc_pointer_raw _raw;
};

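// Usage sketch (not part of the original header): a typical lock-free update bumps
// the ABA counter so that a concurrent free-and-reuse of the same address cannot be
// mistaken for "no change". Here "head" stands for some shared GcPointer<MyNode>,
// and MyNode/new_raw are hypothetical:
//
//     GcPointer<MyNode> old_head = head;          // snapshot (regular read)
//     GcPointer<MyNode> new_head(new_raw);        // points to the new target
//     new_head.set_aba(old_head.get_aba() + 1);   // defeat the ABA problem
//     if (head.atomic_cas(old_head, new_head)) {
//         // installed: head was unchanged, even counting reuse of the same address
//     }
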
/**
 * A segment in each generation: a contiguous array of objects, exclusively
 * owned by one thread once occupied.
 */
template<class T>
struct GcSegment {
    GcSegment(gc_offset size) :
        owner(0),
        total_objects(size),
        allocated_objects(0) {
        objects = new T[size];
        w_assert1(objects != nullptr);
    }

    ~GcSegment() {
        delete[] objects;
    }

    /** Resets this segment so a recycled generation can hand it out again. */
    void recycle() {
        owner = 0;
        allocated_objects = 0;
    }

    /** ID of the thread that occupied this segment; 0 if not occupied yet. */
    gc_thread_id owner;

    /** Number of objects this segment can hold. */
    gc_offset total_objects;

    /** Number of objects allocated so far in this segment. */
    gc_offset allocated_objects;

    /** The objects themselves. */
    T* objects;
};

/**
 * A generation of objects.
 */
template<class T>
struct GcGeneration {
    GcGeneration(uint32_t generation_nowrap_arg) :
        retire_suggested(false),
        total_segments(0),
        allocated_segments(0),
        generation_nowrap(generation_nowrap_arg) {}

    ~GcGeneration() {
        DBGOUT1(<<"Destroying a GC Generation " << generation_nowrap
            << ". total_segments=" << total_segments
            << ", allocated_segments=" << allocated_segments);
        for (gc_segment i = 0; i < total_segments; ++i) {
            delete segments[i];
        }
    }

    /** Returns the number of pre-allocated segments not yet occupied by any thread. */
    uint32_t get_free_count() const {
        return total_segments - allocated_segments;
    }

    /**
     * Pre-allocates the given number of segments, each holding segment_size objects.
     * Returns false if this generation already has the maximum number of segments.
     */
    bool preallocate_segments(size_t segment_count, gc_offset segment_size);

    /**
     * Resets this generation for reuse under a new non-wrapped generation number.
     */
    void recycle(uint32_t generation_nowrap_arg) {
        retire_suggested = false;
        generation_nowrap = generation_nowrap_arg;
        for (uint32_t seg = 0; seg < total_segments; ++seg) {
            if (segments[seg]->owner != 0) {
                segments[seg]->recycle();
            }
        }
        allocated_segments = 0;
    }

    /** Softly asks threads to move on to a newer generation. */
    bool retire_suggested;

    /** Number of segments pre-allocated in this generation. */
    uint32_t total_segments;

    /** Number of segments occupied by threads so far. */
    uint32_t allocated_segments;

    /** The non-wrapping number of this generation. */
    uint32_t generation_nowrap;

    /** The segments of this generation. */
    GcSegment<T>* segments[GC_MAX_SEGMENTS];
};

/**
 * Objects managed by GcPoolForest must embed gc_pointer_raw as a member
 * named "gc_pointer"; this struct illustrates the required shape.
 */
struct GcPoolEntry {
    gc_pointer_raw gc_pointer;
};

/**
 * Functor to wake up the GC thread when the pool runs out of pre-allocated segments.
 */
struct GcWakeupFunctor {
    virtual ~GcWakeupFunctor() {}

    virtual void wakeup() = 0;
};

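// Implementation sketch (not in the original header): a functor that signals a
// condition variable on which a dedicated GC thread sleeps. "gc_mutex"/"gc_cond"
// are hypothetical globals:
//
//     struct CondvarWakeup : public GcWakeupFunctor {
//         void wakeup() {
//             ::pthread_mutex_lock(&gc_mutex);
//             ::pthread_cond_signal(&gc_cond);
//             ::pthread_mutex_unlock(&gc_mutex);
//         }
//     };
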
/**
 * Garbage-collected Pool Forest.
 */
template<class T>
struct GcPoolForest {
    GcPoolForest(const char* debug_name, uint32_t desired_gens,
                 size_t initial_segment_count, gc_offset initial_segment_size) :
        name(debug_name),
        head_nowrap(1), // generation=0 is an invalid generation, so we start from 1.
        curr_nowrap(1),
        tail_nowrap(2),
        desired_generations(desired_gens),
        gc_wakeup_functor(nullptr) {
        // We always have at least one active generation.
        generations[0] = nullptr;
        generations[1] = new GcGeneration<T>(1);
        if (initial_segment_count > 0) {
            generations[1]->preallocate_segments(initial_segment_count, initial_segment_size);
        }
        epochs[0].set(0);
        epochs[1].set(0);
    }

    ~GcPoolForest() {
        DBGOUT1(<< name << ": Destroying a GC Pool Forest. head_nowrap=" << head_nowrap
            << ", tail_nowrap=" << tail_nowrap);
        for (size_t i = head_nowrap; i < tail_nowrap; ++i) {
            delete generations[wrap(i)];
        }
    }

    /** Returns the object pointed to by the given pointer. */
    T* resolve_pointer(const GcPointer<T>& pointer) {
        return resolve_pointer(pointer.raw());
    }

    /** Returns the object pointed to by the given raw pointer. */
    T* resolve_pointer(gc_pointer_raw pointer);

    GcGeneration<T>* resolve_generation(gc_pointer_raw pointer) {
        return resolve_generation(pointer.components.generation);
    }

    GcGeneration<T>* resolve_generation(gc_generation gen) {
        return generations[gen];
    }

    GcSegment<T>* resolve_segment(gc_pointer_raw pointer) {
        return resolve_segment(pointer.components.generation, pointer.components.segment);
    }

    GcSegment<T>* resolve_segment(gc_generation gen, gc_segment seg) {
        w_assert1(is_valid_generation(gen));
        GcGeneration<T>* generation = resolve_generation(gen);
        w_assert1(seg < generation->total_segments);
        return generation->segments[seg];
    }

    /**
     * Allocates an object in the current generation. "next" is the caller's
     * thread-local allocation hint; it is updated to point to the next free slot.
     */
    T* allocate(gc_pointer_raw& next, gc_thread_id self);

    /** Logically deallocates the given object. */
    void deallocate(T* pointer) {
        deallocate(pointer->gc_pointer);
    }

    /** Logically deallocates the object at the given raw pointer. */
    void deallocate(gc_pointer_raw pointer);

    /** Occupies a fresh segment for the calling thread, returning its first slot. */
    gc_pointer_raw occupy_segment(gc_thread_id self);

    /** Returns the wrapped number of the oldest active generation. */
    gc_generation head() const {
        return wrap(head_nowrap);
    }

    /** Returns the wrapped number one past the newest active generation. */
    gc_generation tail() const {
        return wrap(tail_nowrap);
    }

    /** Returns the wrapped number of the generation new allocations go to. */
    gc_generation curr() const {
        return wrap(curr_nowrap);
    }

    /** Returns the number of active generations. */
    size_t active_generations() const {
        return tail_nowrap - head_nowrap;
    }

    GcGeneration<T>* head_generation() {
        return generations[head()];
    }

    GcGeneration<T>* curr_generation() {
        return generations[curr()];
    }

    bool is_valid_generation(gc_generation generation) const {
        // CS: changed check conditions because otherwise the tail might wrap
        // around, and in that case it will be greater than the given generation.
        // return generation != 0 && generation >= head() && generation < tail();
        if (generation == 0) {
            return false;
        }
        if (head() < tail()) {
            return generation >= head() && generation < tail();
        } else if (tail() < head()) {
            return (generation >= head() && generation < GC_MAX_GENERATIONS)
                || generation < tail();
        }
        return false;
    }

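    // Worked example of the wrapped comparison (added; not in the original header):
    // with head()=250 and tail()=3, the valid generations are 250..255 plus 1..2
    // (generation 0 is always invalid). The commented-out one-liner above would
    // reject all of them, because tail() < head() once the counter wraps.
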
    /**
     * Creates a new generation, recycling the oldest one if possible.
     * Returns false if there are already too many active generations.
     */
    bool advance_generation(lsn_t low_water_mark, lsn_t now,
                            size_t initial_segment_count, gc_offset segment_size);

    /**
     * Retires generations whose objects can no longer be referenced as of the
     * given low water mark, optionally recycling the retired generation.
     */
    void retire_generations(lsn_t low_water_mark, lsn_t recycle_now = lsn_t::null);

    void mfence() const {
        // full fence via lintel's portable atomic_thread_fence (AtomicCounter.hpp)
        lintel::atomic_thread_fence(lintel::memory_order_seq_cst);
    }

    static gc_generation wrap(size_t i) {
        return static_cast<gc_generation>(i);
    }

    /** Name of this pool for debug output. */
    const char* name;

    /** Non-wrapping version of head(). */
    uint32_t head_nowrap;

    /** Non-wrapping version of curr(). */
    uint32_t curr_nowrap;

    /** Non-wrapping version of tail(). */
    uint32_t tail_nowrap;

    /** Desired number of active generations; beyond this, we try to recycle the oldest. */
    uint32_t desired_generations;

    /** Active generations, indexed by wrapped generation number. */
    GcGeneration<T>* generations[GC_MAX_GENERATIONS];

    /** Beginning LSN (epoch) of each generation, indexed by wrapped generation number. */
    lsn_t epochs[GC_MAX_GENERATIONS];

    /** If set, used to wake up the GC thread when we run out of pre-allocated segments. */
    GcWakeupFunctor* gc_wakeup_functor;
};

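// Usage sketch (not part of the original header). Per GcPoolEntry above, the entry
// type must embed gc_pointer_raw as a member named "gc_pointer"; MyNode and my_tid
// are hypothetical:
//
//     struct MyNode {
//         gc_pointer_raw gc_pointer;  // required by allocate()/deallocate()
//         int payload;
//     };
//
//     GcPoolForest<MyNode> pool("my-pool", 4, 8, 1024); // 4 gens; 8 segments of 1024 slots
//     gc_pointer_raw hint;
//     hint.word = 0;                              // thread-local allocation hint
//     MyNode* node = pool.allocate(hint, my_tid); // occupies a segment on first use
//     pool.deallocate(node);                      // logical only; memory is reclaimed
//                                                 // when the whole generation retires
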
template<class T>
inline T* GcPoolForest<T>::resolve_pointer(gc_pointer_raw pointer) {
    gc_generation generation = pointer.components.generation;
    if (generation == 0) {
        return nullptr;
    }

    w_assert1(is_valid_generation(generation));
    GcGeneration<T>* gen = generations[generation];
    w_assert1(gen != nullptr);

    GcSegment<T>* segment = gen->segments[pointer.components.segment];
    w_assert1(segment != nullptr);

    gc_offset offset = pointer.components.offset;
    w_assert1(offset < segment->total_objects);
    w_assert1(offset < segment->allocated_objects);

    return segment->objects + offset;
}

template<class T>
inline T* GcPoolForest<T>::allocate(gc_pointer_raw& next, gc_thread_id self) {
    if (!is_valid_generation(next.components.generation)
        || generations[next.components.generation]->retire_suggested
        || next.components.generation != curr()) {
        next = occupy_segment(self);
    }

    w_assert1(is_valid_generation(next.components.generation));
    GcGeneration<T>* gen = generations[next.components.generation];
    w_assert1(gen != nullptr);

    if (next.components.segment >= gen->allocated_segments
        || gen->segments[next.components.segment]->owner != self
        || gen->segments[next.components.segment]->allocated_objects
            >= gen->segments[next.components.segment]->total_objects) {
        // the hinted segment is stale, not ours, or full. occupy a fresh one.
        next = occupy_segment(self);
        gen = generations[next.components.generation];
    }

    GcSegment<T>* segment = gen->segments[next.components.segment];
    w_assert1(segment != nullptr);
    w_assert1(segment->owner == self);
    w_assert1(segment->allocated_objects < segment->total_objects);

    next.components.offset = segment->allocated_objects;
    T* ret = segment->objects + next.components.offset;
    ret->gc_pointer = next;

    ++segment->allocated_objects;
    next.components.offset = segment->allocated_objects; // hint for the next allocation
    return ret;
}

template<class T>
inline void GcPoolForest<T>::deallocate(gc_pointer_raw pointer) {
    // We don't do anything in deallocate; memory is reclaimed only when the
    // whole generation is retired.
    w_assert1(is_valid_generation(pointer.components.generation));
    GcGeneration<T>* gen = generations[pointer.components.generation];
    w_assert1(gen != nullptr);

    GcSegment<T>* segment = gen->segments[pointer.components.segment];
    w_assert1(segment != nullptr);

    // we don't check double-free or anything, either.
    w_assert1(pointer.components.offset < segment->allocated_objects);
}

template<class T>
inline gc_pointer_raw GcPoolForest<T>::occupy_segment(gc_thread_id self) {
    // this method is relatively infrequently called, so we can afford barriers and atomics.
    while (true) {
        mfence();
        gc_generation generation = curr();
        GcGeneration<T>* gen = generations[generation];
        if (gen->allocated_segments >= gen->total_segments) {
            // the GC thread is not catching up. This must be rare. let's sleep.
            DBGOUT0(<< name << ": GC Thread is not catching up. have to sleep. me=" << self);
            if (gc_wakeup_functor != nullptr) {
                gc_wakeup_functor->wakeup();
            }
            const uint32_t SLEEP_MICROSEC = 10000;
            ::usleep(SLEEP_MICROSEC);
            continue;
        }
        uint32_t alloc_segment = gen->allocated_segments;
        mfence();
        if (lintel::unsafe::atomic_compare_exchange_strong<uint32_t>(
            &gen->allocated_segments, &alloc_segment, alloc_segment + 1)) {
            // Okay, we are surely the only winner of the pre-allocated segment.
            GcSegment<T>* segment = gen->segments[alloc_segment];
            while (segment == nullptr) {
                // possible right after the CAS in preallocate_segments. wait.
                mfence();
                DBGOUT3(<< name << ": Waiting for segment " << alloc_segment << " in generation "
                    << gen->generation_nowrap << ". me=" << self);
                segment = gen->segments[alloc_segment];
            }
            w_assert1(segment->owner == 0);
            w_assert1(segment->allocated_objects == 0);
            w_assert1(segment->total_objects > 0);

            // Occupied!
            DBGOUT1(<< name << ": Occupied a pre-allocated Segment " << alloc_segment
                << " in generation " << gen->generation_nowrap << ". me=" << self << ".");
            segment->owner = self;
            mfence(); // let the world know (though the CAS above should be enough..)
            gc_pointer_raw ret;
            ret.components.status = 0;
            ret.components.generation = generation;
            ret.components.segment = alloc_segment;
            ret.components.offset = 0;
            return ret;
        } else {
            DBGOUT1(<< "Oops, CAS failed");
            continue;
        }
    }
}

template<class T>
inline bool GcPoolForest<T>::advance_generation(lsn_t low_water_mark, lsn_t now,
        size_t segment_count, gc_offset segment_size) {
    while (true) {
        mfence();
        if (low_water_mark != lsn_t::null && active_generations() >= desired_generations) {
            // try recycling the oldest generation
            uint32_t old_tail_nowrap = tail_nowrap;
            retire_generations(low_water_mark, now);
            if (old_tail_nowrap < tail_nowrap) {
                return true; // it worked!
            }
        }
        if (tail_nowrap - head_nowrap >= GC_MAX_GENERATIONS - 1) {
            ERROUT(<< name << ": Too many generations!");
            return false; // too many generations
        }
        uint32_t new_generation_nowrap = tail_nowrap;
        uint32_t new_tail = new_generation_nowrap + 1;
        if (wrap(new_tail) == 0) {
            DBGOUT1(<< name << ": Generation wrapped!");
            ++new_tail; // skip generation==0
        }
        if (lintel::unsafe::atomic_compare_exchange_strong<uint32_t>(
            &tail_nowrap, &new_generation_nowrap, new_tail)) {
            // okay, let's create the generation
            gc_generation new_generation = wrap(new_generation_nowrap);
            generations[new_generation] = new GcGeneration<T>(new_generation_nowrap);
            epochs[new_generation] = now;
            generations[new_generation]->preallocate_segments(segment_count, segment_size);
            DBGOUT1(<< name << ": Generation " << new_generation_nowrap
                << " created. epoch=" << now.data());
            curr_nowrap = new_generation_nowrap;
            mfence();
            return true;
        }
        // else retry
    }
}

template<class T>
inline bool GcGeneration<T>::preallocate_segments(size_t segment_count, gc_offset segment_size) {
    // this method is infrequently called by the background thread,
    // so barriers/atomics and big allocations are fine.
    while (segment_count > 0) {
        w_assert1(!retire_suggested);
        if (total_segments >= GC_MAX_SEGMENTS - 1) {
            return false; // already full!
        }

        uint32_t new_segment = total_segments;
        if (lintel::unsafe::atomic_compare_exchange_strong<uint32_t>(
            &total_segments, &new_segment, new_segment + 1)) {
            // okay, we exclusively own this segment index
            GcSegment<T>* seg = new GcSegment<T>(segment_size);
            segments[new_segment] = seg;
            --segment_count;
            DBGOUT1(<<"Pre-allocated Segment " << new_segment << " in generation "
                << generation_nowrap << ". segment size=" << segment_size << ".");
        }
        // else, someone else has just changed it. retry
    }
    return true;
}

template<class T>
inline void GcPoolForest<T>::retire_generations(lsn_t low_water_mark, lsn_t recycle_now) {
    // this method is infrequently called by the background thread,
    // so barriers/atomics and big deallocations are fine.
    w_assert1(tail_nowrap > 0);
    while (true) {
        mfence();
        uint32_t oldest_nowrap = head_nowrap;
        uint32_t next_oldest_nowrap = oldest_nowrap + 1;
        if (wrap(next_oldest_nowrap) == 0) {
            ++next_oldest_nowrap; // skip generation==0
        }
        const uint32_t MIN_HEALTHY_GENERATIONS = 2;
        if (tail_nowrap <= next_oldest_nowrap + MIN_HEALTHY_GENERATIONS) {
            return;
        }

        GcGeneration<T>* oldest = generations[wrap(oldest_nowrap)];
        oldest->retire_suggested = true; // softly requests threads to move on to a newer generation
        mfence();
        if (low_water_mark >= epochs[wrap(next_oldest_nowrap)]) {
            // if even the next generation's beginning LSN is older than the low water mark,
            // there is no chance that the oldest generation has anything still in use.
            // Let _me_ retire it.
            if (lintel::unsafe::atomic_compare_exchange_strong<uint32_t>(
                &head_nowrap, &oldest_nowrap, next_oldest_nowrap)) {
                // okay, I'm exclusively retiring this generation.
                generations[wrap(oldest_nowrap)] = nullptr;
                epochs[wrap(oldest_nowrap)].set(0);

                DBGOUT1(<< name << ": Successfully retired generation " << oldest_nowrap);
                if (recycle_now != lsn_t::null && active_generations() <= desired_generations) {
                    DBGOUT1(<< "Now recycling it as a new generation ...");
                    mfence();
                    uint32_t new_generation_nowrap = tail_nowrap;
                    uint32_t new_tail = new_generation_nowrap + 1;
                    if (wrap(new_tail) == 0) {
                        DBGOUT1(<< name << ": Generation wrapped!");
                        ++new_tail; // skip generation==0
                    }

                    if (lintel::unsafe::atomic_compare_exchange_strong<uint32_t>(
                        &tail_nowrap, &new_generation_nowrap, new_tail)) {
                        oldest->recycle(new_generation_nowrap);
                        generations[wrap(new_generation_nowrap)] = oldest;
                        epochs[wrap(new_generation_nowrap)].set(recycle_now.data());
                        curr_nowrap = new_generation_nowrap;
                        DBGOUT1(<< name << ": Successfully recycled as gen " << new_generation_nowrap);
                        mfence();
                        return;
                    } else {
                        DBGOUT1(<< name << ": Oops, others incremented the generation; couldn't"
                            << " reuse the retired generation");
                        delete oldest; // well, no other way.
                    }
                } else {
                    delete oldest;
                }
            } else {
                // someone else has retired it.
                DBGOUT1(<< name << ": Oops, CAS failed. someone else has retired it?");
                continue;
            }
        } else {
            // then we can't safely retire this generation yet.
            return;
        }
    }
}

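// Background maintenance sketch (not part of the original header): a dedicated GC
// thread, possibly woken via gc_wakeup_functor, would periodically advance and
// retire generations. "low_water_mark" and "now" are hypothetical lsn_t values
// obtained from the log manager:
//
//     pool.advance_generation(low_water_mark, now, 8, 1024); // open a fresh generation
//     pool.retire_generations(low_water_mark);               // drop generations nobody
//                                                            // can reference anymore
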
template<class T>
inline T* GcPointer<T>::dereference(GcPoolForest<T>& pool) const {
    T* pointer = pool.resolve_pointer(_raw);
    w_assert1(pointer != nullptr);
    return pointer;
}

#endif // __W_GC_POOL_FOREST_H