Zero  0.1.0
bf_tree_cb.h
/*
 * (c) Copyright 2011-2014, Hewlett-Packard Development Company, LP
 */

#ifndef __BF_TREE_CB_H
#define __BF_TREE_CB_H

#include "w_defines.h"
#include "latch.h"
#include <cstring>
#include <atomic>

#include <cassert>

/// Control block in the new buffer pool class.
struct bf_tree_cb_t {
    static const uint16_t BP_MAX_REFCOUNT = 1024;

    void init(PageID pid = 0, lsn_t page_lsn = lsn_t::null) {
        w_assert1(_pin_cnt == -1);
        _pin_cnt = 0;
        _pid = pid;
        _swizzled = false;
        _pinned_for_restore = false;
        _check_recovery = false;
        _ref_count = 0;
        _ref_count_ex = 0;
        _page_lsn = page_lsn;
        _persisted_lsn = page_lsn;

        // Update _used last. Since it's an std::atomic, a thread seeing it set
        // to true (e.g., cleaner or fuzzy checkpoints) can rely on the fact that
        // the other fields have been properly initialized.
        _used = true;
    }

    inline void clear_latch() {
        ::memset(latchp(), 0, sizeof(latch_t));
    }

    // control block is bulk-initialized by malloc and memset. It has to be aligned.

    PageID _pid; // +4 -> 4

    /// Count of pins on this block. See class comments; protected by ??
    std::atomic<int32_t> _pin_cnt; // +4 -> 8

    uint16_t _ref_count; // +2 -> 10

    /// Reference count incremented only by X-latching.
    uint16_t _ref_count_ex; // +2 -> 12

    uint8_t _fill13; // +1 -> 13

    std::atomic<bool> _pinned_for_restore; // +1 -> 14
    void pin_for_restore() {
        _pinned_for_restore = true;
    }

    void unpin_for_restore() {
        _pinned_for_restore = false;
    }

    bool is_pinned_for_restore() {
        return _pinned_for_restore;
    }

    /// true if this block is actually used
    std::atomic<bool> _used; // +1 -> 15
    /// Whether this page is swizzled from the parent.
    std::atomic<bool> _swizzled; // +1 -> 16

    lsn_t _page_lsn; // +8 -> 24
    lsn_t get_page_lsn() const {
        return _page_lsn;
    }

    void set_page_lsn(lsn_t lsn) {
        // caller must hold EX latch, since it has just performed an update on
        // the page
        _page_lsn = lsn;
        if (_rec_lsn <= _persisted_lsn) {
            _rec_lsn = lsn;
        }
        if (_next_rec_lsn <= _next_persisted_lsn) {
            _next_rec_lsn = lsn;
        }
    }

    // CS: page_lsn value when it was last picked for cleaning.
    // Replaces the old dirty flag, because dirty is defined as
    // page_lsn > persisted_lsn (see the usage sketch after this listing).
    lsn_t _persisted_lsn; // +8 -> 32
    lsn_t get_persisted_lsn() const {
        return _persisted_lsn;
    }

    bool is_dirty() const {
        // An unconditional latch may cause deadlocks!
        w_rc_t rc = latch().latch_acquire(LATCH_SH, timeout_t::WAIT_IMMEDIATE);
        if (rc.is_error()) {
            // If the latch could not be acquired, assume dirty: false positives are
            // fine, whereas false negatives cause lost updates on recovery.
            return true;
        }
        bool res = _page_lsn > _persisted_lsn;
        latch().latch_release();
        return res;
    }

    // Recovery LSN, a.k.a. DirtyPageLSN, is used by traditional checkpoints
    // that determine the dirty page table by scanning the buffer pool -- unlike
    // our decoupled checkpoints (see chkpt_t::scan_log), which scan the log.
    // Its value is the LSN of the first update since the page was last cleaned,
    // and it is set in set_page_lsn() above. During log analysis, the minimum
    // of all recovery LSNs determines the starting point for the log-based redo
    // recovery of traditional ARIES (see the min-rec_lsn sketch after this
    // listing). This is not used in instant restart.
    lsn_t _rec_lsn; // +8 -> 40
    lsn_t get_rec_lsn() const {
        return _rec_lsn;
    }

    lsn_t _next_persisted_lsn; // +8 -> 48
    lsn_t get_next_persisted_lsn() const {
        return _next_persisted_lsn;
    }

    void mark_persisted_lsn() {
        // called by cleaner while it holds SH latch
        _next_persisted_lsn = _page_lsn;
    }

    lsn_t _next_rec_lsn; // +8 -> 56
    lsn_t get_next_rec_lsn() const {
        return _next_rec_lsn;
    }

    // Called when the cleaner writes and fsync's the page to disk. This updates
    // rec_lsn and persisted_lsn to their "next" counterparts, which were recorded
    // when the cleaner first copied the page into its own write buffer. Note
    // that this does not necessarily mark the frame as "clean", since updates
    // might have happened since the copy was taken. That's why we don't have
    // a dirty flag and rely on LSN comparisons instead (see the cleaner sketch
    // after this listing).
    void notify_write() {
        // An SH latch is enough because we are only worried about races with
        // set_page_lsn, which always holds an EX latch.
        // An unconditional latch may cause deadlocks!
        w_rc_t rc = latch().latch_acquire(LATCH_SH, timeout_t::WAIT_IMMEDIATE);
        if (rc.is_error()) {
            // We have to leave the page marked dirty for now, as long as we
            // cannot be sure that these LSN updates can be done latch-free
            // (which I doubt).
            return;
        }
        _persisted_lsn = _next_persisted_lsn;
        _rec_lsn = _next_rec_lsn;
        latch().latch_release();
    }

    /// This is used by the decoupled (a.k.a. log-based) cleaner.
    // CS TODO: I never tested restart with the decoupled cleaner!
    void notify_write_logbased(lsn_t archived_lsn) {
        w_rc_t rc = latch().latch_acquire(LATCH_SH, timeout_t::WAIT_IMMEDIATE);
        if (rc.is_error()) {
            return;
        }

        _rec_lsn = archived_lsn;
        _persisted_lsn = archived_lsn;
        latch().latch_release();
    }

    /// Log volume generated on this page (for page_img logrec compression, see xct_logger.h)
    uint32_t _log_volume; // +4 -> 60
    uint16_t get_log_volume() const {
        return _log_volume;
    }

    void increment_log_volume(uint16_t c) {
        _log_volume += c;
    }

    void set_log_volume(uint16_t c) {
        _log_volume = c;
    }

    uint16_t _swizzled_ptr_cnt_hint; // +2 -> 62

    bool _check_recovery; // +1 -> 63
    void set_check_recovery(bool chk) {
        _check_recovery = chk;
    }

    // Add padding to align the control block at a cacheline boundary (64 bytes).
    // CS: padding not needed anymore because we are exactly at offset 63 above.
    // uint8_t _fill63[16]; // +16 -> 63

    /* The bufferpool should alternate the location of latches and control blocks,
     * starting at an odd multiple of 64B, as follows:
     *     ...|CB0|L0|L1|CB1|CB2|L2|L3|CB3|...
     * This layout addresses a pathology that we attribute to the hardware
     * spatial prefetcher. The default layout allocates a latch right after a
     * control block, so that the control block and latch live in adjacent cache
     * lines (in the same 128B sector). The pathology happens because, when we
     * write-access the latch, the processor prefetches the control block in
     * read-exclusive mode even if we later really only read-access the control
     * block. This causes unnecessary coherence traffic. With the new layout, we
     * avoid having a control block and latch in the same 128B sector.
     * (See the index-to-slot sketch after this listing.)
     */

    int8_t _fill64; // +1 -> 64

    latch_t _latch;

    // increment pin count atomically
    bool pin() {
        int32_t old = _pin_cnt;
        while (true) {
            if (old < 0) {
                return false;
            }
            if (_pin_cnt.compare_exchange_strong(old, old + 1)) {
                return true;
            }
        }
    }

    // decrement pin count atomically
    void unpin() {
        auto v = --_pin_cnt;
        // only prepare_for_eviction may set negative pin count
        w_assert1(v >= 0);
    }

    bool prepare_for_eviction() {
        w_assert1(_pin_cnt >= 0);
        int32_t old = 0;
        if (!_pin_cnt.compare_exchange_strong(old, -1)) {
            w_assert1(_pin_cnt >= 0);
            return false;
        }
        _used = false;
        return true;
    }

    bool is_in_use() const {
        return _pin_cnt >= 0 && _used;
    }

    void inc_ref_count() {
        if (_ref_count < BP_MAX_REFCOUNT) {
            ++_ref_count;
        }
    }

    void inc_ref_count_ex() {
        if (_ref_count_ex < BP_MAX_REFCOUNT) {
            ++_ref_count_ex;
        }
    }

    void reset_ref_count_ex() {
        _ref_count_ex = 0;
    }

    // disabled (no implementation)
    bf_tree_cb_t(const bf_tree_cb_t&);
    bf_tree_cb_t& operator=(const bf_tree_cb_t&);

    latch_t* latchp() const {
        return const_cast<latch_t*>(&_latch);
    }

    latch_t& latch() const {
        return *latchp();
    }
};

#endif // __BF_TREE_CB_H
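
Usage sketch: the pin/unpin and dirty-check protocol. This is illustrative code
written against the header above, not part of the file; get_cb() is a hypothetical
accessor standing in for the buffer pool's frame lookup, and the frame index type
is assumed to be uint32_t.

#include "bf_tree_cb.h"
#include <cstdint>

// Hypothetical accessor: maps a frame index to its control block.
bf_tree_cb_t& get_cb(uint32_t idx);

// A cleaner- or checkpoint-style probe of one frame: pin it so it cannot be
// evicted, check whether it is dirty, and unpin again.
bool frame_needs_cleaning(uint32_t idx) {
    bf_tree_cb_t& cb = get_cb(idx);

    // pin() fails if the frame is being evicted (_pin_cnt == -1), so a
    // successful pin means the control block contents stay valid below.
    if (!cb.pin()) {
        return false;
    }

    // _used is set last in init(), so seeing it true implies the other
    // fields (pid, LSNs, flags) are fully initialized.
    bool dirty = cb.is_in_use() && cb.is_dirty();

    cb.unpin();
    return dirty;
}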
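
Cleaner sketch: the mark_persisted_lsn()/notify_write() protocol. The copy and
write helpers below are hypothetical placeholders for the real cleaner's write
buffer and I/O; latch management is omitted to keep the sketch short.

#include "bf_tree_cb.h"
#include <cstdint>

bf_tree_cb_t& get_cb(uint32_t idx);                // hypothetical accessor
void copy_frame_to_write_buffer(uint32_t idx);     // hypothetical helper
void write_and_fsync(uint32_t idx);                // hypothetical helper

// Record the page LSN at copy time; only after the copy is durable do the
// "next" LSNs become the current ones. If the page was updated in between,
// page_lsn > persisted_lsn still holds and the frame stays dirty -- which is
// why no explicit dirty bit is needed.
void clean_frame(uint32_t idx) {
    bf_tree_cb_t& cb = get_cb(idx);
    if (!cb.pin()) {
        return;
    }

    cb.mark_persisted_lsn();           // _next_persisted_lsn = current page_lsn
    copy_frame_to_write_buffer(idx);

    write_and_fsync(idx);
    cb.notify_write();                 // persisted_lsn/rec_lsn catch up to "next"

    cb.unpin();
}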
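
Min-rec_lsn sketch: how a traditional, buffer-pool-scanning checkpoint might use
get_rec_lsn(). The minimum recovery LSN over all dirty frames bounds where ARIES
redo must start. The frame count, index range, and get_cb() accessor are
assumptions, not part of this header.

#include "bf_tree_cb.h"
#include <cstdint>

bf_tree_cb_t& get_cb(uint32_t idx);                // hypothetical accessor

lsn_t min_rec_lsn(uint32_t frame_count) {
    lsn_t min = lsn_t::null;
    for (uint32_t i = 0; i < frame_count; i++) {
        bf_tree_cb_t& cb = get_cb(i);
        if (!cb.pin()) {
            continue;                  // frame is being evicted; skip it
        }
        if (cb.is_in_use() && cb.is_dirty()) {
            lsn_t rec = cb.get_rec_lsn();
            if (min == lsn_t::null || rec < min) {
                min = rec;
            }
        }
        cb.unpin();
    }
    return min;
}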
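
Index-to-slot sketch: one reading of the interleaved control-block/latch layout
described in the comment above (...|CB0|L0|L1|CB1|CB2|L2|L3|CB3|..., starting at
an odd multiple of 64B). Control blocks and latches each occupy a 64B slot, and
consecutive index pairs swap the order of CB and latch. This only illustrates the
stated pattern; it is not the buffer pool's actual allocation code.

#include <cassert>
#include <cstdint>

static const uint64_t BASE_SLOT = 1;   // an odd multiple of 64B, as required

// Slot numbers are multiples of 64B from the start of the region.
inline uint64_t cb_slot(uint64_t idx)    { return BASE_SLOT + 2 * idx + (idx & 1); }
inline uint64_t latch_slot(uint64_t idx) { return BASE_SLOT + 2 * idx + ((idx & 1) ^ 1); }

int main() {
    // Reproduce the pattern from the comment: |CB0|L0|L1|CB1|CB2|L2|L3|CB3|
    assert(cb_slot(0) == 1 && latch_slot(0) == 2);
    assert(latch_slot(1) == 3 && cb_slot(1) == 4);
    assert(cb_slot(2) == 5 && latch_slot(2) == 6);
    assert(latch_slot(3) == 7 && cb_slot(3) == 8);

    // A control block never shares a 128B sector (two aligned 64B slots)
    // with its own latch.
    for (uint64_t i = 0; i < 1024; i++) {
        assert(cb_slot(i) / 2 != latch_slot(i) / 2);
    }
    return 0;
}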