Zero  0.1.0
latches.h
Go to the documentation of this file.
1 #ifndef __LATCHES_H
2 #define __LATCHES_H
3 
4 #include "w_base.h"
5 #include "timeout.h"
6 #include "w_pthread.h"
7 #include "AtomicCounter.hpp"
8 #include "tatas.h"
9 
48  struct ext_qnode {
49  w_pthread_lock_t* _held;
50  };
51 #define PTHREAD_EXT_QNODE_INITIALIZER { NULL }
52 #define PTHREAD_EXT_QNODE_INITIALIZE(x) (x)._held = NULL
53 
54  typedef ext_qnode volatile* ext_qnode_ptr;
57 private:
58  pthread_mutex_t _mutex; // w_pthread_lock_t blocks on this
61 
62 public:
63  w_pthread_lock_t() : _holder(0) {
64  pthread_mutex_init(&_mutex, 0);
65  }
66 
// NOTE(review): this is the tail of ~w_pthread_lock_t(); the destructor's
// signature line (original line 67/68) is missing from this extraction --
// confirm against the source repository. The destructor destroys the
// underlying pthread mutex; the holder assertion is intentionally disabled
// (see the TODO below).
69 // TODO(Restart)... comment out the assertion in debug mode for 'instant restart' testing purpose
70 // if we are using simulated crash shutdown, this assertion might fire if
71 // we are in the middle of taking a checkpoint
72 // this is for mutex chkpt_serial_m::write_release();
73 // need a way to ignore _holder checking if using simulated system crash
74 //
75 // For now, comment out the assertion, although we might miss other
76 // bugs by comment out the assertion
78 
79 // w_assert1(!_holder);
80 
81  pthread_mutex_destroy(&_mutex);
82  }
83 
85  bool attempt(ext_qnode* me) {
86  if (attempt(*me)) {
87  me->_held = this;
88  _holder = this;
89  return true;
90  }
91  return false;
92  }
93 
94 private:
96  bool attempt(ext_qnode& me) {
97  w_assert1(!is_mine(&me));
98  w_assert0(me._held == 0); // had better not
99  // be using this qnode for another lock!
100  return pthread_mutex_trylock(&_mutex) == 0;
101  }
102 
103 public:
// Acquire the lock (blocking) and set the qnode `me` to refer to this lock.
// On return the calling thread holds the mutex and both me->_held and
// _holder point at this lock. Always returns 0; the void* return type
// exists to match the MCS-lock acquire() signature.
105  void* acquire(ext_qnode* me) {
106  w_assert1(!is_mine(me));
107  w_assert1(me->_held == 0); // had better not
108  // be using this qnode for another lock!
109  pthread_mutex_lock(&_mutex);
110  me->_held = this;
111  _holder = this;
112 #if W_DEBUG_LEVEL > 0
113  {
// NOTE(review): original line 114 is missing from this extraction
// (presumably a memory fence before the assertion) -- confirm against
// the source repository.
115  w_assert1(is_mine(me)); // TODO: change to assert2
116  }
117 #endif
118  return 0;
119  }
120 
122  void release(ext_qnode& me) {
123  release(&me);
124  }
125 
127  void release(ext_qnode_ptr me) {
128  // assert is_mine:
129  w_assert1(_holder == me->_held);
130  w_assert1(me->_held == this);
131  me->_held = 0;
132  _holder = 0;
133  pthread_mutex_unlock(&_mutex);
134 #if W_DEBUG_LEVEL > 10
135  // This is racy since the containing structure could
136  // have been freed by the time we do this check. Thus,
137  // we'll remove it.
138  {
139  lintel::atomic_thread_fence(lintel::memory_order_acquire);// needed for the assertions?
141  w_pthread_lock_t *m = me->_held;
142  w_assert1( (h==nullptr && m==nullptr)
143  || (h != m) );
144  }
145 #endif
146  }
147 
158  bool is_mine(ext_qnode* me) const {
159  if (me->_held == this) {
160  // only valid if is_mine
161  w_assert1(_holder == me->_held);
162  return true;
163  }
164  return false;
165  }
166 };
167 
194 /*
195  * These typedefs are included to allow substitution at some point.
196  * Where there is a preference, the code should use the appropriate typedef.
197  */
198 
199 typedef w_pthread_lock_t queue_based_block_lock_t; // blocking impl always ok
200 #define QUEUE_BLOCK_EXT_QNODE_INITIALIZER PTHREAD_EXT_QNODE_INITIALIZER
201 // non-static initialize:
202 #define QUEUE_BLOCK_EXT_QNODE_INITIALIZE(x) x._held = NULL
203 
// With USE_PTHREAD_MUTEX, the pthread-based lock backs both the spin and
// generic queue-based lock typedefs; otherwise the MCS spinlock is used.
204 #ifdef USE_PTHREAD_MUTEX
205 
206 typedef w_pthread_lock_t queue_based_spin_lock_t; // spin impl preferred
207 typedef w_pthread_lock_t queue_based_lock_t; // might want to use spin impl
208 #define QUEUE_SPIN_EXT_QNODE_INITIALIZER PTHREAD_EXT_QNODE_INITIALIZER
209 #define QUEUE_EXT_QNODE_INITIALIZER PTHREAD_EXT_QNODE_INITIALIZER
210 // non-static initialize:
211 #define QUEUE_EXT_QNODE_INITIALIZE(x) x._held = NULL;
212 #else
213 #include "mcs_lock.h"
214 typedef mcs_lock queue_based_spin_lock_t; // spin preferred
// NOTE(review): original line 215 (apparently
// `typedef mcs_lock queue_based_lock_t;` per the generated cross-references)
// is missing from this extraction -- confirm against the source repository.
216 #define QUEUE_SPIN_EXT_QNODE_INITIALIZER MCS_EXT_QNODE_INITIALIZER
217 #define QUEUE_EXT_QNODE_INITIALIZER MCS_EXT_QNODE_INITIALIZER
218 // non-static initialize:
219 #define QUEUE_EXT_QNODE_INITIALIZE(x) MCS_EXT_QNODE_INITIALIZE(x)
220 #endif
221 
/** A multiple-reader/single-writer lock based on pthreads (blocking).
 *
 * The acquire/release methods are implemented out of line (latches.cpp);
 * this declaration carries the state: a volatile count of active holders
 * plus a mutex and two condition variables used to block readers and
 * writers. The nested occ_rlock / occ_wlock adapters expose each side of
 * the lock through a uniform acquire()/release() interface.
 */
struct occ_rwlock {
    occ_rwlock();

    ~occ_rwlock();

    /// Block until the lock is held in read mode.
    void acquire_read();

    /// Release one read hold.
    void release_read();

    /// Block until the lock is held in write mode.
    void acquire_write();

    /// Release the write hold.
    void release_write();

    /// Adapter presenting the read side as an acquire()/release() lock.
    struct occ_rlock {
        occ_rwlock* _lock;

        void acquire() {
            _lock->acquire_read();
        }

        void release() {
            _lock->release_read();
        }
    };

    /// Adapter presenting the write side as an acquire()/release() lock.
    struct occ_wlock {
        occ_rwlock* _lock;

        void acquire() {
            _lock->acquire_write();
        }

        void release() {
            _lock->release_write();
        }
    };

    /// The read-side adapter for this lock.
    occ_rlock* read_lock() {
        return &_read_lock;
    }

    /// The write-side adapter for this lock.
    occ_wlock* write_lock() {
        return &_write_lock;
    }

private:
    enum {
        WRITER = 1,
        READER = 2
    };

    unsigned int volatile _active_count;

    occ_rlock _read_lock;

    occ_wlock _write_lock;

    pthread_mutex_t _read_write_mutex; // paired w/ _read_cond, _write_cond
    pthread_cond_t _read_cond; // paired w/ _read_write_mutex
    pthread_cond_t _write_cond; // paired w/ _read_write_mutex
};
310 
// Convenience wrappers giving the project's usual acquire/release/is_mine
// spelling for any mutex-like object.
311 #define MUTEX_ACQUIRE(mutex) W_COERCE((mutex).acquire());
312 #define MUTEX_RELEASE(mutex) (mutex).release();
313 #define MUTEX_IS_MINE(mutex) (mutex).is_mine()
314 
315 // critical_section.h contains the macros needed for the following
316 // SPECIALIZE_CS
317 #include "critical_section.h"
318 
319 // tatas_lock doesn't have is_mine, but I changed its release()
320 // to Release and through compiling saw everywhere that uses release,
321 // and fixed those places
322 SPECIALIZE_CS(tatas_lock, int _dummy, (_dummy = 0),
323  _mutex->acquire(), _mutex->release());
324 
325 // queue_based_lock_t asserts is_mine() in release()
326 SPECIALIZE_CS(w_pthread_lock_t, w_pthread_lock_t::ext_qnode _me, (_me._held = 0),
327  _mutex->acquire(&_me), _mutex->release(&_me));
328 
329 #ifndef USE_PTHREAD_MUTEX
// NOTE(review): original line 330 -- the first line of a SPECIALIZE_CS
// invocation (apparently for mcs_lock, matching the argument line below)
// -- is missing from this extraction; confirm against the source repository.
331  _mutex->acquire(&_me), _mutex->release(&_me));
332 #endif
333 
334 SPECIALIZE_CS(occ_rwlock::occ_rlock, int _dummy, (_dummy = 0),
335  _mutex->acquire(), _mutex->release());
336 
337 SPECIALIZE_CS(occ_rwlock::occ_wlock, int _dummy, (_dummy = 0),
338  _mutex->acquire(), _mutex->release());
339 
// Shore read-write latch: a many-reader/one-writer spin lock built on top
// of the queue-based lock it privately inherits. State is a single volatile
// counter `_holders` encoded as 2*readers + writer, so WRITER (1) marks an
// exclusive hold and every reader adds 2.
350 class mcs_rwlock : protected queue_based_lock_t {
352 
353  /* \todo TODO: Add support for blocking if any of the spins takes too long.
354  *
355  There are three spins to worry about: spin_on_writer,
356  spin_on_reader, and spin_on_waiting
357 
358  The overall idea is that threads which decide to block lose
359  their place in line to avoid forming convoys. To make this work
360  we need to modify the spin_on_waiting so that it blocks
361  eventually; the mcs_lock's preemption resistance will take care
362  of booting it from the queue as necessary.
363 
364  Whenever the last reader leaves it signals a cond var; when a
365  writer leaves it broadcasts.
366  END TODO
367  */
// NOTE(review): original line 351 (apparently a typedef of the parent
// lock type, `queue_based_lock_t parent_lock`, per the generated
// cross-references) is missing from this extraction.
368  unsigned int volatile _holders; // 2*readers + writer
369 
370 public:
371  enum rwmode_t {
372  NONE = 0,
373  WRITER = 0x1,
374  READER = 0x2
375  };
376 
377  mcs_rwlock() : _holders(0) {}
378 
// NOTE(review): original line 379 (the ~mcs_rwlock() destructor, per the
// generated cross-references) is missing from this extraction.
380 
// Return the mode in which this lock is held by anyone.
// `*&_holders` forces a fresh read of the volatile counter.
382  rwmode_t mode() const {
383  int holders = *&_holders;
384  return (holders == WRITER) ? WRITER : (holders > 0) ? READER : NONE;
385  }
386 
// True if locked in any mode.
388  bool is_locked() const {
389  return (*&_holders) == 0 ? false : true;
390  }
391 
// 1 if held in write mode, else the number of readers (counter / 2).
393  int num_holders() const {
394  int holders = *&_holders;
395  return (holders == WRITER) ? 1 : holders / 2;
396  }
397 
// True iff one or more readers hold the lock (any bits above WRITER set).
399  bool has_reader() const {
400  return *&_holders & ~WRITER;
401  }
402 
// True iff a writer holds the lock (never more than one).
404  bool has_writer() const {
405  return *&_holders & WRITER;
406  }
407 
// Non-blocking attempt to get a read hold; true on success.
409  bool attempt_read();
410 
// The normal way to acquire a read lock (implemented in latches.cpp).
412  void acquire_read();
413 
415  void release_read();
416 
// Non-blocking attempt to get the write hold; true on success.
418  bool attempt_write();
419 
// The normal way to acquire the write lock (implemented in latches.cpp).
421  void acquire_write();
422 
424  void release_write();
425 
// Try to turn a read hold into the write hold; true on success.
427  bool attempt_upgrade();
428 
// Turn the write hold into a read hold.
430  void downgrade();
431 
432 private:
433  // CC mangles this as __1cKmcs_rwlockO_spin_on_writer6M_v_
434  int _spin_on_writer();
435 
436  // CC mangles this as __1cKmcs_rwlockP_spin_on_readers6M_v_
437  void _spin_on_readers();
438 
439  bool _attempt_write(unsigned int expected);
440 
441  void _add_when_writer_leaves(int delta);
442 };
443 
// RAII critical section holding an srwlock_t in read mode for its
// lifetime: the constructor acquires, the destructor releases.
// NOTE(review): the class header line(s) around original line 446-447
// (`class spinlock_read_critical_section {`) are missing from this
// extraction -- confirm against the source repository.
445 
448 public:
449  spinlock_read_critical_section(srwlock_t* lock) : _lock(lock) {
450  _lock->acquire_read();
451  }
452 
// NOTE(review): original line 453, the destructor signature
// (`~spinlock_read_critical_section() {` per the cross-references),
// is missing from this extraction.
454  _lock->release_read();
455  }
456 
457 private:
458  srwlock_t* _lock;
459 };
460 
// RAII critical section holding an srwlock_t in write mode for its
// lifetime: the constructor acquires, the destructor releases.
// NOTE(review): the class header line (original line ~461,
// `class spinlock_write_critical_section {`) is missing from this
// extraction -- confirm against the source repository.
462 public:
463  spinlock_write_critical_section(srwlock_t* lock) : _lock(lock) {
464  _lock->acquire_write();
465  }
466 
// NOTE(review): original line 467, the destructor signature
// (`~spinlock_write_critical_section() {` per the cross-references),
// is missing from this extraction.
468  _lock->release_write();
469  }
470 
471 private:
472  srwlock_t* _lock;
473 };
474 
475 #endif // __LATCHES_H
A multiple-reader/single-writer lock based on pthreads (blocking)
Definition: latches.h:240
void atomic_thread_fence(memory_order order)
Definition: AtomicCounter.hpp:223
w_pthread_lock_t()
Definition: latches.h:63
~spinlock_write_critical_section()
Definition: latches.h:467
pthread_mutex_t _mutex
Definition: latches.h:58
#define w_assert1(x)
Level 1 should not add significant extra time.
Definition: w_base.h:198
bool attempt(ext_qnode &me)
Returns true if success. Helper for attempt(ext_qnode *).
Definition: latches.h:96
occ_wlock _write_lock
Definition: latches.h:304
rwmode_t
Definition: latches.h:371
Definition: latches.h:461
SPECIALIZE_CS(tatas_lock, int _dummy,(_dummy=0), _mutex->acquire(), _mutex->release())
occ_rlock _read_lock
Definition: latches.h:302
Header file for lintel::Atomic class.
mcs_lock queue_based_lock_t
Definition: latches.h:215
w_pthread_lock_t * _holder
Holder is this struct if acquire is successful.
Definition: latches.h:60
Definition: latches.h:447
void acquire_write()
The normal way to acquire a write lock.
Definition: latches.cpp:60
bool has_reader() const
True iff has one or more readers.
Definition: latches.h:399
pthread_cond_t _write_cond
Definition: latches.h:308
rwmode_t mode() const
Return the mode in which this lock is held by anyone.
Definition: latches.h:382
srwlock_t * _lock
Definition: latches.h:458
unsigned int volatile _holders
Definition: latches.h:368
mcs_lock queue_based_spin_lock_t
Definition: latches.h:214
mcs_rwlock srwlock_t
Definition: latches.h:444
srwlock_t * _lock
Definition: latches.h:472
#define w_assert0(x)
Default assert/debug level is 0.
Definition: w_base.h:175
spinlock_read_critical_section(srwlock_t *lock)
Definition: latches.h:449
pthread_cond_t _read_cond
Definition: latches.h:307
void release(ext_qnode_ptr me)
Release the lock and clear the qnode.
Definition: latches.h:127
~mcs_rwlock()
Definition: latches.h:379
An MCS queuing spinlock.
Definition: mcs_lock.h:61
void * acquire(ext_qnode *me)
Acquire the lock and set the qnode to refer to this lock.
Definition: latches.h:105
void release_read()
The normal way to release a read lock.
Definition: latches.cpp:20
bool attempt(ext_qnode *me)
Returns true if success.
Definition: latches.h:85
A test-and-test-and-set spinlock.
Definition: tatas.h:25
void release(ext_qnode &me)
Release the lock and clear the qnode.
Definition: latches.h:122
queue_based_lock_t parent_lock
Definition: latches.h:351
void acquire_read()
The normal way to acquire a read lock.
Definition: latches.cpp:31
bool is_locked() const
True if locked in any mode.
Definition: latches.h:388
w_pthread_lock_t queue_based_block_lock_t
Definition: latches.h:199
int num_holders() const
1 if held in write mode, else it's the number of readers
Definition: latches.h:393
mcs_lock * _held
Definition: mcs_lock.h:79
mcs_rwlock()
Definition: latches.h:377
unsigned int volatile _active_count
Definition: latches.h:300
bool is_mine(ext_qnode *me) const
Return true if this thread holds the lock.
Definition: latches.h:158
~spinlock_read_critical_section()
Definition: latches.h:453
Wrapper for pthread mutexes, with a queue-based lock API.
Definition: latches.h:46
~w_pthread_lock_t()
Definition: latches.h:67
void release_write()
The normal way to release a write lock.
Definition: latches.cpp:53
pthread_mutex_t _read_write_mutex
Definition: latches.h:306
bool has_writer() const
True iff has a writer (never more than 1)
Definition: latches.h:404
Shore read-write lock:: many-reader/one-writer spin lock.
Definition: latches.h:350
Definition: mcs_lock.h:76
spinlock_write_critical_section(srwlock_t *lock)
Definition: latches.h:463
Definition: AtomicCounter.hpp:113