#ifndef QUEUED_RWLOCK_HPP
#define QUEUED_RWLOCK_HPP

#include <stdint.h>
#include <stddef.h>
#include <assert.h>
#include <sched.h>

#define QUEUED_RW_LOCK_REQUEST_READ 0
#define QUEUED_RW_LOCK_REQUEST_WRITE 1
#define QUEUED_RW_LOCK_REQUEST_NONE 2

// Compare-and-swap helper used by readlock(); defined at the end of this file.
template<typename T>
bool atomic_compare_and_swap(T& a, T oldval, T newval);
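// The lock below tracks active readers in an atomic<size_t> with .value,
// .inc() and .dec() members. That type is not part of this listing (it
// presumably comes from an accompanying atomics header); the following is a
// minimal sketch, assuming GCC __sync builtins, so this file stands alone.
template<typename T>
struct atomic {
  volatile T value;                                      // current counter value
  atomic(T v = T(0)) : value(v) { }
  void inc() { __sync_add_and_fetch(&value, 1); }        // atomic increment
  T dec() { return __sync_sub_and_fetch(&value, 1); }    // atomic decrement, returns new value
};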
// Fair queued reader-writer lock with local-only spinning, in the style of
// the Mellor-Crummey/Scott queued reader-writer lock.
class queued_rw_lock {
 public:
  // Per-request state packed into one 32-bit word so it can be CAS'd as a unit.
  union state_union {
    volatile uint32_t stateu;
    struct {
      volatile uint16_t successor_class;  // lock class of the request queued behind this one
      volatile bool blocked;              // true while this request must keep waiting
    } state;
  };

  // Queue node: callers supply one per acquisition and pass the same node to the unlock.
  struct request {
    volatile request* volatile next;
    volatile state_union s;
    volatile char lockclass;
  };

 private:
  request* volatile tail;         // most recently queued request
  atomic<size_t> reader_count;    // number of readers currently holding the lock
  request* volatile next_writer;  // writer to wake once active readers drain

 public:
  queued_rw_lock() : tail(NULL), reader_count(0), next_writer(NULL) { }
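  // Locking protocol, summarizing the four operations below: every acquire
  // swaps its request node onto `tail`, forming a FIFO queue, and then spins
  // only on the `blocked` flag of its own node; whoever hands the lock over
  // clears the successor's flag. `successor_class` records what kind of
  // request is queued behind a node, which is what lets consecutive readers
  // enter together while writers remain exclusive.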
  inline void writelock(request *I) {
    I->lockclass = QUEUED_RW_LOCK_REQUEST_WRITE;
    I->next = NULL;
    I->s.stateu = 0;
    I->s.state.blocked = true;
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    __sync_synchronize();
    // Atomically append this request; the old tail is our predecessor.
    request* predecessor = __sync_lock_test_and_set(&tail, I);
    if (predecessor == NULL) {
      // Queue was empty: register as next writer and run immediately if no
      // readers are active and no reader has already claimed the wakeup.
      next_writer = I;
      __sync_synchronize();
      if (reader_count.value == 0) {
        if (__sync_lock_test_and_set(&next_writer, (request*)NULL) == I) {
          I->s.state.blocked = false;
        }
      }
    } else {
      // Tell the predecessor a writer follows, then link in behind it.
      predecessor->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_WRITE;
      __sync_synchronize();
      predecessor->next = I;
    }
    // Spin locally until whoever releases the lock clears our blocked flag.
    volatile state_union& is = I->s;
    while (is.state.blocked) sched_yield();
    assert(reader_count.value == 0);
  }
  inline void wrunlock(request *I) {
    __sync_synchronize();
    if (I->next != NULL || !__sync_bool_compare_and_swap(&tail, I, (request*)NULL)) {
      // Wait for the successor to finish linking itself in.
      while (I->next == NULL) sched_yield();
      __sync_synchronize();
      // A reader successor is counted on its behalf before being admitted.
      if (I->next->lockclass == QUEUED_RW_LOCK_REQUEST_READ) reader_count.inc();
      I->next->s.state.blocked = false;
    }
  }
  inline void readlock(request *I) {
    I->lockclass = QUEUED_RW_LOCK_REQUEST_READ;
    I->next = NULL;
    I->s.stateu = 0;
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    I->s.state.blocked = true;
    __sync_synchronize();
    // Atomically append this request; the old tail is our predecessor.
    request* predecessor = __sync_lock_test_and_set(&tail, I);
    if (predecessor == NULL) {
      // Empty queue: become an active reader immediately.
      reader_count.inc();
      I->s.state.blocked = false;
    } else {
      // Try to flip the predecessor's state from (blocked, no successor) to
      // (blocked, reader successor) with a single 32-bit compare-and-swap.
      state_union tempold, tempnew;
      tempold.stateu = 0;   // clear the whole word so the comparison is exact
      tempnew.stateu = 0;
      tempold.state.blocked = true;
      tempold.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
      tempnew.state.blocked = true;
      tempnew.state.successor_class = QUEUED_RW_LOCK_REQUEST_READ;
      __sync_synchronize();
      if (predecessor->lockclass == QUEUED_RW_LOCK_REQUEST_WRITE ||
          atomic_compare_and_swap(predecessor->s.stateu,
                                  tempold.stateu, tempnew.stateu)) {
        // The predecessor is a writer, or a still-blocked reader that now
        // knows a reader follows: link in and wait to be unblocked.
        predecessor->next = I;
        __sync_synchronize();
        volatile state_union& is = I->s;
        while (is.state.blocked) sched_yield();
      } else {
        // The predecessor is a reader that already holds the lock: join it.
        reader_count.inc();
        predecessor->next = I;
        __sync_synchronize();
        I->s.state.blocked = false;
      }
    }
    __sync_synchronize();
    // If another reader queued behind us, admit it too so readers chain in.
    if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_READ) {
      while (I->next == NULL) sched_yield();
      reader_count.inc();
      I->next->s.state.blocked = false;
    }
  }
  inline void rdunlock(request *I) {
    __sync_synchronize();
    if (I->next != NULL || !__sync_bool_compare_and_swap(&tail, I, (request*)NULL)) {
      // Wait for the successor to link itself in; if it is a writer,
      // remember it so the last reader out can wake it.
      while (I->next == NULL) sched_yield();
      if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_WRITE) {
        next_writer = (request*)(I->next);
        __sync_synchronize();
      }
    }
    // The last reader to leave wakes the pending writer, if any.
    if (reader_count.dec() == 0) {
      __sync_synchronize();
      request* w = __sync_lock_test_and_set(&next_writer, (request*)NULL);
      if (w != NULL) {
        w->s.state.blocked = false;
        __sync_synchronize();
      }
    }
  }
};
// Single-word compare-and-swap wrapper over the GCC __sync builtin; the
// forward declaration near the top of this file lets readlock() call it.
template<typename T>
bool atomic_compare_and_swap(T& a, T oldval, T newval) {
  return __sync_bool_compare_and_swap(&a, oldval, newval);
}

#endif // QUEUED_RWLOCK_HPP
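// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original header, guarded by a
// hypothetical QUEUED_RWLOCK_EXAMPLE macro so it normally compiles away).
// Each thread supplies its own queued_rw_lock::request node per acquisition
// and keeps it alive until the matching unlock; the names below are made up.
#ifdef QUEUED_RWLOCK_EXAMPLE
static queued_rw_lock example_lock;
static size_t example_counter = 0;

static void example_reader() {
  queued_rw_lock::request r;      // one queue node per acquisition
  example_lock.readlock(&r);
  size_t seen = example_counter;  // shared state may be read concurrently here
  (void)seen;
  example_lock.rdunlock(&r);      // release with the same node
}

static void example_writer() {
  queued_rw_lock::request w;
  example_lock.writelock(&w);
  ++example_counter;              // exclusive access while the write lock is held
  example_lock.wrunlock(&w);
}
#endif // QUEUED_RWLOCK_EXAMPLE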