/* rwsem.c: R/W semaphores: contention handling functions
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from arch/i386/kernel/semaphore.c
 */
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/module.h>
struct rwsem_waiter {
        struct list_head        list;
        struct task_struct      *task;
        unsigned int            flags;
#define RWSEM_WAITING_FOR_READ  0x00000001
#define RWSEM_WAITING_FOR_WRITE 0x00000002
};
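
/*
 * Each of these waiter blocks lives on the kernel stack of the task that is
 * sleeping (see rwsem_down_read_failed() and rwsem_down_write_failed()
 * below); nothing here is heap-allocated, so a block is only valid until
 * its owner resumes.
 */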
#if RWSEM_DEBUG
#undef rwsemtrace
void rwsemtrace(struct rw_semaphore *sem, const char *str)
{
        printk("sem=%p\n",sem);
        printk("(sem)=%08lx\n",sem->count);
        if (sem->debug)
                printk("[%d] %s({%08lx})\n",current->pid,str,sem->count);
}
#endif
/*
 * handle the lock being released whilst there are processes blocked on it
 * that can now run
 * - if we come here, then:
 *   - the 'active part' of the count (&0x0000ffff) reached zero but has been
 *     re-incremented
 *   - the 'waiting part' of the count (&0xffff0000) is negative (and will
 *     still be so)
 *   - there must be someone on the queue
 * - the spinlock must be held by the caller
 * - woken process blocks are discarded from the list after having flags
 *   zeroised
 */
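/*
 * For reference, assuming the 32-bit bias values from the i386
 * <asm/rwsem.h> (other architectures may lay the count out differently):
 *
 *      RWSEM_ACTIVE_BIAS       0x00000001    one active reader or writer
 *      RWSEM_ACTIVE_MASK       0x0000ffff
 *      RWSEM_WAITING_BIAS      (-0x00010000) one task waiting on the queue
 *
 * e.g. a count of 0xffff0001 (== RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 * means one active holder plus one sleeping waiter, and 0x00000000 means
 * the semaphore is entirely free.
 */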
static inline struct rw_semaphore *__rwsem_do_wake(struct rw_semaphore *sem)
{
        struct rwsem_waiter *waiter;
        struct list_head *next;
        signed long oldcount;
        int woken, loop;

        rwsemtrace(sem,"Entering __rwsem_do_wake");

        /* only wake someone up if we can transition the active part of the
         * count from 0 -> 1
         */
 try_again:
        oldcount = rwsem_atomic_update(RWSEM_ACTIVE_BIAS,sem) - RWSEM_ACTIVE_BIAS;
        if (oldcount & RWSEM_ACTIVE_MASK)
                goto undo;

        waiter = list_entry(sem->wait_list.next,struct rwsem_waiter,list);

        /* try to grant a single write lock if there's a writer at the front
         * of the queue
         * - note we leave the 'active part' of the count incremented by 1
         *   and the waiting part incremented by 0x00010000
         */
        if (!(waiter->flags & RWSEM_WAITING_FOR_WRITE))
                goto readers_only;

        list_del(&waiter->list);
        waiter->flags = 0;
        wake_up_process(waiter->task);
        goto out;
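
        /*
         * Worked example, assuming the i386 bias values sketched above: a
         * writer sleeping in rwsem_down_write_failed() has left
         * RWSEM_WAITING_BIAS in the count, so the RWSEM_ACTIVE_BIAS added
         * at try_again leaves RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS
         * (0xffff0001 if there are no other waiters), which is exactly the
         * count of a write-held semaphore.
         */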
        /* grant an infinite number of read locks to the readers at the front
         * of the queue
         * - note we increment the 'active part' of the count by the number
         *   of readers (less one for the activity decrement we've already
         *   done) before waking any processes up
         */
 readers_only:
        woken = 0;
        do {
                woken++;

                if (waiter->list.next==&sem->wait_list)
                        break;

                waiter = list_entry(waiter->list.next,struct rwsem_waiter,list);

        } while (waiter->flags & RWSEM_WAITING_FOR_READ);

        loop = woken;
        woken *= RWSEM_ACTIVE_BIAS-RWSEM_WAITING_BIAS;
        woken -= RWSEM_ACTIVE_BIAS;
        rwsem_atomic_add(woken,sem);
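
        /*
         * e.g. with the i386 values assumed above, waking three readers
         * gives woken = 3*(0x00000001 - (-0x00010000)) - 1 = 0x00030002:
         * the three RWSEM_WAITING_BIAS contributions are cancelled out,
         * three RWSEM_ACTIVE_BIAS units are added, less the one unit
         * already added at try_again.
         */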
        next = sem->wait_list.next;
        for (; loop>0; loop--) {
                waiter = list_entry(next,struct rwsem_waiter,list);
                next = waiter->list.next;
                waiter->flags = 0;
                wake_up_process(waiter->task);
        }

        sem->wait_list.next = next;
        next->prev = &sem->wait_list;

 out:
        rwsemtrace(sem,"Leaving __rwsem_do_wake");
        return sem;

        /* undo the change to count, but check for a transition 1->0 */
 undo:
        if (rwsem_atomic_update(-RWSEM_ACTIVE_BIAS,sem)!=0)
                goto out;
        goto try_again;
}
/*
 * wait for a lock to be granted
 */
static inline struct rw_semaphore *rwsem_down_failed_common(struct rw_semaphore *sem,
                                                            struct rwsem_waiter *waiter,
                                                            signed long adjustment)
{
        struct task_struct *tsk = current;
        signed long count;

        set_task_state(tsk,TASK_UNINTERRUPTIBLE);

        /* set up my own style of waitqueue */
        spin_lock(&sem->wait_lock);
        waiter->task = tsk;

        list_add_tail(&waiter->list,&sem->wait_list);

        /* note that we're now waiting on the lock, but no longer actively
         * locking */
        count = rwsem_atomic_update(adjustment,sem);

        /* if there are no longer active locks, wake the front queued
         * process(es) up
         * - it might even be this process, since the waker takes a more
         *   active part
         */
        if (!(count & RWSEM_ACTIVE_MASK))
                sem = __rwsem_do_wake(sem);

        spin_unlock(&sem->wait_lock);

        /* wait to be given the lock */
        for (;;) {
                if (!waiter->flags)
                        break;
                schedule();
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
        }

        tsk->state = TASK_RUNNING;

        return sem;
}
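
/*
 * The sleep/wake handshake above, in outline:
 *
 *      sleeper                                 waker (__rwsem_do_wake)
 *      -------                                 -----------------------
 *      waiter->flags = RWSEM_WAITING_FOR_x
 *      queue waiter, adjust count
 *      while (waiter->flags)                   waiter->flags = 0;
 *              schedule();                     wake_up_process(waiter->task);
 *
 * The zeroised flags, not the wakeup itself, are what tell the sleeper it
 * now holds the lock; wake_up_process() merely gets it running again.
 */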
/*
 * wait for the read lock to be granted
 */
struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        rwsemtrace(sem,"Entering rwsem_down_read_failed");

        waiter.flags = RWSEM_WAITING_FOR_READ;
        rwsem_down_failed_common(sem,&waiter,RWSEM_WAITING_BIAS-RWSEM_ACTIVE_BIAS);

        rwsemtrace(sem,"Leaving rwsem_down_read_failed");
        return sem;
}
/*
 * wait for the write lock to be granted
 */
struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem)
{
        struct rwsem_waiter waiter;

        rwsemtrace(sem,"Entering rwsem_down_write_failed");

        waiter.flags = RWSEM_WAITING_FOR_WRITE;
        rwsem_down_failed_common(sem,&waiter,-RWSEM_ACTIVE_BIAS);

        rwsemtrace(sem,"Leaving rwsem_down_write_failed");
        return sem;
}
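
/*
 * Both slow paths are only ever entered from the architecture fast paths
 * in <asm/rwsem.h>. As a rough sketch, assuming the i386 flavour (details
 * vary by architecture):
 *
 *      down_read:  atomically add RWSEM_ACTIVE_READ_BIAS to sem->count;
 *                  if the result went negative, waiters are queued, so
 *                  call rwsem_down_read_failed()
 *      down_write: atomically add RWSEM_ACTIVE_WRITE_BIAS; if the count
 *                  was not previously zero, the sem was contended, so
 *                  call rwsem_down_write_failed()
 */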
/*
 * handle waking up a waiter on the semaphore
 * - up_read/up_write has decremented the active part of the count if we
 *   come here
 */
struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
{
        rwsemtrace(sem,"Entering rwsem_wake");

        spin_lock(&sem->wait_lock);

        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem);

        spin_unlock(&sem->wait_lock);

        rwsemtrace(sem,"Leaving rwsem_wake");
        return sem;
}
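
/*
 * Correspondingly (still assuming the i386 fast paths), up_read() and
 * up_write() subtract their bias atomically and call rwsem_wake() only
 * when the result shows queued waiters (count negative) with no active
 * holders left to release the lock for them.
 */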
EXPORT_SYMBOL_NOVERS(rwsem_down_read_failed);
EXPORT_SYMBOL_NOVERS(rwsem_down_write_failed);
EXPORT_SYMBOL_NOVERS(rwsem_wake);
#if RWSEM_DEBUG
EXPORT_SYMBOL(rwsemtrace);
#endif