// SPDX-License-Identifier: BSD-2-Clause
/*
* Copyright (c) 2015-2016, Linaro Limited
*/
#include <compiler.h>
#include <kernel/spinlock.h>
#include <kernel/thread.h>
#include <kernel/wait_queue.h>
#include <optee_rpc_cmd.h>
#include <string.h>
#include <tee_api_defines.h>
#include <trace.h>
#include <types_ext.h>
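
/* Single spinlock serializing access to all wait queues in this file */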
static unsigned wq_spin_lock;
void wq_init(struct wait_queue *wq)
{
*wq = (struct wait_queue)WAIT_QUEUE_INITIALIZER;
}
/*
* Note: this function is weak just to make it possible to exclude it from
* the unpaged area.
*/
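/*
 * Performs the OPTEE_RPC_CMD_WAIT_QUEUE RPC into the normal world to
 * either put the calling thread to sleep or to wake the sleeping thread
 * identified by @id.
 */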
void __weak __wq_rpc(uint32_t func, int id, const void *sync_obj __maybe_unused,
int owner __maybe_unused, const char *fname,
int lineno __maybe_unused)
{
uint32_t ret;
const char *cmd_str __maybe_unused =
func == OPTEE_RPC_WAIT_QUEUE_SLEEP ? "sleep" : "wake ";
if (fname)
DMSG("%s thread %u %p %d %s:%d", cmd_str, id,
sync_obj, owner, fname, lineno);
else
DMSG("%s thread %u %p %d", cmd_str, id, sync_obj, owner);
struct thread_param params = THREAD_PARAM_VALUE(IN, func, id, 0);
	ret = thread_rpc_cmd(OPTEE_RPC_CMD_WAIT_QUEUE, 1, &params);
if (ret != TEE_SUCCESS)
DMSG("%s thread %u ret 0x%x", cmd_str, id, ret);
}
static void slist_add_tail(struct wait_queue *wq, struct wait_queue_elem *wqe)
{
struct wait_queue_elem *wqe_iter;
/* Add elem to end of wait queue */
wqe_iter = SLIST_FIRST(wq);
if (wqe_iter) {
while (SLIST_NEXT(wqe_iter, link))
wqe_iter = SLIST_NEXT(wqe_iter, link);
SLIST_INSERT_AFTER(wqe_iter, wqe, link);
} else
SLIST_INSERT_HEAD(wq, wqe, link);
}
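
/*
 * Enqueues the calling thread at the tail of @wq, so waiters are woken
 * in FIFO order. @cv is non-NULL when the caller is waiting on a
 * condvar; such elements are skipped by wq_wake_next() until promoted
 * with wq_promote_condvar().
 */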
void wq_wait_init_condvar(struct wait_queue *wq, struct wait_queue_elem *wqe,
struct condvar *cv, bool wait_read)
{
uint32_t old_itr_status;
wqe->handle = thread_get_id();
wqe->done = false;
wqe->wait_read = wait_read;
wqe->cv = cv;
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
slist_add_tail(wq, wqe);
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}
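
/*
 * Sleeps via RPC until a waker has flagged this element as done. The
 * loop guards against spurious wakeups: if done is still false when the
 * RPC returns, the thread goes back to sleep.
 */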
void wq_wait_final(struct wait_queue *wq, struct wait_queue_elem *wqe,
const void *sync_obj, int owner, const char *fname,
int lineno)
{
uint32_t old_itr_status;
unsigned done;
do {
__wq_rpc(OPTEE_RPC_WAIT_QUEUE_SLEEP, wqe->handle,
sync_obj, owner, fname, lineno);
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
done = wqe->done;
if (done)
SLIST_REMOVE(wq, wqe, wait_queue_elem, link);
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
} while (!done);
}
void wq_wake_next(struct wait_queue *wq, const void *sync_obj,
const char *fname, int lineno)
{
uint32_t old_itr_status;
struct wait_queue_elem *wqe;
int handle = -1;
bool do_wakeup = false;
bool wake_type_assigned = false;
bool wake_read = false; /* avoid gcc warning */
	/*
	 * If the first pending waiter wants to read, wake every queued
	 * element with wait_read set. Otherwise wake only the first
	 * element which isn't already done. Condvar waiters (cv set)
	 * are skipped until promoted.
	 */
while (true) {
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
SLIST_FOREACH(wqe, wq, link) {
if (wqe->cv)
continue;
if (wqe->done)
continue;
if (!wake_type_assigned) {
wake_read = wqe->wait_read;
wake_type_assigned = true;
}
if (wqe->wait_read != wake_read)
continue;
wqe->done = true;
handle = wqe->handle;
do_wakeup = true;
break;
}
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
if (do_wakeup)
__wq_rpc(OPTEE_RPC_WAIT_QUEUE_WAKEUP, handle,
sync_obj, MUTEX_OWNER_ID_MUTEX_UNLOCK,
fname, lineno);
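		/*
		 * A reader wakeup may be followed by more reader
		 * wakeups, so loop again; waking a writer (or finding
		 * no one left to wake) ends the loop.
		 */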
if (!do_wakeup || !wake_read)
break;
do_wakeup = false;
}
}
void wq_promote_condvar(struct wait_queue *wq, struct condvar *cv,
bool only_one, const void *sync_obj __unused,
const char *fname, int lineno __maybe_unused)
{
uint32_t old_itr_status;
struct wait_queue_elem *wqe;
if (!cv)
return;
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
	/*
	 * Find condvar waiter(s) and promote each to an active waiter.
	 * This is a bit unfair to any other active waiters, as a
	 * condvar waiter is added to the queue when it starts waiting
	 * for the condvar.
	 */
SLIST_FOREACH(wqe, wq, link) {
if (wqe->cv == cv) {
if (fname)
FMSG("promote thread %u %p %s:%d",
wqe->handle, (void *)cv->m, fname, lineno);
else
FMSG("promote thread %u %p",
wqe->handle, (void *)cv->m);
wqe->cv = NULL;
if (only_one)
break;
}
}
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
}
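
/* Returns true if any element on @wq is waiting for @cv */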
bool wq_have_condvar(struct wait_queue *wq, struct condvar *cv)
{
uint32_t old_itr_status;
struct wait_queue_elem *wqe;
bool rc = false;
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
SLIST_FOREACH(wqe, wq, link) {
if (wqe->cv == cv) {
rc = true;
break;
}
}
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
return rc;
}
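
/* Returns true if @wq has no queued elements */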
bool wq_is_empty(struct wait_queue *wq)
{
uint32_t old_itr_status;
bool ret;
old_itr_status = cpu_spin_lock_xsave(&wq_spin_lock);
ret = SLIST_EMPTY(wq);
cpu_spin_unlock_xrestore(&wq_spin_lock, old_itr_status);
return ret;
}
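
/*
 * Usage sketch (informative only, not part of the original file): how a
 * blocking primitive built on this API is assumed to use it. "obj" and
 * "owner" below are illustrative placeholders for the synchronization
 * object being traced and its owning thread.
 *
 *	struct wait_queue wq;
 *	struct wait_queue_elem wqe;
 *
 *	wq_init(&wq);
 *
 *	// Waiter: enqueue first (NULL cv for a plain waiter), then
 *	// release the object's own lock and sleep until flagged done.
 *	wq_wait_init_condvar(&wq, &wqe, NULL, false);
 *	wq_wait_final(&wq, &wqe, obj, owner, __func__, __LINE__);
 *
 *	// Releaser: wake the next queued waiter.
 *	wq_wake_next(&wq, obj, __func__, __LINE__);
 */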