I'll have more flags shortly.
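For reference, here is a minimal standalone sketch (not part of this patch, and
not Akaros code) of the pattern the conversion moves to: one flags word, tested
through the is_ktask() accessor, so later flags can be OR'd in without touching
call sites. KTH_EXAMPLE_FLAG is an invented placeholder, not a real flag.

#include <stdbool.h>
#include <stdio.h>

#define KTH_IS_KTASK     (1 << 0)
#define KTH_EXAMPLE_FLAG (1 << 1)  /* invented placeholder for a future flag */

struct kthread {
	int flags;
};

static inline bool is_ktask(struct kthread *kthread)
{
	return kthread->flags & KTH_IS_KTASK;
}

int main(void)
{
	struct kthread kth = { .flags = 0 };

	kth.flags |= KTH_IS_KTASK;      /* mark as ktask, as PRKM does */
	kth.flags |= KTH_EXAMPLE_FLAG;  /* a second flag coexists in the same word */
	printf("is_ktask: %d\n", is_ktask(&kth));   /* prints 1 */
	kth.flags &= ~KTH_IS_KTASK;     /* clear just this flag, as smp_idle does */
	printf("is_ktask: %d\n", is_ktask(&kth));   /* prints 0 */
	kth.flags = 0;                  /* full reset, as kthread reuse does */
	return 0;
}

The accessor keeps call sites from reading flags directly, which is why the
hunks below convert every is_ktask read to is_ktask().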
Signed-off-by: Barret Rhoden <brho@cs.berkeley.edu>
#define GENBUF_SZ 128 /* plan9 uses this as a scratch space, per syscall */
+#define KTH_IS_KTASK (1 << 0)
+
/* This captures the essence of a kernel context that we want to suspend. When
* a kthread is running, we make sure its stacktop is the default kernel stack,
* meaning it will receive the interrupts from userspace. */
struct errbuf *errbuf;
TAILQ_ENTRY(kthread) link;
/* ID, other shit, etc */
- bool is_ktask; /* default is FALSE */
+ int flags;
char *name;
char generic_buf[GENBUF_SZ];
struct systrace_record *trace;
void kthread_yield(void);
void kthread_usleep(uint64_t usec);
void ktask(char *name, void (*fn)(void*), void *arg);
+
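+/* Ktasks are kthreads doing kernel work that isn't backing a user process. */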
+static inline bool is_ktask(struct kthread *kthread)
+{
+ return kthread->flags & KTH_IS_KTASK;
+}
+
/* Debugging */
void check_poison(char *msg);
printk("IRQ :");
} else {
assert(pcpui->cur_kthread);
- if (pcpui->cur_kthread->is_ktask) {
+ if (is_ktask(pcpui->cur_kthread)) {
printk("%10s:", pcpui->cur_kthread->name);
} else {
printk("PID %3d :", pcpui->cur_proc ? pcpui->cur_proc->pid : 0);
void *arg = (void*)a1;
char *name = (char*)a2;
struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
- assert(pcpui->cur_kthread->is_ktask);
+ assert(is_ktask(pcpui->cur_kthread));
pcpui->cur_kthread->name = name;
/* There are some rendezs out there that aren't wrapped. Though no one can
* abort them. Yet. */
new_kthread = pcpui->spare;
new_stacktop = new_kthread->stacktop;
pcpui->spare = 0;
- /* Based on how we set is_ktask (in PRKM), we'll usually have a spare
- * with is_ktask set, even though the default setting is off. The
- * reason is that the launching of blocked kthreads also uses PRKM, and
- * that KMSG (__launch_kthread) doesn't return. Thus the soon-to-be
- * spare kthread, that is launching another, has is_ktask set. */
- new_kthread->is_ktask = FALSE;
+ /* Based on how we set KTH_IS_KTASK (in PRKM), we'll usually have a
+ * spare with KTH_IS_KTASK set, even though the default setting is off.
+ * The reason is that the launching of blocked kthreads also uses PRKM,
+ * and that KMSG (__launch_kthread) doesn't return. Thus the soon-to-be
+ * spare kthread, the one launching another, still has KTH_IS_KTASK set
+ * in its flags. */
+ new_kthread->flags = 0;
new_kthread->proc = 0;
new_kthread->name = 0;
} else {
* we want the core (which could be a vcore) to stay in the context too. In
* the future, we could check owning_proc. If it isn't set, we could leave
* the process context and transfer the refcnt to kthread->proc. */
- if (!kthread->is_ktask) {
+ if (!is_ktask(kthread)) {
kthread->proc = current;
if (kthread->proc) /* still could be none, like during init */
proc_incref(kthread->proc, 1);
cle->cv = cv;
cle->kthread = pcpui->cur_kthread;
/* Could be a ktask. Can build in support for aborting these later */
- if (cle->kthread->is_ktask) {
+ if (is_ktask(cle->kthread)) {
cle->sysc = 0;
return;
}
* CV lock. So if we hold the CV lock, we can deadlock (circular dependency). */
void dereg_abortable_cv(struct cv_lookup_elm *cle)
{
- if (cle->kthread->is_ktask)
+ if (is_ktask(cle->kthread))
return;
assert(cle->proc);
spin_lock_irqsave(&cle->proc->abort_list_lock);
* this with things for ktasks in the future. */
bool should_abort(struct cv_lookup_elm *cle)
{
- if (cle->kthread->is_ktask)
+ if (is_ktask(cle->kthread))
return FALSE;
if (cle->proc && (cle->proc->state == PROC_DYING))
return TRUE;
assert(!irq_is_enabled());
/* Should never have ktask still set. If we do, future syscalls could try
* to block later and lose track of our address space. */
- assert(!pcpui->cur_kthread->is_ktask);
+ assert(!is_ktask(pcpui->cur_kthread));
__set_proc_current(p);
/* Clear the current_ctx, since it is no longer used */
current_ctx = 0; /* TODO: might not need this... */
{
struct per_cpu_info *pcpui = &per_cpu_info[core_id()];
clear_rkmsg(pcpui);
- pcpui->cur_kthread->is_ktask = FALSE;
+ pcpui->cur_kthread->flags &= ~KTH_IS_KTASK;
enable_irq(); /* one-shot change to get any IRQs before we halt later */
while (1) {
disable_irq();
/* Treat the startup threads as ktasks. This will last until smp_idle when
* they clear it, either in anticipation of being a user-backing kthread or
* to handle an RKM. */
- kthread->is_ktask = TRUE;
+ kthread->flags |= KTH_IS_KTASK;
per_cpu_info[coreid].spare = 0;
/* Init relevant lists */
spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
* it's not running on behalf of a process, and we're actually spawning
* a kernel task. While we do have a syscall that does work in an RKM
* (change_to), it's not really the rest of the syscall context. */
- pcpui->cur_kthread->is_ktask = TRUE;
+ pcpui->cur_kthread->flags |= KTH_IS_KTASK;
pcpui_trace_kmsg(pcpui, (uintptr_t)msg_cp.pc);
msg_cp.pc(msg_cp.srcid, msg_cp.arg0, msg_cp.arg1, msg_cp.arg2);
/* And if we make it back, be sure to unset this. If we never return, the
 * next pass through smp_idle will clear the flag. For an example of an RKM
 * that does this, check out the
* monitor->mon_bin_run. Finally, if the kthread gets swapped out of
* pcpui, such as in __launch_kthread(), the next time the kthread is
- * reused, is_ktask will be reset. */
- pcpui->cur_kthread->is_ktask = FALSE;
+ * reused, KTH_IS_KTASK will be reset. */
+ pcpui->cur_kthread->flags &= ~KTH_IS_KTASK;
/* If we aren't still in early RKM, it is because the KMSG blocked
* (thus leaving early RKM, finishing in default context) and then
* returned. This is a 'detached' RKM. Must idle in this scenario,