/*!\file sched.h
 * \brief Scheduler interface header.
 */

#ifndef _XENO_NUCLEUS_SCHED_H
#define _XENO_NUCLEUS_SCHED_H

/*! \addtogroup sched
 *@{*/

#include <nucleus/thread.h>

#if defined(__KERNEL__) || defined(__XENO_SIM__)

#include <nucleus/schedqueue.h>
#include <nucleus/sched-tp.h>
#include <nucleus/sched-sporadic.h>

/* Sched status flags */
#define XNKCOUT		0x80000000	/* Sched callout context */
#define XNHTICK		0x40000000	/* Host tick pending */
#define XNRPICK		0x20000000	/* Check RPI state */
#define XNINTCK		0x10000000	/* In master tick handler context */
#define XNINIRQ		0x08000000	/* In IRQ handling context */
#define XNSWLOCK	0x04000000	/* In context switch */
#define XNRESCHED	0x02000000	/* Needs rescheduling */
#define XNHDEFER	0x01000000	/* Host tick deferred */
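
/*
 * Usage sketch (illustrative, not part of the original header): these
 * bits live in xnsched->status and are handled with the nucleus
 * testbits/setbits/clrbits helpers, with interrupts hard-disabled by
 * the caller, e.g.:
 *
 *	if (testbits(sched->status, XNINIRQ))
 *		return;
 *	setbits(sched->status, XNHTICK);
 *	clrbits(sched->status, XNHDEFER);
 */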

struct xnsched_rt {
	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
#ifdef CONFIG_XENO_OPT_PRIOCPL
	xnsched_queue_t relaxed;	/*!< Relaxed thread queue. */
#endif
};

/*!
 * \brief Scheduling information structure.
 */
typedef struct xnsched {

	xnflags_t status;		/*!< Scheduler specific status bitmask. */
	int cpu;			/*!< Owner CPU id. */
	struct xnthread *curr;		/*!< Current thread. */
	xnarch_cpumask_t resched;	/*!< Mask of CPUs needing rescheduling. */

	struct xnsched_rt rt;		/*!< Context of built-in real-time class. */
#ifdef CONFIG_XENO_OPT_SCHED_TP
	struct xnsched_tp tp;		/*!< Context of TP class. */
#endif
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	struct xnsched_sporadic pss;	/*!< Context of sporadic scheduling class. */
#endif

	xntimerq_t timerqueue;		/*!< Core timer queue. */
	volatile unsigned inesting;	/*!< Interrupt nesting level. */
	struct xntimer htimer;		/*!< Host timer. */
	struct xnthread *zombie;	/*!< Zombie thread pending finalization. */
	struct xnthread rootcb;		/*!< Root thread control block. */

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH
	struct xnthread *last;		/*!< Last switched-out thread. */
#endif

#ifdef CONFIG_XENO_HW_FPU
	struct xnthread *fpuholder;	/*!< Thread owning the current FPU context. */
#endif

#ifdef CONFIG_XENO_OPT_WATCHDOG
	struct xntimer wdtimer;		/*!< Watchdog timer object. */
	int wdcount;			/*!< Watchdog tick count. */
#endif

#ifdef CONFIG_XENO_OPT_STATS
	xnticks_t last_account_switch;	/*!< Last account switch date (ticks). */
	xnstat_exectime_t *current_account;	/*!< Currently active account. */
#endif

#ifdef CONFIG_XENO_OPT_PRIOCPL
	DECLARE_XNLOCK(rpilock);	/*!< RPI lock. */
#endif

#ifdef CONFIG_XENO_OPT_PERVASIVE
	struct task_struct *gatekeeper;	/*!< Gatekeeper kernel thread. */
	wait_queue_head_t gkwaitq;	/*!< Gatekeeper wait queue. */
	struct linux_semaphore gksync;	/*!< Gatekeeper sync semaphore. */
	struct xnthread *gktarget;	/*!< Thread awaiting migration to primary mode. */
#endif

} xnsched_t;
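
/*
 * Access sketch (illustrative): a scheduler slot is per-CPU and is
 * normally obtained through xnpod_current_sched() with interrupts
 * hard-disabled, e.g.:
 *
 *	struct xnsched *sched = xnpod_current_sched();
 *	struct xnthread *curr = sched->curr;
 *
 *	if (xnsched_resched_p(sched))
 *		xnpod_schedule();
 */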

union xnsched_policy_param;

struct xnsched_class {

	void (*sched_init)(struct xnsched *sched);
	void (*sched_enqueue)(struct xnthread *thread);
	void (*sched_dequeue)(struct xnthread *thread);
	void (*sched_requeue)(struct xnthread *thread);
	struct xnthread *(*sched_pick)(struct xnsched *sched);
	void (*sched_tick)(struct xnthread *curr);
	void (*sched_rotate)(struct xnsched *sched,
			     const union xnsched_policy_param *p);
	void (*sched_migrate)(struct xnthread *thread,
			      struct xnsched *sched);
	void (*sched_setparam)(struct xnthread *thread,
			       const union xnsched_policy_param *p);
	void (*sched_getparam)(struct xnthread *thread,
			       union xnsched_policy_param *p);
	void (*sched_trackprio)(struct xnthread *thread,
				const union xnsched_policy_param *p);
	int (*sched_declare)(struct xnthread *thread,
			     const union xnsched_policy_param *p);
	void (*sched_forget)(struct xnthread *thread);
#ifdef CONFIG_XENO_OPT_PRIOCPL
	struct xnthread *(*sched_push_rpi)(struct xnsched *sched,
					   struct xnthread *thread);
	void (*sched_pop_rpi)(struct xnthread *thread);
	struct xnthread *(*sched_peek_rpi)(struct xnsched *sched);
	void (*sched_suspend_rpi)(struct xnthread *thread);
	void (*sched_resume_rpi)(struct xnthread *thread);
#endif
#ifdef CONFIG_PROC_FS
	void (*sched_init_proc)(struct proc_dir_entry *root);
	void (*sched_cleanup_proc)(struct proc_dir_entry *root);
	struct proc_dir_entry *proc;
#endif
	int nthreads;			/* Number of threads attached to this class. */
	struct xnsched_class *next;	/* Link in the class chain. */
	int weight;			/* Weight in the global priority scale. */
	const char *name;		/* Class name. */
};
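
/*
 * A scheduling class is a statically defined singleton wiring these
 * handlers together. Abridged sketch (names and field values are
 * illustrative, not a stock class definition):
 *
 *	struct xnsched_class xnsched_class_example = {
 *		.sched_init	= example_init,
 *		.sched_enqueue	= example_enqueue,
 *		.sched_dequeue	= example_dequeue,
 *		.sched_requeue	= example_requeue,
 *		.sched_pick	= example_pick,
 *		.weight		= XNSCHED_CLASS_WEIGHT(1),
 *		.name		= "example",
 *	};
 */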

#define XNSCHED_CLASS_MAX_THREADS	32768
#define XNSCHED_CLASS_WEIGHT(n)		((n) * XNSCHED_CLASS_MAX_THREADS)
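
/*
 * e.g. XNSCHED_CLASS_WEIGHT(1) == 32768, so a class of weight n maps
 * thread priority p to the globally ordered value n * 32768 + p in
 * xnsched_weighted_bprio/cprio(); with per-class priorities kept
 * below 32768, the bands of distinct classes never overlap.
 */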

/* Placeholder for current thread priority */
#define XNSCHED_RUNPRIO		0x80000000

#ifdef CONFIG_SMP
#define xnsched_cpu(__sched__)	((__sched__)->cpu)
#else /* !CONFIG_SMP */
#define xnsched_cpu(__sched__)	({ (void)(__sched__); 0; })
#endif /* CONFIG_SMP */

/* Test the resched flag of the given scheduler. */
static inline int xnsched_resched_p(struct xnsched *sched)
{
	return testbits(sched->status, XNRESCHED);
}

/* Test the self resched flag of the current scheduler. */
static inline int xnsched_self_resched_p(struct xnsched *sched)
{
	return testbits(sched->status, XNRESCHED);
}

/* Set the self resched flag of the given scheduler. */
#define xnsched_set_self_resched(__sched__) do {	\
	setbits((__sched__)->status, XNRESCHED);	\
} while (0)

/* Set the resched flag of the given, possibly remote, scheduler. */
#define xnsched_set_resched(__sched__) do {				\
	xnsched_t *current_sched = xnpod_current_sched();		\
	setbits(current_sched->status, XNRESCHED);			\
	if (current_sched != (__sched__)) {				\
		xnarch_cpu_set(xnsched_cpu(__sched__), current_sched->resched); \
		setbits((__sched__)->status, XNRESCHED);		\
	}								\
} while (0)
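
/*
 * Typical pattern (sketch): flag the scheduler a thread runs on, then
 * let the next rescheduling point propagate the request, via IPI when
 * the target CPU differs from the local one:
 *
 *	xnsched_set_resched(thread->sched);
 *	xnpod_schedule();
 */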

void xnsched_zombie_hooks(struct xnthread *thread);

void __xnsched_finalize_zombie(struct xnsched *sched);

static inline void xnsched_finalize_zombie(struct xnsched *sched)
{
	if (sched->zombie)
		__xnsched_finalize_zombie(sched);
}

#ifdef CONFIG_XENO_HW_UNLOCKED_SWITCH

struct xnsched *xnsched_finish_unlocked_switch(struct xnsched *sched);

#define xnsched_resched_after_unlocked_switch() xnpod_schedule()

static inline
int xnsched_maybe_resched_after_unlocked_switch(struct xnsched *sched)
{
	return testbits(sched->status, XNRESCHED);
}

#else /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_SMP
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   xnpod_current_sched(); })
#else /* !CONFIG_SMP */
#define xnsched_finish_unlocked_switch(__sched__)	\
	({ XENO_BUGON(NUCLEUS, !irqs_disabled_hw());	\
	   (__sched__); })
#endif /* !CONFIG_SMP */

#define xnsched_resched_after_unlocked_switch()	do { } while (0)

#define xnsched_maybe_resched_after_unlocked_switch(sched)	\
	({ (void)(sched); 0; })

#endif /* !CONFIG_XENO_HW_UNLOCKED_SWITCH */

#ifdef CONFIG_XENO_OPT_WATCHDOG
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
	sched->wdcount = 0;
}
#else /* !CONFIG_XENO_OPT_WATCHDOG */
static inline void xnsched_reset_watchdog(struct xnsched *sched)
{
}
#endif /* CONFIG_XENO_OPT_WATCHDOG */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

void xnsched_init_proc(void);

void xnsched_cleanup_proc(void);

void xnsched_register_classes(void);

void xnsched_init(struct xnsched *sched, int cpu);

void xnsched_destroy(struct xnsched *sched);

struct xnthread *xnsched_pick_next(struct xnsched *sched);

void xnsched_putback(struct xnthread *thread);

int xnsched_set_policy(struct xnthread *thread,
		       struct xnsched_class *sched_class,
		       const union xnsched_policy_param *p);

void xnsched_track_policy(struct xnthread *thread,
			  struct xnthread *target);

void xnsched_migrate(struct xnthread *thread,
		     struct xnsched *sched);

void xnsched_migrate_passive(struct xnthread *thread,
			     struct xnsched *sched);

/*!
 * \brief Rotate a scheduler runqueue.
 *
 * The specified scheduling class is requested to rotate its runqueue
 * for the given scheduler. Passing XNSCHED_RUNPRIO through the
 * scheduling parameter asks for rotation on the priority level of the
 * currently running thread.
 */
static inline void xnsched_rotate(struct xnsched *sched,
				  struct xnsched_class *sched_class,
				  const union xnsched_policy_param *sched_param)
{
	sched_class->sched_rotate(sched, sched_param);
}
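
/*
 * Example call (sketch; assumes the RT member of the parameter union
 * is named rt.prio, as defined by the stock sched-rt class):
 *
 *	union xnsched_policy_param param;
 *	param.rt.prio = XNSCHED_RUNPRIO;
 *	xnsched_rotate(sched, &xnsched_class_rt, &param);
 */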

static inline int xnsched_init_tcb(struct xnthread *thread)
{
	int ret = 0;

	xnsched_idle_init_tcb(thread);
	xnsched_rt_init_tcb(thread);
#ifdef CONFIG_XENO_OPT_SCHED_TP
	ret = xnsched_tp_init_tcb(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_TP */
#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
	ret = xnsched_sporadic_init_tcb(thread);
	if (ret)
		return ret;
#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
	return ret;
}

static inline int xnsched_root_priority(struct xnsched *sched)
{
	return sched->rootcb.cprio;
}

static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
{
	return sched->rootcb.sched_class;
}

static inline void xnsched_tick(struct xnthread *curr, struct xntbase *tbase)
{
	struct xnsched_class *sched_class = curr->sched_class;
	/*
	 * A thread that undergoes round-robin scheduling only
	 * consumes its time slice when it runs within its own
	 * scheduling class, which excludes temporary PIP boosts, and
	 * does not hold the scheduler lock.
	 */
	if (xnthread_time_base(curr) == tbase &&
	    sched_class != &xnsched_class_idle &&
	    sched_class == curr->base_class &&
	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNLOCK|XNRRB) == XNRRB)
		sched_class->sched_tick(curr);
}
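
/*
 * Caller sketch (illustrative): the clock tick path charges the
 * running thread against the time base the tick belongs to, e.g.:
 *
 *	xnsched_tick(sched->curr, &nktbase);
 *
 * (nktbase being the nucleus master time base; assumption based on
 * the stock nucleus.)
 */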

#ifdef CONFIG_XENO_OPT_SCHED_CLASSES

static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		sched_class->sched_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
	return thread->bprio + thread->sched_class->weight;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
	return thread->cprio + thread->sched_class->weight;
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	thread->sched_class->sched_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	thread->sched_class->sched_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	thread->sched_class->sched_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->base_class;

	--sched_class->nthreads;

	if (sched_class->sched_forget)
		sched_class->sched_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
						struct xnthread *thread)
{
	return thread->sched_class->sched_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
	thread->sched_class->sched_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class->sched_suspend_rpi)
		sched_class->sched_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class->sched_resume_rpi)
		sched_class->sched_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */

/*
 * If only the RT and IDLE scheduling classes are compiled in, we can
 * fully inline common helpers for dealing with those.
 */
static inline void xnsched_enqueue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_enqueue(thread);
}

static inline void xnsched_dequeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_dequeue(thread);
}

static inline void xnsched_requeue(struct xnthread *thread)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_requeue(thread);
}

static inline int xnsched_weighted_bprio(struct xnthread *thread)
{
	return thread->bprio;
}

static inline int xnsched_weighted_cprio(struct xnthread *thread)
{
	return thread->cprio;
}

static inline void xnsched_setparam(struct xnthread *thread,
				    const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_setparam(thread, p);
	else
		__xnsched_idle_setparam(thread, p);
}

static inline void xnsched_getparam(struct xnthread *thread,
				    union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_getparam(thread, p);
	else
		__xnsched_idle_getparam(thread, p);
}

static inline void xnsched_trackprio(struct xnthread *thread,
				     const union xnsched_policy_param *p)
{
	struct xnsched_class *sched_class = thread->sched_class;

	if (sched_class != &xnsched_class_idle)
		__xnsched_rt_trackprio(thread, p);
	else
		__xnsched_idle_trackprio(thread, p);
}

static inline void xnsched_forget(struct xnthread *thread)
{
	--thread->base_class->nthreads;
	__xnsched_rt_forget(thread);
}

#ifdef CONFIG_XENO_OPT_PRIOCPL

static inline struct xnthread *xnsched_push_rpi(struct xnsched *sched,
						struct xnthread *thread)
{
	return __xnsched_rt_push_rpi(sched, thread);
}

static inline void xnsched_pop_rpi(struct xnthread *thread)
{
	__xnsched_rt_pop_rpi(thread);
}

static inline void xnsched_suspend_rpi(struct xnthread *thread)
{
	__xnsched_rt_suspend_rpi(thread);
}

static inline void xnsched_resume_rpi(struct xnthread *thread)
{
	__xnsched_rt_resume_rpi(thread);
}

#endif /* CONFIG_XENO_OPT_PRIOCPL */

#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */

void xnsched_renice_root(struct xnsched *sched,
			 struct xnthread *target);

struct xnthread *xnsched_peek_rpi(struct xnsched *sched);

#else /* !(__KERNEL__ || __XENO_SIM__) */

#include <nucleus/sched-idle.h>
#include <nucleus/sched-rt.h>

#endif /* __KERNEL__ || __XENO_SIM__ */

/*@}*/

#endif /* !_XENO_NUCLEUS_SCHED_H */