RTEMS 5.2
All Data Structures Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
schedulerimpl.h
Go to the documentation of this file.
1
12/*
13 * Copyright (C) 2010 Gedare Bloom.
14 * Copyright (C) 2011 On-Line Applications Research Corporation (OAR).
15 * Copyright (c) 2014, 2017 embedded brains GmbH
16 *
17 * The license and distribution terms for this file may be
18 * found in the file LICENSE in this distribution or at
19 * http://www.rtems.org/license/LICENSE.
20 */
21
22#ifndef _RTEMS_SCORE_SCHEDULERIMPL_H
23#define _RTEMS_SCORE_SCHEDULERIMPL_H
24
26#include <rtems/score/assert.h>
28#include <rtems/score/smpimpl.h>
29#include <rtems/score/status.h>
31
32#ifdef __cplusplus
33extern "C" {
34#endif
35
50
59 const Scheduler_Control *scheduler
60)
61{
62 return scheduler->context;
63}
64
73 const Per_CPU_Control *cpu
74)
75{
76#if defined(RTEMS_SMP)
77 return cpu->Scheduler.control;
78#else
79 (void) cpu;
80 return &_Scheduler_Table[ 0 ];
81#endif
82}
83
93 const Scheduler_Control *scheduler,
94 ISR_lock_Context *lock_context
95)
96{
97#if defined(RTEMS_SMP)
99
100 context = _Scheduler_Get_context( scheduler );
101 _ISR_lock_Acquire( &context->Lock, lock_context );
102#else
103 (void) scheduler;
104 (void) lock_context;
105#endif
106}
107
117 const Scheduler_Control *scheduler,
118 ISR_lock_Context *lock_context
119)
120{
121#if defined(RTEMS_SMP)
123
124 context = _Scheduler_Get_context( scheduler );
125 _ISR_lock_Release( &context->Lock, lock_context );
126#else
127 (void) scheduler;
128 (void) lock_context;
129#endif
130}
131
#if defined(RTEMS_SMP)
/**
 * @brief Indicates whether the scheduler supports the non-preempt mode.
 *
 * @param scheduler The scheduler instance.
 *
 * @return True if the non-preempt mode is supported, otherwise false.
 */
RTEMS_INLINE_ROUTINE bool _Scheduler_Is_non_preempt_mode_supported(
  const Scheduler_Control *scheduler
)
{
  return scheduler->is_non_preempt_mode_supported;
}
#endif
149
#if defined(RTEMS_SMP)
void _Scheduler_Request_ask_for_help( Thread_Control *the_thread );

/**
 * @brief Registers an ask-for-help request if necessary.
 *
 * The actual ask-for-help operation is carried out during
 * _Thread_Do_dispatch() on a processor related to the thread.  This yields
 * a better separation of scheduler instances.  A thread of one scheduler
 * instance should not be forced to carry out too much work for threads on
 * other scheduler instances.
 *
 * @param the_thread The thread in need for help; the caller must own its
 *   thread state lock (asserted below).
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Ask_for_help( Thread_Control *the_thread )
{
  _Assert( _Thread_State_is_owner( the_thread ) );

  /* Only request help if some other scheduler node could actually help. */
  if ( the_thread->Scheduler.helping_nodes > 0 ) {
    _Scheduler_Request_ask_for_help( the_thread );
  }
}
#endif
173
182/*
183 * Passing the Scheduler_Control* to these functions allows for multiple
184 * schedulers to exist simultaneously, which could be useful on an SMP
185 * system. Then remote schedulers may be accessible. How to protect such
186 * accesses remains an open problem.
187 */
188
198{
199 const Scheduler_Control *scheduler;
200 ISR_lock_Context lock_context;
201
202 scheduler = _Thread_Scheduler_get_home( the_thread );
203 _Scheduler_Acquire_critical( scheduler, &lock_context );
204
205 ( *scheduler->Operations.schedule )( scheduler, the_thread );
206
207 _Scheduler_Release_critical( scheduler, &lock_context );
208}
209
219{
220 const Scheduler_Control *scheduler;
221 ISR_lock_Context lock_context;
222
223 scheduler = _Thread_Scheduler_get_home( the_thread );
224 _Scheduler_Acquire_critical( scheduler, &lock_context );
225 ( *scheduler->Operations.yield )(
226 scheduler,
227 the_thread,
229 );
230 _Scheduler_Release_critical( scheduler, &lock_context );
231}
232
244{
245#if defined(RTEMS_SMP)
246 Chain_Node *node;
247 const Chain_Node *tail;
248 Scheduler_Node *scheduler_node;
249 const Scheduler_Control *scheduler;
250 ISR_lock_Context lock_context;
251
252 node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
253 tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
254
255 scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
256 scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
257
258 _Scheduler_Acquire_critical( scheduler, &lock_context );
259 ( *scheduler->Operations.block )(
260 scheduler,
261 the_thread,
262 scheduler_node
263 );
264 _Scheduler_Release_critical( scheduler, &lock_context );
265
266 node = _Chain_Next( node );
267
268 while ( node != tail ) {
269 scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
270 scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
271
272 _Scheduler_Acquire_critical( scheduler, &lock_context );
273 ( *scheduler->Operations.withdraw_node )(
274 scheduler,
275 the_thread,
276 scheduler_node,
277 THREAD_SCHEDULER_BLOCKED
278 );
279 _Scheduler_Release_critical( scheduler, &lock_context );
280
281 node = _Chain_Next( node );
282 }
283#else
284 const Scheduler_Control *scheduler;
285
286 scheduler = _Thread_Scheduler_get_home( the_thread );
287 ( *scheduler->Operations.block )(
288 scheduler,
289 the_thread,
291 );
292#endif
293}
294
306{
307 Scheduler_Node *scheduler_node;
308 const Scheduler_Control *scheduler;
309 ISR_lock_Context lock_context;
310
311#if defined(RTEMS_SMP)
312 scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE(
313 _Chain_First( &the_thread->Scheduler.Scheduler_nodes )
314 );
315 scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
316#else
317 scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
318 scheduler = _Thread_Scheduler_get_home( the_thread );
319#endif
320
321 _Scheduler_Acquire_critical( scheduler, &lock_context );
322 ( *scheduler->Operations.unblock )( scheduler, the_thread, scheduler_node );
323 _Scheduler_Release_critical( scheduler, &lock_context );
324}
325
341{
342#if defined(RTEMS_SMP)
343 Chain_Node *node;
344 const Chain_Node *tail;
345
346 _Thread_Scheduler_process_requests( the_thread );
347
348 node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
349 tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
350
351 do {
352 Scheduler_Node *scheduler_node;
353 const Scheduler_Control *scheduler;
354 ISR_lock_Context lock_context;
355
356 scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
357 scheduler = _Scheduler_Node_get_scheduler( scheduler_node );
358
359 _Scheduler_Acquire_critical( scheduler, &lock_context );
360 ( *scheduler->Operations.update_priority )(
361 scheduler,
362 the_thread,
363 scheduler_node
364 );
365 _Scheduler_Release_critical( scheduler, &lock_context );
366
367 node = _Chain_Next( node );
368 } while ( node != tail );
369#else
370 const Scheduler_Control *scheduler;
371
372 scheduler = _Thread_Scheduler_get_home( the_thread );
373 ( *scheduler->Operations.update_priority )(
374 scheduler,
375 the_thread,
377 );
378#endif
379}
380
#if defined(RTEMS_SMP)
/**
 * @brief Changes the sticky level of the home scheduler node and propagates
 *   a priority change of a thread to the scheduler.
 *
 * The sticky level adjustment is applied only to the first (home) scheduler
 * node; the priority update is propagated to every scheduler node of the
 * thread.
 *
 * @param the_thread The thread whose priority or sticky level changed.
 * @param sticky_level_change The sticky level delta for the home node.
 */
RTEMS_INLINE_ROUTINE void _Scheduler_Priority_and_sticky_update(
  Thread_Control *the_thread,
  int             sticky_level_change
)
{
  Chain_Node              *node;
  const Chain_Node        *tail;
  Scheduler_Node          *scheduler_node;
  const Scheduler_Control *scheduler;
  ISR_lock_Context         lock_context;

  _Thread_Scheduler_process_requests( the_thread );

  /* Home node: apply the sticky level change, then update its priority. */
  node = _Chain_First( &the_thread->Scheduler.Scheduler_nodes );
  scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
  scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

  _Scheduler_Acquire_critical( scheduler, &lock_context );

  scheduler_node->sticky_level += sticky_level_change;
  _Assert( scheduler_node->sticky_level >= 0 );

  ( *scheduler->Operations.update_priority )(
    scheduler,
    the_thread,
    scheduler_node
  );

  _Scheduler_Release_critical( scheduler, &lock_context );

  /* Remaining (helping) nodes: priority update only. */
  tail = _Chain_Immutable_tail( &the_thread->Scheduler.Scheduler_nodes );
  node = _Chain_Next( node );

  while ( node != tail ) {
    scheduler_node = SCHEDULER_NODE_OF_THREAD_SCHEDULER_NODE( node );
    scheduler = _Scheduler_Node_get_scheduler( scheduler_node );

    _Scheduler_Acquire_critical( scheduler, &lock_context );
    ( *scheduler->Operations.update_priority )(
      scheduler,
      the_thread,
      scheduler_node
    );
    _Scheduler_Release_critical( scheduler, &lock_context );

    node = _Chain_Next( node );
  }
}
#endif
439
454 const Scheduler_Control *scheduler,
455 Priority_Control priority
456)
457{
458 return ( *scheduler->Operations.map_priority )( scheduler, priority );
459}
460
470 const Scheduler_Control *scheduler,
471 Priority_Control priority
472)
473{
474 return ( *scheduler->Operations.unmap_priority )( scheduler, priority );
475}
476
491 const Scheduler_Control *scheduler,
492 Scheduler_Node *node,
493 Thread_Control *the_thread,
494 Priority_Control priority
495)
496{
497 ( *scheduler->Operations.node_initialize )(
498 scheduler,
499 node,
500 the_thread,
501 priority
502 );
503}
504
515 const Scheduler_Control *scheduler,
516 Scheduler_Node *node
517)
518{
519 ( *scheduler->Operations.node_destroy )( scheduler, node );
520}
521
532 Thread_Control *the_thread,
533 Priority_Node *priority_node,
534 uint64_t deadline,
535 Thread_queue_Context *queue_context
536)
537{
538 const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
539
541 ( *scheduler->Operations.release_job )(
542 scheduler,
543 the_thread,
544 priority_node,
545 deadline,
546 queue_context
547 );
548}
549
559 Thread_Control *the_thread,
560 Priority_Node *priority_node,
561 Thread_queue_Context *queue_context
562)
563{
564 const Scheduler_Control *scheduler = _Thread_Scheduler_get_home( the_thread );
565
567 ( *scheduler->Operations.cancel_job )(
568 scheduler,
569 the_thread,
570 priority_node,
571 queue_context
572 );
573}
574
586{
587 const Scheduler_Control *scheduler = _Scheduler_Get_by_CPU( cpu );
588 Thread_Control *executing = cpu->executing;
589
590 if ( scheduler != NULL && executing != NULL ) {
591 ( *scheduler->Operations.tick )( scheduler, executing );
592 }
593}
594
605 const Scheduler_Control *scheduler,
606 Thread_Control *the_thread,
607 Per_CPU_Control *cpu
608)
609{
610 ( *scheduler->Operations.start_idle )( scheduler, the_thread, cpu );
611}
612
624 const Scheduler_Control *scheduler,
625 uint32_t cpu_index
626)
627{
628#if defined(RTEMS_SMP)
629 const Per_CPU_Control *cpu;
630 const Scheduler_Control *scheduler_of_cpu;
631
632 cpu = _Per_CPU_Get_by_index( cpu_index );
633 scheduler_of_cpu = _Scheduler_Get_by_CPU( cpu );
634
635 return scheduler_of_cpu == scheduler;
636#else
637 (void) scheduler;
638 (void) cpu_index;
639
640 return true;
641#endif
642}
643
652 const Scheduler_Control *scheduler
653)
654{
655#if defined(RTEMS_SMP)
656 return &_Scheduler_Get_context( scheduler )->Processors;
657#else
658 return &_Processor_mask_The_one_and_only;
659#endif
660}
661
673 Thread_Control *the_thread,
674 size_t cpusetsize,
675 cpu_set_t *cpuset
676);
677
690 const Scheduler_Control *scheduler,
691 Thread_Control *the_thread,
692 Scheduler_Node *node,
693 const Processor_mask *affinity
694)
695{
696 (void) scheduler;
697 (void) the_thread;
698 (void) node;
700}
701
713 Thread_Control *the_thread,
714 size_t cpusetsize,
715 const cpu_set_t *cpuset
716);
717
728 const Scheduler_Control *scheduler,
729 Thread_Control *the_thread,
730 Scheduler_Node *node,
731 void ( *extract )(
732 const Scheduler_Control *,
735 ),
736 void ( *schedule )(
737 const Scheduler_Control *,
739 bool
740 )
741)
742{
743 ( *extract )( scheduler, the_thread, node );
744
745 /* TODO: flash critical section? */
746
747 if ( _Thread_Is_executing( the_thread ) || _Thread_Is_heir( the_thread ) ) {
748 ( *schedule )( scheduler, the_thread, true );
749 }
750}
751
760 const Scheduler_Control *scheduler
761)
762{
763#if defined(RTEMS_SMP)
765
766 return _Processor_mask_Count( &context->Processors );
767#else
768 (void) scheduler;
769
770 return 1;
771#endif
772}
773
782{
783 return _Objects_Build_id(
784 OBJECTS_FAKE_OBJECTS_API,
785 OBJECTS_FAKE_OBJECTS_SCHEDULERS,
787 (uint16_t) ( scheduler_index + 1 )
788 );
789}
790
799{
800 uint32_t minimum_id = _Scheduler_Build_id( 0 );
801
802 return id - minimum_id;
803}
804
813 Objects_Id id
814)
815{
816 uint32_t index;
817
818 index = _Scheduler_Get_index_by_id( id );
819
820 if ( index >= _Scheduler_Count ) {
821 return NULL;
822 }
823
824 return &_Scheduler_Table[ index ];
825}
826
835 const Scheduler_Control *scheduler
836)
837{
838 return (uint32_t) (scheduler - &_Scheduler_Table[ 0 ]);
839}
840
841#if defined(RTEMS_SMP)
850typedef Thread_Control *( *Scheduler_Get_idle_thread )(
852);
853
860typedef void ( *Scheduler_Release_idle_thread )(
862 Thread_Control *idle
863);
864
871RTEMS_INLINE_ROUTINE void _Scheduler_Thread_change_state(
872 Thread_Control *the_thread,
873 Thread_Scheduler_state new_state
874)
875{
876 _Assert(
877 _ISR_lock_Is_owner( &the_thread->Scheduler.Lock )
878 || the_thread->Scheduler.state == THREAD_SCHEDULER_BLOCKED
880 );
881
882 the_thread->Scheduler.state = new_state;
883}
884
891RTEMS_INLINE_ROUTINE void _Scheduler_Set_idle_thread(
892 Scheduler_Node *node,
893 Thread_Control *idle
894)
895{
896 _Assert( _Scheduler_Node_get_idle( node ) == NULL );
897 _Assert(
898 _Scheduler_Node_get_owner( node ) == _Scheduler_Node_get_user( node )
899 );
900
901 _Scheduler_Node_set_user( node, idle );
902 node->idle = idle;
903}
904
918RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Use_idle_thread(
920 Scheduler_Node *node,
921 Per_CPU_Control *cpu,
922 Scheduler_Get_idle_thread get_idle_thread
923)
924{
925 Thread_Control *idle = ( *get_idle_thread )( context );
926
927 _Scheduler_Set_idle_thread( node, idle );
928 _Thread_Set_CPU( idle, cpu );
929 return idle;
930}
931
/** Possible outcomes of _Scheduler_Try_to_schedule_node(). */
typedef enum {
  SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE,
  SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK
} Scheduler_Try_to_schedule_action;
937
949RTEMS_INLINE_ROUTINE Scheduler_Try_to_schedule_action
950_Scheduler_Try_to_schedule_node(
952 Scheduler_Node *node,
953 Thread_Control *idle,
954 Scheduler_Get_idle_thread get_idle_thread
955)
956{
957 ISR_lock_Context lock_context;
958 Scheduler_Try_to_schedule_action action;
959 Thread_Control *owner;
960
961 action = SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE;
962 owner = _Scheduler_Node_get_owner( node );
963 _Assert( _Scheduler_Node_get_user( node ) == owner );
964 _Assert( _Scheduler_Node_get_idle( node ) == NULL );
965
966 _Thread_Scheduler_acquire_critical( owner, &lock_context );
967
968 if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
969 _Thread_Scheduler_cancel_need_for_help( owner, _Thread_Get_CPU( owner ) );
970 _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
971 } else if (
972 owner->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
973 && node->sticky_level <= 1
974 ) {
975 action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
976 } else if ( node->sticky_level == 0 ) {
977 action = SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK;
978 } else if ( idle != NULL ) {
979 action = SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE;
980 } else {
981 _Scheduler_Use_idle_thread(
982 context,
983 node,
984 _Thread_Get_CPU( owner ),
985 get_idle_thread
986 );
987 }
988
989 _Thread_Scheduler_release_critical( owner, &lock_context );
990 return action;
991}
992
1003RTEMS_INLINE_ROUTINE Thread_Control *_Scheduler_Release_idle_thread(
1005 Scheduler_Node *node,
1006 Scheduler_Release_idle_thread release_idle_thread
1007)
1008{
1009 Thread_Control *idle = _Scheduler_Node_get_idle( node );
1010
1011 if ( idle != NULL ) {
1013
1014 node->idle = NULL;
1015 _Scheduler_Node_set_user( node, owner );
1016 ( *release_idle_thread )( context, idle );
1017 }
1018
1019 return idle;
1020}
1021
1030RTEMS_INLINE_ROUTINE void _Scheduler_Exchange_idle_thread(
1031 Scheduler_Node *needs_idle,
1032 Scheduler_Node *uses_idle,
1033 Thread_Control *idle
1034)
1035{
1036 uses_idle->idle = NULL;
1037 _Scheduler_Node_set_user(
1038 uses_idle,
1039 _Scheduler_Node_get_owner( uses_idle )
1040 );
1041 _Scheduler_Set_idle_thread( needs_idle, idle );
1042}
1043
1059RTEMS_INLINE_ROUTINE Per_CPU_Control *_Scheduler_Block_node(
1061 Thread_Control *thread,
1062 Scheduler_Node *node,
1063 bool is_scheduled,
1064 Scheduler_Get_idle_thread get_idle_thread
1065)
1066{
1067 int sticky_level;
1068 ISR_lock_Context lock_context;
1069 Per_CPU_Control *thread_cpu;
1070
1071 sticky_level = node->sticky_level;
1072 --sticky_level;
1073 node->sticky_level = sticky_level;
1074 _Assert( sticky_level >= 0 );
1075
1076 _Thread_Scheduler_acquire_critical( thread, &lock_context );
1077 thread_cpu = _Thread_Get_CPU( thread );
1078 _Thread_Scheduler_cancel_need_for_help( thread, thread_cpu );
1079 _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED );
1080 _Thread_Scheduler_release_critical( thread, &lock_context );
1081
1082 if ( sticky_level > 0 ) {
1083 if ( is_scheduled && _Scheduler_Node_get_idle( node ) == NULL ) {
1084 Thread_Control *idle;
1085
1086 idle = _Scheduler_Use_idle_thread(
1087 context,
1088 node,
1089 thread_cpu,
1090 get_idle_thread
1091 );
1092 _Thread_Dispatch_update_heir( _Per_CPU_Get(), thread_cpu, idle );
1093 }
1094
1095 return NULL;
1096 }
1097
1098 _Assert( thread == _Scheduler_Node_get_user( node ) );
1099 return thread_cpu;
1100}
1101
1110RTEMS_INLINE_ROUTINE void _Scheduler_Discard_idle_thread(
1112 Thread_Control *the_thread,
1113 Scheduler_Node *node,
1114 Scheduler_Release_idle_thread release_idle_thread
1115)
1116{
1117 Thread_Control *idle;
1118 Thread_Control *owner;
1119 Per_CPU_Control *cpu;
1120
1121 idle = _Scheduler_Node_get_idle( node );
1122 owner = _Scheduler_Node_get_owner( node );
1123
1124 node->idle = NULL;
1125 _Assert( _Scheduler_Node_get_user( node ) == idle );
1126 _Scheduler_Node_set_user( node, owner );
1127 ( *release_idle_thread )( context, idle );
1128
1129 cpu = _Thread_Get_CPU( idle );
1130 _Thread_Set_CPU( the_thread, cpu );
1131 _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread );
1132}
1133
1146RTEMS_INLINE_ROUTINE bool _Scheduler_Unblock_node(
1148 Thread_Control *the_thread,
1149 Scheduler_Node *node,
1150 bool is_scheduled,
1151 Scheduler_Release_idle_thread release_idle_thread
1152)
1153{
1154 bool unblock;
1155
1156 ++node->sticky_level;
1157 _Assert( node->sticky_level > 0 );
1158
1159 if ( is_scheduled ) {
1160 _Scheduler_Discard_idle_thread(
1161 context,
1162 the_thread,
1163 node,
1164 release_idle_thread
1165 );
1166 _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED );
1167 unblock = false;
1168 } else {
1169 _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY );
1170 unblock = true;
1171 }
1172
1173 return unblock;
1174}
1175#endif
1176
1185 Thread_Control *new_heir,
1186 bool force_dispatch
1187)
1188{
1189 Thread_Control *heir = _Thread_Heir;
1190
1191 if ( heir != new_heir && ( heir->is_preemptible || force_dispatch ) ) {
1192#if defined(RTEMS_SMP)
1193 /*
1194 * We need this state only for _Thread_Get_CPU_time_used(). Cannot use
1195 * _Scheduler_Thread_change_state() since THREAD_SCHEDULER_BLOCKED to
1196 * THREAD_SCHEDULER_BLOCKED state changes are illegal for the real SMP
1197 * schedulers.
1198 */
1199 heir->Scheduler.state = THREAD_SCHEDULER_BLOCKED;
1200 new_heir->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
1201#endif
1203 _Thread_Heir = new_heir;
1204 _Thread_Dispatch_necessary = true;
1205 }
1206}
1207
1220 const Scheduler_Control *new_scheduler,
1221 Thread_Control *the_thread,
1222 Priority_Control priority
1223)
1224{
1225 Scheduler_Node *new_scheduler_node;
1226 Scheduler_Node *old_scheduler_node;
1227#if defined(RTEMS_SMP)
1228 ISR_lock_Context lock_context;
1229 const Scheduler_Control *old_scheduler;
1230
1231#endif
1232
1233 if ( the_thread->Wait.queue != NULL ) {
1234 return STATUS_RESOURCE_IN_USE;
1235 }
1236
1237 old_scheduler_node = _Thread_Scheduler_get_home_node( the_thread );
1239 &old_scheduler_node->Wait.Priority,
1240 &the_thread->Real_priority
1241 );
1242
1243 if (
1244 !_Priority_Is_empty( &old_scheduler_node->Wait.Priority )
1245#if defined(RTEMS_SMP)
1246 || !_Chain_Has_only_one_node( &the_thread->Scheduler.Wait_nodes )
1247 || the_thread->Scheduler.pin_level != 0
1248#endif
1249 ) {
1251 &old_scheduler_node->Wait.Priority,
1252 &the_thread->Real_priority,
1253 the_thread->Real_priority.priority
1254 );
1255 return STATUS_RESOURCE_IN_USE;
1256 }
1257
1258#if defined(RTEMS_SMP)
1259 old_scheduler = _Thread_Scheduler_get_home( the_thread );
1260 new_scheduler_node = _Thread_Scheduler_get_node_by_index(
1261 the_thread,
1262 _Scheduler_Get_index( new_scheduler )
1263 );
1264
1265 _Scheduler_Acquire_critical( new_scheduler, &lock_context );
1266
1267 if (
1268 _Scheduler_Get_processor_count( new_scheduler ) == 0
1269 || !( *new_scheduler->Operations.set_affinity )(
1270 new_scheduler,
1271 the_thread,
1272 new_scheduler_node,
1273 &the_thread->Scheduler.Affinity
1274 )
1275 ) {
1276 _Scheduler_Release_critical( new_scheduler, &lock_context );
1278 &old_scheduler_node->Wait.Priority,
1279 &the_thread->Real_priority,
1280 the_thread->Real_priority.priority
1281 );
1282 return STATUS_UNSATISFIED;
1283 }
1284
1285 _Assert( the_thread->Scheduler.pinned_scheduler == NULL );
1286 the_thread->Scheduler.home_scheduler = new_scheduler;
1287
1288 _Scheduler_Release_critical( new_scheduler, &lock_context );
1289
1290 _Thread_Scheduler_process_requests( the_thread );
1291#else
1292 new_scheduler_node = old_scheduler_node;
1293#endif
1294
1295 the_thread->Start.initial_priority = priority;
1296 _Priority_Node_set_priority( &the_thread->Real_priority, priority );
1298 &new_scheduler_node->Wait.Priority,
1299 &the_thread->Real_priority
1300 );
1301
1302#if defined(RTEMS_SMP)
1303 if ( old_scheduler != new_scheduler ) {
1304 States_Control current_state;
1305
1306 current_state = the_thread->current_state;
1307
1308 if ( _States_Is_ready( current_state ) ) {
1309 _Scheduler_Block( the_thread );
1310 }
1311
1312 _Assert( old_scheduler_node->sticky_level == 0 );
1313 _Assert( new_scheduler_node->sticky_level == 0 );
1314
1315 _Chain_Extract_unprotected( &old_scheduler_node->Thread.Wait_node );
1316 _Assert( _Chain_Is_empty( &the_thread->Scheduler.Wait_nodes ) );
1318 &the_thread->Scheduler.Wait_nodes,
1319 &new_scheduler_node->Thread.Wait_node
1320 );
1322 &old_scheduler_node->Thread.Scheduler_node.Chain
1323 );
1324 _Assert( _Chain_Is_empty( &the_thread->Scheduler.Scheduler_nodes ) );
1326 &the_thread->Scheduler.Scheduler_nodes,
1327 &new_scheduler_node->Thread.Scheduler_node.Chain
1328 );
1329
1330 _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
1331
1332 if ( _States_Is_ready( current_state ) ) {
1333 _Scheduler_Unblock( the_thread );
1334 }
1335
1336 return STATUS_SUCCESSFUL;
1337 }
1338#endif
1339
1340 _Scheduler_Node_set_priority( new_scheduler_node, priority, false );
1341 _Scheduler_Update_priority( the_thread );
1342 return STATUS_SUCCESSFUL;
1343}
1344
1347#ifdef __cplusplus
1348}
1349#endif
1350
1351#endif
1352/* end of include file */
Information for the Assert Handler.
#define NULL
Requests a GPIO pin group configuration.
Definition: bestcomm_api.h:77
#define _Assert(_e)
Assertion similar to assert() controlled via RTEMS_DEBUG instead of NDEBUG.
Definition: assert.h:100
#define RTEMS_INLINE_ROUTINE
Definition: basedefs.h:66
RTEMS_INLINE_ROUTINE void _Chain_Initialize_one(Chain_Control *the_chain, Chain_Node *the_node)
Initializes this chain to contain exactly the specified node.
Definition: chainimpl.h:528
RTEMS_INLINE_ROUTINE bool _Chain_Has_only_one_node(const Chain_Control *the_chain)
Checks if this chain has only one node.
Definition: chainimpl.h:450
RTEMS_INLINE_ROUTINE void _Chain_Extract_unprotected(Chain_Node *the_node)
Extracts this node (unprotected).
Definition: chainimpl.h:558
RTEMS_INLINE_ROUTINE bool _Chain_Is_empty(const Chain_Control *the_chain)
Checks if the chain is empty.
Definition: chainimpl.h:393
RTEMS_INLINE_ROUTINE const Chain_Node * _Chain_Immutable_tail(const Chain_Control *the_chain)
Returns pointer to immutable chain tail.
Definition: chainimpl.h:243
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_Next(const Chain_Node *the_node)
Returns pointer to the next node from this node.
Definition: chainimpl.h:327
RTEMS_INLINE_ROUTINE Chain_Node * _Chain_First(const Chain_Control *the_chain)
Returns pointer to chain's first node.
Definition: chainimpl.h:260
#define _ISR_lock_Release(_lock, _context)
Releases an ISR lock inside an ISR disabled section.
Definition: isrlock.h:316
#define _ISR_lock_Acquire(_lock, _context)
Acquires an ISR lock inside an ISR disabled section.
Definition: isrlock.h:293
uint32_t Objects_Id
Definition: object.h:80
#define _Objects_Build_id(the_api, the_class, node, index)
Builds an object ID from its components.
Definition: object.h:317
#define _Objects_Local_node
The local MPCI node number.
Definition: object.h:347
RTEMS_INLINE_ROUTINE bool _Priority_Is_empty(const Priority_Aggregation *aggregation)
Checks if the priority aggregation is empty.
Definition: priorityimpl.h:256
RTEMS_INLINE_ROUTINE bool _Priority_Plain_insert(Priority_Aggregation *aggregation, Priority_Node *node, Priority_Control priority)
Inserts the node with the given priority into the priority aggregation's contributors.
Definition: priorityimpl.h:411
RTEMS_INLINE_ROUTINE void _Priority_Plain_extract(Priority_Aggregation *aggregation, Priority_Node *node)
Extracts the priority node from the aggregation.
Definition: priorityimpl.h:433
uint64_t Priority_Control
The thread priority control.
Definition: priority.h:70
RTEMS_INLINE_ROUTINE void _Priority_Node_set_priority(Priority_Node *node, Priority_Control priority)
Sets the priority of the priority node to the given priority.
Definition: priorityimpl.h:171
RTEMS_INLINE_ROUTINE void _Priority_Initialize_one(Priority_Aggregation *aggregation, Priority_Node *node)
Initializes the priority aggregation with the given information.
Definition: priorityimpl.h:232
RTEMS_INLINE_ROUTINE bool _Processor_mask_Is_subset(const Processor_mask *big, const Processor_mask *small)
Checks if the processor set small is a subset of processor set big.
Definition: processormask.h:192
RTEMS_INLINE_ROUTINE uint32_t _Processor_mask_Count(const Processor_mask *a)
Gets the number of set bits in the processor mask.
Definition: processormask.h:271
RTEMS_INLINE_ROUTINE const Processor_mask * _SMP_Get_online_processors(void)
Gets all online processors.
Definition: smpimpl.h:318
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Map_priority(const Scheduler_Control *scheduler, Priority_Control priority)
Maps a thread priority from the user domain to the scheduler domain.
Definition: schedulerimpl.h:453
void _Scheduler_Handler_initialization(void)
Initializes the scheduler to the policy chosen by the user.
Definition: scheduler.c:24
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index(const Scheduler_Control *scheduler)
Gets the index of the scheduler.
Definition: schedulerimpl.h:834
RTEMS_INLINE_ROUTINE void _Scheduler_Update_heir(Thread_Control *new_heir, bool force_dispatch)
Updates the heir.
Definition: schedulerimpl.h:1184
RTEMS_INLINE_ROUTINE bool _Scheduler_default_Set_affinity_body(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node, const Processor_mask *affinity)
Checks if the affinity is a subset of the online processors.
Definition: schedulerimpl.h:689
RTEMS_INLINE_ROUTINE void _Scheduler_Start_idle(const Scheduler_Control *scheduler, Thread_Control *the_thread, Per_CPU_Control *cpu)
Starts the idle thread for a particular processor.
Definition: schedulerimpl.h:604
RTEMS_INLINE_ROUTINE const Scheduler_Control * _Scheduler_Get_by_CPU(const Per_CPU_Control *cpu)
Gets the scheduler for the cpu.
Definition: schedulerimpl.h:72
#define _Scheduler_Count
Count of registered schedulers.
Definition: scheduler.h:325
RTEMS_INLINE_ROUTINE Scheduler_Context * _Scheduler_Get_context(const Scheduler_Control *scheduler)
Gets the context of the scheduler.
Definition: schedulerimpl.h:58
RTEMS_INLINE_ROUTINE Thread_Control * _Scheduler_Node_get_owner(const Scheduler_Node *node)
Gets the owner of the node.
Definition: schedulernodeimpl.h:135
RTEMS_INLINE_ROUTINE void _Scheduler_Yield(Thread_Control *the_thread)
Scheduler yield with a particular thread.
Definition: schedulerimpl.h:218
RTEMS_INLINE_ROUTINE void _Scheduler_Tick(const Per_CPU_Control *cpu)
Scheduler method invoked at each clock tick.
Definition: schedulerimpl.h:585
RTEMS_INLINE_ROUTINE Objects_Id _Scheduler_Build_id(uint32_t scheduler_index)
Builds an object build id.
Definition: schedulerimpl.h:781
RTEMS_INLINE_ROUTINE void _Scheduler_Node_destroy(const Scheduler_Control *scheduler, Scheduler_Node *node)
Destroys a scheduler node.
Definition: schedulerimpl.h:514
const Scheduler_Control _Scheduler_Table[]
Registered schedulers.
RTEMS_INLINE_ROUTINE void _Scheduler_Generic_block(const Scheduler_Control *scheduler, Thread_Control *the_thread, Scheduler_Node *node, void(*extract)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *), void(*schedule)(const Scheduler_Control *, Thread_Control *, bool))
Blocks the thread.
Definition: schedulerimpl.h:727
RTEMS_INLINE_ROUTINE const Scheduler_Control * _Scheduler_Node_get_scheduler(const Scheduler_Node *node)
Gets the scheduler of the node.
Definition: schedulernodeimpl.h:121
RTEMS_INLINE_ROUTINE void _Scheduler_Node_initialize(const Scheduler_Control *scheduler, Scheduler_Node *node, Thread_Control *the_thread, Priority_Control priority)
Initializes a scheduler node.
Definition: schedulerimpl.h:490
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_index_by_id(Objects_Id id)
Gets the scheduler index from the given object build id.
Definition: schedulerimpl.h:798
RTEMS_INLINE_ROUTINE void _Scheduler_Update_priority(Thread_Control *the_thread)
Propagates a priority change of a thread to the scheduler.
Definition: schedulerimpl.h:340
RTEMS_INLINE_ROUTINE void _Scheduler_Unblock(Thread_Control *the_thread)
Unblocks a thread with respect to the scheduler.
Definition: schedulerimpl.h:305
RTEMS_INLINE_ROUTINE void _Scheduler_Schedule(Thread_Control *the_thread)
General scheduling decision.
Definition: schedulerimpl.h:197
RTEMS_INLINE_ROUTINE const Processor_mask * _Scheduler_Get_processors(const Scheduler_Control *scheduler)
Gets the processors of the scheduler.
Definition: schedulerimpl.h:651
RTEMS_INLINE_ROUTINE void _Scheduler_Acquire_critical(const Scheduler_Control *scheduler, ISR_lock_Context *lock_context)
Acquires the scheduler instance inside a critical section (interrupts disabled).
Definition: schedulerimpl.h:92
RTEMS_INLINE_ROUTINE Priority_Control _Scheduler_Unmap_priority(const Scheduler_Control *scheduler, Priority_Control priority)
Unmaps a thread priority from the scheduler domain to the user domain.
Definition: schedulerimpl.h:469
RTEMS_INLINE_ROUTINE void _Scheduler_Cancel_job(Thread_Control *the_thread, Priority_Node *priority_node, Thread_queue_Context *queue_context)
Cancels a job of a thread with respect to the scheduler.
Definition: schedulerimpl.h:558
bool _Scheduler_Get_affinity(Thread_Control *the_thread, size_t cpusetsize, cpu_set_t *cpuset)
Copies the thread's scheduler's affinity to the given cpuset.
Definition: schedulergetaffinity.c:21
RTEMS_INLINE_ROUTINE uint32_t _Scheduler_Get_processor_count(const Scheduler_Control *scheduler)
Gets the number of processors of the scheduler.
Definition: schedulerimpl.h:759
RTEMS_INLINE_ROUTINE const Scheduler_Control * _Scheduler_Get_by_id(Objects_Id id)
Gets the scheduler from the given object build id.
Definition: schedulerimpl.h:812
bool _Scheduler_Set_affinity(Thread_Control *the_thread, size_t cpusetsize, const cpu_set_t *cpuset)
Sets the thread's scheduler's affinity.
Definition: schedulersetaffinity.c:21
RTEMS_INLINE_ROUTINE bool _Scheduler_Has_processor_ownership(const Scheduler_Control *scheduler, uint32_t cpu_index)
Checks if the scheduler of the cpu with the given index is equal to the given scheduler.
Definition: schedulerimpl.h:623
RTEMS_INLINE_ROUTINE void _Scheduler_Block(Thread_Control *the_thread)
Blocks a thread with respect to the scheduler.
Definition: schedulerimpl.h:243
RTEMS_INLINE_ROUTINE void _Scheduler_Release_critical(const Scheduler_Control *scheduler, ISR_lock_Context *lock_context)
Releases the scheduler instance inside a critical section (interrupts disabled).
Definition: schedulerimpl.h:116
RTEMS_INLINE_ROUTINE void _Scheduler_Release_job(Thread_Control *the_thread, Priority_Node *priority_node, uint64_t deadline, Thread_queue_Context *queue_context)
Releases a job of a thread with respect to the scheduler.
Definition: schedulerimpl.h:531
RTEMS_INLINE_ROUTINE void _Scheduler_Node_set_priority(Scheduler_Node *node, Priority_Control new_priority, bool prepend_it)
Sets the priority of the node.
Definition: schedulernodeimpl.h:178
RTEMS_INLINE_ROUTINE Status_Control _Scheduler_Set(const Scheduler_Control *new_scheduler, Thread_Control *the_thread, Priority_Control priority)
Sets a new scheduler.
Definition: schedulerimpl.h:1219
RTEMS_INLINE_ROUTINE bool _States_Is_ready(States_Control the_states)
Checks if the state is ready.
Definition: statesimpl.h:195
uint32_t States_Control
Definition: states.h:46
RTEMS_INLINE_ROUTINE System_state_Codes _System_state_Get(void)
Gets the current system state.
Definition: sysstate.h:90
RTEMS_INLINE_ROUTINE bool _System_state_Is_up(System_state_Codes state)
Checks if the state is up.
Definition: sysstate.h:133
RTEMS_INLINE_ROUTINE void _Thread_queue_Context_clear_priority_updates(Thread_queue_Context *queue_context)
Clears the priority update count of the thread queue context.
Definition: threadqimpl.h:338
RTEMS_INLINE_ROUTINE void _Thread_Update_CPU_time_used(Thread_Control *the_thread, Per_CPU_Control *cpu)
Updates the CPU time used by the thread.
Definition: threadimpl.h:1129
RTEMS_INLINE_ROUTINE Scheduler_Node * _Thread_Scheduler_get_home_node(const Thread_Control *the_thread)
Gets the scheduler's home node.
Definition: threadimpl.h:1412
RTEMS_INLINE_ROUTINE bool _Thread_Is_heir(const Thread_Control *the_thread)
Checks if the thread is the heir.
Definition: threadimpl.h:923
RTEMS_INLINE_ROUTINE void _Thread_Set_CPU(Thread_Control *thread, Per_CPU_Control *cpu)
Sets the CPU of the thread's scheduler.
Definition: threadimpl.h:860
RTEMS_INLINE_ROUTINE Per_CPU_Control * _Thread_Get_CPU(const Thread_Control *thread)
Gets the CPU of the thread's scheduler.
Definition: threadimpl.h:841
RTEMS_INLINE_ROUTINE Scheduler_Node * _Thread_Scheduler_get_node_by_index(const Thread_Control *the_thread, size_t scheduler_index)
Gets the thread's scheduler node by index.
Definition: threadimpl.h:1434
RTEMS_INLINE_ROUTINE bool _Thread_Is_executing(const Thread_Control *the_thread)
Checks if the thread is the currently executing thread.
Definition: threadimpl.h:884
RTEMS_INLINE_ROUTINE const Scheduler_Control * _Thread_Scheduler_get_home(const Thread_Control *the_thread)
Gets the home scheduler of the thread.
Definition: threadimpl.h:1393
Priority Handler API Implementation.
Constants and Structures Associated with the Scheduler.
SuperCore SMP Implementation.
Definition: chain.h:68
Local ISR lock context for acquire and release pairs.
Definition: isrlock.h:65
Per CPU Core Structure.
Definition: percpu.h:347
struct _Thread_Control * executing
This is the thread executing on this processor.
Definition: percpu.h:420
The priority node to build up a priority aggregation.
Definition: priority.h:98
Priority_Control priority
The priority value of this node.
Definition: priority.h:110
Scheduler context.
Definition: scheduler.h:252
Scheduler node for per-thread data.
Definition: schedulernode.h:79
struct Scheduler_Node::@3981 Wait
Thread wait support block.
void(* tick)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:226
void(* yield)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:56
Priority_Control(* unmap_priority)(const Scheduler_Control *, Priority_Control)
Definition: scheduler.h:90
void(* unblock)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:70
Priority_Control(* map_priority)(const Scheduler_Control *, Priority_Control)
Definition: scheduler.h:84
void(* cancel_job)(const Scheduler_Control *, Thread_Control *, Priority_Node *, Thread_queue_Context *)
Definition: scheduler.h:218
void(* block)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:63
void(* release_job)(const Scheduler_Control *, Thread_Control *, Priority_Node *, uint64_t, Thread_queue_Context *)
Definition: scheduler.h:209
void(* node_destroy)(const Scheduler_Control *, Scheduler_Node *)
Definition: scheduler.h:206
void(* node_initialize)(const Scheduler_Control *, Scheduler_Node *, Thread_Control *, Priority_Control)
Definition: scheduler.h:198
void(* update_priority)(const Scheduler_Control *, Thread_Control *, Scheduler_Node *)
Definition: scheduler.h:77
void(* schedule)(const Scheduler_Control *, Thread_Control *)
Definition: scheduler.h:53
void(* start_idle)(const Scheduler_Control *, Thread_Control *, struct Per_CPU_Control *)
Definition: scheduler.h:229
Priority_Control initial_priority
Definition: thread.h:201
Thread_queue_Queue * queue
The current thread queue.
Definition: thread.h:489
Thread queue context for the thread queue methods.
Definition: threadq.h:198
Scheduler control.
Definition: scheduler.h:269
Scheduler_Context * context
Reference to a statically allocated scheduler context.
Definition: scheduler.h:273
Scheduler_Operations Operations
The scheduler operations.
Definition: scheduler.h:278
Definition: thread.h:732
Thread_Wait_information Wait
Definition: thread.h:774
Priority_Node Real_priority
The base priority of this thread in its home scheduler instance.
Definition: thread.h:761
States_Control current_state
Definition: thread.h:756
Thread_Scheduler_control Scheduler
Scheduler related control.
Definition: thread.h:771
bool is_preemptible
Definition: thread.h:802
Thread_Start_information Start
Definition: thread.h:832
unsigned context
Definition: tlb.h:1
Inlined Routines from the Thread Handler.