RTEMS 5.2
schedulersmpimpl.h
/*
 * Copyright (c) 2013, 2017 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#define _RTEMS_SCORE_SCHEDULERSMPIMPL_H
#include <rtems/score/schedulersmp.h>
#include <rtems/score/assert.h>
#include <rtems/score/chainimpl.h>
#include <rtems/score/schedulersimpleimpl.h>
#include <rtems/bspIo.h>
#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */
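/*
 * The function pointer types below parameterize the common SMP scheduler
 * operations with the policy-specific ready set operations.  A concrete
 * SMP scheduler passes its own implementations of these operations to the
 * generic routines in this file, for example _Scheduler_SMP_Enqueue() and
 * _Scheduler_SMP_Block().
 */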
typedef bool ( *Scheduler_SMP_Has_ready )(
  Scheduler_Context *context
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_highest_ready )(
  Scheduler_Context *context,
  Scheduler_Node    *node
);

typedef Scheduler_Node *( *Scheduler_SMP_Get_lowest_scheduled )(
  Scheduler_Context *context,
  Scheduler_Node    *filter
);

typedef void ( *Scheduler_SMP_Extract )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_extract
);

typedef void ( *Scheduler_SMP_Insert )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   insert_priority
);

typedef void ( *Scheduler_SMP_Move )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_move
);

typedef bool ( *Scheduler_SMP_Ask_for_help )(
  Scheduler_Context *context,
  Thread_Control    *thread,
  Scheduler_Node    *node
);

typedef void ( *Scheduler_SMP_Update )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_update,
  Priority_Control   new_priority
);

typedef void ( *Scheduler_SMP_Set_affinity )(
  Scheduler_Context *context,
  Scheduler_Node    *node,
  void              *arg
);

typedef bool ( *Scheduler_SMP_Enqueue )(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_enqueue,
  Priority_Control   priority
);

typedef void ( *Scheduler_SMP_Allocate_processor )(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
);

typedef void ( *Scheduler_SMP_Register_idle )(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
);

static inline void _Scheduler_SMP_Do_nothing_register_idle(
  Scheduler_Context *context,
  Scheduler_Node    *idle,
  Per_CPU_Control   *cpu
)
{
  (void) context;
  (void) idle;
  (void) cpu;
}
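/*
 * Order relation for the scheduled chain: a node to insert is placed
 * before the first node with a strictly greater (less important) priority
 * value, so the chain stays sorted by priority.
 */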
static inline bool _Scheduler_SMP_Priority_less_equal(
  const void       *to_insert,
  const Chain_Node *next
)
{
  const Priority_Control   *priority_to_insert;
  const Scheduler_SMP_Node *node_next;

  priority_to_insert = (const Priority_Control *) to_insert;
  node_next = (const Scheduler_SMP_Node *) next;

  return *priority_to_insert <= node_next->priority;
}
static inline Scheduler_SMP_Context *_Scheduler_SMP_Get_self(
  Scheduler_Context *context
)
{
  return (Scheduler_SMP_Context *) context;
}

static inline void _Scheduler_SMP_Initialize(
  Scheduler_SMP_Context *self
)
{
  _Chain_Initialize_empty( &self->Scheduled );
  _Chain_Initialize_empty( &self->Idle_threads );
}
static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}

static inline Scheduler_SMP_Node *_Scheduler_SMP_Thread_get_own_node(
  Thread_Control *thread
)
{
  return (Scheduler_SMP_Node *) _Thread_Scheduler_get_home_node( thread );
}
static inline Scheduler_SMP_Node *_Scheduler_SMP_Node_downcast(
  Scheduler_Node *node
)
{
  return (Scheduler_SMP_Node *) node;
}

static inline Scheduler_SMP_Node_state _Scheduler_SMP_Node_state(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->state;
}

static inline Priority_Control _Scheduler_SMP_Node_priority(
  const Scheduler_Node *node
)
{
  return ( (const Scheduler_SMP_Node *) node )->priority;
}
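/*
 * Initializes an SMP scheduler node; the node starts out blocked with the
 * given priority.
 */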
static inline void _Scheduler_SMP_Node_initialize(
  const Scheduler_Control *scheduler,
  Scheduler_SMP_Node      *node,
  Thread_Control          *thread,
  Priority_Control         priority
)
{
  _Scheduler_Node_do_initialize( scheduler, &node->Base, thread, priority );
  node->state = SCHEDULER_SMP_NODE_BLOCKED;
  node->priority = priority;
}

static inline void _Scheduler_SMP_Node_update_priority(
  Scheduler_SMP_Node *node,
  Priority_Control    new_priority
)
{
  node->priority = new_priority;
}

static inline void _Scheduler_SMP_Node_change_state(
  Scheduler_Node           *node,
  Scheduler_SMP_Node_state  new_state
)
{
  Scheduler_SMP_Node *the_node;

  the_node = _Scheduler_SMP_Node_downcast( node );
  the_node->state = new_state;
}
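/*
 * Idle thread management: the scheduler context keeps a chain of
 * available idle threads.  Idle threads stand in for scheduled nodes
 * whose user thread is not available, e.g. due to locking protocols.
 */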
static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
  const Scheduler_Context *context,
  const Per_CPU_Control   *cpu
)
{
  return cpu->Scheduler.context == context;
}

static inline Thread_Control *_Scheduler_SMP_Get_idle_thread(
  Scheduler_Context *context
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Thread_Control *idle = (Thread_Control *)
    _Chain_Get_first_unprotected( &self->Idle_threads );

  _Assert( &idle->Object.Node != _Chain_Tail( &self->Idle_threads ) );

  return idle;
}

static inline void _Scheduler_SMP_Release_idle_thread(
  Scheduler_Context *context,
  Thread_Control    *idle
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );

  _Chain_Prepend_unprotected( &self->Idle_threads, &idle->Object.Node );
}

static inline void _Scheduler_SMP_Exctract_idle_thread(
  Thread_Control *idle
)
{
  _Chain_Extract_unprotected( &idle->Object.Node );
}
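/*
 * Processor allocation: the lazy variant avoids unnecessary heir updates
 * and thread migrations, while the exact variant makes the scheduled
 * thread the heir of the victim processor unconditionally.
 */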
static inline void _Scheduler_SMP_Allocate_processor_lazy(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Thread_Control *victim_thread = _Scheduler_Node_get_user( victim );
  Per_CPU_Control *scheduled_cpu = _Thread_Get_CPU( scheduled_thread );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();
  Thread_Control *heir;

  _Assert( _ISR_Get_level() != 0 );

  if ( _Thread_Is_executing_on_a_processor( scheduled_thread ) ) {
    if ( _Scheduler_SMP_Is_processor_owned_by_us( context, scheduled_cpu ) ) {
      heir = scheduled_cpu->heir;
      _Thread_Dispatch_update_heir(
        cpu_self,
        scheduled_cpu,
        scheduled_thread
      );
    } else {
      /* We have to force a migration to our processor set */
      heir = scheduled_thread;
    }
  } else {
    heir = scheduled_thread;
  }

  if ( heir != victim_thread ) {
    _Thread_Set_CPU( heir, victim_cpu );
    _Thread_Dispatch_update_heir( cpu_self, victim_cpu, heir );
  }
}
static inline void _Scheduler_SMP_Allocate_processor_exact(
  Scheduler_Context *context,
  Scheduler_Node    *scheduled,
  Scheduler_Node    *victim,
  Per_CPU_Control   *victim_cpu
)
{
  Thread_Control *scheduled_thread = _Scheduler_Node_get_user( scheduled );
  Per_CPU_Control *cpu_self = _Per_CPU_Get();

  (void) context;
  (void) victim;

  _Thread_Set_CPU( scheduled_thread, victim_cpu );
  _Thread_Dispatch_update_heir( cpu_self, victim_cpu, scheduled_thread );
}

static inline void _Scheduler_SMP_Allocate_processor(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  _Scheduler_SMP_Node_change_state( scheduled, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *allocate_processor )( context, scheduled, victim, victim_cpu );
}
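/*
 * Preempts the victim's thread and allocates a processor for the user of
 * the scheduled node.  A preempted thread which still needs help is
 * registered with its processor's chain of threads in need for help.
 */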
static inline Thread_Control *_Scheduler_SMP_Preempt(
  Scheduler_Context                *context,
  Scheduler_Node                   *scheduled,
  Scheduler_Node                   *victim,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Thread_Control   *victim_thread;
  ISR_lock_Context  scheduler_lock_context;
  Per_CPU_Control  *victim_cpu;

  victim_thread = _Scheduler_Node_get_user( victim );
  _Scheduler_SMP_Node_change_state( victim, SCHEDULER_SMP_NODE_READY );

  _Thread_Scheduler_acquire_critical( victim_thread, &scheduler_lock_context );

  victim_cpu = _Thread_Get_CPU( victim_thread );

  if ( victim_thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED ) {
    _Scheduler_Thread_change_state( victim_thread, THREAD_SCHEDULER_READY );

    if ( victim_thread->Scheduler.helping_nodes > 0 ) {
      ISR_lock_Context per_cpu_lock_context;

      _Per_CPU_Acquire( victim_cpu, &per_cpu_lock_context );
      _Chain_Append_unprotected(
        &victim_cpu->Threads_in_need_for_help,
        &victim_thread->Scheduler.Help_node
      );
      _Per_CPU_Release( victim_cpu, &per_cpu_lock_context );
    }
  }

  _Thread_Scheduler_release_critical( victim_thread, &scheduler_lock_context );

  _Scheduler_SMP_Allocate_processor(
    context,
    scheduled,
    victim,
    victim_cpu,
    allocate_processor
  );

  return victim_thread;
}
static inline Scheduler_Node *_Scheduler_SMP_Get_lowest_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *filter
)
{
  Scheduler_SMP_Context *self = _Scheduler_SMP_Get_self( context );
  Chain_Control *scheduled = &self->Scheduled;
  Scheduler_Node *lowest_scheduled =
    (Scheduler_Node *) _Chain_Last( scheduled );

  (void) filter;

  _Assert( &lowest_scheduled->Node.Chain != _Chain_Tail( scheduled ) );
  _Assert(
    _Chain_Next( &lowest_scheduled->Node.Chain ) == _Chain_Tail( scheduled )
  );

  return lowest_scheduled;
}
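/*
 * Enqueues a node which may preempt the lowest scheduled node.  Depending
 * on the try-to-schedule outcome the node preempts the lowest scheduled
 * node, exchanges idle threads with it, or is blocked.
 */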
static inline void _Scheduler_SMP_Enqueue_to_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *node,
  Priority_Control                  priority,
  Scheduler_Node                   *lowest_scheduled,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_scheduled_to_ready,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  action = _Scheduler_Try_to_schedule_node(
    context,
    node,
    _Scheduler_Node_get_idle( lowest_scheduled ),
    _Scheduler_SMP_Get_idle_thread
  );

  if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
    _Scheduler_SMP_Preempt(
      context,
      node,
      lowest_scheduled,
      allocate_processor
    );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Release_idle_thread(
      context,
      lowest_scheduled,
      _Scheduler_SMP_Release_idle_thread
    );
  } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
    _Scheduler_SMP_Node_change_state(
      lowest_scheduled,
      SCHEDULER_SMP_NODE_READY
    );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );

    ( *insert_scheduled )( context, node, priority );
    ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

    _Scheduler_Exchange_idle_thread(
      node,
      lowest_scheduled,
      _Scheduler_Node_get_idle( lowest_scheduled )
    );
  } else {
    _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
  }
}
static inline bool _Scheduler_SMP_Enqueue(
  Scheduler_Context                  *context,
  Scheduler_Node                     *node,
  Priority_Control                    insert_priority,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  bool            needs_help;
  Scheduler_Node *lowest_scheduled;

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
    _Scheduler_SMP_Enqueue_to_scheduled(
      context,
      node,
      insert_priority,
      lowest_scheduled,
      insert_scheduled,
      move_from_scheduled_to_ready,
      allocate_processor
    );
    needs_help = false;
  } else {
    ( *insert_ready )( context, node, insert_priority );
    needs_help = true;
  }

  return needs_help;
}
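/*
 * Enqueues a node which was previously scheduled.  The loop is necessary
 * because the highest ready node may not be schedulable; in this case it
 * is blocked and extracted, and the next highest ready node is tried.
 */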
static inline bool _Scheduler_SMP_Enqueue_scheduled(
  Scheduler_Context                *context,
  Scheduler_Node                   *const node,
  Priority_Control                  insert_priority,
  Chain_Node_order                  order,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Insert              insert_ready,
  Scheduler_SMP_Insert              insert_scheduled,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  while ( true ) {
    Scheduler_Node                   *highest_ready;
    Scheduler_Try_to_schedule_action  action;

    highest_ready = ( *get_highest_ready )( context, node );

    /*
     * The node has been extracted from the scheduled chain.  We have to place
     * it now on the scheduled or ready set.
     */
    if (
      node->sticky_level > 0
        && ( *order )( &insert_priority, &highest_ready->Node.Chain )
    ) {
      ( *insert_scheduled )( context, node, insert_priority );

      if ( _Scheduler_Node_get_idle( node ) != NULL ) {
        Thread_Control   *owner;
        ISR_lock_Context  lock_context;

        owner = _Scheduler_Node_get_owner( node );
        _Thread_Scheduler_acquire_critical( owner, &lock_context );

        if ( owner->Scheduler.state == THREAD_SCHEDULER_READY ) {
          _Thread_Scheduler_cancel_need_for_help(
            owner,
            _Thread_Get_CPU( owner )
          );
          _Scheduler_Discard_idle_thread(
            context,
            owner,
            node,
            _Scheduler_SMP_Release_idle_thread
          );
          _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_SCHEDULED );
        }

        _Thread_Scheduler_release_critical( owner, &lock_context );
      }

      return false;
    }

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      _Scheduler_Node_get_idle( node ),
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      Thread_Control *idle;

      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        node,
        allocate_processor
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      idle = _Scheduler_Release_idle_thread(
        context,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      return ( idle == NULL );
    } else if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_IDLE_EXCHANGE ) {
      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_SCHEDULED
      );

      ( *insert_ready )( context, node, insert_priority );
      ( *move_from_ready_to_scheduled )( context, highest_ready );

      _Scheduler_Exchange_idle_thread(
        highest_ready,
        node,
        _Scheduler_Node_get_idle( node )
      );
      return false;
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  }
}
static inline void _Scheduler_SMP_Extract_from_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node
)
{
  (void) context;
  _Chain_Extract_unprotected( &node->Node.Chain );
}
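/*
 * Schedules the highest ready node on the victim's processor, retrying
 * while the try-to-schedule operation requests to block the candidate.
 */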
static inline void _Scheduler_SMP_Schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Allocate_processor(
        context,
        highest_ready,
        victim,
        victim_cpu,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}
static inline void _Scheduler_SMP_Preempt_and_schedule_highest_ready(
  Scheduler_Context                *context,
  Scheduler_Node                   *victim,
  Per_CPU_Control                  *victim_cpu,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_Try_to_schedule_action action;

  do {
    Scheduler_Node *highest_ready = ( *get_highest_ready )( context, victim );

    action = _Scheduler_Try_to_schedule_node(
      context,
      highest_ready,
      NULL,
      _Scheduler_SMP_Get_idle_thread
    );

    if ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_SCHEDULE ) {
      _Scheduler_SMP_Preempt(
        context,
        highest_ready,
        victim,
        allocate_processor
      );

      ( *move_from_ready_to_scheduled )( context, highest_ready );
    } else {
      _Assert( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );

      _Scheduler_SMP_Node_change_state(
        highest_ready,
        SCHEDULER_SMP_NODE_BLOCKED
      );

      ( *extract_from_ready )( context, highest_ready );
    }
  } while ( action == SCHEDULER_TRY_TO_SCHEDULE_DO_BLOCK );
}
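/*
 * Blocks the thread.  A node which was scheduled gets a replacement
 * scheduled on the thread's processor; a node which was only ready is
 * simply extracted from the ready set.
 */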
static inline void _Scheduler_SMP_Block(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Scheduler_SMP_Extract             extract_from_scheduled,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state  node_state;
  Per_CPU_Control          *thread_cpu;

  node_state = _Scheduler_SMP_Node_state( node );

  thread_cpu = _Scheduler_Block_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Get_idle_thread
  );

  if ( thread_cpu != NULL ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

    if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      ( *extract_from_scheduled )( context, node );
      _Scheduler_SMP_Schedule_highest_ready(
        context,
        node,
        thread_cpu,
        extract_from_ready,
        get_highest_ready,
        move_from_ready_to_scheduled,
        allocate_processor
      );
    } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
      ( *extract_from_ready )( context, node );
    }
  }
}
static inline void _Scheduler_SMP_Unblock(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Update   update,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Node_state  node_state;
  bool                      unblock;

  node_state = _Scheduler_SMP_Node_state( node );
  unblock = _Scheduler_Unblock_node(
    context,
    thread,
    node,
    node_state == SCHEDULER_SMP_NODE_SCHEDULED,
    _Scheduler_SMP_Release_idle_thread
  );

  if ( unblock ) {
    Priority_Control priority;
    bool             needs_help;

    priority = _Scheduler_Node_get_priority( node );
    priority = SCHEDULER_PRIORITY_PURIFY( priority );

    if ( priority != _Scheduler_SMP_Node_priority( node ) ) {
      ( *update )( context, node, priority );
    }

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
      insert_priority = SCHEDULER_PRIORITY_APPEND( priority );
      needs_help = ( *enqueue )( context, node, insert_priority );
    } else {
      _Assert( node_state == SCHEDULER_SMP_NODE_READY );
      _Assert( node->sticky_level > 0 );
      _Assert( node->idle == NULL );
      needs_help = true;
    }

    if ( needs_help ) {
      _Scheduler_Ask_for_help( thread );
    }
  }
}
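/*
 * Updates the priority of the node.  Scheduled and ready nodes are
 * extracted, updated, and re-enqueued; otherwise only the priority is
 * updated and help is requested for ready threads.
 */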
static inline void _Scheduler_SMP_Update_priority(
  Scheduler_Context          *context,
  Thread_Control             *thread,
  Scheduler_Node             *node,
  Scheduler_SMP_Extract       extract_from_ready,
  Scheduler_SMP_Update        update,
  Scheduler_SMP_Enqueue       enqueue,
  Scheduler_SMP_Enqueue       enqueue_scheduled,
  Scheduler_SMP_Ask_for_help  ask_for_help
)
{
  Priority_Control         priority;
  Priority_Control         insert_priority;
  Scheduler_SMP_Node_state node_state;

  insert_priority = _Scheduler_Node_get_priority( node );
  priority = SCHEDULER_PRIORITY_PURIFY( insert_priority );

  if ( priority == _Scheduler_SMP_Node_priority( node ) ) {
    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }

    return;
  }

  node_state = _Scheduler_SMP_Node_state( node );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *update )( context, node, priority );
    ( *enqueue_scheduled )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *update )( context, node, priority );
    ( *enqueue )( context, node, insert_priority );
  } else {
    ( *update )( context, node, priority );

    if ( _Thread_Is_ready( thread ) ) {
      ( *ask_for_help )( context, thread, node );
    }
  }
}
static inline void _Scheduler_SMP_Yield(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue,
  Scheduler_SMP_Enqueue  enqueue_scheduled
)
{
  bool                     needs_help;
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    ( *enqueue_scheduled )( context, node, insert_priority );
    needs_help = false;
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );

    needs_help = ( *enqueue )( context, node, insert_priority );
  } else {
    needs_help = true;
  }

  if ( needs_help ) {
    _Scheduler_Ask_for_help( thread );
  }
}
static inline void _Scheduler_SMP_Insert_scheduled(
  Scheduler_Context *context,
  Scheduler_Node    *node_to_insert,
  Priority_Control   priority_to_insert
)
{
  Scheduler_SMP_Context *self;

  self = _Scheduler_SMP_Get_self( context );

  _Chain_Insert_ordered_unprotected(
    &self->Scheduled,
    &node_to_insert->Node.Chain,
    &priority_to_insert,
    _Scheduler_SMP_Priority_less_equal
  );
}
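/*
 * Asks for help.  A ready thread whose node is blocked either preempts
 * the lowest scheduled node or is inserted into the ready set, depending
 * on the order relation.
 */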
static inline bool _Scheduler_SMP_Ask_for_help(
  Scheduler_Context                  *context,
  Thread_Control                     *thread,
  Scheduler_Node                     *node,
  Chain_Node_order                    order,
  Scheduler_SMP_Insert                insert_ready,
  Scheduler_SMP_Insert                insert_scheduled,
  Scheduler_SMP_Move                  move_from_scheduled_to_ready,
  Scheduler_SMP_Get_lowest_scheduled  get_lowest_scheduled,
  Scheduler_SMP_Allocate_processor    allocate_processor
)
{
  Scheduler_Node   *lowest_scheduled;
  ISR_lock_Context  lock_context;
  bool              success;

  if ( thread->Scheduler.pinned_scheduler != NULL ) {
    /*
     * Pinned threads are not allowed to ask for help.  Return success to
     * break the loop in _Thread_Ask_for_help() early.
     */
    return true;
  }

  lowest_scheduled = ( *get_lowest_scheduled )( context, node );

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if ( thread->Scheduler.state == THREAD_SCHEDULER_READY ) {
    Scheduler_SMP_Node_state node_state;

    node_state = _Scheduler_SMP_Node_state( node );

    if ( node_state == SCHEDULER_SMP_NODE_BLOCKED ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( node );

      if ( ( *order )( &insert_priority, &lowest_scheduled->Node.Chain ) ) {
        _Thread_Scheduler_cancel_need_for_help(
          thread,
          _Thread_Get_CPU( thread )
        );
        _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
        _Thread_Scheduler_release_critical( thread, &lock_context );

        _Scheduler_SMP_Preempt(
          context,
          node,
          lowest_scheduled,
          allocate_processor
        );

        ( *insert_scheduled )( context, node, insert_priority );
        ( *move_from_scheduled_to_ready )( context, lowest_scheduled );

        _Scheduler_Release_idle_thread(
          context,
          lowest_scheduled,
          _Scheduler_SMP_Release_idle_thread
        );
        success = true;
      } else {
        _Thread_Scheduler_release_critical( thread, &lock_context );
        _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_READY );
        ( *insert_ready )( context, node, insert_priority );
        success = false;
      }
    } else if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
      _Thread_Scheduler_cancel_need_for_help(
        thread,
        _Thread_Get_CPU( thread )
      );
      _Scheduler_Discard_idle_thread(
        context,
        thread,
        node,
        _Scheduler_SMP_Release_idle_thread
      );
      _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_SCHEDULED );
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = true;
    } else {
      _Thread_Scheduler_release_critical( thread, &lock_context );
      success = false;
    }
  } else {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    success = false;
  }

  return success;
}
static inline void _Scheduler_SMP_Reconsider_help_request(
  Scheduler_Context     *context,
  Thread_Control        *thread,
  Scheduler_Node        *node,
  Scheduler_SMP_Extract  extract_from_ready
)
{
  ISR_lock_Context lock_context;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  if (
    thread->Scheduler.state == THREAD_SCHEDULER_SCHEDULED
      && _Scheduler_SMP_Node_state( node ) == SCHEDULER_SMP_NODE_READY
      && node->sticky_level == 1
  ) {
    _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );
    ( *extract_from_ready )( context, node );
  }

  _Thread_Scheduler_release_critical( thread, &lock_context );
}
static inline void _Scheduler_SMP_Withdraw_node(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  Thread_Scheduler_state            next_state,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  ISR_lock_Context         lock_context;
  Scheduler_SMP_Node_state node_state;

  _Thread_Scheduler_acquire_critical( thread, &lock_context );

  node_state = _Scheduler_SMP_Node_state( node );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_BLOCKED );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    Per_CPU_Control *thread_cpu;

    thread_cpu = _Thread_Get_CPU( thread );
    _Scheduler_Thread_change_state( thread, next_state );
    _Thread_Scheduler_release_critical( thread, &lock_context );

    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Schedule_highest_ready(
      context,
      node,
      thread_cpu,
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    _Thread_Scheduler_release_critical( thread, &lock_context );
    ( *extract_from_ready )( context, node );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    _Thread_Scheduler_release_critical( thread, &lock_context );
  }
}
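/*
 * Starts an idle thread on the given processor: the idle thread becomes
 * scheduled and its node is appended to the scheduled chain.
 */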
static inline void _Scheduler_SMP_Do_start_idle(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Per_CPU_Control             *cpu,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_SMP_Node    *node;

  self = _Scheduler_SMP_Get_self( context );
  node = _Scheduler_SMP_Thread_get_node( idle );

  _Scheduler_Thread_change_state( idle, THREAD_SCHEDULER_SCHEDULED );
  node->state = SCHEDULER_SMP_NODE_SCHEDULED;

  _Thread_Set_CPU( idle, cpu );
  ( *register_idle )( context, &node->Base, cpu );
  _Chain_Append_unprotected( &self->Scheduled, &node->Base.Node.Chain );
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
}
static inline void _Scheduler_SMP_Add_processor(
  Scheduler_Context           *context,
  Thread_Control              *idle,
  Scheduler_SMP_Has_ready      has_ready,
  Scheduler_SMP_Enqueue        enqueue_scheduled,
  Scheduler_SMP_Register_idle  register_idle
)
{
  Scheduler_SMP_Context *self;
  Scheduler_Node        *node;

  self = _Scheduler_SMP_Get_self( context );
  idle->Scheduler.state = THREAD_SCHEDULER_SCHEDULED;
  _Scheduler_SMP_Release_idle_thread( &self->Base, idle );
  node = _Thread_Scheduler_get_home_node( idle );
  _Scheduler_SMP_Node_change_state( node, SCHEDULER_SMP_NODE_SCHEDULED );
  ( *register_idle )( context, node, _Thread_Get_CPU( idle ) );

  if ( ( *has_ready )( &self->Base ) ) {
    Priority_Control insert_priority;

    insert_priority = _Scheduler_SMP_Node_priority( node );
    insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
    ( *enqueue_scheduled )( &self->Base, node, insert_priority );
  } else {
    _Chain_Append_unprotected( &self->Scheduled, &node->Node.Chain );
  }
}
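/*
 * Removes a processor from the scheduler: the node scheduled on this
 * processor is replaced by an idle thread, and a non-idle victim thread
 * is re-enqueued so that it may run on another processor.
 */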
static inline Thread_Control *_Scheduler_SMP_Remove_processor(
  Scheduler_Context     *context,
  Per_CPU_Control       *cpu,
  Scheduler_SMP_Extract  extract_from_ready,
  Scheduler_SMP_Enqueue  enqueue
)
{
  Scheduler_SMP_Context *self;
  Chain_Node            *chain_node;
  Scheduler_Node        *victim_node;
  Thread_Control        *victim_user;
  Thread_Control        *victim_owner;
  Thread_Control        *idle;

  self = _Scheduler_SMP_Get_self( context );
  chain_node = _Chain_First( &self->Scheduled );

  do {
    _Assert( chain_node != _Chain_Immutable_tail( &self->Scheduled ) );
    victim_node = (Scheduler_Node *) chain_node;
    victim_user = _Scheduler_Node_get_user( victim_node );
    chain_node = _Chain_Next( chain_node );
  } while ( _Thread_Get_CPU( victim_user ) != cpu );

  _Scheduler_SMP_Extract_from_scheduled( context, victim_node );
  victim_owner = _Scheduler_Node_get_owner( victim_node );

  if ( !victim_owner->is_idle ) {
    Scheduler_Node *idle_node;

    _Scheduler_Release_idle_thread(
      &self->Base,
      victim_node,
      _Scheduler_SMP_Release_idle_thread
    );
    idle = _Scheduler_SMP_Get_idle_thread( &self->Base );
    idle_node = _Thread_Scheduler_get_home_node( idle );
    ( *extract_from_ready )( &self->Base, idle_node );
    _Scheduler_SMP_Preempt(
      &self->Base,
      idle_node,
      victim_node,
      _Scheduler_SMP_Allocate_processor_exact
    );

    if ( !_Chain_Is_empty( &self->Scheduled ) ) {
      Priority_Control insert_priority;

      insert_priority = _Scheduler_SMP_Node_priority( victim_node );
      insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );
      ( *enqueue )( context, victim_node, insert_priority );
    }
  } else {
    _Assert( victim_owner == victim_user );
    _Assert( _Scheduler_Node_get_idle( victim_node ) == NULL );
    idle = victim_owner;
    _Scheduler_SMP_Exctract_idle_thread( idle );
  }

  return idle;
}
static inline void _Scheduler_SMP_Set_affinity(
  Scheduler_Context                *context,
  Thread_Control                   *thread,
  Scheduler_Node                   *node,
  void                             *arg,
  Scheduler_SMP_Set_affinity        set_affinity,
  Scheduler_SMP_Extract             extract_from_ready,
  Scheduler_SMP_Get_highest_ready   get_highest_ready,
  Scheduler_SMP_Move                move_from_ready_to_scheduled,
  Scheduler_SMP_Enqueue             enqueue,
  Scheduler_SMP_Allocate_processor  allocate_processor
)
{
  Scheduler_SMP_Node_state node_state;
  Priority_Control         insert_priority;

  node_state = _Scheduler_SMP_Node_state( node );
  insert_priority = _Scheduler_SMP_Node_priority( node );
  insert_priority = SCHEDULER_PRIORITY_APPEND( insert_priority );

  if ( node_state == SCHEDULER_SMP_NODE_SCHEDULED ) {
    _Scheduler_SMP_Extract_from_scheduled( context, node );
    _Scheduler_SMP_Preempt_and_schedule_highest_ready(
      context,
      node,
      _Thread_Get_CPU( thread ),
      extract_from_ready,
      get_highest_ready,
      move_from_ready_to_scheduled,
      allocate_processor
    );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else if ( node_state == SCHEDULER_SMP_NODE_READY ) {
    ( *extract_from_ready )( context, node );
    ( *set_affinity )( context, node, arg );
    ( *enqueue )( context, node, insert_priority );
  } else {
    _Assert( node_state == SCHEDULER_SMP_NODE_BLOCKED );
    ( *set_affinity )( context, node, arg );
  }
}
#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_SCORE_SCHEDULERSMPIMPL_H */