예제 #1
0
    def _generate_transitive_trace_frames(self, run: Run,
                                          start_frame: TraceFrame,
                                          leaf_ids: Set[int]):
        """Generates all trace frames reachable from start_frame, provided
        they contain a leaf_id from the initial set of leaf_ids.

        :param run: Run the generated trace frames belong to.
        :param start_frame: Frame the traversal starts from.
        :param leaf_ids: Leaf ids that reachable frames must carry.
        """
        kind = start_frame.kind
        queue = [(start_frame, leaf_ids)]
        while queue:
            frame, leaves = queue.pop()
            if not leaves:
                continue

            frame_id = frame.id.local_id
            if frame_id in self.visited_frames:
                # Only keep propagating leaves this frame has not already
                # been visited with, to avoid re-walking the same subtrees.
                leaves = leaves - self.visited_frames[frame_id]
                if not leaves:
                    continue
                self.visited_frames[frame_id].update(leaves)
            else:
                self.visited_frames[frame_id] = leaves

            next_frames = self._get_or_populate_trace_frames(
                kind, run, frame.callee_id, caller_port=frame.callee_port)
            # Use the instance `intersection` method instead of calling
            # through the typing.Set alias (which pyre flagged and which
            # relies on deprecated attribute forwarding). Also avoid
            # shadowing the outer `frame` variable in the comprehension.
            queue.extend([
                (next_frame, leaves.intersection(frame_leaves))
                for (next_frame, frame_leaves) in next_frames
            ])
    def offline_stage(self, processor_definition: Processor,
                      environment_specification: Environment,
                      task_set: TaskSet) -> int:
        """
        Method to implement with the offline stage scheduler tasks

        :param environment_specification: Specification of the environment
        :param processor_definition: Specification of the cpu
        :param task_set: Tasks in the system
        :return: CPU frequency
        """
        # Number of cores in the platform
        m = len(processor_definition.cores_definition)

        # Frequencies supported by every core simultaneously. Use the
        # builtin set type directly; calling methods through the typing.Set
        # alias relies on deprecated attribute forwarding.
        clock_available_frequencies = set.intersection(*[
            i.core_type.available_frequencies
            for i in processor_definition.cores_definition.values()
        ])

        self.__m = m

        # Relative deadline of every task (periodic, aperiodic and sporadic),
        # indexed by task identifier.
        self.__tasks_relative_deadline = {
            i.identifier: i.relative_deadline
            for i in task_set.periodic_tasks + task_set.aperiodic_tasks +
            task_set.sporadic_tasks
        }

        # Run at the highest frequency shared by all cores
        return max(clock_available_frequencies)
예제 #3
0
    def check_schedulability(self, processor_definition: Processor,
                             environment_specification: Environment,
                             task_set: TaskSet) -> [bool, Optional[str]]:
        """
        Return true if the scheduler is able to schedule the system. In the
        negative case, it can return a reason. For example, a scheduler that
        only can work with periodic tasks with phase=0, can return
        [false, "Only can schedule tasks with phase=0"]

        :param environment_specification: Specification of the environment
        :param processor_definition: Specification of the cpu
        :param task_set: Tasks in the system
        :return: True if the task set is schedulable, plus an optional
         error-message string when it is not
        """
        # All periodic tasks must start at phase 0
        only_0_phase = all(i.phase is None or i.phase == 0
                           for i in task_set.periodic_tasks)

        # No sporadic or aperiodic tasks allowed
        only_periodic_tasks = len(task_set.sporadic_tasks) + len(
            task_set.aperiodic_tasks) == 0

        # Deadlines must equal periods (implicit-deadline model)
        only_implicit_deadline = all(i.relative_deadline == i.period
                                     for i in task_set.periodic_tasks)

        only_fully_preemptive = all(
            i.preemptive_execution == PreemptiveExecution.FULLY_PREEMPTIVE
            for i in task_set.periodic_tasks)

        if not (only_0_phase and only_periodic_tasks and only_implicit_deadline
                and only_fully_preemptive):
            return False, "Error: Only implicit deadline, fully preemptive, 0 phase periodic tasks are allowed"

        m = len(processor_definition.cores_definition)

        # Frequencies supported by every core simultaneously. Use the builtin
        # set type directly instead of the typing.Set alias (deprecated
        # attribute forwarding).
        clock_available_frequencies = list(
            set.intersection(*[
                i.core_type.available_frequencies
                for i in processor_definition.cores_definition.values()
            ]))

        # Calculate F start
        major_cycle = calculate_major_cycle(task_set)

        # A frequency is feasible when (a) the total demand over a major
        # cycle fits on m processors and (b) no single task needs more than
        # one processor
        available_frequencies = [
            actual_frequency
            for actual_frequency in clock_available_frequencies if sum([
                i.worst_case_execution_time * round(major_cycle / i.period)
                for i in task_set.periodic_tasks
            ]) <= m * round(major_cycle * actual_frequency) and all([
                i.worst_case_execution_time * round(major_cycle / i.period) <=
                round(major_cycle * actual_frequency)
                for i in task_set.periodic_tasks
            ])
        ]

        if len(available_frequencies) == 0:
            return False, "Error: Schedule is not feasible"

        # All tests passed
        return True, None
예제 #4
0
            def offline_stage(self, cpu_specification: Processor,
                              environment_specification: Environment,
                              task_set: TaskSet) -> int:
                """
                Offline stage: select the maximum frequency shared by all
                cores.

                :param cpu_specification: Specification of the cpu
                :param environment_specification: Specification of the
                 environment
                :param task_set: Tasks in the system
                :return: CPU frequency
                """
                # Builtin set type used directly instead of the typing.Set
                # alias (deprecated attribute forwarding)
                clock_available_frequencies = set.intersection(*[
                    i.core_type.available_frequencies
                    for i in cpu_specification.cores_definition.values()
                ])

                return max(clock_available_frequencies)
예제 #5
0
            def offline_stage(self, cpu_specification: Processor,
                              environment_specification: Environment,
                              task_set: TaskSet) -> int:
                """
                Offline stage: record the number of cores and the task
                priorities, then select the maximum frequency shared by all
                cores.

                :param cpu_specification: Specification of the cpu
                :param environment_specification: Specification of the
                 environment
                :param task_set: Tasks in the system
                :return: CPU frequency
                """
                # Number of cores in the platform
                self.__m = len(cpu_specification.cores_definition)

                # Priority of every task (periodic, aperiodic and sporadic),
                # indexed by task identifier
                self.__tasks_priority = {
                    i.identifier: i.priority
                    for i in task_set.periodic_tasks +
                    task_set.aperiodic_tasks + task_set.sporadic_tasks
                }

                # Builtin set type used directly instead of the typing.Set
                # alias (deprecated attribute forwarding)
                clock_available_frequencies = set.intersection(*[
                    i.core_type.available_frequencies
                    for i in cpu_specification.cores_definition.values()
                ])

                return max(clock_available_frequencies)
예제 #6
0
    def offline_stage(self, processor_definition: Processor,
                      environment_specification: Environment,
                      task_set: TaskSet) -> int:
        """
        Method to implement with the offline stage scheduler tasks

        Searches the lowest common core frequency (F*) at which the periodic
        task set is feasible, partitions the tasks into clusters of cores,
        and precomputes the scheduling points of every cluster over one
        major cycle.

        :param environment_specification: Specification of the environment
        :param processor_definition: Specification of the cpu
        :param task_set: Tasks in the system
        :return: CPU frequency (F*, in Hz)
        """
        # Number of cores in the platform
        m = len(processor_definition.cores_definition)

        # Frequencies supported by every core simultaneously
        clock_available_frequencies = Set.intersection(*[
            i.core_type.available_frequencies
            for i in processor_definition.cores_definition.values()
        ])

        # Calculate F start
        major_cycle = calculate_major_cycle(task_set)
        self.__major_cycle = major_cycle

        # A frequency is feasible when (a) the total demand over a major
        # cycle fits on m processors and (b) no single task exceeds one
        # processor
        available_frequencies = (
            actual_frequency
            for actual_frequency in clock_available_frequencies if sum([
                i.worst_case_execution_time * round(major_cycle / i.period)
                for i in task_set.periodic_tasks
            ]) <= m * round(major_cycle * actual_frequency) and all([
                i.worst_case_execution_time * round(major_cycle / i.period) <=
                round(major_cycle * actual_frequency)
                for i in task_set.periodic_tasks
            ]))

        # F star in HZ
        # NOTE(review): min() raises ValueError if no frequency is feasible;
        # presumably check_schedulability was run beforehand — confirm
        f_star_hz = min(available_frequencies)

        # Fast lookup of periodic tasks by identifier
        periodic_tasks_dict = {
            i.identifier: i
            for i in task_set.periodic_tasks
        }

        # Task set expressed in cycles at F* (implicit-deadline model)
        task_set_calecs: Dict[int, ImplicitDeadlineTask] = {
            i.identifier: ImplicitDeadlineTask(i.worst_case_execution_time,
                                               round(i.period * f_star_hz))
            for i in task_set.periodic_tasks
        }

        # Major cycle in cycles: LCM of the deadlines expressed in cycles
        major_cycle_cycles: int = list_int_lcm(
            [i.d for i in task_set_calecs.values()])

        # Cycles demanded by all jobs over one major cycle
        used_cycles = sum([
            i.c * (major_cycle_cycles // i.d)
            for i in task_set_calecs.values()
        ])

        # Smallest processor count whose capacity covers the demand
        number_of_used_processors = next(
            i for i in range(1, m + 1)
            if major_cycle_cycles * i >= used_cycles)

        free_cycles = major_cycle_cycles * number_of_used_processors - used_cycles

        # Absorb spare capacity with a dummy task (identifier -1) so the
        # partitioning works on a fully-utilized system
        if free_cycles != 0:
            task_set_calecs[-1] = ImplicitDeadlineTask(free_cycles,
                                                       major_cycle_cycles)

        partition_algorithm = BestFitDescendantBPPBasedPartitionAlgorithm()

        # Each partition is (number of cores used, set of task identifiers)
        partitions_obtained: List[Tuple[
            int, Set[int]]] = partition_algorithm.do_partition(
                task_set_calecs, number_of_used_processors)

        # Save the clusters obtained
        # NOTE(review): only recorded when the attribute was pre-initialized
        # (not None); presumably a debug/inspection switch — confirm
        if self.__clusters_obtained is not None:
            self.__clusters_obtained = [i for i, _ in partitions_obtained]

        last_cpu_id_used = 0

        # Per-cluster scheduling points, keyed by cycle number
        clusters_scheduling_points = []

        # Partitions done
        for utilization, task_set_loop in partitions_obtained:
            # Major cycle of this cluster in cycles (dummy task excluded)
            local_major_cycle = list_int_lcm([
                round(periodic_tasks_dict[i].relative_deadline * f_star_hz)
                for i in task_set_loop if i != -1
            ])
            number_of_major_cycles = major_cycle_cycles // local_major_cycle

            # Cores used
            local_used_cores_ids: List[int] = list(
                range(last_cpu_id_used, last_cpu_id_used + utilization))

            if utilization == 1:
                # Single-core cluster: plain EDF cyclic executive
                scheduling_points = obtain_edf_cyclic_executive(
                    periodic_tasks=[
                        periodic_tasks_dict[i] for i in task_set_loop
                        if i != -1
                    ],
                    processor_frequency=f_star_hz)
            else:
                # Multi-core cluster: build a reduced processor definition
                # restricted to the cluster cores, all locked at F*
                local_used_cores = [
                    (i,
                     Core(location=processor_definition.cores_definition[j].
                          location,
                          core_type=CoreModel(
                              dimensions=processor_definition.
                              cores_definition[j].core_type.dimensions,
                              material=processor_definition.
                              cores_definition[j].core_type.material,
                              core_energy_consumption=processor_definition.
                              cores_definition[j].core_type.
                              core_energy_consumption,
                              available_frequencies={f_star_hz})))
                    # preemption_cost=
                    # processor_definition.cores_definition[j].core_type.preemption_cost)))
                    for i, j in enumerate(local_used_cores_ids)
                ]

                # migration_costs = {
                #     (i, j): processor_definition.migration_costs[(local_used_cores_ids[i], local_used_cores_ids[j])]
                #     for i in range(utilization) for j in range(utilization) if i != j}

                local_processor_definition = Processor(
                    board_definition=processor_definition.board_definition,
                    cores_definition={
                        k: j
                        for k, (i, j) in enumerate(local_used_cores)
                    },
                    measure_unit=processor_definition.measure_unit)
                # migration_costs=migration_costs)

                # Task set restricted to the cluster (dummy task excluded)
                local_task_set = TaskSet(periodic_tasks=[
                    periodic_tasks_dict[i] for i in task_set_loop if i != -1
                ],
                                         aperiodic_tasks=[],
                                         sporadic_tasks=[])

                # Delegate the cluster schedule to a nested SALECS scheduler
                local_scheduler = SALECS(self.is_debug)
                local_scheduler.offline_stage(
                    processor_definition=local_processor_definition,
                    task_set=local_task_set,
                    environment_specification=environment_specification)
                scheduling_points = local_scheduler.get_scheduling_points()

            # Update last used CPU
            last_cpu_id_used += utilization

            # Replicate scheduling points number_of_major_cycles times
            # Translate scheduling points to real CPUs IDs
            cluster_execution_interval = {
                i + (local_major_cycle * k):
                {local_used_cores_ids[r]: q
                 for r, q in j.items()}
                for i, j in scheduling_points.items()
                for k in range(number_of_major_cycles)
            }

            # Append data to global scheduling_points
            clusters_scheduling_points.append(cluster_execution_interval)

        # Obtain all scheduling points
        scheduling_points_global = Set.union(
            *[set(i.keys()) for i in clusters_scheduling_points])

        # Most recent scheduling point seen so far for each cluster
        last_scheduling_point = [
            {} for _ in range(len(clusters_scheduling_points))
        ]

        # Merge cluster schedules: at every global point, combine the latest
        # known assignment of each cluster into one CPU->task mapping
        for i in sorted(scheduling_points_global):
            # Update last scheduling points
            for j, k in enumerate(clusters_scheduling_points):
                if k.__contains__(i):
                    last_scheduling_point[j] = k[i]

            # Update actual scheduling point
            actual_scheduling_point = {}
            for j in last_scheduling_point:
                actual_scheduling_point.update(j)

            self.__scheduling_points[i] = actual_scheduling_point

        return f_star_hz
예제 #7
0
    def offline_stage(self, processor_definition: Processor,
                      environment_specification: Environment,
                      task_set: TaskSet) -> int:
        """
        Method to implement with the offline stage scheduler tasks

        Builds the RUN reduction tree at the maximum common core frequency
        and precomputes the scheduling points over one whole major cycle.

        :param environment_specification: Specification of the environment
        :param processor_definition: Specification of the cpu
        :param task_set: Tasks in the system
        :return: CPU frequency
        """
        # Select always the maximum frequency shared by all cores. Use the
        # builtin set type directly; calling methods through the typing.Set
        # alias relies on deprecated attribute forwarding.
        selected_frequency = max(
            set.intersection(*[
                i.core_type.available_frequencies
                for i in processor_definition.cores_definition.values()
            ]))

        # Periodic tasks expressed in cycles at the selected frequency
        task_set_run = [
            _RUNTask(i.identifier, i.worst_case_execution_time,
                     int(i.period * selected_frequency))
            for i in task_set.periodic_tasks
        ]

        major_cycle = calculate_major_cycle(task_set)

        # Major cycle in cycles: LCM of the periods expressed in cycles
        major_cycle_in_cycles = list_int_lcm([
            int(i.period * selected_frequency) for i in task_set.periodic_tasks
        ])

        # Cycles demanded by all jobs over one major cycle
        used_cycles = sum(
            [i.c * (major_cycle_in_cycles // i.d) for i in task_set_run])

        m = len(processor_definition.cores_definition)

        free_cycles = major_cycle_in_cycles * m - used_cycles

        # Absorb spare capacity with a dummy task (identifier -1) so the RUN
        # reduction operates on a fully-utilized system
        if free_cycles != 0:
            task_set_run.append(
                _RUNTask(-1, free_cycles, major_cycle_in_cycles))

        run_tree = _create_tree(task_set_run)

        # Record cluster utilizations only when recording was enabled (the
        # attribute was pre-initialized to a non-None value)
        if self.__clusters_obtained is not None:
            self.__clusters_obtained = [
                round(_obtain_utilization_of_run_pack_subtree(i))
                for i in run_tree
            ]

        # Tasks periods in cycles, indexed by task identifier
        tasks_periods_cycles: Dict[int, int] = {
            i.identifier: int(i.period * selected_frequency)
            for i in task_set.periodic_tasks
        }

        # Compute the schedule for a major cycle
        scheduling_points: Dict[int, Dict[int, int]] = {}

        # -1 marks an idle CPU
        previous_tasks_being_executed: List[int] = m * [-1]

        for actual_cycle in range(0, major_cycle_in_cycles):
            selected_tasks = _select_tasks_to_execute(run_tree, actual_cycle)
            tasks_being_executed = _assign_tasks_to_cpu(
                selected_tasks, previous_tasks_being_executed, m)

            # Mark a scheduling point when the CPU assignment changes or any
            # running task starts a new period
            if previous_tasks_being_executed != tasks_being_executed or any(
                    actual_cycle % tasks_periods_cycles[i] == 0
                    for i in tasks_being_executed if i != -1):
                scheduling_points[actual_cycle] = {
                    i: j
                    for i, j in enumerate(tasks_being_executed) if j != -1
                }

            previous_tasks_being_executed = tasks_being_executed

        self.__scheduling_points = scheduling_points
        self.__major_cycle = major_cycle

        return selected_frequency
예제 #8
0
    def offline_stage(self, processor_definition: Processor,
                      environment_specification: Environment,
                      task_set: TaskSet) -> int:
        """
        Method to implement with the offline stage scheduler tasks

        Searches the lowest feasible common core frequency (F*), solves a
        linear programming problem that distributes task cycles over
        execution intervals, and precomputes the scheduling points of one
        major cycle.

        :param environment_specification: Specification of the environment
        :param processor_definition: Specification of the cpu
        :param task_set: Tasks in the system
        :return: CPU frequency (F*, in Hz)
        """
        # Number of cores in the platform
        m = len(processor_definition.cores_definition)

        # Frequencies supported by every core simultaneously
        clock_available_frequencies = list(
            Set.intersection(*[
                i.core_type.available_frequencies
                for i in processor_definition.cores_definition.values()
            ]))

        # Calculate F start
        major_cycle = calculate_major_cycle(task_set)

        # A frequency is feasible when the total demand over a major cycle
        # fits on m processors and no single task exceeds one processor
        available_frequencies = [
            actual_frequency
            for actual_frequency in clock_available_frequencies if sum([
                i.worst_case_execution_time * round(major_cycle / i.period)
                for i in task_set.periodic_tasks
            ]) <= m * round(major_cycle * actual_frequency) and all([
                i.worst_case_execution_time * round(major_cycle / i.period) <=
                round(major_cycle * actual_frequency)
                for i in task_set.periodic_tasks
            ])
        ]

        # F star in HZ
        # NOTE(review): min() raises ValueError if no frequency is feasible;
        # presumably check_schedulability was run beforehand — confirm
        f_star_hz = min(available_frequencies)

        # Number of cycles
        # Worst-case execution times (cci) and periods (tci) in cycles at F*
        cci = [i.worst_case_execution_time for i in task_set.periodic_tasks]
        tci = [int(i.period * f_star_hz) for i in task_set.periodic_tasks]

        # Add dummy task if needed
        major_cycle_in_cycles = int(major_cycle * f_star_hz)

        # Number of jobs of each task over one major cycle
        a_i = [round(major_cycle_in_cycles / i) for i in tci]

        # Check if it's needed a dummy task
        total_used_cycles = sum([i[0] * i[1] for i in zip(cci, a_i)])

        # The dummy task absorbs spare capacity so the LPP operates on a
        # fully-utilized system
        if total_used_cycles < m * major_cycle_in_cycles:
            cci.append(m * major_cycle_in_cycles - total_used_cycles)
            tci.append(major_cycle_in_cycles)

        # Linear programing problem
        # x[i, j] = cycles of task i assigned to execution interval j
        interval_start_list, x = self.aiecs_periods_lpp_glop(cci, tci, m)
        x = numpy.array(x)

        # Delete dummy task
        x = x[:-1, :] if total_used_cycles < m * major_cycle_in_cycles else x

        # Index to id
        index_to_id = {
            i: j.identifier
            for i, j in enumerate(task_set.periodic_tasks)
        }

        # Low the range of sd, x
        # Simulation quantum: GCD of all non-zero assignments and of the
        # interval start offsets
        range_quantum = list_int_gcd([
            x[i, j] for i in range(x.shape[0])
            for j in range(x.shape[1]) if x[i, j] != 0
        ] + [i for i in interval_start_list[1:]])

        # Compute the schedule for a major cycle
        scheduling_points: Dict[int, Dict[int, int]] = {}

        # Remaining cycles of every task within the current interval
        actual_interval_cc: List[int] = []
        actual_interval_end: int = 0

        # -1 marks an idle CPU
        tasks_being_executed: List[int] = m * [-1]

        for i in range(0, major_cycle_in_cycles, range_quantum):
            # Obtain all assignations
            interval_have_ended_this_cycle = False
            if i == actual_interval_end:
                # A new interval starts: load its cycle assignments
                interval_index = interval_start_list.index(i)
                actual_interval_cc = x[:, interval_index].tolist()
                actual_interval_end = interval_start_list[interval_index + 1]
                interval_have_ended_this_cycle = True

            previous_tasks_being_executed = tasks_being_executed
            if self.__offline_stage_check_interrupt(
                    tasks_being_executed, actual_interval_cc,
                    actual_interval_end - i, interval_have_ended_this_cycle):
                tasks_being_executed = self.__schedule_policy_imp(
                    tasks_being_executed, actual_interval_cc,
                    actual_interval_end - i)

            # Update CC
            for j in (r for r in tasks_being_executed if r != -1):
                actual_interval_cc[j] = actual_interval_cc[j] - range_quantum

            # Mark for scheduling point
            # TODO: This can be a bit improved, because not all interval ends the scheduler should
            # be called, neither when a task end his execution
            # (the i/j inside the comprehension shadow the loop variables
            # only within the comprehension scope: keys are CPU indexes,
            # values are task identifiers)
            if interval_have_ended_this_cycle or previous_tasks_being_executed != tasks_being_executed:
                scheduling_points[i] = {
                    i: index_to_id[j]
                    for i, j in enumerate(tasks_being_executed) if j != -1
                }

        self.__scheduling_points = scheduling_points
        self.__major_cycle = major_cycle

        # Only used for debug purposes
        scheduling_points_debug_activated = False

        if scheduling_points_debug_activated:
            # Expand the sparse scheduling points into a dense
            # (core, quantum) matrix; -1 marks idle, -2 unreached cells
            scheduling_points_debug = -2 * numpy.ones(
                (m, major_cycle_in_cycles // range_quantum))

            # For each quantum, the most recent scheduling point at or
            # before it
            index_sp = {
                i: max(j for j in scheduling_points.keys()
                       if j <= i * range_quantum)
                for i in range(major_cycle_in_cycles // range_quantum)
            }

            for i in range(major_cycle_in_cycles // range_quantum):
                for j in range(m):
                    scheduling_points_debug[j, i] = scheduling_points[
                        index_sp[i]][j] if scheduling_points[
                            index_sp[i]].__contains__(j) else -1

        return f_star_hz
예제 #9
0
def _execute_centralized_scheduler_simulation(
        jobs: List[Job], tasks: TaskSet, processor_definition: Processor,
        environment_specification: Environment,
        scheduler: CentralizedScheduler,
        simulation_options: SimulationConfiguration,
        simulation_start_time: float,
        simulation_end_time: float) -> RawSimulationResult:
    """
    Run a simulation using a centralized scheduler

    :param jobs: Jobs in the system
    :param simulation_start_time: Time in seconds where the system start to make decisions. Time 0 is the start of the
     first major cycle
    :param simulation_end_time: Time in seconds since the start of the first major cycle where the simulation ends.
    :param tasks: Group of tasks in the system
    :param processor_definition: Definition of the CPU to use
    :param environment_specification: Specification of the environment
    :param scheduler: Centralized scheduler to use
    :param simulation_options: Options of the simulation
    :return: Simulation result
    """
    # Possible frequencies
    # As we are simulating with a centralized scheduler, only frequencies possibles in all cores are available
    available_frequencies = Set.intersection(*[
        i.core_type.available_frequencies
        for i in processor_definition.cores_definition.values()
    ])

    if len(available_frequencies) == 0:
        return RawSimulationResult(
            have_been_scheduled=False,
            scheduler_acceptance_error_message=
            "at least one frequency must be shared by all" +
            " cores in a centralized scheduler simulation",
            job_sections_execution={},
            cpus_frequencies={},
            scheduling_points=[],
            temperature_measures={},
            hard_real_time_deadline_missed_stack_trace=None,
            memory_usage_record=None)

    # Unit mesh division
    if simulation_options.processor_mesh_division < 1:
        return RawSimulationResult(
            have_been_scheduled=False,
            scheduler_acceptance_error_message=
            "mesh division must be greater than 0",
            job_sections_execution={},
            cpus_frequencies={},
            scheduling_points=[],
            temperature_measures={},
            hard_real_time_deadline_missed_stack_trace=None,
            memory_usage_record=None)

    # Number of cpus
    number_of_cpus = len(processor_definition.cores_definition)

    # Check if CPU ids go from 0 to number_of_tasks - 1
    cpus_ids_corrects: bool = all(
        0 <= i < number_of_cpus
        for i in processor_definition.cores_definition.keys())

    if not cpus_ids_corrects:
        return RawSimulationResult(
            have_been_scheduled=False,
            scheduler_acceptance_error_message=
            "Processors id must go from 0 to the number of" + " CPUS - 1",
            job_sections_execution={},
            cpus_frequencies={},
            scheduling_points=[],
            temperature_measures={},
            hard_real_time_deadline_missed_stack_trace=None,
            memory_usage_record=None)

    # Check if scheduler is capable of execute task set
    can_schedule, error_message = scheduler.check_schedulability(
        processor_definition, environment_specification, tasks)

    if not can_schedule:
        return RawSimulationResult(
            have_been_scheduled=False,
            scheduler_acceptance_error_message="the scheduler can't schedule"
            if error_message is None else error_message,
            job_sections_execution={},
            cpus_frequencies={},
            scheduling_points=[],
            temperature_measures={},
            hard_real_time_deadline_missed_stack_trace=None,
            memory_usage_record=None)

    # Run scheduler offline phase
    cpu_frequency = scheduler.offline_stage(processor_definition,
                                            environment_specification, tasks)

    # Create data structures for the simulation
    # Max frequency
    lcm_frequency = list_int_lcm(list(available_frequencies))

    # Dict with activation and deadlines
    activation_dict, deadlines_dict = _create_deadline_arrive_dict(
        lcm_frequency, jobs)

    # Jobs CC dict by id (this value is constant and only should be used for fast access to the original cc)
    jobs_cc_dict: Dict[int,
                       int] = {i.identifier: i.execution_time
                               for i in jobs}

    # Remaining jobs CC dict by id
    remaining_cc_dict: Dict[int, int] = jobs_cc_dict.copy()

    # Simulation step
    actual_lcm_cycle: int = round(simulation_start_time * lcm_frequency)
    final_lcm_cycle: int = round(simulation_end_time * lcm_frequency)

    # Major cycle
    major_cycle_lcm = list_int_lcm(
        [round(i.period * lcm_frequency) for i in tasks.periodic_tasks])

    # Jobs to task dict
    jobs_to_task_dict = {i.identifier: i.task.identifier for i in jobs}

    # Activate jobs set
    active_jobs = set()

    # Hard deadline task miss deadline
    hard_rt_task_miss_deadline = False

    # Only must take value if a hard real time is missed
    hard_real_time_deadline_missed_stack_trace: Optional[
        SimulationStackTraceHardRTDeadlineMissed] = None

    # Jobs type dict
    hard_real_time_jobs: Set[int] = {
        i.identifier
        for i in jobs if i.task.deadline_criteria == Criticality.HARD
    }
    firm_real_time_jobs: Set[int] = {
        i.identifier
        for i in jobs if i.task.deadline_criteria == Criticality.FIRM
    }
    non_preemptive_jobs: Set[int] = {
        i.identifier
        for i in jobs
        if i.task.preemptive_execution == PreemptiveExecution.NON_PREEMPTIVE
    }

    # When is set the next scheduling point by quantum
    next_scheduling_point = None

    # Jobs being executed
    jobs_being_executed_id: Dict[int, int] = {}

    #  Raw execution result tables
    job_sections_execution: Dict[int, List[JobSectionExecution]] = {
        i: []
        for i in range(number_of_cpus)
    }  # List of jobs executed by each core
    cpus_frequencies: Dict[int, List[CPUUsedFrequency]] = {
        i: []
        for i in range(number_of_cpus)
    }  # List of CPU frequencies used by each core
    scheduling_points: List[float] = [
    ]  # Points where the scheduler have made an scheduling
    temperature_measures: Dict[float, Dict[int, PhysicalCuboid]] = {
    }  # Measures of temperature

    # Jobs being executed extra information [CPU, [start time]]
    jobs_last_section_start_time: Dict[int, float] = {
        i.identifier: -1
        for i in jobs
    }
    jobs_last_cpu_used: Dict[int, int] = {i.identifier: -1 for i in jobs}
    jobs_last_preemption_remaining_cycles: Dict[int, int] = {
        i.identifier: -1
        for i in jobs
    }

    # Last time frequency was set
    last_frequency_set_time = simulation_start_time

    # Board id
    board_thermal_id: int = number_of_cpus

    # Available memory
    jobs_memory_consumption: Dict[int, int] = {
        i.identifier:
        i.task.memory_footprint if i.task.memory_footprint is not None else 0
        for i in jobs
    }
    memory_usage: int = 0
    memory_usage_record: Dict[float, int] = {}

    # Energy management objects
    cubed_space: Optional[Model] = None
    initial_state: Optional[SimulationState] = None
    core_frequency_energy_activator: Optional[Dict[Tuple[int, int],
                                                   int]] = None
    core_task_energy_activator: Optional[Dict[Tuple[int, int], int]] = None

    # Thermal options
    if simulation_options.simulate_thermal_behaviour:
        cubed_space, initial_state, core_frequency_energy_activator, core_task_energy_activator = _generate_cubed_space(
            tasks, processor_definition, environment_specification,
            simulation_options, board_thermal_id)

    # Main control loop
    while actual_lcm_cycle < final_lcm_cycle and not hard_rt_task_miss_deadline and \
            len(active_jobs) + len(activation_dict) > 0:
        # Actual time in seconds
        actual_time_seconds = actual_lcm_cycle / lcm_frequency

        # Record temperature
        if simulation_options.simulate_thermal_behaviour:
            cubes_temperatures = cubed_space.obtain_temperature(initial_state)
            temperature_measures[actual_time_seconds] = cubes_temperatures
            cores_max_temperature = obtain_max_temperature(cubes_temperatures)
            cores_max_temperature.pop(board_thermal_id)

        # Record memory usage
        if simulation_options.simulate_memory_footprint:
            memory_usage_record[actual_time_seconds] = memory_usage

        # Major cycle start event
        major_cycle_event_require_scheduling = scheduler.on_major_cycle_start(actual_time_seconds) \
            if actual_lcm_cycle % major_cycle_lcm == 0 else False

        # Job activation events
        activated_this_cycle = [(i, j) for i, j in activation_dict.items()
                                if i <= actual_lcm_cycle]

        for i, j in activated_this_cycle:
            activation_dict.pop(i)
            for k in j:
                active_jobs.add(k)

        activation_event_require_scheduling_list = [
            scheduler.on_jobs_activation(actual_time_seconds,
                                         i / lcm_frequency,
                                         [(k, jobs_to_task_dict[k])
                                          for k in j])
            for i, j in activated_this_cycle
        ]

        activation_event_require_scheduling = any(
            activation_event_require_scheduling_list)

        # Job end event: a job is finished when it has no remaining
        # computation cycles
        jobs_that_have_end = [
            i for i in active_jobs if remaining_cc_dict[i] == 0
        ]

        for i in jobs_that_have_end:
            active_jobs.remove(i)

            # Update RawSimulationResult tables in case that a task ends by cc
            # exhaustion: close the execution section on the core that was
            # running the job and remove it from the executed set
            job_cpu_used = jobs_last_cpu_used[i]
            jobs_being_executed_id.pop(job_cpu_used)
            job_sections_execution[job_cpu_used].append((JobSectionExecution(
                i, jobs_to_task_dict[i], jobs_last_section_start_time[i],
                actual_time_seconds, jobs_last_preemption_remaining_cycles[i] -
                remaining_cc_dict[i])))

            # Release the memory the finished job was consuming
            if simulation_options.simulate_memory_footprint:
                memory_usage = memory_usage - jobs_memory_consumption[i]

        # Notify the scheduler only when at least one job actually ended
        end_event_require_scheduling = scheduler.on_job_execution_finished(actual_time_seconds, jobs_that_have_end) \
            if len(jobs_that_have_end) > 0 else False

        # Job missed deadline events: deadlines_dict maps an lcm-cycle
        # timestamp to the list of job ids whose deadline falls at that instant
        deadline_this_cycle = [(i, j) for i, j in deadlines_dict.items()
                               if i <= actual_lcm_cycle]

        # Consume the processed deadline entries
        for i, _ in deadline_this_cycle:
            deadlines_dict.pop(i)

        # Flatten the per-instant job lists into a single list
        jobs_deadline_this_cycle: List[int] = list(
            itertools.chain(*[j for _, j in deadline_this_cycle]))

        # Only jobs still active at their deadline have actually missed it
        deadline_missed_this_cycle = [
            i for i in jobs_deadline_this_cycle if i in active_jobs
        ]

        for i in (j for j in deadline_missed_this_cycle
                  if j in firm_real_time_jobs):
            active_jobs.remove(i)  # Remove firm real time from active set

            # Update RawSimulationResult tables in case that a task reaches its
            # deadline and is firm: close its execution section if it was
            # actually running on its last used core
            job_cpu_used = jobs_last_cpu_used[i]

            if jobs_being_executed_id.__contains__(
                    job_cpu_used
            ) and jobs_being_executed_id[job_cpu_used] == i:
                jobs_being_executed_id.pop(job_cpu_used)
                job_sections_execution[job_cpu_used].append(
                    (JobSectionExecution(
                        i, jobs_to_task_dict[i],
                        jobs_last_section_start_time[i], actual_time_seconds,
                        jobs_last_preemption_remaining_cycles[i] -
                        remaining_cc_dict[i])))

                # Release the memory the dropped firm job was consuming
                if simulation_options.simulate_memory_footprint:
                    memory_usage = memory_usage - jobs_memory_consumption[i]

        hard_rt_task_miss_deadline = any(
            (i in hard_real_time_jobs for i in deadline_missed_this_cycle
             ))  # If some job is hard real time set the flag

        deadline_missed_event_require_scheduling = False

        if hard_rt_task_miss_deadline:
            # A hard real-time job missed its deadline: record a stack trace
            # with the remaining cycles of every offending hard RT job.
            # The simulation state is frozen from this point on (see the
            # "not hard_rt_task_miss_deadline" guards below).
            hard_real_time_deadline_missed_stack_trace = SimulationStackTraceHardRTDeadlineMissed(
                actual_time_seconds, {
                    j: remaining_cc_dict[j]
                    for j in deadline_missed_this_cycle
                    if j in hard_real_time_jobs
                })
        else:
            # Check if a deadline missed requires rescheduling (only ask the
            # scheduler when at least one deadline was actually missed)
            deadline_missed_event_require_scheduling = scheduler.on_jobs_deadline_missed(actual_time_seconds,
                                                                                         deadline_missed_this_cycle) \
                if len(deadline_missed_this_cycle) > 0 else False

        # Do scheduling if required: any event (major cycle, activation, job
        # end, missed deadline) or a previously requested scheduling point can
        # trigger it, but never after a hard RT deadline miss and never with
        # an empty active set
        if not hard_rt_task_miss_deadline and (
                major_cycle_event_require_scheduling
                or activation_event_require_scheduling
                or end_event_require_scheduling
                or deadline_missed_event_require_scheduling
                or next_scheduling_point
                == actual_lcm_cycle) and len(active_jobs) > 0:
            # Call scheduler: returns the next core->job assignment, an
            # optional cycle budget until the next forced invocation, and an
            # optional new frequency
            jobs_being_executed_id_next, cycles_until_next_scheduler_invocation, cores_frequency_next = \
                scheduler.schedule_policy(actual_time_seconds, active_jobs, jobs_being_executed_id, cpu_frequency, None)

            # A None frequency means "keep the current one"
            if cores_frequency_next is None:
                cores_frequency_next = cpu_frequency

            # Scheduler result checks: selected frequency must be available,
            # assigned cores must exist, assigned jobs must be active, and the
            # requested invocation budget must be positive (or None)
            if simulation_options.scheduler_selections_check:
                bad_scheduler_behaviour = not (
                    available_frequencies.__contains__(cores_frequency_next)
                    and all(
                        (0 <= i < number_of_cpus
                         for i in jobs_being_executed_id_next.keys())) and all(
                             (i in active_jobs
                              for i in jobs_being_executed_id_next.values()))
                    and (cycles_until_next_scheduler_invocation is None
                         or cycles_until_next_scheduler_invocation > 0))

                if bad_scheduler_behaviour:
                    exception_message = "Error due to bad scheduler behaviour\n" + \
                                        "\t Jobs to CPU assignation: " + str(jobs_being_executed_id_next) + "\n" + \
                                        "\t Active jobs: " + str(active_jobs) + "\n" + \
                                        "\t Selected frequency: " + str(cores_frequency_next) + "\n" + \
                                        "\t Available frequencies: " + \
                                        str(available_frequencies) + "\n" + \
                                        "\t Actual time: " + str(actual_time_seconds)
                    raise Exception(exception_message)

            # Check if a non-preemptive task is preempted: if so, its
            # execution time must be restarted from the full job cc
            for i, j in jobs_being_executed_id.items():
                if j in non_preemptive_jobs and remaining_cc_dict[j] > 0 and (
                        not jobs_being_executed_id_next.__contains__(i)
                        or jobs_being_executed_id_next[i] != j):
                    # If a non preemptive task has been preempted, its execution time must be restarted
                    remaining_cc_dict[j] = jobs_cc_dict[j]

            # Check if a task is preempted (core lost or reassigned): close
            # the job's current execution section
            for i, j in jobs_being_executed_id.items():
                if not jobs_being_executed_id_next.__contains__(
                        i) or jobs_being_executed_id_next[i] != j:
                    job_sections_execution[i].append((JobSectionExecution(
                        j, jobs_to_task_dict[j],
                        jobs_last_section_start_time[j], actual_time_seconds,
                        jobs_last_preemption_remaining_cycles[j] -
                        remaining_cc_dict[j])))

            # Check new tasks in execution: open a fresh section for every
            # (core, job) pair that was not running before
            for i, j in jobs_being_executed_id_next.items():
                if not jobs_being_executed_id.__contains__(
                        i) or jobs_being_executed_id[i] != j:
                    jobs_last_preemption_remaining_cycles[
                        j] = remaining_cc_dict[j]
                    jobs_last_section_start_time[j] = actual_time_seconds

            # Check if frequency has changed: close the previous frequency
            # interval on every core
            if cores_frequency_next != cpu_frequency:
                for i in range(number_of_cpus):
                    cpus_frequencies[i].append(
                        CPUUsedFrequency(cores_frequency_next,
                                         last_frequency_set_time,
                                         actual_time_seconds))

                last_frequency_set_time = actual_time_seconds

            # Update memory usage: subtract the jobs leaving execution and add
            # the jobs entering execution
            if simulation_options.simulate_memory_footprint:
                memory_usage = memory_usage - sum(jobs_memory_consumption[i] for i in jobs_being_executed_id.values()) \
                               + sum(jobs_memory_consumption[i] for i in jobs_being_executed_id_next.values())

            # Update RawSimulationResult tables
            scheduling_points.append(actual_time_seconds)

            # Update frequency and executed tasks; translate the scheduler's
            # cycle budget (in CPU cycles) into an absolute lcm-cycle instant
            cpu_frequency = cores_frequency_next
            jobs_being_executed_id = jobs_being_executed_id_next
            next_scheduling_point = (
                (lcm_frequency // cpu_frequency) *
                cycles_until_next_scheduler_invocation + actual_lcm_cycle
            ) if cycles_until_next_scheduler_invocation is not None else None

            # Remember on which core each running job was last executed
            for i, j in jobs_being_executed_id.items():
                jobs_last_cpu_used[j] = i

        # In case that a hard deadline has been missed the state of the
        # variables must be kept without alteration (the cycle counter is not
        # advanced, presumably terminating the enclosing loop — confirm with
        # the loop condition above this chunk)
        if not hard_rt_task_miss_deadline:
            # Next cycle == min(keys(activation_dict), keys(deadline_dict), remaining cycles)
            next_major_cycle: int = major_cycle_lcm * (
                (actual_lcm_cycle // major_cycle_lcm) + 1)

            # Earliest instant at which a currently running job can exhaust
            # its remaining cycles (converted to lcm cycles)
            next_job_end: int = min([
                remaining_cc_dict[i] for i in jobs_being_executed_id.values()
            ]) * (lcm_frequency // cpu_frequency) + actual_lcm_cycle if len(
                jobs_being_executed_id) > 0 else next_major_cycle

            next_job_deadline: int = min(deadlines_dict.keys(
            )) if len(deadlines_dict) != 0 else next_major_cycle

            next_job_activation: int = min(activation_dict.keys(
            )) if len(activation_dict) != 0 else next_major_cycle

            # Earliest of all pending events, including a scheduler-requested
            # invocation point when one exists
            next_lcm_cycle: int = min([
                next_major_cycle, next_job_end, next_job_deadline,
                next_job_activation
            ] + ([next_scheduling_point]
                 if next_scheduling_point is not None else []))

            # This is just ceil((next_lcm_cycle - actual_lcm_cycle) / cpu_frequency) to advance an integer number
            # of cycles.
            # But with this formulation we avoid floating point errors
            cc_to_advance = (((next_lcm_cycle - actual_lcm_cycle) //
                              (lcm_frequency // cpu_frequency)) +
                             (0 if (next_lcm_cycle - actual_lcm_cycle) %
                              (lcm_frequency // cpu_frequency) == 0 else 1))

            # Charge the advanced cycles to every running job
            for i in jobs_being_executed_id.values():
                remaining_cc_dict[i] -= cc_to_advance

            # Obtain temperature in the next simulation point: build the set
            # of external energy activators for the interval being advanced,
            # then apply that energy to the thermal model.
            if simulation_options.simulate_thermal_behaviour:
                if simulation_options.thermal_simulation_type == "DVFS":
                    # Energy depends only on which cores run and the common
                    # frequency
                    external_energy_point_execution = {
                        core_frequency_energy_activator[(used_cpu,
                                                         cpu_frequency)]
                        for used_cpu in jobs_being_executed_id.keys()
                    }

                elif simulation_options.thermal_simulation_type == "TASK_CONSUMPTION_MEASURED":
                    # BUG FIX: the original iterated .keys() (core ids, plain
                    # ints) while unpacking two names, which raised TypeError
                    # whenever this branch ran with jobs in execution.
                    # .items() yields the intended (core, job) pairs.
                    # NOTE(review): the dict values are job ids; confirm
                    # core_task_energy_activator is keyed by job id here,
                    # otherwise the id must be mapped through
                    # jobs_to_task_dict first.
                    external_energy_point_execution = {
                        core_task_energy_activator[(used_cpu, task_executed)]
                        for used_cpu, task_executed in
                        jobs_being_executed_id.items()
                    }
                else:
                    external_energy_point_execution = set()

                # Apply energy over the advanced interval. Plain set methods
                # replace the previous typing.Set alias call, which only
                # worked because typing.Set forwards attributes to
                # builtins.set (and was flagged by the type checker).
                initial_state = cubed_space.apply_energy(
                    actual_state=initial_state,
                    amount_of_time=cc_to_advance / cpu_frequency,
                    external_energy_application_points=
                    external_energy_point_execution.union(
                        range(number_of_cpus)),
                    internal_energy_application_points=set(
                        range(number_of_cpus)))

            # Advance the cycle counter by the number of base (lcm) cycles
            # consumed while executing cc_to_advance CPU cycles
            actual_lcm_cycle += (lcm_frequency //
                                 cpu_frequency) * cc_to_advance

    # In the last cycle update RawSimulationResult tables (All jobs being
    # executed): close the still-open execution section of every running job
    # at the final simulated instant
    for i, j in jobs_being_executed_id.items():
        job_sections_execution[i].append((JobSectionExecution(
            j, jobs_to_task_dict[j], jobs_last_section_start_time[j],
            actual_lcm_cycle / lcm_frequency,
            jobs_last_preemption_remaining_cycles[j] - remaining_cc_dict[j])))

    # Record temperature at the final simulated instant
    if simulation_options.simulate_thermal_behaviour:
        cubes_temperatures = cubed_space.obtain_temperature(initial_state)
        temperature_measures[actual_lcm_cycle /
                             lcm_frequency] = cubes_temperatures

    # In the last cycle update RawSimulationResult tables (Used frequencies):
    # close the last frequency interval on every core
    for i in range(number_of_cpus):
        cpus_frequencies[i].append(
            CPUUsedFrequency(cpu_frequency, last_frequency_set_time,
                             simulation_end_time))

    # Pack the accumulated tables into the simulation result. The stack trace
    # is non-None only when a hard RT deadline was missed; the memory record
    # is returned only when memory simulation was enabled.
    return RawSimulationResult(
        have_been_scheduled=True,
        scheduler_acceptance_error_message=None,
        job_sections_execution=job_sections_execution,
        cpus_frequencies=cpus_frequencies,
        scheduling_points=scheduling_points,
        temperature_measures=temperature_measures,
        hard_real_time_deadline_missed_stack_trace=
        hard_real_time_deadline_missed_stack_trace,
        memory_usage_record=memory_usage_record
        if simulation_options.simulate_memory_footprint else None)