Beispiel #1
0
def min_min_mapping(tg, ag, shm, logging):
    """
    Min-Min mapping heuristic.

    Repeatedly takes the unmapped task with the smallest WCET and maps it
    on a node offering the smallest completion time (ties broken at
    random). Intended for independent tasks: no link scheduling is
    performed and task criticality is not taken into account.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    print("===========================================")
    print("STARTING MIN-MIN MAPPING")
    batch = Mapping_Functions.unmapped_task_with_smallest_wcet(tg, logging)
    while len(batch) > 0:
        task = batch.pop()
        # every candidate node yields the same, smallest completion time
        candidates = Mapping_Functions.nodes_with_smallest_ct(
            ag, tg, shm, task)
        print("\tCANDIDATE NODES FOR MAPPING: " + str(candidates))
        if len(candidates) > 0:
            node = random.choice(candidates)
            release = tg.node[task]['task'].release
            print("\t\tMAPPING TASK " + str(task) +
                  " WITH RELEASE: " + str(release) +
                  " ---> NODE: " + str(node))
            # record the mapping on both graphs
            tg.node[task]['task'].node = node
            ag.node[node]['PE'].mapped_tasks.append(task)
            ag.node[node]['PE'].utilization += tg.node[task]['task'].wcet

            # the node's health (speed %) becomes a proportional slow-down
            slow_down = 1 + ((100.0 - shm.node[node]['NodeSpeed']) / 100)
            execution = tg.node[task]['task'].wcet * slow_down
            completion = release + execution

            Scheduling_Functions_Nodes.add_tg_task_to_node(
                tg, ag, task, node, release, completion, None)
        if len(batch) == 0:
            # refill with the next batch of smallest-WCET unmapped tasks;
            # the loop terminates once no unmapped task remains
            batch = Mapping_Functions.unmapped_task_with_smallest_wcet(
                tg, logging)
    print("MIN-MIN MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def Min_Min_Mapping(TG, AG, NoCRG, SHM, logging):
    """
    Min-Min mapping heuristic (legacy CamelCase variant).

    Repeatedly maps the unmapped task with the smallest WCET onto a node
    offering the smallest completion time (ties broken at random). Unlike
    the snake_case variant, this one stores task attributes ('Release',
    'WCET', 'Node') directly as keys of the task-graph node dictionary.
    :param TG: Task Graph
    :param AG: Architecture Graph
    :param NoCRG: NoC Routing Graph (unused here; kept for interface parity)
    :param SHM: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    # this function finds the task with the smallest WCET and
    # maps it on the machine that can offer smallest completion time...
    # this means that the mapping algorithm has to take into account the mapping
    # of the edges of the task graph on the links.
    # Note:: this is a heuristic for independent tasks... so we are not going to
    # schedule any link
    # Note 2:: This heuristic is not taking task criticality into account...
    print("===========================================")
    print("STARTING MIN-MIN MAPPING")
    ShortestTasks = Mapping_Functions.unmapped_task_with_smallest_wcet(
        TG, logging)
    while len(ShortestTasks) > 0:
        TaskToBeMapped = ShortestTasks.pop()
        # map the task on the Node that yields smallest Completion time
        CandidateNodes = Mapping_Functions.nodes_with_smallest_ct(
            AG, TG, SHM, TaskToBeMapped)
        print("\tCANDIDATE NODES FOR MAPPING: " + str(CandidateNodes))
        if len(CandidateNodes) > 0:
            ChosenNode = random.choice(CandidateNodes)
            print("\t\tMAPPING TASK " + str(TaskToBeMapped) +
                  " WITH RELEASE: " + str(TG.node[TaskToBeMapped]['Release']) +
                  " ---> NODE: " + str(ChosenNode))
            TG.node[TaskToBeMapped]['Node'] = ChosenNode
            AG.node[ChosenNode]['PE'].MappedTasks.append(TaskToBeMapped)
            AG.node[ChosenNode]['PE'].Utilization += TG.node[TaskToBeMapped][
                'WCET']

            # a node at speed S runs tasks slower by factor 1 + (100 - S)/100
            NodeSpeedDown = 1 + (
                (100.0 - SHM.node[ChosenNode]['NodeSpeed']) / 100)
            TaskExecutionOnNode = TG.node[TaskToBeMapped][
                'WCET'] * NodeSpeedDown
            CompletionOnNode = TG.node[TaskToBeMapped][
                'Release'] + TaskExecutionOnNode

            Scheduling_Functions_Nodes.Add_TG_TaskToNode(
                TG, AG, TaskToBeMapped, ChosenNode,
                TG.node[TaskToBeMapped]['Release'], CompletionOnNode, logging)
        if len(ShortestTasks) == 0:
            # refill with the next batch of smallest-WCET unmapped tasks;
            # the loop ends once every task has been mapped
            ShortestTasks = Mapping_Functions.unmapped_task_with_smallest_wcet(
                TG, logging)
    print("MIN-MIN MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(AG, logging)
    return TG, AG
def max_min_mapping(tg, ag, shm, logging):
    """
    Max-Min mapping heuristic.

    Repeatedly takes the unmapped task with the biggest WCET and maps it
    on a node offering the smallest completion time (ties broken at
    random). Intended for independent tasks: no link scheduling is
    performed and task criticality is not taken into account.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    print ("===========================================")
    print ("STARTING MAX-MIN MAPPING")
    longest_tasks = Mapping_Functions.unmapped_task_with_biggest_wcet(tg, logging)
    while len(longest_tasks) > 0:
        task_to_be_mapped = longest_tasks.pop()
        # map the task on the Node that yields smallest Completion time
        candidate_nodes = Mapping_Functions.nodes_with_smallest_ct(ag, tg, shm, task_to_be_mapped)
        print ("CANDIDATE NODES FOR MAPPING: "+str(candidate_nodes))
        if len(candidate_nodes) > 0:
            chosen_node = random.choice(candidate_nodes)
            if len(candidate_nodes) > 1:
                print ("\tMAPPING TASK "+str(task_to_be_mapped)+" WITH RELEASE: " +
                       str(tg.node[task_to_be_mapped]['task'].release) +
                       " ---> NODE: "+str(chosen_node)+" (RANDOMLY CHOSEN FROM CANDIDATES)")
            else:
                print ("\tMAPPING TASK "+str(task_to_be_mapped)+" WITH RELEASE: " +
                       str(tg.node[task_to_be_mapped]['task'].release) +
                       " ---> NODE: "+str(chosen_node))
            tg.node[task_to_be_mapped]['task'].node = chosen_node
            ag.node[chosen_node]['PE'].mapped_tasks.append(task_to_be_mapped)
            ag.node[chosen_node]['PE'].utilization += tg.node[task_to_be_mapped]['task'].wcet

            # node health (speed %) converts to a proportional slow-down factor
            node_speed_down = 1+((100.0-shm.node[chosen_node]['NodeSpeed'])/100)
            task_execution_on_node = tg.node[task_to_be_mapped]['task'].wcet*node_speed_down
            completion_on_node = tg.node[task_to_be_mapped]['task'].release + task_execution_on_node

            Scheduling_Functions_Nodes.add_tg_task_to_node(tg, ag, task_to_be_mapped, chosen_node,
                                                           tg.node[task_to_be_mapped]['task'].release,
                                                           completion_on_node, None)

        if len(longest_tasks) == 0:
            # refill with the next batch of biggest-WCET unmapped tasks
            longest_tasks = Mapping_Functions.unmapped_task_with_biggest_wcet(tg, logging)
    # bug fix: this heuristic is Max-Min -- the banner previously said "MIN-MAX"
    print ("MAX-MIN MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def minimum_completion_time(tg, ag, shm, logging):
    """
    Minimum Completion Time (MCT) mapping.

    Unlike Min-Min / Max-Min, tasks get no WCET-based priority: each task
    is taken in graph order and mapped on a node with the smallest
    completion time, ties broken at random. Task criticality is not
    taken into account.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging File
    :return: (TG, AG)
    """
    print("===========================================")
    print("STARTING MIN COMPLETION TIME MAPPING")
    for task in tg.nodes():
        best_nodes = Mapping_Functions.nodes_with_smallest_ct(ag, tg, shm, task)
        node = random.choice(best_nodes)
        # record the mapping on both graphs
        tg.node[task]['task'].node = node
        ag.node[node]['PE'].mapped_tasks.append(task)
        ag.node[node]['PE'].utilization += tg.node[task]['task'].wcet

        # the node's health (speed %) becomes a proportional slow-down
        slow_down = 1 + ((100.0 - shm.node[node]['NodeSpeed']) / 100)
        release = tg.node[task]['task'].release
        completion = release + tg.node[task]['task'].wcet * slow_down

        Scheduling_Functions_Nodes.add_tg_task_to_node(
            tg, ag, task, node, release, completion, None)

        print("\tTASK " + str(task) + " MAPPED ON NODE: " + str(node))
    print("MIN COMPLETION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def min_execution_time(tg, ag, shm, logging):
    """
    Minimum Execution Time (MET) mapping.

    Each task (taken in graph order) is mapped on one of the fastest
    nodes, chosen at random; completion time is not considered.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    print("===========================================")
    print("STARTING MIN EXECUTION TIME MAPPING")
    for task in tg.nodes():
        node = random.choice(Mapping_Functions.fastest_nodes(ag, shm))
        # record the mapping on both graphs
        tg.node[task]['task'].node = node
        ag.node[node]['PE'].mapped_tasks.append(task)
        ag.node[node]['PE'].utilization += tg.node[task]['task'].wcet

        # the node's health (speed %) becomes a proportional slow-down
        slow_down = 1 + ((100.0 - shm.node[node]['NodeSpeed']) / 100)
        release = tg.node[task]['task'].release
        completion = release + tg.node[task]['task'].wcet * slow_down

        Scheduling_Functions_Nodes.add_tg_task_to_node(
            tg, ag, task, node, release, completion, None)

        print("\tTASK " + str(task) + " MAPPED ON NODE: " + str(node))
    print("MIN EXECUTION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def MinExecutionTime(TG, AG, SHM, logging):
    """
    Minimum Execution Time (MET) mapping (legacy CamelCase variant):
    each task is mapped on one of the fastest nodes, chosen at random;
    completion time is not considered. Task attributes ('Node', 'WCET',
    'Release') are stored directly as task-graph node-dictionary keys.
    :param TG: Task Graph
    :param AG: Architecture Graph
    :param SHM: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    # this sounds a little stupid because there are no job specific machines...
    # we can Add Specific Accelerators or define different run time on different
    # PEs so this becomes more interesting...
    print("===========================================")
    print("STARTING MIN EXECUTION TIME MAPPING")
    for TaskToBeMapped in TG.nodes():
        # NOTE(review): fastest_nodes is called with 3 arguments here but
        # with 2 (ag, shm) in the snake_case variant -- confirm signature.
        ChosenNode = random.choice(
            Mapping_Functions.fastest_nodes(AG, SHM, TaskToBeMapped))
        TG.node[TaskToBeMapped]['Node'] = ChosenNode
        AG.node[ChosenNode]['PE'].MappedTasks.append(TaskToBeMapped)
        AG.node[ChosenNode]['PE'].Utilization += TG.node[TaskToBeMapped][
            'WCET']

        # a node at speed S runs tasks slower by factor 1 + (100 - S)/100
        NodeSpeedDown = 1 + ((100.0 - SHM.node[ChosenNode]['NodeSpeed']) / 100)
        TaskExecutionOnNode = TG.node[TaskToBeMapped]['WCET'] * NodeSpeedDown
        CompletionOnNode = TG.node[TaskToBeMapped][
            'Release'] + TaskExecutionOnNode

        Scheduling_Functions_Nodes.Add_TG_TaskToNode(
            TG, AG, TaskToBeMapped, ChosenNode,
            TG.node[TaskToBeMapped]['Release'], CompletionOnNode, logging)

        print("\tTASK " + str(TaskToBeMapped) + " MAPPED ON NODE: " +
              str(ChosenNode))
    print("MIN EXECUTION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(AG, logging)
    return TG, AG
 def AddCurrentMappingToMPM(self, tg):
     """
     Adds a mapping (Extracted from TG) under a fault configuration to MPM.
     The dictionary key would be the hash of fault config
     :param tg: Task Graph
     :return: None
     """
     # serialize the current task-graph mapping into its canonical string form
     MappingString = Mapping_Functions.mapping_into_string(tg)
     # key the MPM dict by the MD5 hex digest of the current fault config
     # NOTE(review): hashlib.md5 requires a bytes-like argument in Python 3;
     # assumes GenerateFaultConfig(self) returns bytes -- confirm.
     self.MPM[hashlib.md5(SHMU_Functions.GenerateFaultConfig(
         self)).hexdigest()] = MappingString
     return None
Beispiel #8
0
 def add_current_mapping_to_mpm(self, tg):
     """
     Adds a mapping (Extracted from TG) under a fault configuration to MPM.
     The dictionary key would be the hash of fault config
     :param tg: Task Graph
     :return: None
     """
     # serialize the current task-graph mapping into its canonical string form
     mapping_string = Mapping_Functions.mapping_into_string(tg)
     # key the MPM dict by the MD5 hex digest of the current fault config
     # NOTE(review): hashlib.md5 requires a bytes-like argument in Python 3;
     # assumes generate_fault_config(self) returns bytes -- confirm.
     self.MPM[hashlib.md5(SHMU_Functions.generate_fault_config(
         self)).hexdigest()] = mapping_string
     return None
Beispiel #9
0
def swap_nodes(tg, ag, shm, noc_rg, critical_routing_graph,
               non_critical_routing_graph, node_1, node_2, logging):
    """
    Swaps tasks of two nodes with each other!

    Only the first mapped task of each node is exchanged; both nodes must
    have at least one mapped task.
    :param tg:  Task Graph
    :param ag:  Architecture Graph
    :param shm: System Health Map
    :param noc_rg: NoC Routing Graph
    :param critical_routing_graph: NoC Routing Graph of Critical Region
    :param non_critical_routing_graph: NoC routing Graph of NonCritical Region
    :param node_1: first chosen node for swapping
    :param node_2:   2nd Chosen node for swapping
    :param logging: logging file
    :return: True if it successfully swaps two nodes tasks
    :raises ValueError: if a node has no mapped task or re-mapping fails
    """
    if len(ag.node[node_1]['PE'].mapped_tasks) == 0 or len(ag.node[node_2]['PE'].mapped_tasks) == 0:
        raise ValueError("at least one of selected nodes for swapping doesn't have any tasks on it. ")

    task_1 = ag.node[node_1]['PE'].mapped_tasks[0]
    task_2 = ag.node[node_2]['PE'].mapped_tasks[0]
    # un-map both tasks first so each node has room for the other's task
    Mapping_Functions.remove_task_from_node(tg, ag, noc_rg, critical_routing_graph,
                                            non_critical_routing_graph, task_1, node_1, logging)
    Mapping_Functions.remove_task_from_node(tg, ag, noc_rg, critical_routing_graph,
                                            non_critical_routing_graph, task_2, node_2, logging)
    # message typo fixed: "TYING" -> "TRYING"
    if not Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_routing_graph,
                                              non_critical_routing_graph, task_1, node_2, logging):
        raise ValueError("swap_nodes FAILED WHILE TRYING TO MAP FIRST CHOSEN TASK ON SECOND NODE ")
    if not Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_routing_graph,
                                              non_critical_routing_graph, task_2, node_1, logging):
        raise ValueError("swap_nodes FAILED WHILE TRYING TO MAP SECOND CHOSEN TASK ON FIRST NODE ")
    return True
Beispiel #10
0
def swap_nodes(tg, ag, shm, noc_rg, critical_routing_graph,
               non_critical_routing_graph, node_1, node_2, logging):
    """
    Swaps tasks of two nodes with each other!

    Only the first mapped task of each node is exchanged; both nodes must
    have at least one mapped task.
    :param tg:  Task Graph
    :param ag:  Architecture Graph
    :param shm: System Health Map
    :param noc_rg: NoC Routing Graph
    :param critical_routing_graph: NoC Routing Graph of Critical Region
    :param non_critical_routing_graph: NoC routing Graph of NonCritical Region
    :param node_1: first chosen node for swapping
    :param node_2:   2nd Chosen node for swapping
    :param logging: logging file
    :return: True if it successfully swaps two nodes tasks
    :raises ValueError: if a node has no mapped task or re-mapping fails
    """
    if len(ag.node[node_1]['PE'].mapped_tasks) == 0 or len(ag.node[node_2]['PE'].mapped_tasks) == 0:
        raise ValueError("at least one of selected nodes for swapping doesn't have any tasks on it. ")

    task_1 = ag.node[node_1]['PE'].mapped_tasks[0]
    task_2 = ag.node[node_2]['PE'].mapped_tasks[0]
    # un-map both tasks first so each node has room for the other's task
    Mapping_Functions.remove_task_from_node(tg, ag, noc_rg, critical_routing_graph,
                                            non_critical_routing_graph, task_1, node_1, logging)
    Mapping_Functions.remove_task_from_node(tg, ag, noc_rg, critical_routing_graph,
                                            non_critical_routing_graph, task_2, node_2, logging)
    # message typo fixed: "TYING" -> "TRYING"
    if not Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_routing_graph,
                                              non_critical_routing_graph, task_1, node_2, logging):
        raise ValueError("swap_nodes FAILED WHILE TRYING TO MAP FIRST CHOSEN TASK ON SECOND NODE ")
    if not Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_routing_graph,
                                              non_critical_routing_graph, task_2, node_1, logging):
        raise ValueError("swap_nodes FAILED WHILE TRYING TO MAP SECOND CHOSEN TASK ON FIRST NODE ")
    return True
def move_to_next_solution(iteration, tg, ctg, ag, noc_rg, shm, critical_rg,
                          noncritical_rg, logging):
    """
    Generates a neighbouring mapping solution for local search by moving one
    randomly chosen cluster onto a randomly chosen node.

    :param iteration: local-search iteration number; used to derive a
                      deterministic per-iteration random seed
    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param shm: System Health Map
    :param critical_rg: routing graph of the critical region
    :param noncritical_rg: routing graph of the non-critical region
    :param logging: logging file
    :return: (tg, ctg, ag)
    """
    # derive a reproducible seed for this iteration: starting from the
    # configured base seed, advance the PRNG `iteration` times
    random_seed = Config.mapping_random_seed
    random.seed(Config.mapping_random_seed)
    for i in range(0, iteration):
        random_seed = random.randint(1, 100000)
    random.seed(random_seed)
    logging.info("Moving to next solution: random_seed: " + str(random_seed) +
                 "    iteration: " + str(iteration))

    cluster_to_move = random.choice(ctg.nodes())
    current_node = ctg.node[cluster_to_move]['Node']
    Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg,
                                               critical_rg, noncritical_rg,
                                               cluster_to_move, current_node,
                                               logging)
    destination_node = random.choice(ag.nodes())
    if Config.EnablePartitioning:
        # resample until the destination lies in the region matching the
        # cluster's criticality
        while ctg.node[cluster_to_move]['Criticality'] != ag.node[
                destination_node]['Region']:
            destination_node = random.choice(ag.nodes())
    try_counter = 0
    while not Mapping_Functions.add_cluster_to_node(
            tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
            cluster_to_move, destination_node, logging):

        # If add_cluster_to_node fails it automatically removes all the connections...
        # we need to add the cluster to the old place...
        Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg,
                                              critical_rg, noncritical_rg,
                                              cluster_to_move, current_node,
                                              logging)
        try_counter += 1
        # give up after 3 * |nodes| failed placement attempts
        if try_counter >= 3 * len(ag.nodes()):
            print(
                "CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH..."
            )
            return tg, ctg, ag

        # choosing another cluster to move
        cluster_to_move = random.choice(ctg.nodes())
        current_node = ctg.node[cluster_to_move]['Node']
        Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg,
                                                   critical_rg, noncritical_rg,
                                                   cluster_to_move,
                                                   current_node, logging)
        destination_node = random.choice(ag.nodes())
        if Config.EnablePartitioning:
            while ctg.node[cluster_to_move]['Criticality'] != ag.node[
                    destination_node]['Region']:
                destination_node = random.choice(ag.nodes())
    return tg, ctg, ag
def map_test_tasks(tg, ag, shm, noc_rg, logging):
    """
    Maps every task of type 'Test' onto its pre-assigned node.

    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param noc_rg: NoC Routing Graph
    :param logging: logging file
    :return: None
    :raises ValueError: if mapping a test task on its node fails
    """
    for task_id in tg.nodes():
        if tg.node[task_id]['task'].type == 'Test':
            node = tg.node[task_id]['task'].node
            # message typo fixed: "TYING" -> "TRYING"
            if not Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, None, None, task_id, node, logging):
                raise ValueError(" MAPPING TEST TASK FAILED WHILE TRYING TO MAP ", task_id, "ON NODE", node)
    return None
def MapTestTasks(TG, AG, SHM, NoCRG, logging):
    """
    Maps every task of type 'Test' onto its pre-assigned node (legacy
    CamelCase variant: task attributes live directly in the node dict).

    :param TG: Task Graph
    :param AG: Architecture Graph
    :param SHM: System Health Map
    :param NoCRG: NoC Routing Graph
    :param logging: logging file
    :return: None
    :raises ValueError: if mapping a test task on its node fails
    """
    for task in TG.nodes():
        if TG.node[task]['Type'] == 'Test':
            Node = TG.node[task]['Node']
            # message typo fixed: "TYING" -> "TRYING"
            if not Mapping_Functions.map_task_to_node(
                    TG, AG, SHM, NoCRG, None, None, task, Node, logging):
                raise ValueError(
                    " MAPPING TEST TASK FAILED WHILE TRYING TO MAP ", task,
                    "ON NODE", Node)
    return None
Beispiel #14
0
def map_test_tasks(tg, ag, shm, noc_rg, logging):
    """
    Maps every task of type 'Test' onto its pre-assigned node.

    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param noc_rg: NoC Routing Graph
    :param logging: logging file
    :return: None
    :raises ValueError: if mapping a test task on its node fails
    """
    for task_id in tg.nodes():
        if tg.node[task_id]['task'].type == 'Test':
            node = tg.node[task_id]['task'].node
            # message typo fixed: "TYING" -> "TRYING"
            if not Mapping_Functions.map_task_to_node(
                    tg, ag, shm, noc_rg, None, None, task_id, node, logging):
                raise ValueError(
                    " MAPPING TEST TASK FAILED WHILE TRYING TO MAP ", task_id,
                    "ON NODE", node)
    return None
Beispiel #15
0
def minimum_completion_time(tg, ag, shm, logging):
    """
    Minimum Completion Time (MCT) mapping: each task (in graph order) is
    mapped on a node with the smallest completion time, ties broken at
    random. No WCET-based priority is used, unlike Min-Min / Max-Min.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging File
    :return: (TG, AG)
    """
    # The difference with Min Min or Max Min is that we don't add priorities to
    # tasks based on their WCET but we randomly choose a task and schedule it...
    # Note :: This heuristic is not taking task criticality into account...
    print("===========================================")
    print("STARTING MIN COMPLETION TIME MAPPING")
    for task_to_be_mapped in tg.nodes():
        chosen_node = random.choice(
            Mapping_Functions.nodes_with_smallest_ct(ag, tg, shm,
                                                     task_to_be_mapped))
        tg.node[task_to_be_mapped]['task'].node = chosen_node
        ag.node[chosen_node]['PE'].mapped_tasks.append(task_to_be_mapped)
        ag.node[chosen_node]['PE'].utilization += tg.node[task_to_be_mapped][
            'task'].wcet

        # a node at speed S runs tasks slower by factor 1 + (100 - S)/100
        node_speed_down = 1 + (
            (100.0 - shm.node[chosen_node]['NodeSpeed']) / 100)
        task_execution_on_node = tg.node[task_to_be_mapped][
            'task'].wcet * node_speed_down
        completion_on_node = tg.node[task_to_be_mapped][
            'task'].release + task_execution_on_node

        Scheduling_Functions_Nodes.add_tg_task_to_node(
            tg, ag, task_to_be_mapped, chosen_node,
            tg.node[task_to_be_mapped]['task'].release, completion_on_node,
            None)

        print("\tTASK " + str(task_to_be_mapped) + " MAPPED ON NODE: " +
              str(chosen_node))
    print("MIN COMPLETION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def move_to_next_solution(iteration, tg, ctg, ag, noc_rg, shm, critical_rg, noncritical_rg, logging):
    """
    Generates a neighbouring mapping solution for local search by moving one
    randomly chosen cluster onto a randomly chosen node.

    :param iteration: local-search iteration number; used to derive a
                      deterministic per-iteration random seed
    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param shm: System Health Map
    :param critical_rg: routing graph of the critical region
    :param noncritical_rg: routing graph of the non-critical region
    :param logging: logging file
    :return: (tg, ctg, ag)
    """
    # derive a reproducible seed for this iteration: starting from the
    # configured base seed, advance the PRNG `iteration` times
    random_seed = Config.mapping_random_seed
    random.seed(Config.mapping_random_seed)
    for i in range(0, iteration):
        random_seed = random.randint(1, 100000)
    random.seed(random_seed)
    logging.info("Moving to next solution: random_seed: "+str(random_seed)+"    iteration: "+str(iteration))

    cluster_to_move = random.choice(ctg.nodes())
    current_node = ctg.node[cluster_to_move]['Node']
    Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                               cluster_to_move, current_node, logging)
    destination_node = random.choice(ag.nodes())
    if Config.EnablePartitioning:
        # resample until the destination lies in the region matching the
        # cluster's criticality
        while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
            destination_node = random.choice(ag.nodes())
    try_counter = 0
    while not Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                    cluster_to_move, destination_node, logging):

            # If add_cluster_to_node fails it automatically removes all the connections...
            # we need to add the cluster to the old place...
            Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                  cluster_to_move, current_node, logging)
            try_counter += 1
            # give up after 3 * |nodes| failed placement attempts
            if try_counter >= 3*len(ag.nodes()):
                print ("CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH...")
                return tg, ctg, ag

            # choosing another cluster to move
            cluster_to_move = random.choice(ctg.nodes())
            current_node = ctg.node[cluster_to_move]['Node']
            Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                                       cluster_to_move, current_node, logging)
            destination_node = random.choice(ag.nodes())
            if Config.EnablePartitioning:
                while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
                    destination_node = random.choice(ag.nodes())
    return tg, ctg, ag
Beispiel #17
0
def min_execution_time(tg, ag, shm, logging):
    """
    Minimum Execution Time (MET) mapping: each task (in graph order) is
    mapped on one of the fastest nodes, chosen at random; completion
    time is not considered.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param logging: logging file
    :return: (TG, AG)
    """
    # this sounds a little stupid because there are no job specific machines...
    # we can Add Specific Accelerators or define different run time on different
    # PEs so this becomes more interesting...
    print("===========================================")
    print("STARTING MIN EXECUTION TIME MAPPING")
    for task_to_be_mapped in tg.nodes():
        chosen_node = random.choice(Mapping_Functions.fastest_nodes(ag, shm))
        tg.node[task_to_be_mapped]['task'].node = chosen_node
        ag.node[chosen_node]['PE'].mapped_tasks.append(task_to_be_mapped)
        ag.node[chosen_node]['PE'].utilization += tg.node[task_to_be_mapped][
            'task'].wcet

        # a node at speed S runs tasks slower by factor 1 + (100 - S)/100
        node_speed_down = 1 + (
            (100.0 - shm.node[chosen_node]['NodeSpeed']) / 100)
        task_execution_on_node = tg.node[task_to_be_mapped][
            'task'].wcet * node_speed_down
        completion_on_node = tg.node[task_to_be_mapped][
            'task'].release + task_execution_on_node

        Scheduling_Functions_Nodes.add_tg_task_to_node(
            tg, ag, task_to_be_mapped, chosen_node,
            tg.node[task_to_be_mapped]['task'].release, completion_on_node,
            None)

        print("\tTASK " + str(task_to_be_mapped) + " MAPPED ON NODE: " +
              str(chosen_node))
    print("MIN EXECUTION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(ag, logging)
    return tg, ag
def MinimumCompletionTime(TG, AG, SHM, logging):
    """
    Minimum Completion Time (MCT) mapping (legacy CamelCase variant):
    each task is mapped on a node with the smallest completion time,
    ties broken at random; no WCET-based priority is used. Task
    attributes ('Node', 'WCET', 'Release') are stored directly as
    task-graph node-dictionary keys.
    :param TG: Task Graph
    :param AG: Architecture Graph
    :param SHM: System Health Map
    :param logging: logging File
    :return: (TG, AG)
    """
    # The difference with Min Min or Max Min is that we don't add priorities to
    # tasks based on their WCET but we randomly choose a task and schedule it...
    # Note :: This heuristic is not taking task criticality into account...
    print("===========================================")
    print("STARTING MIN COMPLETION TIME MAPPING")
    for TaskToBeMapped in TG.nodes():
        ChosenNode = random.choice(
            Mapping_Functions.nodes_with_smallest_ct(AG, TG, SHM,
                                                     TaskToBeMapped))
        TG.node[TaskToBeMapped]['Node'] = ChosenNode
        AG.node[ChosenNode]['PE'].MappedTasks.append(TaskToBeMapped)
        AG.node[ChosenNode]['PE'].Utilization += TG.node[TaskToBeMapped][
            'WCET']

        # a node at speed S runs tasks slower by factor 1 + (100 - S)/100
        NodeSpeedDown = 1 + ((100.0 - SHM.node[ChosenNode]['NodeSpeed']) / 100)
        TaskExecutionOnNode = TG.node[TaskToBeMapped]['WCET'] * NodeSpeedDown
        CompletionOnNode = TG.node[TaskToBeMapped][
            'Release'] + TaskExecutionOnNode

        Scheduling_Functions_Nodes.Add_TG_TaskToNode(
            TG, AG, TaskToBeMapped, ChosenNode,
            TG.node[TaskToBeMapped]['Release'], CompletionOnNode, logging)

        print("\tTASK " + str(TaskToBeMapped) + " MAPPED ON NODE: " +
              str(ChosenNode))
    print("MIN COMPLETION TIME MAPPING FINISHED...")
    Scheduling_Reports.report_mapped_tasks(AG, logging)
    return TG, AG
def mapping_opt_local_search(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm,
                             iteration_num, report, detailed_report, logging,
                             cost_data_file_name, mapping_process_file_name, random_seed,
                             initial_mapping_string=None):
    """
    Optimizes the current cluster-to-node mapping with a random local search.

    Each iteration moves one randomly chosen cluster to a randomly chosen
    node, re-schedules, and keeps the move only if the mapping cost does not
    increase; otherwise the best-known state is restored.

    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: routing graph for the critical region
    :param noncritical_rg: routing graph for the non-critical region
    :param shm: System Health Map
    :param iteration_num: number of local-search moves to attempt
    :param report: if True, print progress to stdout
    :param detailed_report: passed through to the cost function
    :param logging: logging file
    :param cost_data_file_name: base name (str) of the cost trace file
    :param mapping_process_file_name: base name (str) of the mapping trace file
    :param random_seed: seed for the random number generator
    :param initial_mapping_string: optional reference mapping for the cost function
    :return: (best_tg, best_ctg, best_ag)
    """
    random.seed(random_seed)
    if report:
        print("===========================================")
        print("STARTING MAPPING OPTIMIZATION...USING LOCAL SEARCH...")
        print("NUMBER OF ITERATIONS: "+str(iteration_num))

    if type(cost_data_file_name) is str:
        mapping_cost_file = open('Generated_Files/Internal/'+cost_data_file_name+'.txt', 'a')
    else:
        raise ValueError("cost_data_file_name name is not string: "+str(cost_data_file_name))

    if type(mapping_process_file_name) is str:
        mapping_process_file = open('Generated_Files/Internal/'+mapping_process_file_name+'.txt', 'a')
    else:
        raise ValueError("mapping_process_file name is not string: "+str(mapping_process_file_name))

    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    best_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False,
                                                        initial_mapping_string=initial_mapping_string)
    starting_cost = best_cost
    for iteration in range(0, iteration_num):
        logging.info("       ITERATION:"+str(iteration))
        cluster_to_move = random.choice(list(ctg.nodes()))
        current_node = ctg.node[cluster_to_move]['Node']
        Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                                   cluster_to_move, current_node, logging)
        destination_node = random.choice(list(ag.nodes()))
        if Config.EnablePartitioning:
            # re-draw until the destination lies inside the cluster's criticality region
            while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
                destination_node = random.choice(list(ag.nodes()))

        try_counter = 0
        while not Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                        cluster_to_move, destination_node, logging):

            # If add_cluster_to_node fails it automatically removes all the connections...
            # we need to add the cluster to the old place...
            Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                  cluster_to_move, current_node, logging)

            # choosing another cluster to move
            cluster_to_move = random.choice(list(ctg.nodes()))
            current_node = ctg.node[cluster_to_move]['Node']
            Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                                       cluster_to_move, current_node, logging)
            destination_node = random.choice(list(ag.nodes()))
            if Config.EnablePartitioning:
                while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
                    destination_node = random.choice(list(ag.nodes()))

            if try_counter >= 3*len(ag.nodes()):
                if report:
                    print("CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH...")
                logging.info("CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH...")
                tg = copy.deepcopy(best_tg)
                ag = copy.deepcopy(best_ag)
                # BUG FIX: the original copied ctg onto itself here instead of
                # restoring the best known clustered task graph
                ctg = copy.deepcopy(best_ctg)
                if report:
                    Scheduling_Reports.report_mapped_tasks(ag, logging)
                    Mapping_Functions.mapping_cost_function(tg, ag, shm, True,
                                                            initial_mapping_string=initial_mapping_string)
                # BUG FIX: close the trace files on this early-return path too
                mapping_process_file.close()
                mapping_cost_file.close()
                return best_tg, best_ctg, best_ag
            try_counter += 1

        Scheduling_Functions.clear_scheduling(ag)
        Scheduler.schedule_all(tg, ag, shm, False, logging)

        current_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, detailed_report,
                                                               initial_mapping_string=initial_mapping_string)
        mapping_process_file.write(Mapping_Functions.mapping_into_string(tg)+"\n")
        mapping_cost_file.write(str(current_cost)+"\n")
        if current_cost <= best_cost:
            if current_cost < best_cost:
                if report:
                    print("\033[32m* NOTE::\033[0mBETTER SOLUTION FOUND WITH COST: "+str(current_cost) +
                          "\t ITERATION:"+str(iteration))
            # the log line is identical for "better" and "equal" moves
            logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(iteration))
            best_tg = copy.deepcopy(tg)
            best_ag = copy.deepcopy(ag)
            best_ctg = copy.deepcopy(ctg)
            best_cost = current_cost
        else:
            # cost got worse: roll back to the best known state
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            ctg = copy.deepcopy(best_ctg)
            mapping_process_file.write(Mapping_Functions.mapping_into_string(tg)+"\n")

    Scheduling_Functions.clear_scheduling(ag)
    Scheduler.schedule_all(tg, ag, shm, False, logging)
    mapping_process_file.close()
    mapping_cost_file.close()
    if report:
        print("-------------------------------------")
        print("STARTING COST: "+str(starting_cost)+"\tFINAL COST: "+str(best_cost) +
              "\tAFTER "+str(iteration_num)+" ITERATIONS")
        print("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
    return best_tg, best_ctg, best_ag
def mapping_opt_iterative_local_search(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm, iteration_num,
                                       sub_iteration, report, detailed_report, logging):
    """
    Iterated local search over the task/cluster mapping.

    Runs `iteration_num` rounds; each round performs a bounded local search
    (`sub_iteration` moves), keeps the best mapping seen so far, then restarts
    from a fresh random initial mapping.

    NOTE(review): a function with this same name is defined again later in
    this file, so this definition is shadowed at import time — confirm which
    copy is intended to survive.

    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: routing graph for the critical region
    :param noncritical_rg: routing graph for the non-critical region
    :param shm: System Health Map
    :param iteration_num: number of outer (restart) iterations
    :param sub_iteration: number of moves per inner local search
    :param report: if True, print progress to stdout
    :param detailed_report: passed through to the cost function
    :param logging: logging file
    :return: (best_tg, best_ctg, best_ag)
    """
    if report:
        print("===========================================")
        print("STARTING MAPPING OPTIMIZATION...USING ITERATIVE LOCAL SEARCH...")

    # keep deep copies of the incumbent best solution
    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    best_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False)
    starting_cost = best_cost
    if report:
        print("INITIAL COST:"+str(starting_cost))
    # truncate the trace files left over from any previous run
    mapping_cost_file = open('Generated_Files/Internal/LocalSearchMappingCost.txt', 'w')
    mapping_cost_file.close()
    mapping_process_file = open('Generated_Files/Internal/MappingProcess.txt', 'w')
    mapping_process_file.close()
    for Iteration in range(0, iteration_num):
        logging.info("        ITERATION:"+str(Iteration))
        # derive a per-round seed deterministically from the configured base seed
        random_seed = Config.mapping_random_seed
        random.seed(Config.mapping_random_seed)
        for i in range(0, Iteration):
            random_seed = random.randint(1, 100000)
        (current_tg, current_ctg, current_ag) = mapping_opt_local_search(tg, ctg, ag, noc_rg, critical_rg,
                                                                         noncritical_rg, shm, sub_iteration,
                                                                         False, detailed_report, logging,
                                                                         "LocalSearchMappingCost",
                                                                         "mapping_process_file_name", random_seed)
        if current_tg is not False:
            current_cost = Mapping_Functions.mapping_cost_function(current_tg, current_ag, shm, False)
            # accept solutions that are at least as good as the incumbent
            if current_cost <= best_cost:
                if current_cost < best_cost:
                    if report:
                        print("\033[32m* NOTE::\033[0mBETTER SOLUTION FOUND WITH COST: "+str(current_cost) +
                               "\t ITERATION: "+str(Iteration))
                    logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(Iteration))
                else:
                    logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(Iteration))
                best_tg = copy.deepcopy(current_tg)
                best_ag = copy.deepcopy(current_ag)
                best_ctg = copy.deepcopy(current_ctg)
                best_cost = current_cost
        del current_tg
        del current_ag
        del current_ctg
        # restart: wipe the current mapping and build a fresh random one
        Mapping_Functions.clear_mapping(tg, ctg, ag)
        counter = 0
        schedule = True
        # re-derive the same per-round seed for the restart
        random_seed = Config.mapping_random_seed
        random.seed(Config.mapping_random_seed)
        for i in range(0, Iteration):
            random_seed = random.randint(1, 100000)
        while not Mapping_Functions.make_initial_mapping(tg, ctg, ag, shm, noc_rg, critical_rg,
                                                         noncritical_rg, False, logging, random_seed):
            if counter == 10:   # we try 10 times to find some initial solution... how ever if it fails...
                schedule = False
                break
            counter += 1
        if schedule:
            Scheduling_Functions.clear_scheduling(ag)
            Scheduler.schedule_all(tg, ag, shm, False, logging)
        else:
            # no feasible restart could be found: abort and report the best so far
            if report:
                print("\033[33mWARNING::\033[0m CAN NOT FIND ANOTHER FEASIBLE SOLUTION... ",
                       "ABORTING ITERATIVE LOCAL SEARCH...")
            logging.info("CAN NOT FIND ANOTHER FEASIBLE SOLUTION... ABORTING ITERATIVE LOCAL SEARCH...")
            if report:
                print("-------------------------------------")
                print("STARTING COST: "+str(starting_cost)+"\tFINAL COST: "+str(best_cost))
                print("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
            return best_tg, best_ctg, best_ag

    if report:
        print("-------------------------------------")
        print("STARTING COST:"+str(starting_cost)+"\tFINAL COST:"+str(best_cost))
        print("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
    return best_tg, best_ctg, best_ag
def mapping_opt_iterative_local_search(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm, iteration_num,
                                       sub_iteration, report, detailed_report, logging):
    """
    Iterated local search over the task/cluster mapping: each round runs a
    bounded local search, keeps the best mapping found so far, and restarts
    from a fresh random initial mapping.

    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: routing graph for the critical region
    :param noncritical_rg: routing graph for the non-critical region
    :param shm: System Health Map
    :param iteration_num: number of outer (restart) iterations
    :param sub_iteration: number of moves per inner local search
    :param report: if True, print progress to stdout
    :param detailed_report: passed through to the cost function
    :param logging: logging file
    :return: (best_tg, best_ctg, best_ag)
    """
    if report:
        print("===========================================")
        print("STARTING MAPPING OPTIMIZATION...USING ITERATIVE LOCAL SEARCH...")

    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    starting_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False)
    best_cost = starting_cost
    if report:
        print("INITIAL COST:"+str(starting_cost))
    # truncate the trace files left over from any previous run
    open('Generated_Files/Internal/LocalSearchMappingCost.txt', 'w').close()
    open('Generated_Files/Internal/MappingProcess.txt', 'w').close()
    for round_idx in range(iteration_num):
        logging.info("        ITERATION:"+str(round_idx))
        # derive a per-round seed deterministically from the configured base seed
        random.seed(Config.mapping_random_seed)
        round_seed = Config.mapping_random_seed
        for _ in range(round_idx):
            round_seed = random.randint(1, 100000)
        current_tg, current_ctg, current_ag = mapping_opt_local_search(
            tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm, sub_iteration,
            False, detailed_report, logging, "LocalSearchMappingCost",
            "mapping_process_file_name", round_seed)
        if current_tg is not False:
            current_cost = Mapping_Functions.mapping_cost_function(current_tg, current_ag, shm, False)
            # accept solutions that are at least as good as the incumbent
            if current_cost <= best_cost:
                if report and current_cost < best_cost:
                    print("\033[32m* NOTE::\033[0mBETTER SOLUTION FOUND WITH COST: "+str(current_cost) +
                           "\t ITERATION: "+str(round_idx))
                logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(round_idx))
                best_tg = copy.deepcopy(current_tg)
                best_ag = copy.deepcopy(current_ag)
                best_ctg = copy.deepcopy(current_ctg)
                best_cost = current_cost
        del current_tg, current_ag, current_ctg
        # restart: wipe the current mapping and build a fresh random one
        Mapping_Functions.clear_mapping(tg, ctg, ag)
        # re-derive the same per-round seed for the restart
        random.seed(Config.mapping_random_seed)
        round_seed = Config.mapping_random_seed
        for _ in range(round_idx):
            round_seed = random.randint(1, 100000)
        restarted = False
        for _attempt in range(11):   # up to 11 tries to find an initial solution
            if Mapping_Functions.make_initial_mapping(tg, ctg, ag, shm, noc_rg, critical_rg,
                                                      noncritical_rg, False, logging, round_seed):
                restarted = True
                break
        if restarted:
            Scheduling_Functions.clear_scheduling(ag)
            Scheduler.schedule_all(tg, ag, shm, False, logging)
        else:
            # no feasible restart could be found: abort and report the best so far
            if report:
                print("\033[33mWARNING::\033[0m CAN NOT FIND ANOTHER FEASIBLE SOLUTION... ",
                       "ABORTING ITERATIVE LOCAL SEARCH...")
            logging.info("CAN NOT FIND ANOTHER FEASIBLE SOLUTION... ABORTING ITERATIVE LOCAL SEARCH...")
            if report:
                print("-------------------------------------")
                print("STARTING COST: "+str(starting_cost)+"\tFINAL COST: "+str(best_cost))
                print("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
            return best_tg, best_ctg, best_ag

    if report:
        print("-------------------------------------")
        print("STARTING COST:"+str(starting_cost)+"\tFINAL COST:"+str(best_cost))
        print("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
    return best_tg, best_ctg, best_ag
Beispiel #22
0
def initialize_system(logging):
    """
    Generates the Task graph, Architecture Graph, System Health Monitoring Unit, NoC routing graph(s) and
    Test Task Graphs and does the mapping and scheduling and returns to the user the initial system
    :param logging: logging file
    :return:  tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
    """
    # ---- task graph -------------------------------------------------
    tg = copy.deepcopy(TG_Functions.generate_tg())
    if Config.DebugInfo:
        Task_Graph_Reports.report_task_graph(tg, logging)
    Task_Graph_Reports.draw_task_graph(tg)
    if Config.TestMode:
        TG_Test.check_acyclic(tg, logging)
    ####################################################################
    # ---- architecture graph -----------------------------------------
    ag = copy.deepcopy(AG_Functions.generate_ag(logging))
    AG_Functions.update_ag_regions(ag)
    AG_Functions.random_darkness(ag)
    if Config.EnablePartitioning:
        AG_Functions.setup_network_partitioning(ag)
    if Config.FindOptimumAG:
        Arch_Graph_Reports.draw_ag(ag, "AG_Full")
    else:
        Arch_Graph_Reports.draw_ag(ag, "AG")
    ####################################################################
    # ---- system health monitoring -----------------------------------
    Config.setup_turns_health()

    shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
    shmu.setup_noc_shm(ag, Config.TurnsHealth, True)
    # Here we are injecting initial faults of the system: we assume these fault
    # information is obtained by post manufacturing system diagnosis
    if Config.FindOptimumAG:
        # optimize vertical links before faults are applied, then redraw
        vl_opt.optimize_ag_vertical_links(ag, shmu, logging)
        vl_opt_functions.cleanup_ag(ag, shmu)
        Arch_Graph_Reports.draw_ag(ag, "AG_VLOpt")
    SHMU_Functions.apply_initial_faults(shmu)
    if Config.viz.shm:
        SHMU_Reports.draw_shm(shmu.SHM)
        SHMU_Reports.draw_temp_distribution(shmu.SHM)
    # SHM_Reports.report_noc_shm()
    ####################################################################
    # ---- routing graph ----------------------------------------------
    routing_graph_start_time = time.time()
    if Config.SetRoutingFromFile:
        noc_rg = copy.deepcopy(Routing.gen_noc_route_graph_from_file(ag, shmu, Config.RoutingFilePath,
                                                                     Config.DebugInfo, Config.DebugDetails))
    else:
        noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, Config.UsedTurnModel,
                                                                Config.DebugInfo, Config.DebugDetails))
    Routing_Functions.check_deadlock_freeness(noc_rg)
    print("\033[92mTIME::\033[0m ROUTING GRAPH GENERATION TOOK: " +
           str(round(time.time()-routing_graph_start_time))+" SECONDS")
    # this is for double checking...
    if Config.FindOptimumAG:
        Calculate_Reachability.reachability_metric(ag, noc_rg, True)
    # Some visualization...
    if Config.viz.rg:
        RoutingGraph_Reports.draw_rg(noc_rg)
    ####################################################################
    # in case of partitioning, we have to route based on different Route-graphs
    if Config.EnablePartitioning:
        critical_rg, noncritical_rg = Calculate_Reachability.calculate_reachability_with_regions(ag, shmu)
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    else:
        # no partitioning: a single routing graph serves all traffic
        critical_rg, noncritical_rg = None, None
        Calculate_Reachability.calculate_reachability(ag, noc_rg)
        Calculate_Reachability.optimize_reachability_rectangles(ag, Config.NumberOfRects)
        # ReachabilityReports.report_reachability(ag)
        ReachabilityReports.report_reachability_in_file(ag, "ReachAbilityNodeReport")
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    ####################################################################
    # ---- mapping & scheduling ---------------------------------------
    if Config.read_mapping_from_file:
        Mapping_Functions.read_mapping_from_file(tg, ag, shmu.SHM, noc_rg, critical_rg, noncritical_rg,
                                                 Config.mapping_file_path, logging)
        Scheduler.schedule_all(tg, ag, shmu.SHM, False, logging)
    else:
        best_tg, best_ag = Mapping.mapping(tg, ag, noc_rg, critical_rg, noncritical_rg, shmu.SHM, logging)
        if best_ag is not None and best_tg is not None:
            # adopt the optimized mapping and drop the temporaries
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            del best_tg, best_ag
            # SHM.add_current_mapping_to_mpm(tg)
            Mapping_Functions.write_mapping_to_file(ag, "mapping_report")
    if Config.viz.mapping_distribution:
        Mapping_Reports.draw_mapping_distribution(ag, shmu)
    if Config.viz.mapping:
        Mapping_Reports.draw_mapping(tg, ag, shmu.SHM, "Mapping_post_opt")
    if Config.viz.scheduling:
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingTG")
    ####################################################################
    # PMC-Graph
    # at this point we assume that the system health map knows about the initial faults from
    # the diagnosis process
    if Config.GeneratePMCG:
        pmcg_start_time = time.time()
        if Config.OneStepDiagnosable:
            pmcg = TestSchedulingUnit.gen_one_step_diagnosable_pmcg(ag, shmu.SHM)
        else:
            pmcg = TestSchedulingUnit.gen_sequentially_diagnosable_pmcg(ag, shmu.SHM)
        test_tg = TestSchedulingUnit.generate_test_tg_from_pmcg(pmcg)
        print("\033[92mTIME::\033[0m PMCG AND TTG GENERATION TOOK: " +
               str(round(time.time()-pmcg_start_time)) + " SECONDS")
        if Config.viz.pmcg:
            TestSchedulingUnit.draw_pmcg(pmcg)
        if Config.viz.ttg:
            TestSchedulingUnit.draw_ttg(test_tg)
        # weave the test tasks into the application task graph and schedule them
        TestSchedulingUnit.insert_test_tasks_in_tg(pmcg, tg)
        Task_Graph_Reports.draw_task_graph(tg, ttg=test_tg)
        TestSchedulingUnit.map_test_tasks(tg, ag, shmu.SHM, noc_rg, logging)
        Scheduler.schedule_test_in_tg(tg, ag, shmu.SHM, False, logging)
        Scheduling_Reports.report_mapped_tasks(ag, logging)
        # TestSchedulingUnit.remove_test_tasks_from_tg(test_tg, tg)
        # Task_Graph_Reports.draw_task_graph(tg, TTG=test_tg)
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingWithTTG")
    else:
        pmcg = None
    Arch_Graph_Reports.gen_latex_ag(ag, shmu.SHM)
    print("===========================================")
    print("SYSTEM IS UP...")

    TrafficTableGenerator.generate_noxim_traffic_table(ag, tg)
    if Config.viz.mapping_frames:
        Mapping_Animation.generate_frames(ag, shmu.SHM)
    return tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
Beispiel #23
0
def NMap(tg, ag, NoCRG, CriticalRG, NonCriticalRG, SHM, logging):
    """
    Performs NMap Mapping algorithm
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param NoCRG: NoC Routing Graph
    :param CriticalRG: NoC Routing Graph for Critical Region
    :param NonCriticalRG: NoC Routing Graph for Non-Critical Region
    :param SHM: System Health Map
    :param logging: logging File
    :return: TG and AG
    """
    print("===========================================")
    print("STARTING N-MAP MAPPING...\n")

    if len(tg.nodes()) > len(ag.nodes()):
        raise ValueError(
            "Number of tasks should be smaller or equal to number of PEs")

    mapped_tasks = []
    unmapped_tasks = copy.deepcopy(tg.nodes())
    allocated_nodes = []
    unallocated_nodes = copy.deepcopy(ag.nodes())

    # remove all broken nodes from unallocated_nodes list
    # BUG FIX: iterate over a snapshot; removing from the list while
    # iterating it skips the element right after every removed one
    for node in list(unallocated_nodes):
        if not SHM.node[node]['NodeHealth']:
            unallocated_nodes.remove(node)
            print("REMOVED BROKEN NODE " + str(node) +
                  " FROM UN-ALLOCATED NODES")

    print("------------------")
    print("STEP 1:")
    # step 1: find the task with highest weighted communication volume
    tasks_com_dict = TG_Functions.tasks_communication_weight(tg)
    sorted_tasks_com = sorted(tasks_com_dict,
                              key=tasks_com_dict.get,
                              reverse=True)
    print("\t SORTED TASKS BY COMMUNICATION WEIGHT:\n" + "\t " +
          str(sorted_tasks_com))
    print("\t -------------")
    chosen_task = sorted_tasks_com[0]
    print("\t CHOSEN TASK: " + str(chosen_task))
    mapped_tasks.append(chosen_task)
    print("\t ADDED TASK " + str(chosen_task) + "TO MAPPED TASKS LIST")
    unmapped_tasks.remove(chosen_task)
    print("\t REMOVED TASK " + str(chosen_task) + "FROM UN-MAPPED TASKS LIST")

    print("------------------")
    print("STEP 2:")
    # step 2: allocate the (healthy) node with the most neighbours
    node_neighbors_dict = AG_Functions.node_neighbors(ag, SHM)
    sorted_node_neighbors = sorted(node_neighbors_dict,
                                   key=node_neighbors_dict.get,
                                   reverse=True)
    max_neighbors_node = AG_Functions.max_node_neighbors(
        node_neighbors_dict, sorted_node_neighbors)
    print("\t SORTED NODES BY NUMBER OF NEIGHBOURS:\n" + "\t " +
          str(sorted_node_neighbors))
    print("\t -------------")
    print("\t NODES WITH MAX NEIGHBOURS:\t" + str(max_neighbors_node))
    chosen_node = random.choice(max_neighbors_node)

    print("\t CHOSEN NODE: " + str(chosen_node))
    allocated_nodes.append(chosen_node)
    print("\t ADDED NODE " + str(chosen_node) + " TO ALLOCATED NODES LIST")
    unallocated_nodes.remove(chosen_node)
    print("\t REMOVED NODE " + str(chosen_node) +
          " FROM UN-ALLOCATED NODES LIST")
    # Map Chosen Task on Chosen Node...
    if Mapping_Functions.map_task_to_node(tg, ag, SHM, NoCRG, CriticalRG,
                                          NonCriticalRG, chosen_task,
                                          chosen_node, logging):
        print("\t \033[32m* NOTE::\033[0mTASK " + str(chosen_task) +
              " MAPPED ON NODE " + str(chosen_node))
    else:
        raise ValueError("Mapping task on node failed...")

    print("------------------")
    print("STEP 3:")
    while len(unmapped_tasks) > 0:
        print("\033[33m==>\033[0m  UN-MAPPED TASKS #: " +
              str(len(unmapped_tasks)))
        print("\t -------------")
        print("\t STEP 3.1:")
        # find the unmapped task which communicates most with mapped_tasks
        max_com = 0
        unmapped_tasks_com = {}
        tasks_with_max_com_to_mapped = []
        for task in unmapped_tasks:
            task_weight = 0
            for mapped_task in mapped_tasks:
                if (task, mapped_task) in tg.edges():
                    task_weight += tg.edge[task][mapped_task]["ComWeight"]
                if (mapped_task, task) in tg.edges():
                    task_weight += tg.edge[mapped_task][task]["ComWeight"]
            unmapped_tasks_com[task] = task_weight
            if max_com < task_weight:
                max_com = task_weight
                tasks_with_max_com_to_mapped = [task]
            elif max_com == task_weight:
                tasks_with_max_com_to_mapped.append(task)
        print("\t MAX COMMUNICATION WITH THE MAPPED TASKS: " + str(max_com))
        print("\t TASK(S) WITH MAX COMMUNICATION TO MAPPED TASKS: " +
              str(tasks_with_max_com_to_mapped))
        if len(tasks_with_max_com_to_mapped) > 1:
            # multiple tasks with same comm to mapped
            # Find the one that communicate most with Un-mapped takss...
            candid_task_with_max_com_to_unmapped = []
            max_com = 0
            for candidate_task in tasks_with_max_com_to_mapped:
                task_weight = 0
                for unmapped_task in unmapped_tasks:
                    # BUG FIX: the original inspected edges of the stale loop
                    # variable `Task` instead of the candidate under test
                    if (candidate_task, unmapped_task) in tg.edges():
                        task_weight += tg.edge[candidate_task][unmapped_task][
                            "ComWeight"]
                    if (unmapped_task, candidate_task) in tg.edges():
                        task_weight += tg.edge[unmapped_task][candidate_task][
                            "ComWeight"]
                if task_weight > max_com:
                    # BUG FIX: max_com was never updated here, so every
                    # candidate compared against 0 in the elif branch
                    max_com = task_weight
                    candid_task_with_max_com_to_unmapped = [candidate_task]
                elif task_weight == max_com:
                    candid_task_with_max_com_to_unmapped.append(candidate_task)
            print(
                "\t CANDIDATE TASK(S) THAT COMMUNICATE MOST WITH UN_MAPPED: " +
                str(candid_task_with_max_com_to_unmapped))
            if len(candid_task_with_max_com_to_unmapped) > 1:
                # if multiple tasks with the same com to unmmaped also,
                # choose randomly
                chosen_task = random.choice(
                    candid_task_with_max_com_to_unmapped)
            else:
                chosen_task = candid_task_with_max_com_to_unmapped[0]
        else:
            chosen_task = tasks_with_max_com_to_mapped[0]
        print("\t CHOSEN TASK: " + str(chosen_task))

        # Find the unallocated tile with lowest communication cost to/from the allocated_tiles_set.
        print("\t -------------")
        print("\t STEP 3.2:")
        min_cost = float("inf")
        node_candidates = []
        for unallocated_node in unallocated_nodes:
            cost = 0
            reachable = True
            for mapped_task in mapped_tasks:
                com_weight = 0
                if (chosen_task, mapped_task) in tg.edges():
                    com_weight += tg.edge[chosen_task][mapped_task][
                        "ComWeight"]
                    destination_node = tg.node[mapped_task]['Node']
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.IsDestReachableFromSource(
                            NoCRG, unallocated_node, destination_node):
                        manhatan_distance = AG_Functions.manhattan_distance(
                            unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
                elif (mapped_task, chosen_task) in tg.edges():
                    com_weight += tg.edge[mapped_task][chosen_task][
                        "ComWeight"]
                    destination_node = tg.node[mapped_task]['Node']
                    # reverse direction: reachability is checked dest -> candidate
                    if Calculate_Reachability.IsDestReachableFromSource(
                            NoCRG, destination_node, unallocated_node):
                        manhatan_distance = AG_Functions.manhattan_distance(
                            unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
            if reachable:
                if cost < min_cost:
                    node_candidates = [unallocated_node]
                    min_cost = cost
                elif cost == min_cost:
                    node_candidates.append(unallocated_node)
            else:
                print("\t \033[33m* NOTE::\033[0m NODE " +
                      str(unallocated_node) + " CAN NOT REACH...")
        print("\t CANDIDATE NODES: " + str(node_candidates) + " MIN COST: " +
              str(min_cost))

        # NOTE: a task with no edges to mapped tasks has cost 0, so it still
        # produces candidates; an empty candidate list means nothing reachable
        if len(node_candidates) == 0:
            raise ValueError("COULD NOT FIND A REACHABLE CANDIDATE NODE...")
        elif len(node_candidates) > 1:
            chosen_node = random.choice(node_candidates)
        else:
            chosen_node = node_candidates[0]

        mapped_tasks.append(chosen_task)
        print("\t ADDED TASK " + str(chosen_task) + " TO MAPPED TASKS LIST")
        unmapped_tasks.remove(chosen_task)
        print("\t REMOVED TASK " + str(chosen_task) +
              " FROM UN-MAPPED TASKS LIST")

        allocated_nodes.append(chosen_node)
        print("\t ADDED NODE " + str(chosen_node) + " TO ALLOCATED NODES LIST")
        unallocated_nodes.remove(chosen_node)
        print("\t REMOVED NODE " + str(chosen_node) +
              " FROM UN-ALLOCATED NODES LIST")

        if Mapping_Functions.map_task_to_node(tg, ag, SHM, NoCRG, CriticalRG,
                                              NonCriticalRG, chosen_task,
                                              chosen_node, logging):
            print("\t \033[32m* NOTE::\033[0mTASK " + str(chosen_task) +
                  " MAPPED ON NODE " + str(chosen_node))
        else:
            raise ValueError("Mapping task on node failed...")

    # Added by Behrad (Still under development)
    # Swapping phase
    # BUG FIX: converted Python 2 print statements to print() calls so this
    # function parses in the Python 3 syntax used by the rest of the file
    print("-----------------------")
    print("PHASE ONE IS DONE... STARTING SWAP PROCESS...")
    for node_id_1 in range(0, len(ag.nodes()) - 1):
        for node_id_2 in range(node_id_1 + 1, len(ag.nodes()) - 1):
            # Save the current mapping's communication cost
            comm_cost = calculate_com_cost(tg)

            # Swap (node_id_1 , node_id_2)
            swap_nodes(tg, ag, SHM, NoCRG, CriticalRG, NonCriticalRG,
                       node_id_1, node_id_2, logging)
            # Re-evaluate the communication cost over all flows in the task graph
            comm_cost_new = calculate_com_cost(tg)
            # Keep the swap only if it strictly improved the cost;
            # otherwise swap back to restore the previous mapping
            if comm_cost_new < comm_cost:
                print("\033[32m* NOTE::\033[0m BETTER SOLUTION FOUND WITH COST:", comm_cost_new)
            else:
                swap_nodes(tg, ag, SHM, NoCRG, CriticalRG, NonCriticalRG,
                           node_id_2, node_id_1, logging)

    # End of Swapping phase
    print("SWAP PROCESS FINISHED...")
    # NOTE(review): this call passes one more positional argument than the
    # schedule_all(tg, ag, shm, False, logging) calls elsewhere in this
    # file — confirm the intended signature
    Scheduler.schedule_all(tg, ag, SHM, True, False, logging)
    return tg, ag
def optimize_mapping_sa(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                        shm, cost_data_file, logging):
    """
    Optimizes the current mapping using simulated annealing.
    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for the Critical Region
    :param noncritical_rg: NoC Routing Graph for the Non-Critical Region
    :param shm: System Health Map
    :param cost_data_file: base name (str) of the cost log file under Generated_Files/Internal/
    :param logging: logging file
    :return: (best_tg, best_ctg, best_ag) -- deep copies of the best solution visited
    """
    print ("===========================================")
    print ("STARTING MAPPING OPTIMIZATION...USING SIMULATED ANNEALING...")
    print ("STARTING TEMPERATURE: "+str(Config.SA_InitialTemp))
    print ("ANNEALING SCHEDULE: "+Config.SA_AnnealingSchedule)
    print ("TERMINATION CRITERIA: "+Config.TerminationCriteria)
    print ("================")

    if type(cost_data_file) is str:
        mapping_cost_file = open('Generated_Files/Internal/'+cost_data_file+'.txt', 'a')
    else:
        raise ValueError("cost_data_file name is not string: "+str(cost_data_file))

    # NOTE(review): these handles are not closed if an exception escapes the
    # annealing loop; a with-statement / try-finally would be safer -- TODO confirm.
    mapping_process_file = open('Generated_Files/Internal/MappingProcess.txt', 'w')
    sa_temperature_file = open('Generated_Files/Internal/SATemp.txt', 'w')
    sa_cost_slop_file = open('Generated_Files/Internal/SACostSlope.txt', 'w')
    sa_huang_race_file = open('Generated_Files/Internal/SAHuangRace.txt', 'w')

    # adaptive annealing schedules track a sliding window of recent costs
    if Config.SA_AnnealingSchedule in ['Adaptive', 'Aart', 'Huang']:
        cost_monitor = deque([])
    else:
        cost_monitor = []

    if Config.DistanceBetweenMapping:
        # remember the starting mapping so the cost function can measure distance from it
        init_map_string = Mapping_Functions.mapping_into_string(tg)
        if Config.Mapping_CostFunctionType == 'CONSTANT':
            Mapping_Functions.clear_mapping(tg, ctg, ag)
            if not Mapping_Functions.make_initial_mapping(tg, ctg, ag, shm, noc_rg, critical_rg,
                                                          noncritical_rg, True, logging,
                                                          Config.mapping_random_seed):
                raise ValueError("FEASIBLE MAPPING NOT FOUND...")
    else:
        init_map_string = None

    # "current" is the state the annealer walks from; "best" is the global optimum seen so far
    current_tg = copy.deepcopy(tg)
    current_ag = copy.deepcopy(ag)
    current_ctg = copy.deepcopy(ctg)
    current_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False, initial_mapping_string=init_map_string)
    starting_cost = current_cost

    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    best_cost = current_cost

    initial_temp = Config.SA_InitialTemp
    sa_temperature_file.write(str(initial_temp)+"\n")
    temperature = initial_temp
    slope = None                 # used by the 'Adaptive' schedule
    zero_slope_counter = 0
    standard_deviation = None    # used by the 'Aart' and 'Huang' schedules

    # for Huang Annealing schedule
    huang_counter1 = 0
    huang_counter2 = 0
    huang_steady_counter = 0
    iteration_num = Config.SimulatedAnnealingIteration
    # for i in range(0, iteration_num):
    #       move to another solution
    i = 0
    while True:
        i += 1
        new_tg, new_ctg, new_ag = move_to_next_solution(i, current_tg, current_ctg, current_ag,  noc_rg,
                                                        shm, critical_rg, noncritical_rg, logging)
        Scheduling_Functions.clear_scheduling(new_ag)
        Scheduler.schedule_all(new_tg, new_ag, shm, False, logging)

        # calculate the cost of new solution
        new_cost = Mapping_Functions.mapping_cost_function(new_tg, new_ag, shm, False,
                                                           initial_mapping_string=init_map_string)

        if new_cost < best_cost:
            best_tg = copy.deepcopy(new_tg)
            best_ag = copy.deepcopy(new_ag)
            best_ctg = copy.deepcopy(new_ctg)
            best_cost = new_cost
            print ("\033[33m* NOTE::\033[0mFOUND BETTER SOLUTION WITH COST:"+"{0:.2f}".format(new_cost) +
                   "\t ITERATION:"+str(i)+"\tIMPROVEMENT:" +
                   "{0:.2f}".format(100*(starting_cost-new_cost)/starting_cost)+" %")
        # calculate the probability P of accepting the solution
        prob = metropolis(current_cost, new_cost, temperature)
        # print ("prob:", prob)
        # throw the coin with probability P
        # re-derive a per-iteration seed deterministically from the configured seed
        random_seed = Config.mapping_random_seed
        random.seed(Config.mapping_random_seed)
        for j in range(0, i):
            random_seed = random.randint(1, 100000)
        random.seed(random_seed)
        logging.info("Throwing Dice: random_seed: "+str(random_seed)+"    iteration: "+str(i))
        if prob > random.random():
            # accept the new solution
            move_accepted = True
            current_tg = copy.deepcopy(new_tg)
            current_ag = copy.deepcopy(new_ag)
            current_ctg = copy.deepcopy(new_ctg)
            current_cost = new_cost
            if Config.SA_ReportSolutions:
                if slope is not None:
                    print ("\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:", "{0:.2f}".format(current_cost),
                           "\tprob:", "{0:.2f}".format(prob), "\tTemp:", "{0:.2f}".format(temperature),
                           "\t Iteration:", i, "\tSLOPE:", "{0:.2f}".format(slope))
                # bug fix: this was a second independent `if`, so whenever slope
                # was set and standard_deviation was None the plain line below
                # printed a second time; `elif` makes the three cases exclusive
                elif standard_deviation is not None:
                    print ("\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:", "{0:.2f}".format(current_cost),
                           "\tprob:", "{0:.2f}".format(prob), "\tTemp:", "{0:.2f}".format(temperature),
                           "\t Iteration:", i, "\tSTD_DEV:", "{0:.2f}".format(standard_deviation))
                else:
                    print ("\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:", "{0:.2f}".format(current_cost),
                           "\tprob:", "{0:.2f}".format(prob), "\tTemp:", "{0:.2f}".format(temperature),
                           "\t Iteration:", i)
        else:
            move_accepted = False
            # move back to initial solution
            pass
        # update Temp
        mapping_process_file.write(Mapping_Functions.mapping_into_string(current_tg)+"\n")
        sa_temperature_file.write(str(temperature)+"\n")
        mapping_cost_file.write(str(current_cost)+"\n")

        if Config.SA_AnnealingSchedule == 'Adaptive':
            # keep the window bounded at CostMonitorQueSize and track the cost slope
            if len(cost_monitor) > Config.CostMonitorQueSize:
                cost_monitor.appendleft(current_cost)
                cost_monitor.pop()
            else:
                cost_monitor.appendleft(current_cost)
            slope = calculate_slope_of_cost(cost_monitor)
            if slope == 0:
                zero_slope_counter += 1
            else:
                zero_slope_counter = 0
            sa_cost_slop_file.write(str(slope)+"\n")

        if Config.SA_AnnealingSchedule == 'Aart':
            # Aart: cool using the std-dev of a full window, then restart the window
            if len(cost_monitor) == Config.CostMonitorQueSize:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                # print (standard_deviation)
            else:
                cost_monitor.appendleft(current_cost)

        # Huang's annealing schedule is very much like Aart's Schedule... how ever, Aart's schedule stays in a fixed
        # temperature for a fixed number of steps, however, Huang's schedule decides about number of steps dynamically

        if Config.SA_AnnealingSchedule == 'Huang':
            cost_monitor.appendleft(current_cost)
            if len(cost_monitor) > 1:
                huang_cost_mean = sum(cost_monitor)/len(cost_monitor)
                huang_cost_sd = statistics.stdev(cost_monitor)
                if move_accepted:
                    # counter1: accepted cost within +/- alpha std-devs of the mean (steady)
                    if huang_cost_mean - Config.HuangAlpha * huang_cost_sd <= current_cost <= \
                            huang_cost_mean + Config.HuangAlpha * huang_cost_sd:
                        huang_counter1 += 1
                    else:
                        huang_counter2 += 1
            # print (huang_counter1, huang_counter2)
            sa_huang_race_file.write(str(huang_counter1)+" "+str(huang_counter2)+"\n")
            if huang_counter1 == Config.HuangTargetValue1:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                huang_counter1 = 0
                huang_counter2 = 0
                huang_steady_counter = 0
            elif huang_counter2 == Config.HuangTargetValue2:
                huang_counter1 = 0
                huang_counter2 = 0
                standard_deviation = None
            elif huang_steady_counter == Config.CostMonitorQueSize:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                huang_counter1 = 0
                huang_counter2 = 0
                huang_steady_counter = 0
                print ("\033[36m* COOLING::\033[0m REACHED MAX STEADY STATE... PREPARING FOR COOLING...")
            else:
                standard_deviation = None

            huang_steady_counter += 1

        temperature = next_temp(initial_temp, i, iteration_num, temperature, slope, standard_deviation)

        # termination checks
        if Config.SA_AnnealingSchedule == 'Adaptive':
            if zero_slope_counter == Config.MaxSteadyState:
                print ("NO IMPROVEMENT POSSIBLE...")
                break
        if Config.TerminationCriteria == 'IterationNum':
            if i == Config.SimulatedAnnealingIteration:
                print ("REACHED MAXIMUM ITERATION NUMBER...")
                break
        elif Config.TerminationCriteria == 'StopTemp':
            if temperature <= Config.SA_StopTemp:
                print ("REACHED STOP TEMPERATURE...")
                break

    mapping_cost_file.close()
    mapping_process_file.close()
    sa_temperature_file.close()
    sa_cost_slop_file.close()
    sa_huang_race_file.close()
    print ("-------------------------------------")
    print ("STARTING COST:"+str(starting_cost)+"\tFINAL COST:"+str(best_cost))
    print ("IMPROVEMENT:"+"{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost)+" %")
    return best_tg, best_ctg, best_ag
def initialize_system(logging):
    """
    Generates the Task graph, Architecture Graph, System Health Monitoring Unit, NoC routing graph(s) and
    Test Task Graphs and does the mapping and scheduling and returns to the user the initial system
    :param logging: logging file
    :return:  tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
    """
    # ---- task graph generation (and optional acyclicity check) ----
    tg = copy.deepcopy(TG_Functions.generate_tg())
    if Config.DebugInfo:
        Task_Graph_Reports.report_task_graph(tg, logging)
    Task_Graph_Reports.draw_task_graph(tg)
    if Config.TestMode:
        TG_Test.check_acyclic(tg, logging)
    ####################################################################
    # ---- architecture graph generation ----
    ag = copy.deepcopy(AG_Functions.generate_ag(logging))
    AG_Functions.update_ag_regions(ag)
    AG_Functions.random_darkness(ag)
    if Config.EnablePartitioning:
        AG_Functions.setup_network_partitioning(ag)
    if Config.FindOptimumAG:
        Arch_Graph_Reports.draw_ag(ag, "AG_Full")
    else:
        Arch_Graph_Reports.draw_ag(ag, "AG")
    ####################################################################
    # ---- system health monitoring unit setup ----
    Config.setup_turns_health()

    shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
    shmu.setup_noc_shm(ag, Config.TurnsHealth, True)
    # Here we are injecting initial faults of the system: we assume these fault
    # information is obtained by post manufacturing system diagnosis
    if Config.FindOptimumAG:
        # optimize vertical links before applying faults, then redraw the AG
        vl_opt.optimize_ag_vertical_links(ag, shmu, logging)
        vl_opt_functions.cleanup_ag(ag, shmu)
        Arch_Graph_Reports.draw_ag(ag, "AG_VLOpt")
    SHMU_Functions.apply_initial_faults(shmu)
    if Config.viz.shm:
        SHMU_Reports.draw_shm(shmu.SHM)
        SHMU_Reports.draw_temp_distribution(shmu.SHM)
    # SHM_Reports.report_noc_shm()
    ####################################################################
    # ---- routing graph generation (from file or from the turn model) ----
    routing_graph_start_time = time.time()
    if Config.SetRoutingFromFile:
        noc_rg = copy.deepcopy(Routing.gen_noc_route_graph_from_file(ag, shmu, Config.RoutingFilePath,
                                                                     Config.DebugInfo, Config.DebugDetails))
    else:
        noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, Config.UsedTurnModel,
                                                                Config.DebugInfo, Config.DebugDetails))
    Routing_Functions.check_deadlock_freeness(noc_rg)
    print ("\033[92mTIME::\033[0m ROUTING GRAPH GENERATION TOOK: " +
           str(round(time.time()-routing_graph_start_time))+" SECONDS")
    # this is for double checking...
    if Config.FindOptimumAG:
        Calculate_Reachability.reachability_metric(ag, noc_rg, True)
    # Some visualization...
    if Config.viz.rg:
        RoutingGraph_Reports.draw_rg(noc_rg)
    ####################################################################
    # in case of partitioning, we have to route based on different Route-graphs
    if Config.EnablePartitioning:
        critical_rg, noncritical_rg = Calculate_Reachability.calculate_reachability_with_regions(ag, shmu)
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    else:
        critical_rg, noncritical_rg = None, None
        Calculate_Reachability.calculate_reachability(ag, noc_rg)
        Calculate_Reachability.optimize_reachability_rectangles(ag, Config.NumberOfRects)
        # ReachabilityReports.report_reachability(ag)
        ReachabilityReports.report_reachability_in_file(ag, "ReachAbilityNodeReport")
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    ####################################################################
    # ---- mapping: either replayed from file or computed, then scheduled ----
    if Config.read_mapping_from_file:
        Mapping_Functions.read_mapping_from_file(tg, ag, shmu.SHM, noc_rg, critical_rg, noncritical_rg,
                                                 Config.mapping_file_path, logging)
        Scheduler.schedule_all(tg, ag, shmu.SHM, False, logging)
    else:
        best_tg, best_ag = Mapping.mapping(tg, ag, noc_rg, critical_rg, noncritical_rg, shmu.SHM, logging)
        if best_ag is not None and best_tg is not None:
            # adopt the optimized mapping; drop the temporaries to free memory
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            del best_tg, best_ag
            # SHM.add_current_mapping_to_mpm(tg)
            Mapping_Functions.write_mapping_to_file(ag, "mapping_report")
    if Config.viz.mapping_distribution:
        Mapping_Reports.draw_mapping_distribution(ag, shmu)
    if Config.viz.mapping:
        Mapping_Reports.draw_mapping(tg, ag, shmu.SHM, "Mapping_post_opt")
    if Config.viz.scheduling:
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingTG")
    ####################################################################
    # PMC-Graph
    # at this point we assume that the system health map knows about the initial faults from
    # the diagnosis process
    if Config.GeneratePMCG:
        pmcg_start_time = time.time()
        if Config.OneStepDiagnosable:
            pmcg = TestSchedulingUnit.gen_one_step_diagnosable_pmcg(ag, shmu.SHM)
        else:
            pmcg = TestSchedulingUnit.gen_sequentially_diagnosable_pmcg(ag, shmu.SHM)
        test_tg = TestSchedulingUnit.generate_test_tg_from_pmcg(pmcg)
        print ("\033[92mTIME::\033[0m PMCG AND TTG GENERATION TOOK: " +
               str(round(time.time()-pmcg_start_time)) + " SECONDS")
        if Config.viz.pmcg:
            TestSchedulingUnit.draw_pmcg(pmcg)
        if Config.viz.ttg:
            TestSchedulingUnit.draw_ttg(test_tg)
        # merge the test tasks into the main TG, map and schedule them
        TestSchedulingUnit.insert_test_tasks_in_tg(pmcg, tg)
        Task_Graph_Reports.draw_task_graph(tg, ttg=test_tg)
        TestSchedulingUnit.map_test_tasks(tg, ag, shmu.SHM, noc_rg, logging)
        Scheduler.schedule_test_in_tg(tg, ag, shmu.SHM, False, logging)
        Scheduling_Reports.report_mapped_tasks(ag, logging)
        # TestSchedulingUnit.remove_test_tasks_from_tg(test_tg, tg)
        # Task_Graph_Reports.draw_task_graph(tg, TTG=test_tg)
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingWithTTG")
    else:
        pmcg = None
    Arch_Graph_Reports.gen_latex_ag(ag, shmu.SHM)
    print ("===========================================")
    print ("SYSTEM IS UP...")

    TrafficTableGenerator.generate_noxim_traffic_table(ag, tg)
    if Config.viz.mapping_frames:
        Mapping_Animation.generate_frames(ag, shmu.SHM)
    return tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
def mapping_opt_local_search(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm,
                             iteration_num, report, detailed_report, logging,
                             cost_data_file_name, mapping_process_file_name, random_seed,
                             initial_mapping_string=None):
    """
    Optimizes the current mapping with a random-restart local search: repeatedly
    moves one randomly chosen cluster to a random node and keeps the move only if
    the mapping cost does not increase.
    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for the Critical Region
    :param noncritical_rg: NoC Routing Graph for the Non-Critical Region
    :param shm: System Health Map
    :param iteration_num: number of local-search iterations
    :param report: if True, prints progress to stdout
    :param detailed_report: forwarded to the cost function for verbose reporting
    :param logging: logging file
    :param cost_data_file_name: base name (str) of the cost log file
    :param mapping_process_file_name: base name (str) of the mapping process log file
    :param random_seed: seed for the random number generator
    :param initial_mapping_string: optional reference mapping for distance-based cost
    :return: (best_tg, best_ctg, best_ag) -- deep copies of the best solution found
    """
    random.seed(random_seed)
    if report:
        print ("===========================================")
        print ("STARTING MAPPING OPTIMIZATION...USING LOCAL SEARCH...")
        print ("NUMBER OF ITERATIONS: "+str(iteration_num))

    if type(cost_data_file_name) is str:
        mapping_cost_file = open('Generated_Files/Internal/'+cost_data_file_name+'.txt', 'a')
    else:
        raise ValueError("cost_data_file_name name is not string: "+str(cost_data_file_name))

    if type(mapping_process_file_name) is str:
        mapping_process_file = open('Generated_Files/Internal/'+mapping_process_file_name+'.txt', 'a')
    else:
        raise ValueError("mapping_process_file name is not string: "+str(mapping_process_file_name))

    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    best_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False, initial_mapping_string=initial_mapping_string)
    starting_cost = best_cost
    for iteration in range(0, iteration_num):
        logging.info("       ITERATION:"+str(iteration))
        # pick a random cluster and remove it from its current node
        cluster_to_move = random.choice(ctg.nodes())
        current_node = ctg.node[cluster_to_move]['Node']
        Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                                   cluster_to_move, current_node, logging)
        destination_node = random.choice(ag.nodes())
        if Config.EnablePartitioning:
            # under partitioning, the destination must belong to the cluster's criticality region
            while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
                destination_node = random.choice(ag.nodes())
        # print (ctg.node[cluster_to_move]['Criticality'],AG.node[destination_node]['Region'])

        try_counter = 0
        while not Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                        cluster_to_move, destination_node, logging):

            # If add_cluster_to_node fails it automatically removes all the connections...
            # we need to add the cluster to the old place...
            Mapping_Functions.add_cluster_to_node(tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                                                  cluster_to_move, current_node, logging)

            # choosing another cluster to move
            cluster_to_move = random.choice(ctg.nodes())
            current_node = ctg.node[cluster_to_move]['Node']
            Mapping_Functions.remove_cluster_from_node(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg,
                                                       cluster_to_move, current_node, logging)
            destination_node = random.choice(ag.nodes())
            if Config.EnablePartitioning:
                while ctg.node[cluster_to_move]['Criticality'] != ag.node[destination_node]['Region']:
                    destination_node = random.choice(ag.nodes())
            # print (ctg.node[cluster_to_move]['Criticality'],AG.node[destination_node]['Region'])

            if try_counter >= 3*len(ag.nodes()):
                if report:
                    print ("CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH...")
                logging.info("CAN NOT FIND ANY FEASIBLE SOLUTION... ABORTING LOCAL SEARCH...")
                tg = copy.deepcopy(best_tg)
                ag = copy.deepcopy(best_ag)
                # bug fix: this used to be `copy.deepcopy(ctg)` (a self-copy), which
                # left ctg un-restored on the abort path; restore from best_ctg like
                # tg and ag above (and like the revert branch below).
                ctg = copy.deepcopy(best_ctg)
                if report:
                    Scheduling_Reports.report_mapped_tasks(ag, logging)
                    Mapping_Functions.mapping_cost_function(tg, ag, shm, True, initial_mapping_string=initial_mapping_string)
                return best_tg, best_ctg, best_ag
            try_counter += 1

        Scheduling_Functions.clear_scheduling(ag)
        Scheduler.schedule_all(tg, ag, shm, False, logging)

        current_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, detailed_report, initial_mapping_string=initial_mapping_string)
        mapping_process_file.write(Mapping_Functions.mapping_into_string(tg)+"\n")
        mapping_cost_file.write(str(current_cost)+"\n")
        if current_cost <= best_cost:
            # accept moves that do not worsen the cost (plateau moves included)
            if current_cost < best_cost:
                if report:
                    print ("\033[32m* NOTE::\033[0mBETTER SOLUTION FOUND WITH COST: "+str(current_cost) +
                           "\t ITERATION:"+str(iteration))
                logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(iteration))
            else:
                logging.info("NOTE:: MOVED TO SOLUTION WITH COST: "+str(current_cost)+"ITERATION: "+str(iteration))

            best_tg = copy.deepcopy(tg)
            best_ag = copy.deepcopy(ag)
            best_ctg = copy.deepcopy(ctg)
            best_cost = current_cost
        else:
            # reject the move: roll back to the best solution seen so far
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            ctg = copy.deepcopy(best_ctg)
            mapping_process_file.write(Mapping_Functions.mapping_into_string(tg)+"\n")

    Scheduling_Functions.clear_scheduling(ag)
    Scheduler.schedule_all(tg, ag, shm, False, logging)
    mapping_process_file.close()
    mapping_cost_file.close()
    if report:
        print ("-------------------------------------")
        print ("STARTING COST: "+str(starting_cost)+"\tFINAL COST: "+str(best_cost) +
               "\tAFTER "+str(iteration_num)+" ITERATIONS")
        print ("IMPROVEMENT:"+str("{0:.2f}".format(100*(starting_cost-best_cost)/starting_cost))+" %")
    return best_tg, best_ctg, best_ag
def optimize_mapping_sa(tg, ctg, ag, noc_rg, critical_rg, noncritical_rg, shm,
                        cost_data_file, logging):
    """
    Optimizes the current mapping using simulated annealing.

    NOTE(review): this redefines `optimize_mapping_sa` already defined earlier
    in this file (the later definition wins at import time) -- the two copies
    should be deduplicated.

    :param tg: Task Graph
    :param ctg: Clustered Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for the Critical Region
    :param noncritical_rg: NoC Routing Graph for the Non-Critical Region
    :param shm: System Health Map
    :param cost_data_file: base name (str) of the cost log file under Generated_Files/Internal/
    :param logging: logging file
    :return: (best_tg, best_ctg, best_ag) -- deep copies of the best solution visited
    """
    print("===========================================")
    print("STARTING MAPPING OPTIMIZATION...USING SIMULATED ANNEALING...")
    print("STARTING TEMPERATURE: " + str(Config.SA_InitialTemp))
    print("ANNEALING SCHEDULE: " + Config.SA_AnnealingSchedule)
    print("TERMINATION CRITERIA: " + Config.TerminationCriteria)
    print("================")

    if type(cost_data_file) is str:
        mapping_cost_file = open(
            'Generated_Files/Internal/' + cost_data_file + '.txt', 'a')
    else:
        raise ValueError("cost_data_file name is not string: " +
                         str(cost_data_file))

    mapping_process_file = open('Generated_Files/Internal/MappingProcess.txt',
                                'w')
    sa_temperature_file = open('Generated_Files/Internal/SATemp.txt', 'w')
    sa_cost_slop_file = open('Generated_Files/Internal/SACostSlope.txt', 'w')
    sa_huang_race_file = open('Generated_Files/Internal/SAHuangRace.txt', 'w')

    # adaptive annealing schedules track a sliding window of recent costs
    if Config.SA_AnnealingSchedule in ['Adaptive', 'Aart', 'Huang']:
        cost_monitor = deque([])
    else:
        cost_monitor = []

    if Config.DistanceBetweenMapping:
        # remember the starting mapping so the cost function can measure distance from it
        init_map_string = Mapping_Functions.mapping_into_string(tg)
        if Config.Mapping_CostFunctionType == 'CONSTANT':
            Mapping_Functions.clear_mapping(tg, ctg, ag)
            if not Mapping_Functions.make_initial_mapping(
                    tg, ctg, ag, shm, noc_rg, critical_rg, noncritical_rg,
                    True, logging, Config.mapping_random_seed):
                raise ValueError("FEASIBLE MAPPING NOT FOUND...")
    else:
        init_map_string = None

    # "current" is the state the annealer walks from; "best" is the global optimum seen so far
    current_tg = copy.deepcopy(tg)
    current_ag = copy.deepcopy(ag)
    current_ctg = copy.deepcopy(ctg)
    current_cost = Mapping_Functions.mapping_cost_function(
        tg, ag, shm, False, initial_mapping_string=init_map_string)
    starting_cost = current_cost

    best_tg = copy.deepcopy(tg)
    best_ag = copy.deepcopy(ag)
    best_ctg = copy.deepcopy(ctg)
    best_cost = current_cost

    initial_temp = Config.SA_InitialTemp
    sa_temperature_file.write(str(initial_temp) + "\n")
    temperature = initial_temp
    slope = None                 # used by the 'Adaptive' schedule
    zero_slope_counter = 0
    standard_deviation = None    # used by the 'Aart' and 'Huang' schedules

    # for Huang Annealing schedule
    huang_counter1 = 0
    huang_counter2 = 0
    huang_steady_counter = 0
    iteration_num = Config.SimulatedAnnealingIteration
    # for i in range(0, iteration_num):
    #       move to another solution
    i = 0
    while True:
        i += 1
        new_tg, new_ctg, new_ag = move_to_next_solution(
            i, current_tg, current_ctg, current_ag, noc_rg, shm, critical_rg,
            noncritical_rg, logging)
        Scheduling_Functions.clear_scheduling(new_ag)
        Scheduler.schedule_all(new_tg, new_ag, shm, False, logging)

        # calculate the cost of new solution
        new_cost = Mapping_Functions.mapping_cost_function(
            new_tg, new_ag, shm, False, initial_mapping_string=init_map_string)

        if new_cost < best_cost:
            best_tg = copy.deepcopy(new_tg)
            best_ag = copy.deepcopy(new_ag)
            best_ctg = copy.deepcopy(new_ctg)
            best_cost = new_cost
            print("\033[33m* NOTE::\033[0mFOUND BETTER SOLUTION WITH COST:" +
                  "{0:.2f}".format(new_cost) + "\t ITERATION:" + str(i) +
                  "\tIMPROVEMENT:" +
                  "{0:.2f}".format(100 * (starting_cost - new_cost) /
                                   starting_cost) + " %")
        # calculate the probability P of accepting the solution
        prob = metropolis(current_cost, new_cost, temperature)
        # print("prob:", prob)
        # throw the coin with probability P
        # re-derive a per-iteration seed deterministically from the configured seed
        random_seed = Config.mapping_random_seed
        random.seed(Config.mapping_random_seed)
        for j in range(0, i):
            random_seed = random.randint(1, 100000)
        random.seed(random_seed)
        logging.info("Throwing Dice: random_seed: " + str(random_seed) +
                     "    iteration: " + str(i))
        if prob > random.random():
            # accept the new solution
            move_accepted = True
            current_tg = copy.deepcopy(new_tg)
            current_ag = copy.deepcopy(new_ag)
            current_ctg = copy.deepcopy(new_ctg)
            current_cost = new_cost
            if Config.SA_ReportSolutions:
                if slope is not None:
                    print(
                        "\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:",
                        "{0:.2f}".format(current_cost), "\tprob:",
                        "{0:.2f}".format(prob), "\tTemp:",
                        "{0:.2f}".format(temperature), "\t Iteration:", i,
                        "\tSLOPE:", "{0:.2f}".format(slope))
                # bug fix: this was a second independent `if`, so whenever slope
                # was set and standard_deviation was None the plain line below
                # printed a second time; `elif` makes the three cases exclusive
                elif standard_deviation is not None:
                    print(
                        "\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:",
                        "{0:.2f}".format(current_cost), "\tprob:",
                        "{0:.2f}".format(prob), "\tTemp:",
                        "{0:.2f}".format(temperature), "\t Iteration:", i,
                        "\tSTD_DEV:", "{0:.2f}".format(standard_deviation))
                else:
                    print(
                        "\033[32m* NOTE::\033[0mMOVED TO SOLUTION WITH COST:",
                        "{0:.2f}".format(current_cost), "\tprob:",
                        "{0:.2f}".format(prob), "\tTemp:",
                        "{0:.2f}".format(temperature), "\t Iteration:", i)
        else:
            move_accepted = False
            # move back to initial solution
            pass
        # update Temp
        mapping_process_file.write(
            Mapping_Functions.mapping_into_string(current_tg) + "\n")
        sa_temperature_file.write(str(temperature) + "\n")
        mapping_cost_file.write(str(current_cost) + "\n")

        if Config.SA_AnnealingSchedule == 'Adaptive':
            # keep the window bounded at CostMonitorQueSize and track the cost slope
            if len(cost_monitor) > Config.CostMonitorQueSize:
                cost_monitor.appendleft(current_cost)
                cost_monitor.pop()
            else:
                cost_monitor.appendleft(current_cost)
            slope = calculate_slope_of_cost(cost_monitor)
            if slope == 0:
                zero_slope_counter += 1
            else:
                zero_slope_counter = 0
            sa_cost_slop_file.write(str(slope) + "\n")

        if Config.SA_AnnealingSchedule == 'Aart':
            # Aart: cool using the std-dev of a full window, then restart the window
            if len(cost_monitor) == Config.CostMonitorQueSize:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                # print(standard_deviation)
            else:
                cost_monitor.appendleft(current_cost)

        # Huang's annealing schedule is very much like Aart's Schedule... how ever, Aart's schedule stays in a fixed
        # temperature for a fixed number of steps, however, Huang's schedule decides about number of steps dynamically

        if Config.SA_AnnealingSchedule == 'Huang':
            cost_monitor.appendleft(current_cost)
            if len(cost_monitor) > 1:
                huang_cost_mean = sum(cost_monitor) / len(cost_monitor)
                huang_cost_sd = statistics.stdev(cost_monitor)
                if move_accepted:
                    # counter1: accepted cost within +/- alpha std-devs of the mean (steady)
                    if huang_cost_mean - Config.HuangAlpha * huang_cost_sd <= current_cost <= \
                            huang_cost_mean + Config.HuangAlpha * huang_cost_sd:
                        huang_counter1 += 1
                    else:
                        huang_counter2 += 1
            # print(huang_counter1, huang_counter2)
            sa_huang_race_file.write(
                str(huang_counter1) + " " + str(huang_counter2) + "\n")
            if huang_counter1 == Config.HuangTargetValue1:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                huang_counter1 = 0
                huang_counter2 = 0
                huang_steady_counter = 0
            elif huang_counter2 == Config.HuangTargetValue2:
                huang_counter1 = 0
                huang_counter2 = 0
                standard_deviation = None
            elif huang_steady_counter == Config.CostMonitorQueSize:
                standard_deviation = statistics.stdev(cost_monitor)
                cost_monitor.clear()
                huang_counter1 = 0
                huang_counter2 = 0
                huang_steady_counter = 0
                print(
                    "\033[36m* COOLING::\033[0m REACHED MAX STEADY STATE... PREPARING FOR COOLING..."
                )
            else:
                standard_deviation = None

            huang_steady_counter += 1

        temperature = next_temp(initial_temp, i, iteration_num, temperature,
                                slope, standard_deviation)

        # termination checks
        if Config.SA_AnnealingSchedule == 'Adaptive':
            if zero_slope_counter == Config.MaxSteadyState:
                print("NO IMPROVEMENT POSSIBLE...")
                break
        if Config.TerminationCriteria == 'IterationNum':
            if i == Config.SimulatedAnnealingIteration:
                print("REACHED MAXIMUM ITERATION NUMBER...")
                break
        elif Config.TerminationCriteria == 'StopTemp':
            if temperature <= Config.SA_StopTemp:
                print("REACHED STOP TEMPERATURE...")
                break

    mapping_cost_file.close()
    mapping_process_file.close()
    sa_temperature_file.close()
    sa_cost_slop_file.close()
    sa_huang_race_file.close()
    print("-------------------------------------")
    print("STARTING COST:" + str(starting_cost) + "\tFINAL COST:" +
          str(best_cost))
    print("IMPROVEMENT:" +
          "{0:.2f}".format(100 *
                           (starting_cost - best_cost) / starting_cost) + " %")
    return best_tg, best_ctg, best_ag
# Beispiel #28
# 0
def n_map(tg, ag, noc_rg, critical_rg, non_critical_rg, shm, logging):
    """
    Performs the NMap Mapping algorithm:
      step 1: pick the task with the highest weighted communication volume;
      step 2: place it on a healthy node with the most healthy neighbours;
      step 3: repeatedly pick the unmapped task that communicates most with
              the already-mapped ones and place it on the reachable node with
              the lowest Manhattan-distance-weighted communication cost;
      finally a pairwise node-swap phase keeps any swap that lowers the
      total communication cost.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for Critical Region
    :param non_critical_rg: NoC Routing Graph for Non-Critical Region
    :param shm: System Health Map
    :param logging: logging File
    :return: TG and AG
    :raises ValueError: if there are more tasks than PEs, if no reachable
        candidate node exists, or if mapping a task onto a node fails
    """
    print ("===========================================")
    print ("STARTING N-MAP MAPPING...\n")

    if len(tg.nodes()) > len(ag.nodes()):
        raise ValueError("Number of tasks should be smaller or equal to number of PEs")

    mapped_tasks = []
    unmapped_tasks = copy.deepcopy(tg.nodes())
    allocated_nodes = []
    unallocated_nodes = copy.deepcopy(ag.nodes())

    # remove all broken nodes from unallocated_nodes list
    # (iterate over a snapshot: removing from the list while iterating it
    # would silently skip the element that follows each removed node)
    for node in list(unallocated_nodes):
        if not shm.node[node]['NodeHealth']:
            unallocated_nodes.remove(node)
            print ("REMOVED BROKEN NODE "+str(node)+" FROM UN-ALLOCATED NODES")

    print ("------------------")
    print ("STEP 1:")
    # step 1: find the task with highest weighted communication volume
    tasks_com_dict = TG_Functions.tasks_communication_weight(tg)
    sorted_tasks_com = sorted(tasks_com_dict, key=tasks_com_dict.get, reverse=True)
    print ("\t SORTED TASKS BY COMMUNICATION WEIGHT:\n"+"\t "+str(sorted_tasks_com))
    print ("\t -------------")
    chosen_task = sorted_tasks_com[0]
    print ("\t CHOSEN TASK: "+str(chosen_task))
    mapped_tasks.append(chosen_task)
    print ("\t ADDED TASK "+str(chosen_task)+"TO MAPPED TASKS LIST")
    unmapped_tasks.remove(chosen_task)
    print ("\t REMOVED TASK "+str(chosen_task)+"FROM UN-MAPPED TASKS LIST")

    print ("------------------")
    print ("STEP 2:")
    # step 2: choose (randomly, among ties) a node with the most healthy neighbours
    node_neighbors_dict = AG_Functions.node_neighbors(ag, shm)
    sorted_node_neighbors = sorted(node_neighbors_dict, key=node_neighbors_dict.get, reverse=True)
    max_neighbors_node = AG_Functions.max_node_neighbors(node_neighbors_dict, sorted_node_neighbors)
    print ("\t SORTED NODES BY NUMBER OF NEIGHBOURS:\n"+"\t "+str(sorted_node_neighbors))
    print ("\t -------------")
    print ("\t NODES WITH MAX NEIGHBOURS:\t"+str(max_neighbors_node))
    chosen_node = random.choice(max_neighbors_node)

    print ("\t CHOSEN NODE: "+str(chosen_node))
    allocated_nodes.append(chosen_node)
    print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
    unallocated_nodes.remove(chosen_node)
    print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")
    # Map Chosen Task on Chosen Node...
    if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                          non_critical_rg, chosen_task, chosen_node, logging):
        print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
    else:
        raise ValueError("Mapping task on node failed...")

    print ("------------------")
    print ("STEP 3:")
    while len(unmapped_tasks) > 0:
        print ("\033[33m==>\033[0m  UN-MAPPED TASKS #: "+str(len(unmapped_tasks)))
        print ("\t -------------")
        print ("\t STEP 3.1:")
        # find the unmapped task which communicates most with mapped_tasks
        max_com = 0
        unmapped_tasks_com = {}
        tasks_with_max_com_to_mapped = []
        for Task in unmapped_tasks:
            task_weight = 0
            for mapped_task in mapped_tasks:
                if (Task, mapped_task) in tg.edges():
                    task_weight += tg.edge[Task][mapped_task]["ComWeight"]
                if (mapped_task, Task) in tg.edges():
                    task_weight += tg.edge[mapped_task][Task]["ComWeight"]
            unmapped_tasks_com[Task] = task_weight
            if max_com < task_weight:
                max_com = task_weight
                tasks_with_max_com_to_mapped = [Task]
            elif max_com == task_weight:
                tasks_with_max_com_to_mapped.append(Task)
        print ("\t MAX COMMUNICATION WITH THE MAPPED TASKS: "+str(max_com))
        print ("\t TASK(S) WITH MAX COMMUNICATION TO MAPPED TASKS: "+str(tasks_with_max_com_to_mapped))
        if len(tasks_with_max_com_to_mapped) > 1:
            # multiple tasks with same comm to mapped
            # Find the one that communicates most with un-mapped tasks...
            candid_task_with_max_com_to_unmapped = []
            max_com = 0
            for CandidateTask in tasks_with_max_com_to_mapped:
                task_weight = 0
                for unmapped_task in unmapped_tasks:
                    # bug fix: weigh the candidate's own edges (the original
                    # reused the stale loop variable "Task" from step 3.1)
                    if (CandidateTask, unmapped_task) in tg.edges():
                        task_weight += tg.edge[CandidateTask][unmapped_task]["ComWeight"]
                    if (unmapped_task, CandidateTask) in tg.edges():
                        task_weight += tg.edge[unmapped_task][CandidateTask]["ComWeight"]
                if task_weight > max_com:
                    # bug fix: remember the new maximum so later candidates
                    # are compared against it instead of a stale value
                    max_com = task_weight
                    candid_task_with_max_com_to_unmapped = [CandidateTask]
                elif task_weight == max_com:
                    candid_task_with_max_com_to_unmapped.append(CandidateTask)
            print ("\t CANDIDATE TASK(S) THAT COMMUNICATE MOST WITH UN_MAPPED: " +
                   str(candid_task_with_max_com_to_unmapped))
            if len(candid_task_with_max_com_to_unmapped) > 1:
                # if multiple tasks with the same com to unmmaped also,
                # choose randomly
                chosen_task = random.choice(candid_task_with_max_com_to_unmapped)
            else:
                chosen_task = candid_task_with_max_com_to_unmapped[0]
        else:
            chosen_task = tasks_with_max_com_to_mapped[0]
        print ("\t CHOSEN TASK: "+str(chosen_task))

        # Find the unallocated tile with lowest communication cost to/from the allocated_tiles_set.
        print ("\t -------------")
        print ("\t STEP 3.2:")
        min_cost = float("inf")
        node_candidates = []
        for unallocated_node in unallocated_nodes:
            cost = 0
            reachable = True
            for mapped_task in mapped_tasks:
                com_weight = 0
                if (chosen_task, mapped_task) in tg.edges():
                    com_weight += tg.edge[chosen_task][mapped_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, unallocated_node,
                                                                                   destination_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
                elif (mapped_task, chosen_task) in tg.edges():
                    com_weight += tg.edge[mapped_task][chosen_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, destination_node,
                                                                                   unallocated_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
            if reachable:
                if cost < min_cost:
                    node_candidates = [unallocated_node]
                    min_cost = cost
                elif cost == min_cost:
                    node_candidates.append(unallocated_node)
            else:
                print ("\t \033[33m* NOTE::\033[0m NODE "+str(unallocated_node)+" CAN NOT REACH...")
        print ("\t CANDIDATE NODES: "+str(node_candidates)+" MIN COST: "+str(min_cost))

        if len(node_candidates) == 0:
            # every unallocated node failed the reachability test
            raise ValueError("COULD NOT FIND A REACHABLE CANDIDATE NODE...")
        elif len(node_candidates) == 1:
            chosen_node = node_candidates[0]
        else:
            # multiple nodes tie on minimum cost: break the tie randomly
            chosen_node = random.choice(node_candidates)

        mapped_tasks.append(chosen_task)
        print ("\t ADDED TASK "+str(chosen_task)+" TO MAPPED TASKS LIST")
        unmapped_tasks.remove(chosen_task)
        print ("\t REMOVED TASK "+str(chosen_task)+" FROM UN-MAPPED TASKS LIST")

        allocated_nodes.append(chosen_node)
        print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
        unallocated_nodes.remove(chosen_node)
        print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")

        if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                              non_critical_rg, chosen_task, chosen_node, logging):
            print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
        else:
            raise ValueError("Mapping task on node failed...")

    # Added by Behrad (Still under development)
    # Swapping phase: try every pair of nodes; keep a swap only if it lowers
    # the communication cost, otherwise swap back.
    print ("-----------------------")
    print ("PHASE ONE IS DONE... STARTING SWAP PROCESS...")
    # NOTE(review): node ids are assumed to be 0..len(ag.nodes())-1 here — confirm
    for node_id_1 in range(0, len(ag.nodes())-1):
        # bug fix: the inner range previously stopped at len(ag.nodes())-1,
        # so the last node never took part in any swap
        for node_id_2 in range(node_id_1+1, len(ag.nodes())):
            # cost of the current mapping before trying the swap
            comm_cost = calculate_com_cost(tg)

            # Swap (node_id_1 , node_id_2)
            swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg, node_id_1, node_id_2, logging)
            # Re-calculate communication cost over all flows in the task graph
            comm_cost_new = calculate_com_cost(tg)
            # Keep the new mapping only if it is strictly cheaper,
            # otherwise revert by swapping the two nodes back
            if comm_cost_new < comm_cost:
                print ("\033[32m* NOTE::\033[0m BETTER SOLUTION FOUND WITH COST: " + str(comm_cost_new))
            else:
                swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg,
                           node_id_2, node_id_1, logging)

    # End of Swapping phase
    print ("SWAP PROCESS FINISHED...")
    Scheduler.schedule_all(tg, ag, shm, True, logging)
    return tg, ag
Beispiel #29
0
def mapping(tg, ag, noc_rg, critical_rg, non_critical_rg, shm, logging, iteration=None
            , initial_mapping_string = None):
    """
    Dispatches to the mapping algorithm selected by Config.Mapping_Function.
    Returns tg and ag after Mapping in case of success.
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for Critical Region
    :param non_critical_rg: NoC Routing Graph for non-Critical Region
    :param shm: System Health Map! (Please note that mapper should not even have access to ful SHMU info)
    :param logging: logging file
    :param iteration: optional iteration tag forwarded to the initial mapping
        and to the optimization-visualization reports
    :param initial_mapping_string: optional serialized reference mapping
        forwarded to the cost function and the local search
    :return: (tg, ag) in case of failing returns (None, None)
    """
    # to run the following heuristics (Min_Min,Max_Min), one needs to use independent
    # tasks... Please use: generate_random_independent_tg
    if Config.Mapping_Function == 'MinMin':
        if Config.tg.type == 'RandomIndependent':
            return SimpleGreedy.min_min_mapping(tg, ag, shm, logging)
        else:
            raise ValueError('WRONG TG TYPE FOR THIS MAPPING FUNCTION. SHOULD USE::RandomIndependent')

    elif Config.Mapping_Function == 'MaxMin':
        if Config.tg.type == 'RandomIndependent':
            return SimpleGreedy.max_min_mapping(tg, ag, shm, logging)
        else:
            raise ValueError('WRONG TG TYPE FOR THIS MAPPING FUNCTION. SHOULD USE::RandomIndependent')

    elif Config.Mapping_Function == 'MinExecutionTime':
        if Config.tg.type == 'RandomIndependent':
            return SimpleGreedy.min_execution_time(tg, ag, shm, logging)
        else:
            raise ValueError('WRONG TG TYPE FOR THIS MAPPING FUNCTION. SHOULD USE::RandomIndependent')

    elif Config.Mapping_Function == 'MinimumCompletionTime':
        if Config.tg.type == 'RandomIndependent':
            return SimpleGreedy.minimum_completion_time(tg, ag, shm, logging)
        else:
            raise ValueError('WRONG TG TYPE FOR THIS MAPPING FUNCTION. SHOULD USE::RandomIndependent')

    elif Config.Mapping_Function == 'NMap':
        return NMap.n_map(tg, ag, noc_rg, critical_rg, non_critical_rg, shm, logging)

    elif Config.Mapping_Function in ['LocalSearch', 'IterativeLocalSearch', 'SimulatedAnnealing']:
        # the search-based mappers need a dependent task graph
        if Config.tg.type in ['RandomDependent', 'Manual', 'FromDOTFile']:
            pass
        else:
            raise ValueError('WRONG TG TYPE FOR THIS MAPPING FUNCTION. SHOULD USE::RandomDependent')
        clustering_start_time = time.time()
        # clustered task graph
        if Config.task_clustering:
            ctg = copy.deepcopy(Clustering.generate_ctg(len(ag.nodes())))
            if Clustering.initial_clustering(tg, ctg):
                # Clustered Task Graph Optimization
                if Config.Clustering_Optimization:
                    (best_clustering, best_task_graph) = \
                        Clustering.ctg_opt_local_search(tg, ctg, Config.clustering.iterations, logging)
                    tg = copy.deepcopy(best_task_graph)
                    ctg = copy.deepcopy(best_clustering)
                    del best_clustering, best_task_graph
                    # Clustering_Test.double_check_ctg(tg, ctg)
                    Clustering_Reports.report_ctg(ctg, "CTG_PostOpt.png")
                    Clustering_Reports.viz_clustering_opt()
                else:
                    print("CLUSTERING OPTIMIZATION TURNED OFF...")
                    print("REMOVING EMPTY CLUSTERS...")
                    Clustering_Functions.remove_empty_clusters(ctg)
                    Clustering_Reports.report_ctg(ctg, "CTG_PostCleaning.png")

                print("\033[92mTIME::\033[0m CLUSTERING AND OPTIMIZATION TOOK: "
                       + str(round(time.time()-clustering_start_time))+" SECONDS")
            else:
                print("Initial Clustering Failed....")
                raise ValueError("INITIAL CLUSTERING FAILED...")
        else:
            # no clustering requested: wrap each task in its own transparent cluster
            ctg = copy.deepcopy(Clustering.gen_transparent_clusters(tg))
        mapping_start_time = time.time()
        # Mapping CTG on AG
        random_seed = Config.mapping_random_seed
        if Mapping_Functions.make_initial_mapping(tg, ctg, ag, shm, noc_rg, critical_rg, non_critical_rg,
                                                  True, logging, random_seed, iteration):
            #if Config.DistanceBetweenMapping:
            #    init_mapping_string = Mapping_Functions.mapping_into_string(tg)
                # print(init_mapping_string)
            #else:
            #    init_mapping_string = None

            Mapping_Reports.report_mapping(ag, logging)
            # Schedule all tasks
            Scheduling_Functions.clear_scheduling(ag)
            Scheduler.schedule_all(tg, ag, shm, Config.DebugDetails, logging)
            Scheduling_Reports.report_mapped_tasks(ag, logging)
            Mapping_Functions.mapping_cost_function(tg, ag, shm, Config.DebugInfo)
            if Config.Mapping_Function == 'LocalSearch':
                # seed the cost and mapping-process trace files with the
                # initial solution before starting the local search
                mapping_cost_file = open('Generated_Files/Internal/LocalSearchMappingCost.txt', 'w')
                current_cost = Mapping_Functions.mapping_cost_function(tg, ag, shm, False, initial_mapping_string=initial_mapping_string)
                mapping_cost_file.write(str(current_cost)+"\n")
                mapping_cost_file.close()

                mapping_process_file = open('Generated_Files/Internal/MappingProcess.txt', 'w')
                mapping_process_file.write(Mapping_Functions.mapping_into_string(tg)+"\n")
                mapping_process_file.close()

                (best_tg, best_ctg, best_ag) = \
                    Local_Search.mapping_opt_local_search(tg, ctg, ag, noc_rg, critical_rg,
                                                          non_critical_rg, shm,
                                                          Config.LocalSearchIteration,
                                                          Config.DebugInfo, Config.DebugDetails, logging,
                                                          "LocalSearchMappingCost", "MappingProcess",
                                                          Config.mapping_random_seed, initial_mapping_string=initial_mapping_string)
                tg = copy.deepcopy(best_tg)
                ag = copy.deepcopy(best_ag)
                del best_tg, best_ctg, best_ag
                Mapping_Reports.viz_mapping_opt('LocalSearchMappingCost', iteration)
            elif Config.Mapping_Function == 'IterativeLocalSearch':
                (best_tg, best_ctg, best_ag) = \
                    Local_Search.mapping_opt_iterative_local_search(tg, ctg, ag, noc_rg, critical_rg,
                                                                    non_critical_rg, shm,
                                                                    Config.IterativeLocalSearchIterations,
                                                                    Config.LocalSearchIteration,
                                                                    Config.DebugInfo, Config.DebugDetails,
                                                                    logging)
                tg = copy.deepcopy(best_tg)
                ag = copy.deepcopy(best_ag)
                del best_tg, best_ctg, best_ag
                Mapping_Reports.viz_mapping_opt('LocalSearchMappingCost', iteration)
            elif Config.Mapping_Function == 'SimulatedAnnealing':
                (best_tg, best_ctg, best_ag) = SimulatedAnnealing.optimize_mapping_sa(tg, ctg, ag, noc_rg,
                                                                                      critical_rg, non_critical_rg,
                                                                                      shm, 'SA_MappingCost',
                                                                                      logging)
                Mapping_Reports.viz_mapping_opt('SA_MappingCost', iteration=None)
                # extra visualizations only make sense for these annealing schedules
                if Config.SA_AnnealingSchedule == 'Adaptive':
                    Mapping_Reports.viz_cost_slope()
                elif Config.SA_AnnealingSchedule == 'Huang':
                    Mapping_Reports.viz_huang_race()
                tg = copy.deepcopy(best_tg)
                ag = copy.deepcopy(best_ag)
                del best_tg, best_ctg, best_ag
            # print(Mapping_Functions.mapping_into_string(TG))
            print("\033[92mTIME::\033[0m MAPPING AND OPTIMIZATION TOOK: "
                   + str(round(time.time()-mapping_start_time))+" SECONDS")

            # re-schedule the optimized mapping and verify deadlines
            Mapping_Reports.report_mapping(ag, logging)
            Scheduling_Functions.clear_scheduling(ag)
            Scheduler.schedule_all(tg, ag, shm, False, logging)
            Scheduling_Reports.report_mapped_tasks(ag, logging)
            if not Scheduling_Functions.check_if_all_deadlines_are_met(tg,ag):
                raise ValueError("not all critical tasks have met their deadline!")
            Mapping_Functions.mapping_cost_function(tg, ag, shm, True,  initial_mapping_string=initial_mapping_string)
            return tg, ag
        else:
            Mapping_Reports.report_mapping(ag, logging)
            print("===========================================")
            raise ValueError("INITIAL MAPPING FAILED...")

    # unknown Config.Mapping_Function: signal failure to the caller
    return None, None
def initialize_system(logging):
    """
    Generates the Task graph, Architecture Graph, System Health Monitoring Unit, NoC routing graph(s) and
    Test Task Graphs and does the mapping and scheduling and returns to the user the initial system
    :param logging: logging file
    :return:  ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
        (note: the task graph tg is used internally but not returned)
    """
    # --- task graph generation (and optional sanity checks / drawing) ---
    tg = copy.deepcopy(TG_Functions.generate_tg())
    if Config.DebugInfo:
        Task_Graph_Reports.report_task_graph(tg, logging)
    Task_Graph_Reports.draw_task_graph(tg)
    if Config.TestMode:
        TG_Test.CheckAcyclic(tg, logging)
    ####################################################################
    # --- architecture graph generation ---
    ag = copy.deepcopy(AG_Functions.generate_ag(logging))
    AG_Functions.update_ag_regions(ag)
    AG_Functions.random_darkness(ag)
    if Config.EnablePartitioning:
        AG_Functions.setup_network_partitioning(ag)
    if Config.TestMode:
        AG_Test.ag_test()
    if Config.FindOptimumAG:
        Arch_Graph_Reports.draw_ag(ag, "AG_Full")
    else:
        Arch_Graph_Reports.draw_ag(ag, "AG")
    ####################################################################
    # --- system health monitoring unit setup ---
    Config.setup_turns_health()
    if Config.TestMode:
        SHMU_Test.test_shmu(ag)
    shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
    shmu.setup_noc_shm(ag, Config.TurnsHealth)
    # Here we are injecting initial faults of the system: we assume these fault
    # information is obtained by post manufacturing system diagnosis
    if Config.FindOptimumAG:
        Optimize_3D_AG.optimize_ag_vertical_links(ag, shmu, logging)
        Optimize_3D_AG.cleanup_ag(ag, shmu)
        Arch_Graph_Reports.draw_ag(ag, "AG_VLOpt")
    SHMU_Functions.ApplyInitialFaults(shmu)
    if Config.SHM_Drawing:
        SHMU_Reports.DrawSHM(shmu.SHM)
        SHMU_Reports.DrawTempDistribution(shmu.SHM)
    # SHM_Reports.Report_NoC_SystemHealthMap()
    ####################################################################
    # --- routing graph generation (from file, or from the turn model) ---
    routing_graph_start_time = time.time()
    if Config.SetRoutingFromFile:
        noc_rg = copy.deepcopy(
            Routing.GenerateNoCRouteGraphFromFile(ag, shmu,
                                                  Config.RoutingFilePath,
                                                  Config.DebugInfo,
                                                  Config.DebugDetails))
    else:
        noc_rg = copy.deepcopy(
            Routing.GenerateNoCRouteGraph(ag, shmu, Config.UsedTurnModel,
                                          Config.DebugInfo,
                                          Config.DebugDetails))
    print("\033[92mTIME::\033[0m ROUTING GRAPH GENERATION TOOK: " +
          str(round(time.time() - routing_graph_start_time)) + " SECONDS")
    # this is for double checking...
    if Config.FindOptimumAG:
        Calculate_Reachability.ReachabilityMetric(ag, noc_rg, True)
    # Some visualization...
    if Config.RG_Draw:
        RoutingGraph_Reports.draw_rg(noc_rg)
    ####################################################################
    # in case of partitioning, we have to route based on different Route-graphs
    if Config.EnablePartitioning:
        critical_rg, noncritical_rg = Calculate_Reachability.calculate_reachability_with_regions(
            ag, shmu)
        ReachabilityReports.ReportGSNoCFriendlyReachabilityInFile(ag)
    else:
        if Config.TestMode:
            Reachability_Test.ReachabilityTest()
        critical_rg, noncritical_rg = None, None
        Calculate_Reachability.calculate_reachability(ag, noc_rg)
        Calculate_Reachability.OptimizeReachabilityRectangles(
            ag, Config.NumberOfRects)
        # ReachabilityReports.ReportReachability(ag)
        ReachabilityReports.ReportReachabilityInFile(ag,
                                                     "ReachAbilityNodeReport")
        ReachabilityReports.ReportGSNoCFriendlyReachabilityInFile(ag)
    ####################################################################
    # --- mapping: either read a mapping from file, or compute one ---
    if Config.read_mapping_from_file:
        Mapping_Functions.read_mapping_from_file(tg, ag, shmu.SHM, noc_rg,
                                                 critical_rg, noncritical_rg,
                                                 Config.mapping_file_path,
                                                 logging)
        # NOTE(review): this call passes one more positional argument than the
        # other visible schedule_all call sites — confirm against its signature
        Scheduler.schedule_all(tg, ag, shmu.SHM, False, False, logging)
    else:
        best_tg, best_ag = Mapping.mapping(tg, ag, noc_rg, critical_rg,
                                           noncritical_rg, shmu.SHM, logging)
        # Mapping.mapping returns (None, None) on failure; only adopt a result
        # when both graphs were produced
        if best_ag is not None and best_tg is not None:
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            del best_tg, best_ag
            # SHM.AddCurrentMappingToMPM(tg)
            Mapping_Functions.write_mapping_to_file(ag, "mapping_report")
    if Config.Mapping_Dstr_Drawing:
        Mapping_Reports.draw_mapping_distribution(ag, shmu)
    if Config.Mapping_Drawing:
        Mapping_Reports.draw_mapping(tg, ag, shmu.SHM, "Mapping_post_opt")
    if Config.Scheduling_Drawing:
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingTG")
    ####################################################################
    # PMC-Graph
    # at this point we assume that the system health map knows about the initial faults from
    # the diagnosis process
    if Config.GeneratePMCG:
        pmcg_start_time = time.time()
        if Config.OneStepDiagnosable:
            pmcg = TestSchedulingUnit.GenerateOneStepDiagnosablePMCG(
                ag, shmu.SHM)
        else:
            pmcg = TestSchedulingUnit.GenerateSequentiallyDiagnosablePMCG(
                ag, shmu.SHM)
        test_tg = TestSchedulingUnit.GenerateTestTGFromPMCG(pmcg)
        print("\033[92mTIME::\033[0m PMCG AND TTG GENERATION TOOK: " +
              str(round(time.time() - pmcg_start_time)) + " SECONDS")
        if Config.PMCG_Drawing:
            TestSchedulingUnit.DrawPMCG(pmcg)
        if Config.TTG_Drawing:
            TestSchedulingUnit.DrawTTG(test_tg)
        # merge the test tasks into the task graph, map and schedule them
        TestSchedulingUnit.InsertTestTasksInTG(pmcg, tg)
        Task_Graph_Reports.draw_task_graph(tg, ttg=test_tg)
        TestSchedulingUnit.MapTestTasks(tg, ag, shmu.SHM, noc_rg, logging)
        Scheduler.schedule_test_in_tg(tg, ag, shmu.SHM, False, logging)
        Scheduling_Reports.report_mapped_tasks(ag, logging)
        # TestSchedulingUnit.RemoveTestTasksFromTG(test_tg, tg)
        # Task_Graph_Reports.draw_task_graph(tg, TTG=test_tg)
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingWithTTG")
    else:
        pmcg = None

    print("===========================================")
    print("SYSTEM IS UP...")

    # --- traffic tables for external simulators, optional animation frames ---
    TrafficTableGenerator.generate_noxim_traffic_table(ag, tg)
    TrafficTableGenerator.generate_gsnoc_traffic_table(ag, tg)
    if Config.GenMappingFrames:
        Mapping_Animation.generate_frames(tg, ag, shmu.SHM)
    return ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg