def generate_tg_from_dot(file_path):
    tg = networkx.DiGraph()
    print("PREPARING TASK GRAPH (TG) FROM DOT FILE...")

    try:
        with open(file_path, 'r') as tg_dot_file:
            for line in tg_dot_file:
                if "->" not in line:
                    continue
                edge_list = line.split()
                # print(edge_list[0], edge_list[2], edge_list[6])
                if edge_list[0] not in tg.nodes():
                    tg.add_node(edge_list[0],
                                WCET=1,
                                Criticality='L',
                                Cluster=None,
                                Node=None,
                                Priority=None,
                                Distance=None,
                                Release=0,
                                Type='App')
                if edge_list[2] not in tg.nodes():
                    tg.add_node(edge_list[2],
                                WCET=1,
                                Criticality='L',
                                Cluster=None,
                                Node=None,
                                Priority=None,
                                Distance=None,
                                Release=0,
                                Type='App')
                # TODO: the edge weight is not correct... it should depend on the amount of data
                # instead of being constant...
                communication_weight = 1
                tg.add_edge(edge_list[0],
                            edge_list[2],
                            Criticality='L',
                            Link=[],
                            ComWeight=communication_weight)
    except IOError:
        raise ValueError("CAN NOT OPEN " + str(file_path))
    TG_Functions.assign_distance(tg)
    print("TASK GRAPH (TG) IS READY...")
    return tg
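
# The TODO above notes that ComWeight is hard-coded to 1. A minimal sketch of how the
# weight could be read from the DOT file instead, assuming (hypothetically) that each
# edge line carries its data volume in a numeric label attribute such as [label="16"];
# parse_com_weight() is not part of the original code base.
import re


def parse_com_weight(dot_edge_line, default=1):
    # Return the integer inside label="..." if present, otherwise the default weight.
    match = re.search(r'label\s*=\s*"?(\d+)"?', dot_edge_line)
    return int(match.group(1)) if match else default

# Inside the reader loop one would then write:
#     communication_weight = parse_com_weight(line)
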
def asap_scheduling(tg, ag, shm, report, logging=None):
    """

    :param tg:  Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param report: report Switch
    :param logging: logging file
    :return: None
    """
    if logging is not None:
        logging.info("STARTING ASAP SCHEDULING ...")
    max_distance = TG_Functions.calculate_max_distance(tg) + 1
    for distance in range(0, max_distance):
        for task in tg.nodes():
            if tg.node[task]['task'].type == 'App':
                if tg.node[task]['task'].distance == distance:
                    node = tg.node[task]['task'].node
                    # logging.info("\tSCHEDULING TASK "+str(task)+" ON NODE:"+str(node))
                    (start_time, end_time) = find_task_asap_scheduling(tg, ag, shm, task, node, logging)
                    add_tg_task_to_node(tg, ag, task, node, start_time, end_time, None)
                    for edge in tg.edges():
                        if edge[0] == task:
                            destination_node = tg.node[edge[1]]['task'].node
                            if len(tg.edge[edge[0]][edge[1]]['Link']) > 0:
                                for batch_and_link in tg.edge[edge[0]][edge[1]]['Link']:
                                    batch = batch_and_link[0]
                                    link = batch_and_link[1]
                                    probability = batch_and_link[2]
                                    # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON Router: "+str(link) +
                                    #             " FROM BATCH: "+str(batch))
                                    (start_time, end_time) = find_edge_asap_scheduling_router(tg, ag, edge, link[0],
                                                                                              batch, probability,
                                                                                              report, logging)
                                    add_tg_edge_to_router(ag, edge, link[0], batch, probability, start_time,
                                                          end_time, logging)
                                    # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON LINK: "+str(link) +
                                    #             " FROM BATCH: "+str(batch))
                                    (start_time, end_time) = find_edge_asap_scheduling_link(tg, ag, edge, link, batch,
                                                                                            probability, report,
                                                                                            logging)
                                    add_tg_edge_to_link(ag, edge, link, batch, probability, start_time, end_time,
                                                        logging)

                                    if destination_node == link[1]:
                                        # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON Router: "+str(link) +
                                        #             " FROM BATCH: "+str(batch))
                                        (start_time, end_time) = find_edge_asap_scheduling_router(tg, ag, edge, link[1],
                                                                                                  batch, probability,
                                                                                                  report, logging)
                                        add_tg_edge_to_router(ag, edge, link[1], batch, probability, start_time,
                                                              end_time, logging)
    if logging is not None:
        logging.info("DONE ASAP SCHEDULING...")
    return None
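
# For reference, the ASAP policy used above in a self-contained form: a task may start
# only after all of its predecessors have finished, which is why tasks are processed
# wave by wave in increasing distance. This sketch is independent of the Task objects
# used above and takes WCETs as a plain dict (an assumption, not the project's API).
import networkx


def asap_times_sketch(tg, wcet):
    start_times, end_times = {}, {}
    for task in networkx.topological_sort(tg):
        # earliest start = latest finish time among predecessors (0 if there is none)
        start_times[task] = max((end_times[p] for p in tg.predecessors(task)), default=0)
        end_times[task] = start_times[task] + wcet[task]
    return start_times, end_times
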
def asap_scheduling(tg, ag, shm, report, logging=None):
    """

    :param tg:  Task Graph
    :param ag: Architecture Graph
    :param shm: System Health Map
    :param report: report Switch
    :param logging: logging file
    :return: None
    """
    if logging is not None:
        logging.info("STARTING ASAP SCHEDULING ...")
    max_distance = TG_Functions.calculate_max_distance(tg) + 1
    for distance in range(0, max_distance):
        for task in tg.nodes():
            if tg.node[task]['task'].type == 'App':
                if tg.node[task]['task'].distance == distance:
                    node = tg.node[task]['task'].node
                    # logging.info("\tSCHEDULING TASK "+str(task)+" ON NODE:"+str(node))
                    (start_time, end_time) = find_task_asap_scheduling(tg, ag, shm, task, node, logging)
                    add_tg_task_to_node(tg, ag, task, node, start_time, end_time, None)
                    for edge in tg.edges():
                        if edge[0] == task:
                            destination_node = tg.node[edge[1]]['task'].node
                            if len(tg.edges[edge]['Link']) > 0:
                                for batch_and_link in tg.edges[edge]['Link']:
                                    batch = batch_and_link[0]
                                    link = batch_and_link[1]
                                    probability = batch_and_link[2]
                                    # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON Router: "+str(link) +
                                    #             " FROM BATCH: "+str(batch))
                                    (start_time, end_time) = find_edge_asap_scheduling_router(tg, ag, edge, link[0],
                                                                                              batch, probability,
                                                                                              report, logging)
                                    add_tg_edge_to_router(ag, edge, link[0], batch, probability, start_time,
                                                          end_time, logging)
                                    # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON LINK: "+str(link) +
                                    #             " FROM BATCH: "+str(batch))
                                    (start_time, end_time) = find_edge_asap_scheduling_link(tg, ag, edge, link, batch,
                                                                                            probability, report,
                                                                                            logging)
                                    add_tg_edge_to_link(ag, edge, link, batch, probability, start_time, end_time,
                                                        logging)

                                    if destination_node == link[1]:
                                        # logging.info("\tSCHEDULING EDGE "+str(edge)+" ON Router: "+str(link) +
                                        #             " FROM BATCH: "+str(batch))
                                        (start_time, end_time) = find_edge_asap_scheduling_router(tg, ag, edge, link[1],
                                                                                                  batch, probability,
                                                                                                  report, logging)
                                        add_tg_edge_to_router(ag, edge, link[1], batch, probability, start_time,
                                                              end_time, logging)
    if logging is not None:
        logging.info("DONE ASAP SCHEDULING...")
    return None
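
# The two asap_scheduling() variants above differ only in the NetworkX edge-attribute
# accessor: tg.edge[u][v] is the 1.x API, tg.edges[edge] the 2.x API (tg.node[...] was
# kept as a deprecated alias through the 2.x series before being removed in favour of
# tg.nodes[...]). A minimal side-by-side sketch, assuming NetworkX 2.x is installed:
import networkx

demo_tg = networkx.DiGraph()
demo_tg.add_node("T0", WCET=1)
demo_tg.add_edge("T0", "T1", ComWeight=3, Link=[])

com_weight = demo_tg.edges["T0", "T1"]["ComWeight"]   # 2.x style (second variant)
# com_weight = demo_tg.edge["T0"]["T1"]["ComWeight"]  # 1.x style (first variant)
wcet = demo_tg.nodes["T0"]["WCET"]                    # 2.x replacement for tg.node[...]
com_weight = demo_tg["T0"]["T1"]["ComWeight"]         # works in both major versions
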
def n_map(tg, ag, noc_rg, critical_rg, non_critical_rg, shm, logging):
    """
    Performs NMap Mapping algorithm
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for Critical Region
    :param non_critical_rg: NoC Routing Graph for Non-Critical Region
    :param shm: System Health Map
    :param logging: logging File
    :return: TG and AG
    """
    print ("===========================================")
    print ("STARTING N-MAP MAPPING...\n")

    if len(tg.nodes()) > len(ag.nodes()):
        raise ValueError("Number of tasks should be smaller or equal to number of PEs")

    mapped_tasks = []
    unmapped_tasks = copy.deepcopy(tg.nodes())
    allocated_nodes = []
    unallocated_nodes = copy.deepcopy(ag.nodes())

    # remove all broken nodes from unallocated_nodes list
    # (iterate over a copy so that removing items does not skip elements)
    for node in list(unallocated_nodes):
        if not shm.node[node]['NodeHealth']:
            unallocated_nodes.remove(node)
            print ("REMOVED BROKEN NODE "+str(node)+" FROM UN-ALLOCATED NODES")

    print ("------------------")
    print ("STEP 1:")
    # step 1: find the task with highest weighted communication volume
    tasks_com_dict = TG_Functions.tasks_communication_weight(tg)
    sorted_tasks_com = sorted(tasks_com_dict, key=tasks_com_dict.get, reverse=True)
    print ("\t SORTED TASKS BY COMMUNICATION WEIGHT:\n"+"\t "+str(sorted_tasks_com))
    print ("\t -------------")
    chosen_task = sorted_tasks_com[0]
    print ("\t CHOSEN TASK: "+str(chosen_task))
    mapped_tasks.append(chosen_task)
    print ("\t ADDED TASK "+str(chosen_task)+"TO MAPPED TASKS LIST")
    unmapped_tasks.remove(chosen_task)
    print ("\t REMOVED TASK "+str(chosen_task)+"FROM UN-MAPPED TASKS LIST")

    print ("------------------")
    print ("STEP 2:")
    node_neighbors_dict = AG_Functions.node_neighbors(ag, shm)
    sorted_node_neighbors = sorted(node_neighbors_dict, key=node_neighbors_dict.get, reverse=True)
    max_neighbors_node = AG_Functions.max_node_neighbors(node_neighbors_dict, sorted_node_neighbors)
    print ("\t SORTED NODES BY NUMBER OF NEIGHBOURS:\n"+"\t "+str(sorted_node_neighbors))
    print ("\t -------------")
    print ("\t NODES WITH MAX NEIGHBOURS:\t"+str(max_neighbors_node))
    chosen_node = random.choice(max_neighbors_node)

    print ("\t CHOSEN NODE: "+str(chosen_node))
    allocated_nodes.append(chosen_node)
    print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
    unallocated_nodes.remove(chosen_node)
    print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")
    # Map Chosen Task on Chosen Node...
    if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                          non_critical_rg, chosen_task, chosen_node, logging):
        print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
    else:
        raise ValueError("Mapping task on node failed...")

    print ("------------------")
    print ("STEP 3:")
    while len(unmapped_tasks) > 0:
        print ("\033[33m==>\033[0m  UN-MAPPED TASKS #: "+str(len(unmapped_tasks)))
        print ("\t -------------")
        print ("\t STEP 3.1:")
        # find the unmapped task which communicates most with mapped_tasks
        max_com = 0
        unmapped_tasks_com = {}
        tasks_with_max_com_to_mapped = []
        for Task in unmapped_tasks:
            task_weight = 0
            for mapped_task in mapped_tasks:
                if (Task, mapped_task) in tg.edges():
                    task_weight += tg.edge[Task][mapped_task]["ComWeight"]
                if (mapped_task, Task) in tg.edges():
                    task_weight += tg.edge[mapped_task][Task]["ComWeight"]
            unmapped_tasks_com[Task] = task_weight
            if max_com < task_weight:
                max_com = task_weight
                tasks_with_max_com_to_mapped = [Task]
            elif max_com == task_weight:
                tasks_with_max_com_to_mapped.append(Task)
        print ("\t MAX COMMUNICATION WITH THE MAPPED TASKS: "+str(max_com))
        print ("\t TASK(S) WITH MAX COMMUNICATION TO MAPPED TASKS: "+str(tasks_with_max_com_to_mapped))
        if len(tasks_with_max_com_to_mapped) > 1:
            # multiple tasks with same comm to mapped
            # Find the one that communicates most with un-mapped tasks...
            candid_task_with_max_com_to_unmapped = []
            max_com = 0
            for CandidateTask in tasks_with_max_com_to_mapped:
                task_weight = 0
                for unmapped_task in unmapped_tasks:
                    if (CandidateTask, unmapped_task) in tg.edges():
                        task_weight += tg.edge[CandidateTask][unmapped_task]["ComWeight"]
                    if (unmapped_task, CandidateTask) in tg.edges():
                        task_weight += tg.edge[unmapped_task][CandidateTask]["ComWeight"]
                if task_weight > max_com:
                    max_com = task_weight
                    candid_task_with_max_com_to_unmapped = [CandidateTask]
                elif task_weight == max_com:
                    candid_task_with_max_com_to_unmapped.append(CandidateTask)
            print ("\t CANDIDATE TASK(S) THAT COMMUNICATE MOST WITH UN_MAPPED: " +
                   str(candid_task_with_max_com_to_unmapped))
            if len(candid_task_with_max_com_to_unmapped) > 1:
                # if multiple tasks with the same com to unmmaped also,
                # choose randomly
                chosen_task = random.choice(candid_task_with_max_com_to_unmapped)
            else:
                chosen_task = candid_task_with_max_com_to_unmapped[0]
        else:
            chosen_task = tasks_with_max_com_to_mapped[0]
        print ("\t CHOSEN TASK: "+str(chosen_task))

        # Find the unallocated tile with lowest communication cost to/from the allocated_tiles_set.
        print ("\t -------------")
        print ("\t STEP 3.2:")
        min_cost = float("inf")
        node_candidates = []
        for unallocated_node in unallocated_nodes:
            cost = 0
            reachable = True
            for mapped_task in mapped_tasks:
                com_weight = 0
                if (chosen_task, mapped_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[chosen_task][mapped_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, unallocated_node,
                                                                                   destination_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
                elif (mapped_task, chosen_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[mapped_task][chosen_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, destination_node,
                                                                                   unallocated_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
            if reachable:
                if cost < min_cost:
                    node_candidates = [unallocated_node]
                    min_cost = cost
                elif cost == min_cost:
                    node_candidates.append(unallocated_node)
            else:
                print ("\t \033[33m* NOTE::\033[0m NODE "+str(unallocated_node)+" CAN NOT REACH...")
                pass
        print ("\t CANDIDATE NODES: "+str(node_candidates)+" MIN COST: "+str(min_cost))

        if len(node_candidates) == 0:
            raise ValueError("COULD NOT FIND A REACHABLE CANDIDATE NODE...")
        elif len(node_candidates) > 1:
            chosen_node = random.choice(node_candidates)
        elif len(node_candidates) == 1:
            chosen_node = node_candidates[0]
        else:
            # this means that the chosen task is not connected to any other task... so its cost is infinity
            chosen_node = random.choice(unallocated_nodes)

        mapped_tasks.append(chosen_task)
        print ("\t ADDED TASK "+str(chosen_task)+" TO MAPPED TASKS LIST")
        unmapped_tasks.remove(chosen_task)
        print ("\t REMOVED TASK "+str(chosen_task)+" FROM UN-MAPPED TASKS LIST")

        allocated_nodes.append(chosen_node)
        print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
        unallocated_nodes.remove(chosen_node)
        print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")

        if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                              non_critical_rg, chosen_task, chosen_node, logging):
            print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
        else:
            raise ValueError("Mapping task on node failed...")

    # Added by Behrad (Still under development)
    # Swapping phase
    print "-----------------------"
    print "PHASE ONE IS DONE... STARTING SWAP PROCESS..."
    for node_id_1 in range(0, len(ag.nodes())-1):
        for node_id_2 in range(node_id_1+1, len(ag.nodes())-1):
            pass
            # Save current mapping in an array
            # Also save the mapping's csomm_cost in a variable
            comm_cost = calculate_com_cost(tg)

            # Swap (node_id_1 , node_id_2)
            swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg, node_id_1, node_id_2, logging)
            # Check and calculate communication cost for all communication flows in the task graph
            #   (which is equal to the total number of edges in the application graph
            #   starting from the communication flow with the largest communication volume first
            comm_cost_new = calculate_com_cost(tg)
            # If comm_cost of current mapping is the same or bigger than the previous mapping, discard mapping
            #   Revert back to previous mapping with better comm_cost
            # Else
            #   Save new mapping as better mapping with less comm_cost
            if comm_cost_new < comm_cost:
                print "\033[32m* NOTE::\033[0m BETTER SOLUTION FOUND WITH COST:", comm_cost_new
            else:
                pass
                # print "Reverting to old solution"
                swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg,
                           node_id_2, node_id_1, logging)
            # Reset the comm_cost after each swapping

    # End of Swapping phase
    print "SWAP PROCESS FINISHED..."
    Scheduler.schedule_all(tg, ag, shm, True, logging)
    return tg, ag
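
# calculate_com_cost() and swap_nodes() used in the swapping phase above are defined
# elsewhere in the code base. A hypothetical stand-in for the cost function, assuming the
# cost of a mapping is the sum over all task-graph edges of ComWeight times the Manhattan
# distance between the nodes the two endpoint tasks are mapped to (the same metric as in
# STEP 3.2 above); the real implementation may differ.
def calculate_com_cost_sketch(tg):
    cost = 0
    for source, destination in tg.edges():
        source_node = tg.node[source]['task'].node
        destination_node = tg.node[destination]['task'].node
        if source_node is None or destination_node is None:
            continue  # skip edges whose endpoints are not mapped yet
        cost += tg.edge[source][destination]["ComWeight"] * \
            AG_Functions.manhattan_distance(source_node, destination_node)
    return cost
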
def NMap(tg, ag, NoCRG, CriticalRG, NonCriticalRG, SHM, logging):
    """
    Performs NMap Mapping algorithm
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param NoCRG: NoC Routing Graph
    :param CriticalRG: NoC Routing Graph for Critical Region
    :param NonCriticalRG: NoC Routing Graph for Non-Critical Region
    :param SHM: System Health Map
    :param logging: logging File
    :return: TG and AG
    """
    print("===========================================")
    print("STARTING N-MAP MAPPING...\n")

    if len(tg.nodes()) > len(ag.nodes()):
        raise ValueError(
            "Number of tasks should be smaller or equal to number of PEs")

    mapped_tasks = []
    unmapped_tasks = copy.deepcopy(tg.nodes())
    allocated_nodes = []
    unallocated_nodes = copy.deepcopy(ag.nodes())

    # remove all broken nodes from unallocated_nodes list
    # (iterate over a copy so that removing items does not skip elements)
    for node in list(unallocated_nodes):
        if not SHM.node[node]['NodeHealth']:
            unallocated_nodes.remove(node)
            print("REMOVED BROKEN NODE " + str(node) +
                  " FROM UN-ALLOCATED NODES")

    print("------------------")
    print("STEP 1:")
    # step 1: find the task with highest weighted communication volume
    tasks_com_dict = TG_Functions.tasks_communication_weight(tg)
    sorted_tasks_com = sorted(tasks_com_dict,
                              key=tasks_com_dict.get,
                              reverse=True)
    print("\t SORTED TASKS BY COMMUNICATION WEIGHT:\n" + "\t " +
          str(sorted_tasks_com))
    print("\t -------------")
    chosen_task = sorted_tasks_com[0]
    print("\t CHOSEN TASK: " + str(chosen_task))
    mapped_tasks.append(chosen_task)
    print("\t ADDED TASK " + str(chosen_task) + "TO MAPPED TASKS LIST")
    unmapped_tasks.remove(chosen_task)
    print("\t REMOVED TASK " + str(chosen_task) + "FROM UN-MAPPED TASKS LIST")

    print("------------------")
    print("STEP 2:")
    node_neighbors_dict = AG_Functions.node_neighbors(ag, SHM)
    sorted_node_neighbors = sorted(node_neighbors_dict,
                                   key=node_neighbors_dict.get,
                                   reverse=True)
    max_neighbors_node = AG_Functions.max_node_neighbors(
        node_neighbors_dict, sorted_node_neighbors)
    print("\t SORTED NODES BY NUMBER OF NEIGHBOURS:\n" + "\t " +
          str(sorted_node_neighbors))
    print("\t -------------")
    print("\t NODES WITH MAX NEIGHBOURS:\t" + str(max_neighbors_node))
    chosen_node = random.choice(max_neighbors_node)

    print("\t CHOSEN NODE: " + str(chosen_node))
    allocated_nodes.append(chosen_node)
    print("\t ADDED NODE " + str(chosen_node) + " TO ALLOCATED NODES LIST")
    unallocated_nodes.remove(chosen_node)
    print("\t REMOVED NODE " + str(chosen_node) +
          " FROM UN-ALLOCATED NODES LIST")
    # Map Chosen Task on Chosen Node...
    if Mapping_Functions.map_task_to_node(tg, ag, SHM, NoCRG, CriticalRG,
                                          NonCriticalRG, chosen_task,
                                          chosen_node, logging):
        print("\t \033[32m* NOTE::\033[0mTASK " + str(chosen_task) +
              " MAPPED ON NODE " + str(chosen_node))
    else:
        raise ValueError("Mapping task on node failed...")

    print("------------------")
    print("STEP 3:")
    while len(unmapped_tasks) > 0:
        print("\033[33m==>\033[0m  UN-MAPPED TASKS #: " +
              str(len(unmapped_tasks)))
        print("\t -------------")
        print("\t STEP 3.1:")
        # find the unmapped task which communicates most with mapped_tasks
        max_com = 0
        unmapped_tasks_com = {}
        tasks_with_max_com_to_mapped = []
        for Task in unmapped_tasks:
            task_weight = 0
            for mapped_task in mapped_tasks:
                if (Task, mapped_task) in tg.edges():
                    task_weight += tg.edge[Task][mapped_task]["ComWeight"]
                if (mapped_task, Task) in tg.edges():
                    task_weight += tg.edge[mapped_task][Task]["ComWeight"]
            unmapped_tasks_com[Task] = task_weight
            if max_com < task_weight:
                max_com = task_weight
                tasks_with_max_com_to_mapped = [Task]
            elif max_com == task_weight:
                tasks_with_max_com_to_mapped.append(Task)
        print("\t MAX COMMUNICATION WITH THE MAPPED TASKS: " + str(max_com))
        print("\t TASK(S) WITH MAX COMMUNICATION TO MAPPED TASKS: " +
              str(tasks_with_max_com_to_mapped))
        if len(tasks_with_max_com_to_mapped) > 1:
            # multiple tasks with same comm to mapped
            # Find the one that communicates most with un-mapped tasks...
            candid_task_with_max_com_to_unmapped = []
            max_com = 0
            for CandidateTask in tasks_with_max_com_to_mapped:
                task_weight = 0
                for unmapped_task in unmapped_tasks:
                    if (CandidateTask, unmapped_task) in tg.edges():
                        task_weight += tg.edge[CandidateTask][unmapped_task][
                            "ComWeight"]
                    if (unmapped_task, CandidateTask) in tg.edges():
                        task_weight += tg.edge[unmapped_task][CandidateTask][
                            "ComWeight"]
                if task_weight > max_com:
                    max_com = task_weight
                    candid_task_with_max_com_to_unmapped = [CandidateTask]
                elif task_weight == max_com:
                    candid_task_with_max_com_to_unmapped.append(CandidateTask)
            print(
                "\t CANDIDATE TASK(S) THAT COMMUNICATE MOST WITH UN_MAPPED: " +
                str(candid_task_with_max_com_to_unmapped))
            if len(candid_task_with_max_com_to_unmapped) > 1:
                # if multiple tasks with the same com to unmmaped also,
                # choose randomly
                chosen_task = random.choice(
                    candid_task_with_max_com_to_unmapped)
            else:
                chosen_task = candid_task_with_max_com_to_unmapped[0]
        else:
            chosen_task = tasks_with_max_com_to_mapped[0]
        print("\t CHOSEN TASK: " + str(chosen_task))

        # Find the unallocated tile with lowest communication cost to/from the allocated_tiles_set.
        print("\t -------------")
        print("\t STEP 3.2:")
        min_cost = float("inf")
        node_candidates = []
        for unallocated_node in unallocated_nodes:
            cost = 0
            reachable = True
            for mapped_task in mapped_tasks:
                com_weight = 0
                if (chosen_task, mapped_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[chosen_task][mapped_task][
                        "ComWeight"]
                    destination_node = tg.node[mapped_task]['Node']
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.IsDestReachableFromSource(
                            NoCRG, unallocated_node, destination_node):
                        manhatan_distance = AG_Functions.manhattan_distance(
                            unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
                elif (mapped_task, chosen_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[mapped_task][chosen_task][
                        "ComWeight"]
                    destination_node = tg.node[mapped_task]['Node']
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.IsDestReachableFromSource(
                            NoCRG, destination_node, unallocated_node):
                        manhatan_distance = AG_Functions.manhattan_distance(
                            unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
            if reachable:
                if cost < min_cost:
                    node_candidates = [unallocated_node]
                    min_cost = cost
                elif cost == min_cost:
                    node_candidates.append(unallocated_node)
            else:
                print("\t \033[33m* NOTE::\033[0m NODE " +
                      str(unallocated_node) + " CAN NOT REACH...")
                pass
        print("\t CANDIDATE NODES: " + str(node_candidates) + " MIN COST: " +
              str(min_cost))

        if len(node_candidates) == 0:
            raise ValueError("COULD NOT FIND A REACHABLE CANDIDATE NODE...")
        elif len(node_candidates) > 1:
            chosen_node = random.choice(node_candidates)
        elif len(node_candidates) == 1:
            chosen_node = node_candidates[0]
        else:
            # this means that the chosen task is not connected to any other task... so its cost is infinity
            chosen_node = random.choice(unallocated_nodes)

        mapped_tasks.append(chosen_task)
        print("\t ADDED TASK " + str(chosen_task) + " TO MAPPED TASKS LIST")
        unmapped_tasks.remove(chosen_task)
        print("\t REMOVED TASK " + str(chosen_task) +
              " FROM UN-MAPPED TASKS LIST")

        allocated_nodes.append(chosen_node)
        print("\t ADDED NODE " + str(chosen_node) + " TO ALLOCATED NODES LIST")
        unallocated_nodes.remove(chosen_node)
        print("\t REMOVED NODE " + str(chosen_node) +
              " FROM UN-ALLOCATED NODES LIST")

        if Mapping_Functions.map_task_to_node(tg, ag, SHM, NoCRG, CriticalRG,
                                              NonCriticalRG, chosen_task,
                                              chosen_node, logging):
            print("\t \033[32m* NOTE::\033[0mTASK " + str(chosen_task) +
                  " MAPPED ON NODE " + str(chosen_node))
        else:
            raise ValueError("Mapping task on node failed...")

    # Added by Behrad (Still under development)
    # Swapping phase
    print "-----------------------"
    print "PHASE ONE IS DONE... STARTING SWAP PROCESS..."
    for node_id_1 in range(0, len(ag.nodes()) - 1):
        for node_id_2 in range(node_id_1 + 1, len(ag.nodes()) - 1):
            pass
            # Save current mapping in an array
            # Also save the mapping's csomm_cost in a variable
            comm_cost = calculate_com_cost(tg)

            # Swap (node_id_1 , node_id_2)
            swap_nodes(tg, ag, SHM, NoCRG, CriticalRG, NonCriticalRG,
                       node_id_1, node_id_2, logging)
            # Check and calculate communication cost for all communication flows in the task graph
            #   (which is equal to the total number of edges in the application graph
            #   starting from the communication flow with the largest communication volume first
            comm_cost_new = calculate_com_cost(tg)
            # If comm_cost of current mapping is the same or bigger than the previous mapping, discard mapping
            #   Revert back to previous mapping with better comm_cost
            # Else
            #   Save new mapping as better mapping with less comm_cost
            if comm_cost_new < comm_cost:
                print "\033[32m* NOTE::\033[0m BETTER SOLUTION FOUND WITH COST:", comm_cost_new
            else:
                pass
                # print "Reverting to old solution"
                swap_nodes(tg, ag, SHM, NoCRG, CriticalRG, NonCriticalRG,
                           node_id_2, node_id_1, logging)
            # Reset the comm_cost after each swapping

    # End of Swapping phase
    print "SWAP PROCESS FINISHED..."
    Scheduler.schedule_all(tg, ag, SHM, True, False, logging)
    return tg, ag
def draw_task_graph(tg, ttg=None):
    print("DRAWING TASK GRAPH...")
    plt.figure()
    node_colors = []
    for Node in tg.nodes():
        if tg.node[Node]['task'].criticality == 'H':
            node_colors.append('#FF878B')
        elif tg.node[Node]['task'].criticality == 'GH':
            node_colors.append('#FFC29C')
        elif tg.node[Node]['task'].criticality == 'GNH':
            node_colors.append('#928AFF')
        else:
            node_colors.append('#A0CBE2')
    edge_colors = []
    for Edge in tg.edges():
        if tg.edges[Edge]['Criticality'] == 'H':
            edge_colors.append('red')
        else:
            edge_colors.append('black')
    tg_edge_list = []
    tg_edge_weight = []
    for Edge in tg.edges():
        tg_edge_list.append(Edge)
        tg_edge_weight.append(tg.edges[Edge]['ComWeight'])

    if Config.tg.type == "RandomIndependent":
        pos = networkx.shell_layout(tg)
    else:
        width = 1000
        height = 10000
        pos = {}
        max_distance = TG_Functions.calculate_max_distance(tg)
        for current_distance in range(0, max_distance + 1):
            num_tasks_with_same_distance = 0
            for node in tg.nodes():
                if tg.node[node]['task'].type == 'App':
                    distance = tg.node[node]['task'].distance
                    if current_distance == distance:
                        num_tasks_with_same_distance += 1
            counter = 0
            for node in tg.nodes():
                if tg.node[node]['task'].type == 'App':
                    distance = tg.node[node]['task'].distance
                    if current_distance == distance:
                        counter += 1
                        pos[node] = (counter *
                                     (width / num_tasks_with_same_distance) +
                                     width, (max_distance - current_distance) *
                                     height / max_distance)
        if ttg is not None:
            temp_pos = networkx.shell_layout(ttg)
            for test_node in tg.nodes():
                if tg.node[test_node]['task'].type == 'Test':
                    pos[test_node] = [
                        temp_pos[test_node][0] * (width / 2) + width / 2,
                        temp_pos[test_node][1] * (height / 2) + height / 2
                    ]

    networkx.draw_networkx_nodes(tg,
                                 pos,
                                 node_color=node_colors,
                                 node_size=50)
    networkx.draw_networkx_edges(tg,
                                 pos,
                                 edge_color=tg_edge_weight,
                                 edge_cmap=plt.cm.Reds,
                                 width=3,
                                 arrows=False)
    networkx.draw_networkx_edges(tg, pos, arrows=False, width=0.5)
    networkx.draw_networkx_labels(tg, pos, font_size=4)
    # networkx.draw_networkx_edge_labels(TG, pos, edge_labels=dict(zip(tg_edge_list, tg_edge_weight)),
    #                                    font_size=10, label_pos=0.7)
    if ttg is None:
        plt.savefig("GraphDrawings/TG.png", dpi=200, bbox_inches='tight')
    else:
        plt.savefig("GraphDrawings/TG_And_TTG.png",
                    dpi=200,
                    bbox_inches='tight')
    plt.clf()
    print(
        "\033[35m* VIZ::\033[0mTASK GRAPH DRAWINGS CREATED AT: GraphDrawings/TG.png"
    )
    return None
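
# draw_task_graph() relies on TG_Functions.calculate_max_distance() and on the per-task
# "distance" attribute assigned in generate_tg_from_dot(). A hypothetical sketch of how
# such distances can be derived (length of the longest dependency chain from any source
# task), shown only to document the layout logic; the project's own assign_distance()
# may differ.
import networkx


def assign_distance_sketch(tg):
    distance = {}
    for task in networkx.topological_sort(tg):
        preds = list(tg.predecessors(task))
        distance[task] = 0 if not preds else 1 + max(distance[p] for p in preds)
    max_distance = max(distance.values()) if distance else 0
    return distance, max_distance
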
def initialize_system(logging):
    """
    Generates the Task Graph, Architecture Graph, System Health Monitoring Unit, NoC routing graph(s) and
    Test Task Graphs, performs the mapping and scheduling, and returns the initial system to the user
    :param logging: logging file
    :return:  tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
    """
    tg = copy.deepcopy(TG_Functions.generate_tg())
    if Config.DebugInfo:
        Task_Graph_Reports.report_task_graph(tg, logging)
    Task_Graph_Reports.draw_task_graph(tg)
    if Config.TestMode:
        TG_Test.check_acyclic(tg, logging)
    ####################################################################
    ag = copy.deepcopy(AG_Functions.generate_ag(logging))
    AG_Functions.update_ag_regions(ag)
    AG_Functions.random_darkness(ag)
    if Config.EnablePartitioning:
        AG_Functions.setup_network_partitioning(ag)
    if Config.FindOptimumAG:
        Arch_Graph_Reports.draw_ag(ag, "AG_Full")
    else:
        Arch_Graph_Reports.draw_ag(ag, "AG")
    ####################################################################
    Config.setup_turns_health()

    shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
    shmu.setup_noc_shm(ag, Config.TurnsHealth, True)
    # Here we are injecting the initial faults of the system: we assume this fault
    # information is obtained by post-manufacturing system diagnosis
    if Config.FindOptimumAG:
        vl_opt.optimize_ag_vertical_links(ag, shmu, logging)
        vl_opt_functions.cleanup_ag(ag, shmu)
        Arch_Graph_Reports.draw_ag(ag, "AG_VLOpt")
    SHMU_Functions.apply_initial_faults(shmu)
    if Config.viz.shm:
        SHMU_Reports.draw_shm(shmu.SHM)
        SHMU_Reports.draw_temp_distribution(shmu.SHM)
    # SHM_Reports.report_noc_shm()
    ####################################################################
    routing_graph_start_time = time.time()
    if Config.SetRoutingFromFile:
        noc_rg = copy.deepcopy(Routing.gen_noc_route_graph_from_file(ag, shmu, Config.RoutingFilePath,
                                                                     Config.DebugInfo, Config.DebugDetails))
    else:
        noc_rg = copy.deepcopy(Routing.generate_noc_route_graph(ag, shmu, Config.UsedTurnModel,
                                                                Config.DebugInfo, Config.DebugDetails))
    Routing_Functions.check_deadlock_freeness(noc_rg)
    print ("\033[92mTIME::\033[0m ROUTING GRAPH GENERATION TOOK: " +
           str(round(time.time()-routing_graph_start_time))+" SECONDS")
    # this is for double checking...
    if Config.FindOptimumAG:
        Calculate_Reachability.reachability_metric(ag, noc_rg, True)
    # Some visualization...
    if Config.viz.rg:
        RoutingGraph_Reports.draw_rg(noc_rg)
    ####################################################################
    # in case of partitioning, we have to route based on different Route-graphs
    if Config.EnablePartitioning:
        critical_rg, noncritical_rg = Calculate_Reachability.calculate_reachability_with_regions(ag, shmu)
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    else:
        critical_rg, noncritical_rg = None, None
        Calculate_Reachability.calculate_reachability(ag, noc_rg)
        Calculate_Reachability.optimize_reachability_rectangles(ag, Config.NumberOfRects)
        # ReachabilityReports.report_reachability(ag)
        ReachabilityReports.report_reachability_in_file(ag, "ReachAbilityNodeReport")
        ReachabilityReports.report_gsnoc_friendly_reachability_in_file(ag)
    ####################################################################
    if Config.read_mapping_from_file:
        Mapping_Functions.read_mapping_from_file(tg, ag, shmu.SHM, noc_rg, critical_rg, noncritical_rg,
                                                 Config.mapping_file_path, logging)
        Scheduler.schedule_all(tg, ag, shmu.SHM, False, logging)
    else:
        best_tg, best_ag = Mapping.mapping(tg, ag, noc_rg, critical_rg, noncritical_rg, shmu.SHM, logging)
        if best_ag is not None and best_tg is not None:
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            del best_tg, best_ag
            # SHM.add_current_mapping_to_mpm(tg)
            Mapping_Functions.write_mapping_to_file(ag, "mapping_report")
    if Config.viz.mapping_distribution:
        Mapping_Reports.draw_mapping_distribution(ag, shmu)
    if Config.viz.mapping:
        Mapping_Reports.draw_mapping(tg, ag, shmu.SHM, "Mapping_post_opt")
    if Config.viz.scheduling:
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingTG")
    ####################################################################
    # PMC-Graph
    # at this point we assume that the system health map knows about the initial faults from
    # the diagnosis process
    if Config.GeneratePMCG:
        pmcg_start_time = time.time()
        if Config.OneStepDiagnosable:
            pmcg = TestSchedulingUnit.gen_one_step_diagnosable_pmcg(ag, shmu.SHM)
        else:
            pmcg = TestSchedulingUnit.gen_sequentially_diagnosable_pmcg(ag, shmu.SHM)
        test_tg = TestSchedulingUnit.generate_test_tg_from_pmcg(pmcg)
        print ("\033[92mTIME::\033[0m PMCG AND TTG GENERATION TOOK: " +
               str(round(time.time()-pmcg_start_time)) + " SECONDS")
        if Config.viz.pmcg:
            TestSchedulingUnit.draw_pmcg(pmcg)
        if Config.viz.ttg:
            TestSchedulingUnit.draw_ttg(test_tg)
        TestSchedulingUnit.insert_test_tasks_in_tg(pmcg, tg)
        Task_Graph_Reports.draw_task_graph(tg, ttg=test_tg)
        TestSchedulingUnit.map_test_tasks(tg, ag, shmu.SHM, noc_rg, logging)
        Scheduler.schedule_test_in_tg(tg, ag, shmu.SHM, False, logging)
        Scheduling_Reports.report_mapped_tasks(ag, logging)
        # TestSchedulingUnit.remove_test_tasks_from_tg(test_tg, tg)
        # Task_Graph_Reports.draw_task_graph(tg, TTG=test_tg)
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingWithTTG")
    else:
        pmcg = None
    Arch_Graph_Reports.gen_latex_ag(ag, shmu.SHM)
    print ("===========================================")
    print ("SYSTEM IS UP...")

    TrafficTableGenerator.generate_noxim_traffic_table(ag, tg)
    if Config.viz.mapping_frames:
        Mapping_Animation.generate_frames(ag, shmu.SHM)
    return tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
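
# initialize_system() above is driven almost entirely by flags on the Config module.
# A hypothetical minimal stand-in listing only the switches read by this version
# (illustrative defaults; the real Config also provides TurnsHealth, UsedTurnModel,
# setup_turns_health() and more):
from types import SimpleNamespace

config_sketch = SimpleNamespace(
    DebugInfo=False, DebugDetails=False, TestMode=False,
    EnablePartitioning=False, FindOptimumAG=False,
    SetRoutingFromFile=False, RoutingFilePath="",
    NumberOfRects=1,
    read_mapping_from_file=False, mapping_file_path="",
    GeneratePMCG=False, OneStepDiagnosable=True,
    viz=SimpleNamespace(shm=False, rg=False, mapping_distribution=False,
                        mapping=False, scheduling=False, pmcg=False,
                        ttg=False, mapping_frames=False),
)
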
def initialize_system(logging):
    """
    Generates the Task Graph, Architecture Graph, System Health Monitoring Unit, NoC routing graph(s) and
    Test Task Graphs, performs the mapping and scheduling, and returns the initial system to the user
    :param logging: logging file
    :return:  tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
    """
    tg = copy.deepcopy(TG_Functions.generate_tg())
    if Config.DebugInfo:
        Task_Graph_Reports.report_task_graph(tg, logging)
    Task_Graph_Reports.draw_task_graph(tg)
    if Config.TestMode:
        TG_Test.CheckAcyclic(tg, logging)
    ####################################################################
    ag = copy.deepcopy(AG_Functions.generate_ag(logging))
    AG_Functions.update_ag_regions(ag)
    AG_Functions.random_darkness(ag)
    if Config.EnablePartitioning:
        AG_Functions.setup_network_partitioning(ag)
    if Config.TestMode:
        AG_Test.ag_test()
    if Config.FindOptimumAG:
        Arch_Graph_Reports.draw_ag(ag, "AG_Full")
    else:
        Arch_Graph_Reports.draw_ag(ag, "AG")
    ####################################################################
    Config.setup_turns_health()
    if Config.TestMode:
        SHMU_Test.test_shmu(ag)
    shmu = SystemHealthMonitoringUnit.SystemHealthMonitoringUnit()
    shmu.setup_noc_shm(ag, Config.TurnsHealth)
    # Here we are injecting the initial faults of the system: we assume this fault
    # information is obtained by post-manufacturing system diagnosis
    if Config.FindOptimumAG:
        Optimize_3D_AG.optimize_ag_vertical_links(ag, shmu, logging)
        Optimize_3D_AG.cleanup_ag(ag, shmu)
        Arch_Graph_Reports.draw_ag(ag, "AG_VLOpt")
    SHMU_Functions.ApplyInitialFaults(shmu)
    if Config.SHM_Drawing:
        SHMU_Reports.DrawSHM(shmu.SHM)
        SHMU_Reports.DrawTempDistribution(shmu.SHM)
    # SHM_Reports.Report_NoC_SystemHealthMap()
    ####################################################################
    routing_graph_start_time = time.time()
    if Config.SetRoutingFromFile:
        noc_rg = copy.deepcopy(
            Routing.GenerateNoCRouteGraphFromFile(ag, shmu,
                                                  Config.RoutingFilePath,
                                                  Config.DebugInfo,
                                                  Config.DebugDetails))
    else:
        noc_rg = copy.deepcopy(
            Routing.GenerateNoCRouteGraph(ag, shmu, Config.UsedTurnModel,
                                          Config.DebugInfo,
                                          Config.DebugDetails))
    print("\033[92mTIME::\033[0m ROUTING GRAPH GENERATION TOOK: " +
          str(round(time.time() - routing_graph_start_time)) + " SECONDS")
    # this is for double checking...
    if Config.FindOptimumAG:
        Calculate_Reachability.ReachabilityMetric(ag, noc_rg, True)
    # Some visualization...
    if Config.RG_Draw:
        RoutingGraph_Reports.draw_rg(noc_rg)
    ####################################################################
    # in case of partitioning, we have to route based on different Route-graphs
    if Config.EnablePartitioning:
        critical_rg, noncritical_rg = Calculate_Reachability.calculate_reachability_with_regions(
            ag, shmu)
        ReachabilityReports.ReportGSNoCFriendlyReachabilityInFile(ag)
    else:
        if Config.TestMode:
            Reachability_Test.ReachabilityTest()
        critical_rg, noncritical_rg = None, None
        Calculate_Reachability.calculate_reachability(ag, noc_rg)
        Calculate_Reachability.OptimizeReachabilityRectangles(
            ag, Config.NumberOfRects)
        # ReachabilityReports.ReportReachability(ag)
        ReachabilityReports.ReportReachabilityInFile(ag,
                                                     "ReachAbilityNodeReport")
        ReachabilityReports.ReportGSNoCFriendlyReachabilityInFile(ag)
    ####################################################################
    if Config.read_mapping_from_file:
        Mapping_Functions.read_mapping_from_file(tg, ag, shmu.SHM, noc_rg,
                                                 critical_rg, noncritical_rg,
                                                 Config.mapping_file_path,
                                                 logging)
        Scheduler.schedule_all(tg, ag, shmu.SHM, False, False, logging)
    else:
        best_tg, best_ag = Mapping.mapping(tg, ag, noc_rg, critical_rg,
                                           noncritical_rg, shmu.SHM, logging)
        if best_ag is not None and best_tg is not None:
            tg = copy.deepcopy(best_tg)
            ag = copy.deepcopy(best_ag)
            del best_tg, best_ag
            # SHM.AddCurrentMappingToMPM(tg)
            Mapping_Functions.write_mapping_to_file(ag, "mapping_report")
    if Config.Mapping_Dstr_Drawing:
        Mapping_Reports.draw_mapping_distribution(ag, shmu)
    if Config.Mapping_Drawing:
        Mapping_Reports.draw_mapping(tg, ag, shmu.SHM, "Mapping_post_opt")
    if Config.Scheduling_Drawing:
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingTG")
    ####################################################################
    # PMC-Graph
    # at this point we assume that the system health map knows about the initial faults from
    # the diagnosis process
    if Config.GeneratePMCG:
        pmcg_start_time = time.time()
        if Config.OneStepDiagnosable:
            pmcg = TestSchedulingUnit.GenerateOneStepDiagnosablePMCG(
                ag, shmu.SHM)
        else:
            pmcg = TestSchedulingUnit.GenerateSequentiallyDiagnosablePMCG(
                ag, shmu.SHM)
        test_tg = TestSchedulingUnit.GenerateTestTGFromPMCG(pmcg)
        print("\033[92mTIME::\033[0m PMCG AND TTG GENERATION TOOK: " +
              str(round(time.time() - pmcg_start_time)) + " SECONDS")
        if Config.PMCG_Drawing:
            TestSchedulingUnit.DrawPMCG(pmcg)
        if Config.TTG_Drawing:
            TestSchedulingUnit.DrawTTG(test_tg)
        TestSchedulingUnit.InsertTestTasksInTG(pmcg, tg)
        Task_Graph_Reports.draw_task_graph(tg, ttg=test_tg)
        TestSchedulingUnit.MapTestTasks(tg, ag, shmu.SHM, noc_rg, logging)
        Scheduler.schedule_test_in_tg(tg, ag, shmu.SHM, False, logging)
        Scheduling_Reports.report_mapped_tasks(ag, logging)
        # TestSchedulingUnit.RemoveTestTasksFromTG(test_tg, tg)
        # Task_Graph_Reports.draw_task_graph(tg, TTG=test_tg)
        Scheduling_Reports.generate_gantt_charts(tg, ag, "SchedulingWithTTG")
    else:
        pmcg = None

    print("===========================================")
    print("SYSTEM IS UP...")

    TrafficTableGenerator.generate_noxim_traffic_table(ag, tg)
    TrafficTableGenerator.generate_gsnoc_traffic_table(ag, tg)
    if Config.GenMappingFrames:
        Mapping_Animation.generate_frames(tg, ag, shmu.SHM)
    return tg, ag, shmu, noc_rg, critical_rg, noncritical_rg, pmcg
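
# Minimal usage sketch: the functions above only require an object exposing .info(),
# so either the standard logging module or a configured Logger instance can be passed.
# (initialize_system and its imports are assumed to be available in the package.)
import logging

logging.basicConfig(level=logging.INFO)
initial_system = initialize_system(logging)  # tg/ag/shmu, routing graph(s) and pmcg
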