Example No. 1
def case1_synthesis(formula, ts_file):
    _, dfa_0, dfa_inf, bdd = twtl.translate(formula, kind='both', norm=True)

    logging.debug('alphabet: {}'.format(dfa_inf.props))

    for u, v, d in dfa_inf.g.edges_iter(data=True):
        logging.debug('({}, {}): {}'.format(u, v, d))

    dfa_inf.visualize(draw='matplotlib')
    plt.show()

    logging.debug('\nEnd of translate\n\n')

    logging.info('The bound of formula "%s" is (%d, %d)!', formula, *bdd)
    logging.info('Translated formula "%s" to normal DFA of size (%d, %d)!',
                 formula, *dfa_0.size())
    logging.info('Translated formula "%s" to infinity DFA of size (%d, %d)!',
                 formula, *dfa_inf.size())

    logging.debug('\n\nStart policy computation\n')

    ts = Ts(directed=True, multi=False)
    ts.read_from_file(ts_file)
    ets = expand_duration_ts(ts)

    for name, dfa in [('normal', dfa_0), ('infinity', dfa_inf)]:
        logging.info('Constructing product automaton with %s DFA!', name)
        pa = ts_times_fsa(ets, dfa)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())

        if name == 'infinity':
            for u in pa.g.nodes_iter():
                logging.debug('{} -> {}'.format(u, pa.g.neighbors(u)))

            pa.visualize(draw='matplotlib')
            plt.show()

        # compute optimal path in PA and then project onto the TS
        policy, output, tau = compute_control_policy(pa, dfa, dfa.kind)
        logging.info('Max deadline: %s', tau)
        if policy is not None:
            logging.info('Generated output word is: %s',
                         [tuple(o) for o in output])

            policy = [x for x in policy if x not in ets.state_map]
            out = StringIO.StringIO()
            for u, v in zip(policy[:-1], policy[1:]):
                print >> out, u, '->', ts.g[u][v][0]['duration'], '->',
            print >> out, policy[-1],
            logging.info('Generated control policy is: %s', out.getvalue())
            out.close()

            logging.info('Relaxation is: %s',
                         twtl.temporal_relaxation(output, formula=formula))
        else:
            logging.info('No control policy found!')
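
A minimal invocation sketch for this single-agent routine. It assumes the module-level imports the function body relies on (logging, matplotlib.pyplot as plt, StringIO, twtl, and the pyTWTL helpers Ts, expand_duration_ts, ts_times_fsa, compute_control_policy) are already in scope; the TWTL formula and TS file name below are illustrative placeholders, not values taken from the source.

import logging
logging.basicConfig(level=logging.DEBUG)

# Hypothetical inputs: a TWTL formula and a transition-system file.
phi = '[H^2 A]^[0, 6] * [H^3 B]^[2, 9]'
case1_synthesis(phi, 'ts_case1.txt')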
Example No. 2
def case1_synthesis(formulas, ts_files, alpha, radius, time_wp, lab_testing,
                    always_active):
    startFull = timeit.default_timer()
    startOff = timeit.default_timer()
    dfa_dict = {}
    for ind, f in enumerate(formulas):
        _, dfa_inf, bdd = twtl.translate(f, kind=DFAType.Infinity, norm=True)

        logging.debug('\nEnd of translate\n\n')
        logging.info('The bound of formula "%s" is (%d, %d)!', f, *bdd)
        logging.info(
            'Translated formula "%s" to infinity DFA of size (%d, %d)!', f,
            *dfa_inf.size())
        dfa_dict[ind + 1] = copy.deepcopy(
            dfa_inf)  # The key is set to the agent number

    logging.debug('\n\nStart policy computation\n')

    ts_dict = {}
    ets_dict = {}
    for ind, ts_f in enumerate(ts_files):
        ts_dict[ind + 1] = Ts(directed=True, multi=False)
        ts_dict[ind + 1].read_from_file(ts_f)
        ets_dict[ind + 1] = expand_duration_ts(ts_dict[ind + 1])
    for ind in ts_dict:
        print 'Size of TS:', ets_dict[ind].size()
    # Get the nominal PA for each agent
    pa_nom_dict = {}
    norm_factor = {}
    startPA = timeit.default_timer()
    for key in dfa_dict:
        logging.info('Constructing product automaton with infinity DFA!')
        pa = ts_times_fsa(ets_dict[key], dfa_dict[key])
        # Give length and weight attributes to all edges in pa
        nom_weight_dict = {}
        edges_all = nx.get_edge_attributes(ts_dict[key].g, 'edge_weight')
        max_edge = max(edges_all, key=edges_all.get)
        norm_factor[key] = edges_all[max_edge]
        for pa_edge in pa.g.edges():
            edge = (pa_edge[0][0], pa_edge[1][0], 0)
            nom_weight_dict[pa_edge] = edges_all[edge] / norm_factor[key]
        nx.set_edge_attributes(pa.g, 'edge_weight', nom_weight_dict)
        nx.set_edge_attributes(pa.g, 'weight', 1)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())
        # Make a copy of the nominal PA to change
        pa_nom_dict[key] = copy.deepcopy(pa)
    stopPA = timeit.default_timer()
    print 'Run time (s) to construct all nominal PAs: ', stopPA - startPA

    for key in pa_nom_dict:
        print 'Size of PA:', pa_nom_dict[key].size()

    # Use alpha to combine the time cost and edge_weight into a single multi-objective cost,
    # stored as a new edge attribute over which the "shortest path" is computed
    for key in pa_nom_dict:
        weight_dict = {}
        time_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'weight')
        edge_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'edge_weight')
        for pa_edge in pa_nom_dict[key].g.edges():
            weight_dict[pa_edge] = alpha * time_weight[pa_edge] + (
                1 - alpha) * edge_weight[pa_edge]
        # Add the multi-objective cost as a new edge attribute of the PA
        nx.set_edge_attributes(pa_nom_dict[key].g, 'new_weight', weight_dict)

    # Compute the energy (multi-objective cost function) for each agent's PA at every node
    startEnergy = timeit.default_timer()
    for key in pa_nom_dict:
        compute_energy(pa_nom_dict[key])
    stopEnergy = timeit.default_timer()
    print 'Run time (s) to compute the multi-objective cost (energy) function for all PAs: ', stopEnergy - startEnergy

    # Compute optimal path in PA and project onto the TS
    ts_policy_dict_nom = {}
    pa_policy_dict_nom = {}
    tau_dict_nom = {}
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key], tau_dict_nom[key] = \
                    compute_control_policy(pa_nom_dict[key], dfa_dict[key], dfa_dict[key].kind)
    # Perform initial check on nominal control policies
    for key in ts_policy_dict_nom:
        if ts_policy_dict_nom[key] is None:
            logging.info('No control policy found!')

    # set empty control policies that will be iteratively updated
    ts_control_policy_dict = {}
    pa_control_policy_dict = {}

    # Initialize policy variables
    for key in ts_policy_dict_nom:
        ts_control_policy_dict[key] = []
        pa_control_policy_dict[key] = []

    # Concatenate nominal policies for searching
    policy_match, key_list, policy_match_index = update_policy_match(
        ts_policy_dict_nom)

    # Initialize vars, give nominal policies
    iter_step = 0
    running = True
    traj_length = 0
    ts_policy = copy.deepcopy(ts_policy_dict_nom)
    pa_policy = copy.deepcopy(pa_policy_dict_nom)
    tau_dict = tau_dict_nom
    # Choose parameter for n-horizon local trajectory, must be at least 2
    num_hops = 2
    # Get agent priority based on lowest energy
    prev_states = {}
    for key in ts_policy_dict_nom:
        prev_states[key] = pa_policy_dict_nom[key][0]
    priority = get_priority(pa_nom_dict, pa_policy_dict_nom, prev_states,
                            key_list)

    # Create Agent energy dictionary for post-processing
    # Create Termination indicator to assign terminated agents lowest priority
    F_indicator = {}
    agent_energy_dict = {}
    for key in ts_policy_dict_nom:
        agent_energy_dict[key] = []
        F_indicator[key] = False

    # Print time statistics
    stopOff = timeit.default_timer()
    print 'Offline run time for all initial setup: ', stopOff - startOff
    startOnline = timeit.default_timer()

    # Execute takeoff command for all crazyflies in lab testing
    if lab_testing:
        startTakeoff = timeit.default_timer()
        os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
        os.system(
            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_takeoff.py"
        )  # make sure executable
        os.chdir("/home/ryan/Desktop/pyTWTL/src")
        stopTakeoff = timeit.default_timer()
        print 'Takeoff time, should be ~2.7sec: ', stopTakeoff - startTakeoff

    ############################################################################

    # Iterate through all policies sequentially
    while running:
        while policy_match:
            for p_ind, p_val in enumerate(priority):
                if p_ind < 1:
                    weighted_nodes = {}
                    for i in range(num_hops):
                        weighted_nodes[i] = []
                else:
                    # Get local neighborhood (n-hop) of nodes to search for a conflict
                    for k_c, key_c in enumerate(key_list):
                        if p_val == key_c:
                            node = policy_match[0][k_c]
                            break
                    # Receive path information from 2*H neighborhood
                    local_set = get_neighborhood(node, ts_dict[p_val],
                                                 2 * num_hops)
                    # Get constraints for each transition
                    weighted_nodes = {}
                    for pty in priority[0:p_ind]:
                        for key in key_list:
                            if pty == key:
                                ts_length = len(ts_policy[key])
                                if ts_length >= num_hops:
                                    for i in range(num_hops):
                                        if ts_policy[key][i] in local_set:
                                            try:
                                                weighted_nodes[i].append(ts_policy[key][i])
                                                # Add downwash nodes to constraint, for drone experiments
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key], ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(downwash_node)
                                            except KeyError:
                                                weighted_nodes[i] = [ts_policy[key][i]]
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key], ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(downwash_node)
                                else:
                                    for i in range(ts_length):
                                        if ts_policy[key][i] in local_set:
                                            try:
                                                weighted_nodes[i].append(ts_policy[key][i])
                                                # Add downwash nodes to constraint, for drone experiments
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key], ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(downwash_node)
                                            except KeyError:
                                                weighted_nodes[i] = [ts_policy[key][i]]
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key], ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(downwash_node)
                                for i in range(num_hops):
                                    try:
                                        weighted_nodes[i]
                                    except KeyError:
                                        weighted_nodes[i] = []
                    # Update constraint set with intersecting transitions
                    if traj_length >= 1:
                        for p_ind2, p_val2 in enumerate(priority[0:p_ind]):
                            for k, key in enumerate(key_list):
                                if p_val2 == key:
                                    # initialize previous state
                                    comp_prev_state = ts_control_policy_dict[key][-1]
                                    cur_prev_state = ts_control_policy_dict[key_c][-1]
                                    cur_ts_policy_length = len(ts_policy[key_c])
                                    ts_length = len(ts_policy[key])
                                    if ts_length >= num_hops:
                                        for i in range(num_hops):
                                            comp_next_state = ts_policy[key][i]
                                            if i < cur_ts_policy_length:
                                                cur_next_state = ts_policy[key_c][i]
                                                if comp_next_state in local_set:
                                                    # Check if the trajectories cross during transition (or use same transition)
                                                    cross_weight = check_intersectDP(
                                                        ets_dict[key], cur_prev_state, cur_next_state,
                                                        comp_prev_state, comp_next_state, radius, time_wp)
                                                    if cross_weight:
                                                        for cross_node in cross_weight:
                                                            if cross_node not in weighted_nodes[i]:
                                                                weighted_nodes[i].append(cross_node)
                                                    # Check if using same transition in updated case
                                                    if comp_next_state == cur_prev_state:
                                                        if comp_prev_state not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(comp_prev_state)
                                                    # Set previous state for next iteration
                                                    comp_prev_state = ts_policy[key][i]
                                                    cur_prev_state = ts_policy[key_c][i]
                                            else:
                                                break
                                    else:
                                        for i in range(ts_length):
                                            comp_next_state = ts_policy[key][i]
                                            if i < cur_ts_policy_length:
                                                cur_next_state = ts_policy[key_c][i]
                                                if comp_next_state in local_set:
                                                    # Check if the trajectories cross during transition (or use same transition)
                                                    cross_weight = check_intersectDP(
                                                        ets_dict[key], cur_prev_state, cur_next_state,
                                                        comp_prev_state, comp_next_state, radius, time_wp)
                                                    if cross_weight:
                                                        for cross_node in cross_weight:
                                                            if cross_node not in weighted_nodes[i]:
                                                                weighted_nodes[i].append(cross_node)
                                                    # Check if using same transition in updated case
                                                    if comp_next_state == cur_prev_state:
                                                        if comp_prev_state not in weighted_nodes[i]:
                                                            weighted_nodes[i].append(comp_prev_state)
                                                    # Set previous state for next iteration
                                                    comp_prev_state = ts_policy[key][i]
                                                    cur_prev_state = ts_policy[key_c][i]
                                            else:
                                                break
                # Generate receding horizon path and check for termination
                if traj_length >= 1:
                    init_loc = pa_control_policy_dict[p_val][-1]
                    ts_temp = ts_policy[p_val]
                    pa_temp = pa_policy[p_val]
                    # Compute receding horizon shortest path
                    ts_policy[p_val], pa_policy[p_val], D_flag = local_horizonDP(
                        pa_nom_dict[p_val], weighted_nodes, num_hops, init_loc)
                    # Check for deadlock, and if so resolve deadlock
                    if p_ind > 0:
                        if D_flag == True:
                            # Agent in deadlock is to remain stationary
                            ts_policy[p_val] = [
                                ts_control_policy_dict[p_val][-1],
                                ts_control_policy_dict[p_val][-1]
                            ]
                            pa_policy[p_val] = [
                                pa_control_policy_dict[p_val][-1],
                                pa_control_policy_dict[p_val][-1]
                            ]
                            # Assign deadlock node
                            x_d = ts_control_policy_dict[p_val][-1]
                            x_d_val = p_val
                            x_d_flag = True
                            hp_set = priority[0:p_ind]
                            while x_d_flag == True and hp_set:
                                x_d_flag = False
                                for hp in hp_set:
                                    if ts_policy[hp][0] == x_d:
                                        if hp == priority[0]:
                                            # Make all agents stationary and perform Dijkstra's shortest path
                                            for j in priority[1:p_ind]:
                                                ts_policy[j] = [ts_control_policy_dict[j][-1],
                                                                ts_control_policy_dict[j][-1]]
                                                pa_policy[j] = [pa_control_policy_dict[j][-1],
                                                                pa_control_policy_dict[j][-1]]
                                            occupied_nodes = [ts_control_policy_dict[x_d_val][-1]]
                                            for j in priority[0:p_ind]:
                                                occupied_nodes.append(ts_control_policy_dict[j][-1])
                                            init_loc = pa_control_policy_dict[x_d_val][-1]
                                            ts_policy[x_d_val], pa_policy[x_d_val] = deadlock_path(
                                                pa_nom_dict[x_d_val], occupied_nodes, init_loc)
                                            for j in priority[1:p_ind]:
                                                for ind, node in enumerate(ts_policy[x_d_val][:-1]):
                                                    if ts_policy[j][0] == node:
                                                        ts_policy[j] = [ts_policy[x_d_val][ind + 1],
                                                                        ts_policy[x_d_val][ind + 1]]
                                                        # Find the actual state on agent's PA that corresponds to this node
                                                        neighbors = pa_nom_dict[j].g.neighbors(pa_policy[j][0])
                                                        for node in neighbors:
                                                            if node[0] == ts_policy[j][0]:
                                                                pa_policy[j] = [node, node]
                                            break
                                        else:
                                            ts_policy[hp] = [
                                                ts_control_policy_dict[hp][-1],
                                                ts_control_policy_dict[hp][-1]
                                            ]
                                            pa_policy[hp] = [
                                                pa_control_policy_dict[hp][-1],
                                                pa_control_policy_dict[hp][-1]
                                            ]
                                            x_d = ts_control_policy_dict[hp][-1]
                                            x_d_val = hp
                                            x_d_flag = True
                                            hp_set.remove(hp)
                                            break
                    # Increase iteration step (for statistics at end)
                    iter_step += 1

            # Update policy match
            policy_match, key_list, policy_match_index = update_policy_match(
                ts_policy)

            # Keep finished agents active: if a finished agent occupies a node needed by a running
            # (or earlier-finished) agent, move it to a free neighbor; agents are handled in ID order
            if always_active == True:
                finished_ID = []
                for key in F_indicator:
                    if F_indicator[key] == True:
                        finished_ID.append(key)
                        current_node = ts_control_policy_dict[key][-1]
                        hp_nodes_avoid = []
                        for k in key_list:
                            hp_nodes_avoid.append(ts_policy[k][0])
                            hp_nodes_avoid.append(
                                ts_control_policy_dict[k][-1])
                        for fID in finished_ID[:-1]:
                            hp_nodes_avoid.append(
                                ts_control_policy_dict[fID][-1])
                        if current_node in hp_nodes_avoid:
                            local_set = ts_dict[key].g.neighbors(current_node)
                            for node in local_set:
                                if node not in hp_nodes_avoid:
                                    ts_control_policy_dict[key].append(node)
                                    break
                        else:
                            ts_control_policy_dict[key].append(current_node)

            # Append trajectories
            for key in ts_policy:
                agent_energy_dict[key].append(
                    pa_nom_dict[key].g.node[pa_policy[key][0]]['energy'])
                ts_control_policy_dict[key].append(ts_policy[key].pop(0))
                pa_policy_temp = list(pa_policy[key])
                pa_control_policy_dict[key].append(pa_policy_temp.pop(0))
                pa_policy[key] = tuple(pa_policy_temp)
            ts_write = policy_match.pop(0)
            traj_length += 1

            # publish waypoint to a csv file
            write_to_csv_iter(ts_dict, ts_write, key_list, time_wp)
            # Execute waypoint in crazyswarm lab testing
            if lab_testing:
                startWaypoint = timeit.default_timer()
                os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
                os.system(
                    "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_waypoint.py"
                )  # make sure executable
                os.chdir("/home/ryan/Desktop/pyTWTL/src")
                stopWaypoint = timeit.default_timer()
                print 'Waypoint time, should be ~2.0sec: ', stopWaypoint - startWaypoint

            # Update policy_match now that a trajectory has finalized and policy_match is empty
            if ts_policy:
                # Remove keys from policies that have terminated
                land_keys = []
                for key, val in ts_policy.items():
                    if len(val) == 0:
                        F_indicator[key] = True
                        land_keys.append(key)
                        del ts_policy[key]
                        del pa_policy[key]
                # publish to the land csv file when finished (for experiments)
                if land_keys:
                    if lab_testing:
                        write_to_land_file(land_keys)
                        os.chdir(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts"
                        )
                        os.system(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_land.py"
                        )  # make sure executable
                        os.chdir("/home/ryan/Desktop/pyTWTL/src")
                if not ts_policy:
                    running = False
                    break
                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)
                # Get agent priority based on lowest energy
                for key in key_list:
                    prev_states[key] = pa_control_policy_dict[key][-1]
                priority = get_priority(pa_nom_dict, pa_policy, prev_states,
                                        key_list)
            else:
                running = False

    # Print run time statistics
    stopOnline = timeit.default_timer()
    print 'Online run time for safe algorithm: ', stopOnline - startOnline
    stopFull = timeit.default_timer()
    print 'Full run time for safe algorithm: ', stopFull - startFull
    # Print other statistics from simulation
    print 'Number of iterations for run: ', iter_step
    print 'Average time per iteration is: ', (stopOnline - startOnline) / iter_step
    print 'Number of full updates in run: ', traj_length
    print 'Average update time for single step: ', (stopOnline -
                                                    startOnline) / traj_length

    # Print energy graph for each agent and the system from run
    plot_energy(agent_energy_dict)

    # Not exact, but gives insight
    for key in pa_nom_dict:
        tau_dict[key] = tau_dict_nom[key] + len(
            ts_control_policy_dict[key]) - len(ts_policy_dict_nom[key])

    # Write the nominal and final control policies to a file
    for key in pa_nom_dict:
        write_to_control_policy_file(ts_policy_dict_nom[key], pa_policy_dict_nom[key], \
                tau_dict_nom[key], dfa_dict[key],ts_dict[key],ets_dict[key],\
                ts_control_policy_dict[key], pa_control_policy_dict[key], tau_dict[key], key)

    # Write the CSV files for experiments
    for key in pa_nom_dict:
        write_to_csv(ts_dict[key], ts_control_policy_dict[key], key, time_wp)
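
A hedged call sketch for this multi-agent variant; every formula, file name, and parameter value below is an assumption chosen for illustration, and the keyword comments only restate how the arguments are used in the body above.

# Hypothetical inputs: one TWTL task and one TS file per agent.
formulas = ['[H^2 A]^[0, 8]', '[H^2 B]^[0, 8]', '[H^2 C]^[0, 8]']
ts_files = ['ts_agent1.txt', 'ts_agent2.txt', 'ts_agent3.txt']
case1_synthesis(formulas, ts_files,
                alpha=0.5,           # trade-off between the unit time cost ('weight') and 'edge_weight'
                radius=0.2,          # safety radius used by the downwash and intersection checks
                time_wp=2.0,         # time allotted per waypoint when writing the CSV output
                lab_testing=False,   # True also runs the crazyswarm takeoff/waypoint/land scripts
                always_active=True)  # keep finished agents moving off nodes other agents need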
Example No. 3
def case1_synthesis(formulas, ts_files, alpha, gamma, radius, time_wp,
                    lab_testing):
    startFull = timeit.default_timer()
    startOff = timeit.default_timer()
    dfa_dict = {}
    for ind, f in enumerate(formulas):
        _, dfa_inf, bdd = twtl.translate(f, kind=DFAType.Infinity, norm=True)

        logging.debug('\nEnd of translate\n\n')
        logging.info('The bound of formula "%s" is (%d, %d)!', f, *bdd)
        logging.info(
            'Translated formula "%s" to infinity DFA of size (%d, %d)!', f,
            *dfa_inf.size())
        dfa_dict[ind + 1] = copy.deepcopy(
            dfa_inf)  # Note that the key is set to the agent number

    logging.debug('\n\nStart policy computation\n')

    ts_dict = {}
    ets_dict = {}
    for ind, ts_f in enumerate(ts_files):
        ts_dict[ind + 1] = Ts(directed=True, multi=False)
        ts_dict[ind + 1].read_from_file(ts_f)
        ets_dict[ind + 1] = expand_duration_ts(ts_dict[ind + 1])
    for ind in ts_dict:
        print 'Size of TS:', ets_dict[ind].size()
    # Get the nominal PA for each agent
    pa_nom_dict = {}
    norm_factor = {}
    for key in dfa_dict:
        logging.info('Constructing product automaton with infinity DFA!')
        pa = ts_times_fsa(ets_dict[key], dfa_dict[key])
        # Give length and weight attributes to all edges in pa
        nom_weight_dict = {}
        edges_all = nx.get_edge_attributes(ts_dict[key].g, 'edge_weight')
        max_edge = max(edges_all, key=edges_all.get)
        norm_factor[key] = edges_all[max_edge]
        for pa_edge in pa.g.edges():
            edge = (pa_edge[0][0], pa_edge[1][0], 0)
            nom_weight_dict[pa_edge] = edges_all[edge] / norm_factor[key]
        nx.set_edge_attributes(pa.g, 'edge_weight', nom_weight_dict)
        nx.set_edge_attributes(pa.g, 'weight', 1)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())
        # Make a copy of the nominal PA to change
        pa_nom_dict[key] = copy.deepcopy(pa)

    for key in pa_nom_dict:
        print 'Size of PA:', pa_nom_dict[key].size()

    # Use alpha to combine the time cost and edge_weight into a single multi-objective cost,
    # stored as a new edge attribute over which the "shortest path" is computed
    for key in pa_nom_dict:
        weight_dict = {}
        time_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'weight')
        edge_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'edge_weight')
        for pa_edge in pa_nom_dict[key].g.edges():
            weight_dict[pa_edge] = alpha * time_weight[pa_edge] + (
                1 - alpha) * edge_weight[pa_edge]
        # Add the multi-objective cost as a new edge attribute of the PA
        nx.set_edge_attributes(pa_nom_dict[key].g, 'new_weight', weight_dict)

    # Compute the energy (multi-objective cost function) for each agent's PA at every node
    startEnergy = timeit.default_timer()
    for key in pa_nom_dict:
        compute_energy(pa_nom_dict[key])
    stopEnergy = timeit.default_timer()
    print 'Run time (s) to compute the energy function for all PAs: ', stopEnergy - startEnergy

    # Compute the optimal path in the PA based on new_weight, project it onto the TS, and use it as the initial policy
    ts_policy_dict_nom = {}
    pa_policy_dict_nom = {}
    tau_dict_nom = {}
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key], tau_dict_nom[key] = \
                    compute_control_policy(pa_nom_dict[key], dfa_dict[key], dfa_dict[key].kind)
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key] = \
                    compute_control_policy3(pa_nom_dict[key], dfa_dict[key], pa_policy_dict_nom[key][0])
    # Perform initial check on nominal control policies
    for key in ts_policy_dict_nom:
        if ts_policy_dict_nom[key] is None:
            logging.info('No control policy found!')

    # set empty control policies that will be iteratively updated
    ts_control_policy_dict = {}
    pa_control_policy_dict = {}

    # Initialize policy variables
    for key in ts_policy_dict_nom:
        ts_control_policy_dict[key] = []
        pa_control_policy_dict[key] = []

    # Concatenate nominal policies for searching
    policy_match, key_list, policy_match_index = update_policy_match(
        ts_policy_dict_nom)

    # Initialize vars, give nominal policies
    iter_step = 0
    running = True
    traj_length = 0
    ts_policy = copy.deepcopy(ts_policy_dict_nom)
    pa_policy = copy.deepcopy(pa_policy_dict_nom)
    tau_dict = tau_dict_nom
    # Choose parameter for n-horizon local trajectory and information sharing,
    # must be at least 2
    num_hops = 3
    # Get agent priority based on lowest energy
    prev_priority = key_list
    prev_states = {}
    for key in ts_policy_dict_nom:
        prev_states[key] = pa_policy_dict_nom[key][0]
    priority = get_priority(pa_nom_dict, pa_policy_dict_nom, prev_states,
                            key_list, prev_priority)
    # Create Agent energy dictionary for post-processing
    agent_energy_dict = {}
    for key in ts_policy_dict_nom:
        agent_energy_dict[key] = []

    # Print time statistics
    stopOff = timeit.default_timer()
    print 'Offline run time for all initial setup: ', stopOff - startOff
    startOnline = timeit.default_timer()

    # Execute takeoff command for all crazyflies in lab testing
    if lab_testing:
        startTakeoff = timeit.default_timer()
        os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
        os.system(
            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_takeoff.py"
        )  # make sure file is an executable
        os.chdir("/home/ryan/Desktop/pyTWTL/src")
        stopTakeoff = timeit.default_timer()
        print 'Takeoff time, should be ~2.7sec: ', stopTakeoff - startTakeoff

    # Iterate through all policies sequentially
    while running:
        while policy_match:
            for p_ind, p_val in enumerate(priority):
                if p_ind < 1:
                    weighted_nodes = []
                    weighted_soft_nodes = {}
                    for i in range(num_hops - 1):
                        weighted_soft_nodes[i + 1] = []
                else:
                    # Get local neighborhood (n-hop) of nodes to search for a conflict
                    for k, key in enumerate(key_list):
                        if p_val == key:
                            node = policy_match[0][k]
                            break
                    local_set = get_neighborhood(node, ts_dict[p_val],
                                                 num_hops)
                    one_hop_set = ts_dict[p_val].g.neighbors(node)
                    # Assign hard constraint nodes in local neighborhood
                    weighted_nodes = []
                    for pty in priority[0:p_ind]:
                        for k, key in enumerate(key_list):
                            if pty == key:
                                prev_node = policy_match[0][k]
                                if prev_node in one_hop_set:
                                    weighted_nodes.append(prev_node)
                                # Check if downwash constraint needs to be added, mostly for physical testing
                                downwash_weight = downwash_check(k, ets_dict[key], policy_match[0], \
                                                                priority[0:k], key_list, radius)
                                if downwash_weight:
                                    for downwash_node in downwash_weight:
                                        if downwash_node not in weighted_nodes:
                                            weighted_nodes.append(
                                                downwash_node)
                                break
                    # Get soft constraint nodes from sharing n-hop trajectory
                    soft_nodes = {}
                    for pty in priority[0:p_ind]:
                        for k, key in enumerate(key_list):
                            if pty == key:
                                ts_length = len(ts_policy[key])
                                if ts_length >= num_hops:
                                    for i in range(num_hops - 1):
                                        if ts_policy[key][i + 1] in local_set:
                                            try:
                                                soft_nodes[i + 1]
                                                soft_nodes[i + 1].append(
                                                    ts_policy[key][i + 1])
                                            except KeyError:
                                                soft_nodes[i + 1] = [
                                                    ts_policy[key][i + 1]
                                                ]
                                else:
                                    for i in range(ts_length - 1):
                                        if ts_policy[key][i + 1] in local_set:
                                            try:
                                                soft_nodes[i + 1]
                                                soft_nodes[i + 1].append(
                                                    ts_policy[key][i + 1])
                                            except KeyError:
                                                soft_nodes[i + 1] = [
                                                    ts_policy[key][i + 1]
                                                ]
                                for i in range(num_hops - 1):
                                    try:
                                        soft_nodes[i + 1]
                                    except KeyError:
                                        soft_nodes[i + 1] = []
                    # Assign soft constraint nodes
                    weighted_soft_nodes = soft_nodes

                    # Update weights if transitioning between same two nodes
                    ts_prev_states = []
                    ts_index = []
                    if len(policy_match[0]) > 1 and traj_length >= 1:
                        for key in ts_control_policy_dict:
                            if len(ts_control_policy_dict[key]) == traj_length:
                                ts_prev_states.append(
                                    ts_control_policy_dict[key][-1])
                    if ts_prev_states:
                        for p_ind2, p_val2 in enumerate(priority[0:p_ind]):
                            if p_ind2 > 0:
                                for k_c, key in enumerate(key_list):
                                    if p_val2 == key:
                                        node = policy_match[0][k_c]
                                        break
                                # Check if the trajectories will cross each other in transition
                                cross_weight = check_intersect(k_c, ets_dict[key], ts_prev_states, policy_match[0], \
                                                                    priority[0:p_ind2], key_list, radius, time_wp)
                                if cross_weight:
                                    for cross_node in cross_weight:
                                        if cross_node not in weighted_nodes:
                                            weighted_nodes.append(cross_node)
                                    # Check if agents using same transition
                                    for p_ind3, p_val3 in enumerate(
                                            priority[0:p_ind2]):
                                        for k, key in enumerate(key_list):
                                            if p_val3 == key:
                                                if ts_prev_states[k] == node:
                                                    if policy_match[0][k] == ts_prev_states[k_c]:
                                                        temp_node = policy_match[0][k]
                                                        if temp_node not in weighted_nodes:
                                                            weighted_nodes.append(temp_node)
                                                        if node not in weighted_nodes:
                                                            weighted_nodes.append(node)
                                                        break
                                        else:
                                            continue
                                        break
                                    else:
                                        continue
                                    break
                                else:
                                    # Check if agents using same transition
                                    for p_ind3, p_val3 in enumerate(
                                            priority[0:p_ind2]):
                                        for k, key in enumerate(key_list):
                                            if p_val3 == key:
                                                if ts_prev_states[k] == node:
                                                    if policy_match[0][k] == ts_prev_states[k_c]:
                                                        temp_node = policy_match[0][k]
                                                        if temp_node not in weighted_nodes:
                                                            weighted_nodes.append(temp_node)
                                                        if node not in weighted_nodes:
                                                            weighted_nodes.append(node)
                                                        break
                                        else:
                                            continue
                                        break
                                    else:
                                        continue
                                    break
                # Apply the receding-horizon local path computation at every step,
                # while checking for termination
                if traj_length >= 1:
                    init_loc = pa_control_policy_dict[p_val][-1]
                    # Compute receding horizon shortest path
                    ts_policy[p_val], pa_policy[p_val] = local_horizon(pa_nom_dict[p_val], weighted_nodes,\
                                                            weighted_soft_nodes, num_hops, init_loc, gamma)
                    # Write updates to file
                    # iter_step += 1
                    # write_to_iter_file(ts_policy[p_val], ts_dict[p_val], ets_dict[p_val], p_val, iter_step)

                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)

            # Append trajectories
            for key in ts_policy:
                agent_energy_dict[key].append(
                    pa_nom_dict[key].g.node[pa_policy[key][0]]['energy'])
                ts_control_policy_dict[key].append(ts_policy[key].pop(0))
                pa_policy_temp = list(pa_policy[key])
                pa_control_policy_dict[key].append(pa_policy_temp.pop(0))
                pa_policy[key] = tuple(pa_policy_temp)
            ts_write = policy_match.pop(0)
            traj_length += 1
            # publish this waypoint to a csv file
            write_to_csv_iter(ts_dict, ts_write, key_list, time_wp)
            # Execute waypoint in crazyswarm lab testing
            if lab_testing:
                startWaypoint = timeit.default_timer()
                os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
                os.system(
                    "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_waypoint.py"
                )
                os.chdir("/home/ryan/Desktop/pyTWTL/src")
                stopWaypoint = timeit.default_timer()
                print 'Waypoint time, should be ~2.0sec: ', stopWaypoint - startWaypoint

            # Update policy_match now that a trajectory has finalized and policy_match is empty
            if ts_policy:
                # Remove keys from policies that have terminated
                land_keys = []
                for key, val in ts_policy.items():
                    if len(val) == 0:
                        land_keys.append(key)
                        del ts_policy[key]
                        del pa_policy[key]
                # publish to the land csv file for lab testing
                if land_keys:
                    if lab_testing:
                        write_to_land_file(land_keys)
                        os.chdir(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts"
                        )
                        os.system(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_land.py"
                        )
                        os.chdir("/home/ryan/Desktop/pyTWTL/src")
                if not ts_policy:
                    running = False
                    break
                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)
                # Get agent priority based on lowest energy
                for key in key_list:
                    prev_states[key] = pa_control_policy_dict[key][-1]
                priority = get_priority(pa_nom_dict, pa_policy, prev_states,
                                        key_list, priority)
            else:
                running = False

    # Print run time statistics
    stopOnline = timeit.default_timer()
    print 'Online run time for safe algorithm: ', stopOnline - startOnline
    stopFull = timeit.default_timer()
    print 'Full run time for safe algorithm: ', stopFull - startFull

    # Print energy statistics from run
    plot_energy(agent_energy_dict)

    # Possibly just set the relaxation to the nominal + additional nodes added *** Change (10/28)
    for key in pa_nom_dict:
        tau_dict[key] = tau_dict_nom[key] + len(
            ts_control_policy_dict[key]) - len(ts_policy_dict_nom[key])

    # Write the nominal and final control policies to a file
    for key in pa_nom_dict:
        write_to_control_policy_file(ts_policy_dict_nom[key], pa_policy_dict_nom[key], \
                tau_dict_nom[key], dfa_dict[key],ts_dict[key],ets_dict[key],\
                ts_control_policy_dict[key], pa_control_policy_dict[key], tau_dict[key], key)
    # Write the CSV files for experiments
    for key in pa_nom_dict:
        write_to_csv(ts_dict[key], ts_control_policy_dict[key], key, time_wp)
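
This third variant drops always_active and instead takes a gamma parameter that is forwarded to local_horizon together with the soft-constraint nodes, presumably weighting the soft (shared n-hop trajectory) penalty against the hard constraints. A hedged call sketch with illustrative values only:

formulas = ['[H^2 A]^[0, 8]', '[H^2 B]^[0, 8]']
ts_files = ['ts_agent1.txt', 'ts_agent2.txt']
case1_synthesis(formulas, ts_files, alpha=0.5, gamma=10, radius=0.2,
                time_wp=2.0, lab_testing=False)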