Example #1
def plot_ts():
    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/mission.yaml'
    )
    #     logging.info('\n' + str(mission))
    #     logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment

    # load transition system
    ts_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/transition_system.txt'
    with open(ts_filename, 'r') as fin:
        data = yaml.load(fin)

    ts = Ts(multi=False)
    ts.g.add_edges_from(data['ts'])
    ts.init[data['ts_init']] = 1

    figure = plt.figure()
    figure.add_subplot('111')
    axes = figure.axes[0]
    axes.axis('equal')  # sets aspect ratio to 1

    b = np.array(env['boundary']['limits'])
    plt.xlim(b.T[0])
    plt.ylim(b.T[1])

    mission.draw_environment(axes, figure, origin=np.array((0, 0)))
    draw_ts(ts, axes, figure)

    path = None
    with open(
            '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/policy_new.txt',
            'r') as fin:
        lines = fin.readlines()
    lines = ''.join(map(str.strip, lines))
    exec lines

    print path

    axes.plot([p[0] for p in path], [p[1] for p in path],
              'o',
              color=(1, 0.5, 0),
              markersize=10)
    for u, v in it.izip(path[:-1], path[1:]):
        x, y = np.array([u[:2], v[:2]]).T
        axes.plot(x, y, color=(1, 0.5, 0), linestyle='-', lw=4)

    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.tight_layout(pad=0.5)

    for img in ['png', 'jpg', 'pdf', 'svg']:
        plt.savefig(
            '/home/cristi/Dropbox/FIRM/Experiments/bu_case/bu_ts.{}'.format(
                img))

    plt.show()
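The exec-based loading above expects a policy file produced by the click-to-record helper in the next example; a minimal sketch of its contents (the waypoint coordinates below are hypothetical placeholders) is:

    path = np.array([
        (0.5, 1.0, 0.0),
        (1.5, 1.5, 0.0),
        ]*10)

After the lines are stripped and joined, exec binds path to a NumPy array of repeated waypoints, which is then plotted.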
Example #2
def extract_ts():
    # 1. load mission and environment
    mission = Mission.from_file(
        '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/mission.yaml'
    )
    #     logging.info('\n' + str(mission))
    #     logging.info('Seed: %s', mission.simulation.get('seed', None))

    # load environment
    env = mission.environment

    # load transition system
    ts_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/transition_system.txt'
    with open(ts_filename, 'r') as fin:
        data = yaml.load(fin)

    ts = Ts(multi=False)
    ts.g.add_edges_from(data['ts'])
    ts.init[data['ts_init']] = 1

    figure = plt.figure()
    figure.add_subplot('111')
    axes = figure.axes[0]
    axes.axis('equal')  # sets aspect ratio to 1

    b = np.array(env['boundary']['limits'])
    plt.xlim(b.T[0])
    plt.ylim(b.T[1])

    mission.draw_environment(axes, figure, origin=np.array((0, 0)))
    draw_ts(ts, axes, figure)

    policy_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/policy_new.txt'
    with open(policy_filename, 'w') as fout:
        print >> fout, '    path = np.array(['

    def onclick(event):
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              (event.button, event.x, event.y, event.xdata, event.ydata))

        node, d = min([(p, dist(p[:2], (event.xdata, event.ydata)))
                       for p in ts.g.nodes_iter()],
                      key=lambda x: x[1])
        print node, d
        assert ts.g.has_node(node)

        with open(policy_filename, 'a') as fout:
            print >> fout, '        {},'.format(node)

    cid = figure.canvas.mpl_connect('button_press_event', onclick)

    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.tight_layout(pad=0.5)
    plt.show()

    with open(policy_filename, 'a') as fout:
        print >> fout, '        ]*10)'
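The nearest-node lookup in onclick relies on a dist helper defined elsewhere in the module; a minimal Euclidean sketch that matches the call above (an assumption, not the project's actual implementation):

def dist(p, q):
    # Euclidean distance between two point-like sequences
    return np.sqrt(sum((a - b)**2 for a, b in zip(p, q)))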
Example #3
def extract_ts():
    # 1. load mission and environment
    mission = Mission.from_file('/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/mission.yaml')
#     logging.info('\n' + str(mission))
#     logging.info('Seed: %s', mission.simulation.get('seed', None))
     
    # load environment
    env = mission.environment
    
    # load transition system
    ts_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/transition_system.txt'
    with open(ts_filename, 'r') as fin:
        data = yaml.load(fin)
    
    
    ts = Ts(multi=False)
    ts.g.add_edges_from(data['ts'])
    ts.init[data['ts_init']] = 1
    
    figure = plt.figure()
    figure.add_subplot('111')
    axes = figure.axes[0]
    axes.axis('equal') # sets aspect ratio to 1
    
    b = np.array(env['boundary']['limits'])
    plt.xlim(b.T[0])
    plt.ylim(b.T[1])
    
    mission.draw_environment(axes, figure, origin=np.array((0, 0)))
    draw_ts(ts, axes, figure)
    
    policy_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/policy_new.txt'
    with open(policy_filename, 'w') as fout:
        print>>fout, '    path = np.array(['
    
    def onclick(event):
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              (event.button, event.x, event.y, event.xdata, event.ydata))
        
        node, d = min([(p, dist(p[:2], (event.xdata, event.ydata)))
                       for p in ts.g.nodes_iter()],
                      key=lambda x: x[1])
        print node, d
        assert ts.g.has_node(node)
        
        with open(policy_filename, 'a') as fout:
            print>>fout, '        {},'.format(node)
    
    cid = figure.canvas.mpl_connect('button_press_event', onclick)
    
    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.tight_layout(pad=0.5)
    plt.show()
    
    with open(policy_filename, 'a') as fout:
        print>>fout, '        ]*10)'
Example #4
def case1_synthesis(formula, ts_file):
    _, dfa_0, dfa_inf, bdd = twtl.translate(formula, kind='both', norm=True)

    logging.debug('alphabet: {}'.format(dfa_inf.props))

    for u, v, d in dfa_inf.g.edges_iter(data=True):
        logging.debug('({}, {}): {}'.format(u, v, d))

    dfa_inf.visualize(draw='matplotlib')
    plt.show()

    logging.debug('\nEnd of translate\n\n')

    logging.info('The bound of formula "%s" is (%d, %d)!', formula, *bdd)
    logging.info('Translated formula "%s" to normal DFA of size (%d, %d)!',
                 formula, *dfa_0.size())
    logging.info('Translated formula "%s" to infinity DFA of size (%d, %d)!',
                 formula, *dfa_inf.size())

    logging.debug('\n\nStart policy computation\n')

    ts = Ts(directed=True, multi=False)
    ts.read_from_file(ts_file)
    ets = expand_duration_ts(ts)

    for name, dfa in [('normal', dfa_0), ('infinity', dfa_inf)]:
        logging.info('Constructing product automaton with %s DFA!', name)
        pa = ts_times_fsa(ets, dfa)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())

        if name == 'infinity':
            for u in pa.g.nodes_iter():
                logging.debug('{} -> {}'.format(u, pa.g.neighbors(u)))

            pa.visualize(draw='matplotlib')
            plt.show()

        # compute optimal path in PA and then project onto the TS
        policy, output, tau = compute_control_policy(pa, dfa, dfa.kind)
        logging.info('Max deadline: %s', tau)
        if policy is not None:
            logging.info('Generated output word is: %s',
                         [tuple(o) for o in output])

            policy = [x for x in policy if x not in ets.state_map]
            out = StringIO.StringIO()
            for u, v in zip(policy[:-1], policy[1:]):
                print >> out, u, '->', ts.g[u][v][0]['duration'], '->',
            print >> out, policy[-1],
            logging.info('Generated control policy is: %s', out.getvalue())
            out.close()

            logging.info('Relaxation is: %s',
                         twtl.temporal_relaxation(output, formula=formula))
        else:
            logging.info('No control policy found!')
Example #5
def construct_ts():
    ts = Ts(directed=True, multi=False)
    ts.g = nx.grid_2d_graph(4, 3)

    ts.init[(1, 1)] = 1

    ts.g.add_node((0, 0), attr_dict={'prop': set(['a'])})
    ts.g.add_node((3, 2), attr_dict={'prop': set(['b'])})

    ts.g.add_edges_from(ts.g.edges(), weight=1)

    return ts
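A quick sanity check of the grid built by construct_ts (a sketch; assumes the networkx 1.x API used throughout these examples):

ts = construct_ts()
print ts.g.number_of_nodes(), ts.g.number_of_edges()  # 12 nodes in the 4x3 grid
for node, d in ts.g.nodes(data=True):
    if d.get('prop'):
        print node, d['prop']  # (0, 0) carries 'a', (3, 2) carries 'b'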
Example #6
def plot_ts():
    # 1. load mission and environment
    mission = Mission.from_file('/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/mission.yaml')
#     logging.info('\n' + str(mission))
#     logging.info('Seed: %s', mission.simulation.get('seed', None))
     
    # load environment
    env = mission.environment
    
    # load transition system
    ts_filename = '/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/transition_system.txt'
    with open(ts_filename, 'r') as fin:
        data = yaml.load(fin)
    
    
    ts = Ts(multi=False)
    ts.g.add_edges_from(data['ts'])
    ts.init[data['ts_init']] = 1
    
    figure = plt.figure()
    figure.add_subplot('111')
    axes = figure.axes[0]
    axes.axis('equal') # sets aspect ratio to 1
    
    b = np.array(env['boundary']['limits'])
    plt.xlim(b.T[0])
    plt.ylim(b.T[1])
    
    mission.draw_environment(axes, figure, origin=np.array((0, 0)))
    draw_ts(ts, axes, figure)
    
    path = None
    with open('/home/cristi/Dropbox/work/workspace_linux/PyFIRM/src/data/bu/policy_new.txt', 'r') as fin:
        lines = fin.readlines()
    lines = ''.join(map(str.strip, lines))
    exec lines
    
    print path
    
    axes.plot([p[0] for p in path], [p[1] for p in path], 'o', color=(1, 0.5, 0), markersize=10)
    for u, v in it.izip(path[:-1], path[1:]):
        x, y = np.array([u[:2], v[:2]]).T
        axes.plot(x, y, color=(1, 0.5, 0), linestyle='-', lw=4)
    
    plt.xlabel('x [m]')
    plt.ylabel('y [m]')
    plt.tight_layout(pad=0.5)
    
    for img in ['png', 'jpg', 'pdf', 'svg']:
        plt.savefig('/home/cristi/Dropbox/FIRM/Experiments/bu_case/bu_ts.{}'.format(img))
    
    plt.show()
Example #7
def ts_load(fname, mission, motionModel):
    ts = Ts(multi=False)
    with open(fname, 'r') as fin:
        data = yaml.load(fin)
    
    data['ts'] = [((pxu, pyu, normalizeAngle(thu)), (pxv, pyv, normalizeAngle(thv)))
                    for (pxu, pyu, thu), (pxv, pyv, thv) in data['ts']]
    
    ts.g.add_nodes_from([u for u, _ in data['ts']] + [v for _, v in data['ts']])
    ts.g.add_edges_from(((x, x) for x in ts.g.nodes_iter()), weight=1)
    ts.init[data['ts_init']] = 1 # add initial state
    assert data['ts_init'] in ts.g # check if initial state is in Ts
    
    # expand edges based on open loop controls, adds states and transitions
    for u, v in data['ts']:
        assert u in ts.g
        assert v in ts.g
        traj, controls = motionModel.generateOpenLoopTrajectory(
                                            SE2BeliefState(u[0], u[1], u[2]),
                                            SE2BeliefState(v[0], v[1], v[2]))
        assert np.allclose(traj[0].conf, u, rtol=0, atol=1e-8)
        assert np.allclose(traj[-1].conf, v, rtol=0, atol=1e-8)
        traj[0].conf[:] = u
        traj[-1].conf[:] = v
        assert tuple(map(float, traj[0].conf)) == u
        assert tuple(map(float, traj[-1].conf)) == v
        
        ts.g.add_edges_from(
        [(tuple(map(float,x.conf)), tuple(map(float, y.conf)),
          {'control': c, 'weight': 1})
            for x, y, c in zip(traj[:-1], traj[1:], controls)])
    
    # label states
    region_data = mission.environment['regions']
    regions = dict([(reg.lower(), (np.array(d['position']), np.array(d['sides'])))
                                        for reg, d in region_data.iteritems()])
    regions.update([(reg, np.array([[c[0] - s[0]/2.0, c[0] + s[0]/2.0],
                                    [c[1] - s[1]/2.0, c[1] + s[1]/2.0]]))
                    for reg, (c, s) in regions.iteritems()])
    inBox = lambda x, b: (b[0, 0] <= x[0] <= b[0, 1]) and (b[1, 0] <= x[1] <= b[1, 1])
     
    for reg, b in regions.iteritems():
        print 'Region:', reg, 'x:', b[0], 'y:', b[1]
    print
     
    print 'TS:', ts.g.number_of_nodes(), ts.g.number_of_edges()
     
    for x in ts.g.nodes_iter():
        ts.g.node[x]['prop'] = set([reg for reg, b in regions.iteritems() if inBox(x, b)])
    
    return ts
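The labeling step above only requires each entry of mission.environment['regions'] to provide a box center ('position') and box dimensions ('sides'); a hypothetical sketch of that data (region names and numbers are made up for illustration):

region_data = {
    'A': {'position': [1.0, 2.0], 'sides': [0.5, 0.5]},
    'B': {'position': [4.0, 1.0], 'sides': [1.0, 1.0]},
}
# ts_load() converts each entry into an axis-aligned box
# [[x_min, x_max], [y_min, y_max]] and labels every TS node inside it.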
Example #8
    def __init__(self, env, motion_model, observation_model, parameters):
        '''Constructor'''
        self.name = "LTL FIRM Planner"

        self.bounds = np.array(env['boundary']['limits']).reshape(2, 2)
        self.motion_model = motion_model
        self.observation_model = observation_model

        # sampler used for generating valid samples in the state space
        self.sampler = None

        nn = parameters['nearest_neighbors']
        self.connection_strategy = FStrategy(nn['max_number_of_neighbors'],
                                             nn['max_connection_radius'])
        self.min_steering_dist, self.steering_radius = parameters[
            'steering_dist_bounds']

        # TS and Product MDP
        self.ts = Ts(multi=False)
        self.pa = MDP(multi=False)
        self.pa.rabin_states = defaultdict(set)
        # set specification
        self.specification = None
        # number of Monte Carlo simulations
        self.numParticles = parameters['number_monte_carlo_trials']

        # state and control weights for the LQR controllers
        cc = parameters['controller']
        self.state_weight = np.array(cc['state_weight'])
        self.control_weight = np.array(cc['control_weight'])
        assert np.all(np.linalg.eigvals(self.state_weight) > 0)  # check pdef
        assert np.all(np.linalg.eigvals(self.control_weight) > 0)  # check pdef

        self.cnt = it.count()
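The constructor above reads only a handful of keys from its parameters argument; a hypothetical sketch of a compatible dictionary (the key names are taken from the accesses above, all values are placeholders):

parameters = {
    'nearest_neighbors': {'max_number_of_neighbors': 10,
                          'max_connection_radius': 2.0},
    'steering_dist_bounds': (0.1, 1.5),
    'number_monte_carlo_trials': 50,
    'controller': {'state_weight': [[1.0, 0.0], [0.0, 1.0]],
                   'control_weight': [[1.0, 0.0], [0.0, 1.0]]},
}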
Example #9
def case2_verification(formula, ts_file):
    _, dfa_inf, bdd = twtl.translate(formula, kind=DFAType.Infinity, norm=True)

    logging.info('The bound of formula "%s" is (%d, %d)!', formula, *bdd)
    logging.info('Translated formula "%s" to infinity DFA of size (%d, %d)!',
                 formula, *dfa_inf.size())

    ts = Ts(directed=True, multi=False)
    ts.read_from_file(ts_file)
    ts.g = nx.DiGraph(ts.g)
    ts.g.add_edges_from(ts.g.edges(), weight=1)

    for u, v in ts.g.edges_iter():
        print u, '->', v

    result = verify(ts, dfa_inf)
    logging.info('The result of the verification procedure is %s!', result)
Example #10
def expand_duration_ts(ts):
    '''Expands the edges of the duration transition system such that all edges
    in the new transition system have duration one.
    '''
    assert not ts.multi
    ets = Ts(directed=ts.directed, multi=False)

    # copy attributes
    ets.name = ts.name
    ets.init = ts.init

    # copy nodes with data
    ets.g.add_nodes_from(ts.g.nodes_iter(data=True))

    # expand edges
    ets.state_map = dict() # reverse lookup dictionary for intermediate nodes
    ng = it.count()
    for u, v, data in ts.g.edges_iter(data=True):
        # generate intermediate nodes
        aux_nodes = [u] + [ng.next() for _ in range(data['duration']-1)] + [v]
        u_pos = np.array(ts.g.node[u]['position'])
        v_pos = np.array(ts.g.node[v]['position'])
        aux_pos = [{'position' : tuple(u_pos + s * (v_pos - u_pos)), 's': s}
                                 for s in np.linspace(0, 1, num=len(aux_nodes))]
        ets.g.add_nodes_from(zip(aux_nodes, aux_pos))
        # create intermediate edges
        edge_list = zip(aux_nodes[:-1], aux_nodes[1:]) + zip(aux_nodes, aux_nodes)
        # add intermediate edges
        ets.g.add_edges_from(edge_list, weight=1)
        # update reverse lookup map
        for state in aux_nodes[1:-1]:
            ets.state_map[state] = (u, v)

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug('[extend_ts] TS: ({}, {}) ETS:({}, {})'.format(
                        nx.number_of_nodes(ts.g), nx.number_of_edges(ts.g),
                        nx.number_of_nodes(ets.g), nx.number_of_edges(ets.g)))
    return ets
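A minimal usage sketch for the expansion above (assumes the Ts class and networkx 1.x API used throughout these examples; node names and coordinates are illustrative):

ts = Ts(directed=True, multi=False)
ts.g.add_node('A', position=(0.0, 0.0))
ts.g.add_node('B', position=(3.0, 0.0))
ts.g.add_edge('A', 'B', duration=3)
ts.init['A'] = 1
ets = expand_duration_ts(ts)
# the duration-3 edge becomes a chain of three unit-weight edges passing
# through two interpolated nodes at roughly (1.0, 0.0) and (2.0, 0.0)
print ets.g.number_of_nodes(), ets.g.number_of_edges()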
Example #11
def case1_synthesis(formulas, ts_files):

    startG = timeit.default_timer()
    dfa_dict = {}
    for ind, f in enumerate(formulas):
        _, dfa_inf, bdd = twtl.translate(f, kind=DFAType.Infinity, norm=True)
        dfa_dict[ind + 1] = copy.deepcopy(dfa_inf)

    ts_dict = {}
    ets_dict = {}
    for ind, ts_f in enumerate(ts_files):
        ts_dict[ind + 1] = Ts(directed=True, multi=False)
        ts_dict[ind + 1].read_from_file(ts_f)
        ets_dict[ind + 1] = expand_duration_ts(ts_dict[ind + 1])

    # Get the nominal PA
    logging.info('Constructing product automaton with infinity DFA!')
    startPA = timeit.default_timer()
    pa_dict = {}
    for key in dfa_dict:
        logging.info('Constructing product automaton with infinity DFA!')
        pa = ts_times_fsa(ets_dict[key], dfa_dict[key])
        # Give initial weight attribute to all edges in pa
        nx.set_edge_attributes(pa.g, "weight", 1)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())
        # Make a copy of the nominal PA to change
        pa_dict[key] = copy.deepcopy(pa)

    # Calculate PA for the entire system
    pa_tuple = (pa_dict[1], pa_dict[2])
    ets_tuple = (ets_dict[1], ets_dict[2])
    team_ts = ts_times_ts(ets_tuple)
    team_pa = pa_times_pa(pa_tuple, team_ts)
    # pdb.set_trace()

    stopPA = timeit.default_timer()
    print 'Product automaton size is:', team_pa.size()
    print 'Run Time (s) to get PA is: ', stopPA - startPA

    startPath = timeit.default_timer()
    # Compute the optimal path in PA and project onto the TS
    pa_policy_multi = compute_multiagent_policy(team_pa)

    stopPath = timeit.default_timer()
    print 'Run Time (s) to get optimal paths for agents is: ', stopPath - startPath
    stopG = timeit.default_timer()
    print 'Run Time (s) for full algorithm: ', stopG - startG
    print 'PA Policy for 2 agents: ', pa_policy_multi
Example #12
    def __init__(self, robot, checker, iterations=0, eta=[0.5, 1.0]):
        '''Constructor'''
        self.name = 'Sparse RRG-based LTL path planner'
        self.maxIterations = iterations
        self.iteration = 0

        self.robot = robot
        symbols = self.robot.getSymbols(robot.initConf)

        # initialize transition system
        self.ts = Ts(name='Transition system', directed=True, multi=False)
        self.ts.g.add_node(robot.initConf, prop=symbols)
        self.ts.init[robot.initConf] = 1.0

        # initialize checker
        self.checker = checker
        if checker:
            self.checker.addInitialState(robot.initConf, symbols)

        self.eta = eta
Example #13
    def __init__(self, robot, checker, iterations):
        '''Constructor'''
        self.name = 'RRG-based LTL path planner'
        self.maxIterations = iterations
        self.iteration = 0

        self.robot = robot
        symbols = self.robot.getSymbols(robot.initConf)

        # initialize transition system
        self.ts = Ts(name='global transition system', multi=False)
        self.ts.g.add_node(robot.initConf, prop=symbols)
        self.ts.init[robot.initConf] = 1.0

        # initialize checker
        self.checker = checker
        if checker:
            self.checker.addInitialState(robot.initConf, symbols)

        # initialize bounds for far and nearest checks
        # should be set based on specific problem
        self.eta = [0.5, 1.0]

        self.detailed_logging = True
Example #14
def expand_duration_ts(ts):
    '''Expands the edges of the duration transition system such that all edges
    in the new transition system have duration one.
    '''
    assert not ts.multi
    ets = Ts(directed=ts.directed, multi=False)

    # copy attributes
    ets.name = ts.name
    ets.init = ts.init

    # copy nodes with data
    ets.g.add_nodes_from(ts.g.nodes_iter(data=True))

    # expand edges
    ets.state_map = dict()  # reverse lookup dictionary for intermediate nodes
    ng = it.count()
    for u, v, data in ts.g.edges_iter(data=True):
        # generate intermediate nodes
        aux_nodes = [u] + [ng.next()
                           for _ in range(data['duration'] - 1)] + [v]
        u_pos = np.array(ts.g.node[u]['position'])
        v_pos = np.array(ts.g.node[v]['position'])
        aux_pos = [{
            'position': tuple(u_pos + s * (v_pos - u_pos)),
            's': s
        } for s in np.linspace(0, 1, num=len(aux_nodes))]
        ets.g.add_nodes_from(zip(aux_nodes, aux_pos))
        # create intermediate edges
        edge_list = zip(aux_nodes[:-1], aux_nodes[1:]) + zip(
            aux_nodes, aux_nodes)
        # add intermediate edges
        ets.g.add_edges_from(edge_list, weight=1)
        # update reverse lookup map
        for state in aux_nodes[1:-1]:
            ets.state_map[state] = (u, v)

    if logging.getLogger().isEnabledFor(logging.DEBUG):
        logging.debug('[extend_ts] TS: ({}, {}) ETS:({}, {})'.format(
            nx.number_of_nodes(ts.g), nx.number_of_edges(ts.g),
            nx.number_of_nodes(ets.g), nx.number_of_edges(ets.g)))
    return ets
Example #15
def test_srfs():
    ts = Ts(directed=False, multi=False)

    M, N = 10, 10

    ts.g.add_nodes_from([((i, j), {
        'location': (i, j),
        'prop': set(),
        'label': ''
    }) for i in range(M) for j in range(N)])

    for i in range(M):
        for j in range(N):
            if i > 0:
                ts.g.add_edge((i, j), (i - 1, j), weight=1)
                if j > 0:
                    ts.g.add_edge((i, j), (i - 1, j - 1), weight=1)
                if j < N - 1:
                    ts.g.add_edge((i, j), (i - 1, j + 1), weight=1)
            if i < M - 1:
                ts.g.add_edge((i, j), (i + 1, j), weight=1)
                if j > 0:
                    ts.g.add_edge((i, j), (i + 1, j - 1), weight=1)
                if j < N - 1:
                    ts.g.add_edge((i, j), (i + 1, j + 1), weight=1)
            if j > 0:
                ts.g.add_edge((i, j), (i, j - 1), weight=1)
            if j < N - 1:
                ts.g.add_edge((i, j), (i, j + 1), weight=1)

    ts.init[(9, 9)] = 1

    ts.g.node[(1, 1)]['prop'] = {'a'}
    ts.g.node[(1, 1)]['label'] = 'a'
    ts.g.node[(3, 8)]['prop'] = {'b'}
    ts.g.node[(3, 8)]['label'] = 'b'
    ts.g.node[(7, 2)]['prop'] = {'b'}
    ts.g.node[(7, 2)]['label'] = 'b'

    ts.g.node[(5, 5)]['prop'] = {'o'}
    ts.g.node[(5, 4)]['prop'] = {'o'}
    ts.g.node[(5, 3)]['prop'] = {'o'}
    ts.g.node[(5, 2)]['prop'] = {'o'}
    ts.g.node[(5, 6)]['prop'] = {'o'}
    ts.g.node[(5, 7)]['prop'] = {'o'}
    ts.g.node[(6, 6)]['prop'] = {'o'}
    ts.g.node[(7, 6)]['prop'] = {'o'}
    ts.g.node[(4, 4)]['prop'] = {'o'}
    ts.g.node[(3, 4)]['prop'] = {'o'}
    ts.g.node[(2, 4)]['prop'] = {'o'}

    prop_colors = {('a', ): 'y', ('b', ): 'g', ('o', ): 'k'}

    draw_grid(ts,
              edgelabel='weight',
              prop_colors=prop_colors,
              current_node='init')
    plt.xlim(-1, M)
    plt.ylim(-1, N)
    plt.show()

    spec = 'G (F a && F b && !o)'
    buchi = Buchi()
    buchi.from_formula(spec)
    print('Created Buchi automaton of size', buchi.size())
    #     buchi.visualize(draw='matplotlib')
    #     plt.show()

    print
    for u, d in buchi.g.nodes_iter(data=True):
        print u, d
    print
    for u, v, d in buchi.g.edges_iter(data=True):
        print u, v, d

    pa = ts_times_buchi(ts, buchi)
    print('Created product automaton of size', pa.size())
    #     pa.visualize(draw='matplotlib')
    #     plt.show()

    compute_potentials(pa)
    #     print
    #     for u, d in pa.g.nodes_iter(data=True):
    #         print u, d
    pa.max_potential = 1 + max(
        [d['potential'] for _, d in pa.g.nodes_iter(data=True)])

    seed(1)

    horizon = 2
    current_pa_state = next(pa.init.iterkeys())
    policy = None
    while True:
        current_ts_state, _ = current_pa_state

        neighborhood_rewards = generate_rewards(ts, current_ts_state)
        print 'rewards', neighborhood_rewards

        policy = compute_receding_horizon_policy(pa, current_pa_state,
                                                 neighborhood_rewards, horizon,
                                                 policy)
        #         policy =  compute_receding_horizon_policy_dp(pa, current_pa_state,
        #                                          neighborhood_rewards, horizon, policy)

        draw_grid(ts,
                  edgelabel='weight',
                  prop_colors=prop_colors,
                  current_node=current_ts_state)
        plt.xlim(-1, M)
        plt.ylim(-1, N)

        for v, r in neighborhood_rewards.iteritems():
            c = plt.Circle(v, radius=0.05 * r, color=(.8, .8, .8, .7))
            plt.gcf().gca().add_artist(c)

        plt.show()

        current_pa_state = policy[0]
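The nested bound checks above build an 8-connected grid; an equivalent, more compact sketch of the same edge construction:

for i in range(M):
    for j in range(N):
        for di in (-1, 0, 1):
            for dj in (-1, 0, 1):
                if (di, dj) != (0, 0) and 0 <= i + di < M and 0 <= j + dj < N:
                    ts.g.add_edge((i, j), (i + di, j + dj), weight=1)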
Example #16
def postprocessing(logfilename,
                   ts_filename,
                   outdir,
                   lts_index,
                   rrg_iterations,
                   lts_iterations,
                   local_traj_iterations,
                   generate=()):
    '''Parses the log file and generates statistics and figures.'''

    if not os.path.isdir(outdir):
        os.makedirs(outdir)

    with open(logfilename, 'r') as logfile:
        # first process general data
        data = dict()
        line = ''
        for line in logfile:
            prefix, line_data = line.split('--')
            if prefix.lower().rfind('info') >= 0:
                data.update(eval(line_data))
                if line_data.lower().find('start global planning') >= 0:
                    break
        print 'general data:', len(data)

        # second process data on global planner
        rrg_data = []
        iteration_data = None
        for line in logfile:
            prefix, line_data = line.split('--')
            if prefix.lower().rfind('info') >= 0:
                if line_data.lower().rfind('found solution') >= 0:
                    rrg_data.append(iteration_data)
                    break
                if line_data.lower().find('iteration') >= 0:
                    if iteration_data is not None:
                        rrg_data.append(iteration_data)
                    iteration_data = dict()
                iteration_data.update(eval(line_data))
        print 'rrg data:', len(rrg_data)

        rrg_stat_data = eval(line_data)
        for line in logfile:
            prefix, line_data = line.split('--')
            if prefix.lower().rfind('info') >= 0:
                if line_data.lower().find('end global planning') >= 0:
                    break
                rrg_stat_data.update(eval(line_data))
        print 'rrg_stat_data', len(rrg_stat_data)

        assert rrg_stat_data['Iterations'] == len(rrg_data)

        # third process data on local planner
        rrt_stat_data = dict()
        for line in logfile:
            prefix, line_data = line.split('--')
            if prefix.lower().rfind('info') >= 0:
                if line_data.lower().find('initialize local planner') >= 0:
                    break
                rrt_stat_data.update(eval(line_data))
        print 'rrt_stat_data:', len(rrt_stat_data)

        rrt_data = []
        execution_data = dict()
        for line in logfile:
            prefix, line_data = line.split('--')
            if prefix.lower().rfind('info') >= 0:
                if line_data.lower().find(
                        'local online planning finished') >= 0:
                    rrt_data.append(execution_data)
                    break
                # NOTE: second condition is for compatibility with Cozmo logs
                if (line_data.lower().find('start local planning step') >= 0
                        or line_data.lower().find('plan start time') >= 0):
                    rrt_data.append(execution_data)
                    execution_data = dict()
                execution_data.update(eval(line_data))
        print 'rrt data:', len(rrt_data)

    # save useful information
    with open(os.path.join(outdir, 'general_data.txt'), 'w') as f:
        for key in [
                'Robot initial configuration', 'Robot step size',
                'Global specification', 'Buchi size', 'Local specification'
        ]:
            print >> f, key, ':', data[key]

        for key in [
                'Iterations', 'global planning runtime', 'Size of TS',
                'Size of PA'
        ]:
            print >> f, key, ':', rrg_stat_data[key]
        pre, suff = rrg_stat_data['global policy']
        print >> f, 'Global policy size :', (len(pre), len(suff))

        # key = 'Computing potential function runtime'
        # print>>f, key, ':', rrt_stat_data[key]

    # get workspace
    wspace, style = data['Workspace']
    wspace.boundary.style = style
    ewspace, style = data['Expanded workspace']
    ewspace.boundary.style = style

    # get robot
    robot_name = data['Robot name']
    initConf = data['Robot initial configuration']
    stepsize = data['Robot step size']
    robot = eval(data['Robot constructor'])
    robot.diameter = data['Robot diameter']
    robot.localObst = data['Local obstacle label']

    # create simulation object
    sim = Simulate2D(wspace, robot, ewspace)
    sim.config['output-dir'] = outdir

    # add regions to workspace
    for key, value in data.iteritems():
        if isinstance(key, tuple) and key[0] == "Global region":
            r, style = value
            # add styles to region
            addStyle(r, style=style)
            # add region to workspace
            sim.workspace.addRegion(r)
            # create expanded region
            er = expandRegion(r, robot.diameter / 2)
            # add style to the expanded region
            addStyle(er, style=style)
            # add expanded region to the expanded workspace
            sim.expandedWorkspace.addRegion(er)

    # get local requests and obstacles
    localSpec = data['Local specification']
    requests = []
    obstacles = []
    for key, value in data.iteritems():
        if isinstance(key, tuple) and key[0] == "Local region":
            r, style, path, is_request = value
            # add styles to region
            addStyle(r, style=style)
            # add path
            if path:
                r.path = it.cycle(path)
                r.original_path = path[:]
            # add to requests or obstacles lists
            if is_request:
                name = next(iter(r.symbols))
                requests.append(Request(r, name, localSpec[name]))
            else:
                obstacles.append(r)

    # get robot sensor
    robot.sensor = eval(data['Robot sensor constructor'])

    # create RRG planner and load transition system, and global policy
    sim.offline = RRGPlanner(robot, None, 1)
    prefix, suffix = rrg_stat_data['global policy']
    sim.solution = (prefix, suffix[1:])
    print 'Global policy size:', len(prefix), len(suffix)

    if any(option in generate for option in ('workspace', 'expanded workspace',
                                             'global solution')):
        # set larger font for saving figures
        for r in sim.workspace.regions | sim.expandedWorkspace.regions:
            r.fontsize_orig = r.textStyle.get('fontsize', 12)
            r.textStyle['fontsize'] = 24

        # display workspace
        if 'workspace' in generate:
            sim.display(save='workspace.png')
        # display expanded workspace
        if 'expanded workspace' in generate:
            sim.display(expanded=True, save='eworkspace.png')
        # display solution for off-line problem
        if 'global solution' in generate:
            ts = sim.offline.ts
            sim.offline.ts = Ts.load(ts_filename)
            sim.display(expanded=True,
                        solution=prefix + suffix[1:],
                        save='global_solution.png')
            sim.offline.ts = ts

        # restore original fontsize
        for r in sim.workspace.regions:
            r.textStyle['fontsize'] = r.fontsize_orig
            del r.fontsize_orig

    # display both workspaces
    if 'both workspaces' in generate:
        sim.display(expanded='both')

    # show construction of rrg
    sim.offline.ts.init[initConf] = 1
    if 'RRG construction' in generate:
        sim.config['global-ts-color'] = {
            'node_color': 'blue',
            'edge_color': 'black'
        }
        sim.config['video-interval'] = 500
        sim.config['video-file'] = 'rrg_construction.mp4'
        sim.save_rrg_process(rrg_data)
        # reset to default colors
        sim.defaultConfiguration(reset=['global-ts-color'])

    if 'RRG iterations' in generate:
        sim.config['global-ts-color'] = {
            'node_color': 'blue',
            'edge_color': 'black'
        }
        rrg_iterations = [
            i + (i == -1) * (len(rrg_data) + 1) for i in rrg_iterations
        ]
        sim.save_rrg_iterations(rrg_data, rrg_iterations)
        # reset to default colors
        sim.defaultConfiguration(reset=['global-ts-color'])

    # set to global and to save animation
    sim.offline.ts = Ts.load(ts_filename)
    print 'TS size', sim.offline.ts.size()
    if 'offline plan' in generate:
        sim.config['video-interval'] = 30
        sim.config['sim-step'] = 0.02
        sim.config['video-file'] = 'global_plan.mp4'
        sim.simulate(loops=1, offline=True)
        sim.play(output='video', show=False)
        sim.save()

    # get online trajectory
    sim.offline.ts = Ts.load(ts_filename)
    trajectory = [d['new configuration'] for d in rrt_data]
    local_plans = [d['local plan'] for d in rrt_data] + [[]]
    potential = [d['potential'] for d in rrt_data] + [0]
    requests = [d['requests'] for d in rrt_data] + [[]]
    print len(trajectory), len(local_plans)

    if 'trajectory' in generate:
        # set larger font for saving figures
        for r in sim.workspace.regions | sim.expandedWorkspace.regions:
            r.fontsize_orig = r.textStyle.get('fontsize', 12)
            r.textStyle['fontsize'] = 24

        sim.config['trajectory-min-transparency'] = 0.2  # fading
        sim.config['trajectory-history-length'] = len(
            trajectory)  # full history
        sim.config['global-policy-color'] = 'orange'

        sim.online = LocalPlanner(None, sim.offline.ts, robot, localSpec)
        sim.online.trajectory = trajectory
        sim.online.robot.sensor.requests = []
        sim.display(expanded=True,
                    solution=prefix + suffix[1:],
                    show_robot=False,
                    localinfo=('trajectory', ),
                    save='trajectory.png')
        sim.defaultConfiguration(reset=[
            'trajectory-min-transparency', 'trajectory-history-length',
            'global-policy-color'
        ])

        # restore original fontsize
        for r in sim.workspace.regions:
            r.textStyle['fontsize'] = r.fontsize_orig
            del r.fontsize_orig

    # local plan visualization
    sim.online = LocalPlanner(None, sim.offline.ts, robot, localSpec)
    sim.online.trajectory = trajectory
    if 'online plan' in generate:
        sim.config['video-interval'] = 30
        sim.config['sim-step'] = 0.01
        sim.config['video-file'] = 'local_plan.mp4'
        sim.simulate(offline=False)
        sim.play(output='video',
                 show=False,
                 localinfo={
                     'trajectory': trajectory,
                     'plans': local_plans,
                     'potential': potential,
                     'requests': requests
                 })
        sim.save()

    if 'online trajectory iterations' in generate:
        sim.config['sim-step'] = 0.05
        sim.config['trajectory-min-transparency'] = 1.0  # no fading
        sim.config['trajectory-history-length'] = 100000  # entire history
        sim.config['image-file-template'] = 'local_trajectory_{frame:04d}.png'
        sim.config['global-policy-color'] = 'orange'
        # simulate and save
        sim.simulate(offline=False)
        sim.play(output='plots',
                 show=False,
                 localinfo={
                     'trajectory': trajectory,
                     'plans': local_plans,
                     'potential': potential,
                     'requests': requests
                 })
        sim.save(output='plots', frames=local_traj_iterations)
        # set initial values
        sim.defaultConfiguration(reset=[
            'trajectory-min-transparency', 'trajectory-history-length',
            'image-file-template', 'global-policy-color'
        ])

    msize = np.mean([d['tree size'] for d in rrt_data if d['tree size'] > 0])
    print 'Mean size:', msize
    for k, d in enumerate(rrt_data):
        if d['tree size'] > 0:
            print(k, d['tree size'])

    if any(option in generate
           for option in ('LTS iterations', 'LTS construction')):
        idx = lts_index
        print rrt_data[idx]['tree size']
        print 'current state:', rrt_data[idx - 1]['new configuration']

        lts_data = sorted([
            v for k, v in rrt_data[idx].items()
            if str(k).startswith('lts iteration')
        ],
                          key=lambda x: x[0])
        #     print lts_data

        sim.online.lts = Ts(directed=True, multi=False)
        sim.online.lts.init[rrt_data[idx - 1]['new configuration']] = 1
        # reset and fast-forward requests' locations
        for r in sim.robot.sensor.all_requests:
            aux = it.cycle(r.region.original_path)
            for _ in range(idx - 1):
                r.region.translate(next(aux))
        sim.robot.sensor.requests = [
            r for r in sim.robot.sensor.all_requests
            if r in rrt_data[idx]['requests']
        ]
        if 'LTS construction' in generate:
            sim.config['video-interval'] = 500
            sim.config['video-file'] = 'lts_construction.mp4'
            sim.save_lts_process(lts_data, endframe_hold=20)

        if 'LTS iterations' in generate:
            lts_iterations = [
                i + (i == -1) * len(lts_data) for i in lts_iterations
            ]
            sim.save_lts_iterations(lts_data, lts_iterations)

    if 'LTS statistics' in generate:
        metrics = [('tree size', True), ('local planning runtime', False),
                   ('local planning execution', False)]
        rrt_data = rrt_data[1:]
        # NOTE: This is for compatibility with the Cozmo logs
        if 'local planning execution' not in rrt_data[0]:
            for d in rrt_data:
                duration = (d['plan stop time'] - d['plan start time']) * 1000
                d['local planning execution'] = duration

        data = [len(d['requests']) for d in rrt_data]
        serviced = sum(n1 - n2 for n1, n2 in it.izip(data, data[1:])
                       if n1 > n2)

        with open(os.path.join(outdir, 'local_performance_stats.txt'),
                  'w') as f:
            print >> f, 'no trials:', len(rrt_data)
            print >> f, 'no requests serviced', serviced
            ops = [np.mean, np.min, np.max, np.std]
            for key, positive in metrics:
                if positive:
                    print >> f, key, [
                        op([
                            trial[key] for trial in rrt_data if trial[key] > 0
                        ]) for op in ops
                    ]
                else:
                    print >> f, key, [
                        op([trial[key] for trial in rrt_data]) for op in ops
                    ]
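The parser above splits each log line on '--' and evaluates the right-hand side as a Python dict literal; hypothetical examples of lines it would accept:

    INFO:root -- {'Robot step size': 0.1}
    INFO:root -- {'iteration': 12, 'tree size': 34}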
Example #17
    def generate_local_plan(self):
        '''Generates the local plan from the current configuration.
        NOTE: Assumes that it is called either because there is no local
        policy or there are new events => re-planning is needed
        NOTE: exclude current position as next target position
        NOTE: free movement if there are no local requests
        '''

        local_requests, local_obstacles = self.requests, self.obstacles
        # 0. no local requests => move towards global state of minimum potential
        if not local_requests:
            if not local_obstacles:
                collision_free = True
            else:
                state = self.robot.currentConf
                assert self.robot.isSimpleSegment(state, self.global_target_state)
                collision_free = self.robot.collision_free_segment(state,
                                     self.global_target_state, local_obstacles)
            if collision_free:
                local_plan = self.free_movement()
                if local_plan:
                    return local_plan, False

        target = self.tracking_req

        # 1. initialize local ts
        self.lts = Ts(multi=False)
        # add current state to local ts
        global_prop = self.robot.getSymbols(self.robot.currentConf)
        local_prop = self.robot.getSymbols(self.robot.currentConf, local=True)
        self.lts.g.add_node(self.robot.currentConf, global_prop=global_prop,
                       buchi_states=self.buchi_states,
                       hit=(target.name in local_prop) if target else False)

        done = False
        cnt = it.count()
        # test if solution was found
        while not done:
            iteration = cnt.next()
            relax = iteration >= self.relax_global_connection
            # 3. generate sample
            random_sample = self.robot.sample(local=True)
            # 4. get nearest neighbor in local ts
            source_state = nearest(self.lts, random_sample)
            # 5. steer robot towards random sample
            dest_state = self.robot.steer(source_state, random_sample)
            # 6. label new sample
            dest_global_prop = self.robot.getSymbols(dest_state)
            dest_local_prop = self.robot.getSymbols(dest_state, local=True)

            simple_segment = self.robot.isSimpleSegment(source_state, dest_state)
            empty_buchi = collision_free = hit = global_state = None
            if simple_segment:
                # 7. compute Buchi states for new sample
                source_data = self.lts.g.node[source_state]
                B = monitor(source_data['buchi_states'], self.pa.buchi,
                            source_data['global_prop'], dest_global_prop)
                empty_buchi = len(B) > 0

                if B:
                    collision_free = self.robot.collision_free_segment(
                                    source_state, dest_state, local_obstacles)
                    if collision_free:
                        # 8. update local transition system
                        hit = True # all samples hit when there are no targets
                        if target:
                            hit = ((target.name in dest_local_prop)
                                                        or source_data['hit'])

                        self.lts.g.add_node(dest_state,
                                            global_prop=dest_global_prop,
                                            buchi_states=B, hit=hit)
                        self.lts.g.add_edge(source_state, dest_state)

                        if hit:
                            # test if the node can be connected to the global ts
                            global_state = self.connection_global_state(
                                                           dest_state, B, relax)
                            if global_state:
                                self.global_target_state = global_state
                                done = True

            if self.detailed_logging:
                logging.info('"lts iteration %d" : '
                             '(%d, %s, %s, %s, %s, %s, %s, %s, %s)',
                             iteration, iteration,
                             random_sample, source_state, dest_state,
                             simple_segment, empty_buchi, collision_free, hit,
                             global_state)

            # test samples that hit the request for the relaxed connection
            # strategy to the global TS
            if iteration == self.relax_global_connection:
                for s in self.lts.g:
                    if self.lts.g.node[s]['hit']:
                        # test if the node can be connected to the global ts
                        B = self.lts.g.node[s]['buchi_states']
                        global_state = self.connection_global_state(s, B, relax)
                        if global_state:
                            self.global_target_state = global_state
                            done = True
                            dest_state = s # make the node the destination
                            break

        # 11. return local plan
        plan_to_leaf = nx.shortest_path(self.lts.g, self.robot.currentConf,
                                        dest_state)[1:]

        return (plan_to_leaf + self.free_movement(dest_state), True)
Example #18
def case1_synthesis(formulas, ts_files, alpha, radius, time_wp, lab_testing,
                    always_active):
    startFull = timeit.default_timer()
    startOff = timeit.default_timer()
    dfa_dict = {}
    for ind, f in enumerate(formulas):
        _, dfa_inf, bdd = twtl.translate(f, kind=DFAType.Infinity, norm=True)

        logging.debug('\nEnd of translate\n\n')
        logging.info('The bound of formula "%s" is (%d, %d)!', f, *bdd)
        logging.info(
            'Translated formula "%s" to infinity DFA of size (%d, %d)!', f,
            *dfa_inf.size())
        dfa_dict[ind + 1] = copy.deepcopy(
            dfa_inf)  # The key is set to the agent number

    logging.debug('\n\nStart policy computation\n')

    ts_dict = {}
    ets_dict = {}
    for ind, ts_f in enumerate(ts_files):
        ts_dict[ind + 1] = Ts(directed=True, multi=False)
        ts_dict[ind + 1].read_from_file(ts_f)
        ets_dict[ind + 1] = expand_duration_ts(ts_dict[ind + 1])
    for ind in ts_dict:
        print 'Size of TS:', ets_dict[ind].size()
    # Get the nominal PA for each agent
    pa_nom_dict = {}
    norm_factor = {}
    startPA = timeit.default_timer()
    for key in dfa_dict:
        logging.info('Constructing product automaton with infinity DFA!')
        pa = ts_times_fsa(ets_dict[key], dfa_dict[key])
        # Give length and weight attributes to all edges in pa
        nom_weight_dict = {}
        edges_all = nx.get_edge_attributes(ts_dict[key].g, 'edge_weight')
        max_edge = max(edges_all, key=edges_all.get)
        norm_factor[key] = edges_all[max_edge]
        for pa_edge in pa.g.edges():
            edge = (pa_edge[0][0], pa_edge[1][0], 0)
            nom_weight_dict[pa_edge] = edges_all[edge] / norm_factor[key]
        nx.set_edge_attributes(pa.g, 'edge_weight', nom_weight_dict)
        nx.set_edge_attributes(pa.g, 'weight', 1)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())
        # Make a copy of the nominal PA to change
        pa_nom_dict[key] = copy.deepcopy(pa)
    stopPA = timeit.default_timer()
    print 'Run Time (s) to get all three PAs is: ', stopPA - startPA

    for key in pa_nom_dict:
        print 'Size of PA:', pa_nom_dict[key].size()

    # Use alpha to perform weighted optimization of time and edge_weight and make this a
    # new edge attribute to find "shortest path" over
    for key in pa_nom_dict:
        weight_dict = {}
        time_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'weight')
        edge_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'edge_weight')
        for pa_edge in pa_nom_dict[key].g.edges():
            weight_dict[pa_edge] = alpha * time_weight[pa_edge] + (
                1 - alpha) * edge_weight[pa_edge]
        # Append the multi-objective cost to the edge attributes of the PA
        nx.set_edge_attributes(pa_nom_dict[key].g, 'new_weight', weight_dict)

    # Compute the energy (multi-objective cost function) for each agent's PA at every node
    startEnergy = timeit.default_timer()
    for key in pa_nom_dict:
        compute_energy(pa_nom_dict[key])
    stopEnergy = timeit.default_timer()
    print 'Run Time (s) to get the moc energy function for all three PA: ', stopEnergy - startEnergy

    # Compute optimal path in PA and project onto the TS
    ts_policy_dict_nom = {}
    pa_policy_dict_nom = {}
    tau_dict_nom = {}
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key], tau_dict_nom[key] = \
                    compute_control_policy(pa_nom_dict[key], dfa_dict[key], dfa_dict[key].kind)
    # Perform initial check on nominal control policies
    for key in ts_policy_dict_nom:
        if ts_policy_dict_nom[key] is None:
            logging.info('No control policy found!')

    # set empty control policies that will be iteratively updated
    ts_control_policy_dict = {}
    pa_control_policy_dict = {}

    # Initialize policy variables
    for key in ts_policy_dict_nom:
        ts_control_policy_dict[key] = []
        pa_control_policy_dict[key] = []

    # Concatenate nominal policies for searching
    policy_match, key_list, policy_match_index = update_policy_match(
        ts_policy_dict_nom)

    # Initialize vars, give nominal policies
    iter_step = 0
    running = True
    traj_length = 0
    ts_policy = copy.deepcopy(ts_policy_dict_nom)
    pa_policy = copy.deepcopy(pa_policy_dict_nom)
    tau_dict = tau_dict_nom
    # Choose parameter for n-horizon local trajectory, must be at least 2
    num_hops = 2
    # Get agent priority based on lowest energy
    prev_states = {}
    for key in ts_policy_dict_nom:
        prev_states[key] = pa_policy_dict_nom[key][0]
    priority = get_priority(pa_nom_dict, pa_policy_dict_nom, prev_states,
                            key_list)

    # Create Agent energy dictionary for post-processing
    # Create Termination indicator to assign terminated agents lowest priority
    F_indicator = {}
    agent_energy_dict = {}
    for key in ts_policy_dict_nom:
        agent_energy_dict[key] = []
        F_indicator[key] = False

    # Print time statistics
    stopOff = timeit.default_timer()
    print 'Offline run time for all initial setup: ', stopOff - startOff
    startOnline = timeit.default_timer()

    # Execute takeoff command for all crazyflies in lab testing
    if lab_testing:
        startTakeoff = timeit.default_timer()
        os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
        os.system(
            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_takeoff.py"
        )  # make sure executable
        os.chdir("/home/ryan/Desktop/pyTWTL/src")
        stopTakeoff = timeit.default_timer()
        print 'Takeoff time, should be ~2.7sec: ', stopTakeoff - startTakeoff

    ############################################################################

    # Iterate through all policies sequentially
    while running:
        while policy_match:
            for p_ind, p_val in enumerate(priority):
                if p_ind < 1:
                    weighted_nodes = {}
                    for i in range(num_hops):
                        weighted_nodes[i] = []
                else:
                    # Get local neighborhood (n-hop) of nodes to search for a conflict
                    for k_c, key_c in enumerate(key_list):
                        if p_val == key_c:
                            node = policy_match[0][k_c]
                            break
                    # Receive path information from 2*H neighborhood
                    local_set = get_neighborhood(node, ts_dict[p_val],
                                                 2 * num_hops)
                    # Get constraints for each transition
                    weighted_nodes = {}
                    for pty in priority[0:p_ind]:
                        for key in key_list:
                            if pty == key:
                                ts_length = len(ts_policy[key])
                                if ts_length >= num_hops:
                                    for i in range(num_hops):
                                        if ts_policy[key][i] in local_set:
                                            try:
                                                weighted_nodes[i].append(
                                                    ts_policy[key][i])
                                                # Add downwash nodes to constraint, for drone experiments
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key],
                                                    ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    downwash_node
                                                                )
                                            except KeyError:
                                                weighted_nodes[i] = [
                                                    ts_policy[key][i]
                                                ]
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key],
                                                    ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    downwash_node
                                                                )
                                else:
                                    for i in range(ts_length):
                                        if ts_policy[key][i] in local_set:
                                            try:
                                                weighted_nodes[i].append(
                                                    ts_policy[key][i])
                                                # Add downwash nodes to constraint, for drone experiments
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key],
                                                    ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    downwash_node
                                                                )
                                            except KeyError:
                                                weighted_nodes[i] = [
                                                    ts_policy[key][i]
                                                ]
                                                downwash_nodes = downwash_checkDP(
                                                    ets_dict[key],
                                                    ts_policy[key][i], radius)
                                                if downwash_nodes:
                                                    for downwash_node in downwash_nodes:
                                                        if downwash_node not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    downwash_node
                                                                )
                                for i in range(num_hops):
                                    try:
                                        weighted_nodes[i]
                                    except KeyError:
                                        weighted_nodes[i] = []
                    # Update constraint set with intersecting transitions
                    if traj_length >= 1:
                        for p_ind2, p_val2 in enumerate(priority[0:p_ind]):
                            for k, key in enumerate(key_list):
                                if p_val2 == key:
                                    # initialize previous state
                                    comp_prev_state = ts_control_policy_dict[
                                        key][-1]
                                    cur_prev_state = ts_control_policy_dict[
                                        key_c][-1]
                                    cur_ts_policy_length = len(
                                        ts_policy[key_c])
                                    ts_length = len(ts_policy[key])
                                    if ts_length >= num_hops:
                                        for i in range(num_hops):
                                            comp_next_state = ts_policy[key][i]
                                            if i < cur_ts_policy_length:
                                                cur_next_state = ts_policy[
                                                    key_c][i]
                                                if comp_next_state in local_set:
                                                    # Check if the trajectories cross during transition (or use same transition)
                                                    cross_weight = check_intersectDP(ets_dict[key], cur_prev_state, cur_next_state, \
                                                                                comp_prev_state, comp_next_state, radius, time_wp)
                                                    if cross_weight:
                                                        for cross_node in cross_weight:
                                                            if cross_node not in weighted_nodes[
                                                                    i]:
                                                                weighted_nodes[
                                                                    i].append(
                                                                        cross_node
                                                                    )
                                                    # Check if using same transition in updated case
                                                    if comp_next_state == cur_prev_state:
                                                        if comp_prev_state not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    comp_prev_state
                                                                )
                                                    # Set previous state for next iteration
                                                    comp_prev_state = ts_policy[
                                                        key][i]
                                                    cur_prev_state = ts_policy[
                                                        key_c][i]
                                            else:
                                                break
                                    else:
                                        for i in range(ts_length):
                                            comp_next_state = ts_policy[key][i]
                                            if i < cur_ts_policy_length:
                                                cur_next_state = ts_policy[
                                                    key_c][i]
                                                if comp_next_state in local_set:
                                                    # Check if the trajectories cross during transition (or use same transition)
                                                    cross_weight = check_intersectDP(ets_dict[key], cur_prev_state, cur_next_state, \
                                                                                comp_prev_state, comp_next_state, radius, time_wp)
                                                    if cross_weight:
                                                        for cross_node in cross_weight:
                                                            if cross_node not in weighted_nodes[
                                                                    i]:
                                                                weighted_nodes[
                                                                    i].append(
                                                                        cross_node
                                                                    )
                                                    # Check if using same transition in updated case
                                                    if comp_next_state == cur_prev_state:
                                                        if comp_prev_state not in weighted_nodes[
                                                                i]:
                                                            weighted_nodes[
                                                                i].append(
                                                                    comp_prev_state
                                                                )
                                                    # Set previous state for next iteration
                                                    comp_prev_state = ts_policy[
                                                        key][i]
                                                    cur_prev_state = ts_policy[
                                                        key_c][i]
                                            else:
                                                break
                # Generate receding horizon path and check for termination
                if traj_length >= 1:
                    init_loc = pa_control_policy_dict[p_val][-1]
                    ts_temp = ts_policy[p_val]
                    pa_temp = pa_policy[p_val]
                    # Compute receding horizon shortest path
                    ts_policy[p_val], pa_policy[
                        p_val], D_flag = local_horizonDP(
                            pa_nom_dict[p_val], weighted_nodes, num_hops,
                            init_loc)
                    # Check for deadlock and, if found, resolve it
                    if p_ind > 0:
                        if D_flag == True:
                            # Agent in deadlock is to remain stationary
                            ts_policy[p_val] = [
                                ts_control_policy_dict[p_val][-1],
                                ts_control_policy_dict[p_val][-1]
                            ]
                            pa_policy[p_val] = [
                                pa_control_policy_dict[p_val][-1],
                                pa_control_policy_dict[p_val][-1]
                            ]
                            # Assign deadlock node
                            x_d = ts_control_policy_dict[p_val][-1]
                            x_d_val = p_val
                            x_d_flag = True
                            hp_set = priority[0:p_ind]
                            while x_d_flag == True and hp_set:
                                x_d_flag = False
                                for hp in hp_set:
                                    if ts_policy[hp][0] == x_d:
                                        if hp == priority[0]:
                                            # Make all agents stationary and perform Dijkstra's shortest path
                                            for j in priority[1:p_ind]:
                                                ts_policy[j] = [
                                                    ts_control_policy_dict[j]
                                                    [-1],
                                                    ts_control_policy_dict[j]
                                                    [-1]
                                                ]
                                                pa_policy[j] = [
                                                    pa_control_policy_dict[j]
                                                    [-1],
                                                    pa_control_policy_dict[j]
                                                    [-1]
                                                ]
                                            occupied_nodes = [
                                                ts_control_policy_dict[x_d_val]
                                                [-1]
                                            ]
                                            for j in priority[0:p_ind]:
                                                occupied_nodes.append(
                                                    ts_control_policy_dict[j]
                                                    [-1])
                                            init_loc = pa_control_policy_dict[
                                                x_d_val][-1]
                                            ts_policy[x_d_val], pa_policy[
                                                x_d_val] = deadlock_path(
                                                    pa_nom_dict[x_d_val],
                                                    occupied_nodes, init_loc)
                                            for j in priority[1:p_ind]:
                                                for ind, node in enumerate(
                                                        ts_policy[x_d_val]
                                                    [:-1]):
                                                    if ts_policy[j][0] == node:
                                                        ts_policy[j] = [
                                                            ts_policy[x_d_val][
                                                                ind + 1],
                                                            ts_policy[x_d_val][
                                                                ind + 1]
                                                        ]
                                                        # Find the actual state on agent's PA that corresponds to this node
                                                        neighbors = pa_nom_dict[
                                                            j].g.neighbors(
                                                                pa_policy[j]
                                                                [0])
                                                        for node in neighbors:
                                                            if node[0] == ts_policy[
                                                                    j][0]:
                                                                pa_policy[
                                                                    j] = [
                                                                        node,
                                                                        node
                                                                    ]
                                            break
                                        else:
                                            ts_policy[hp] = [
                                                ts_control_policy_dict[hp][-1],
                                                ts_control_policy_dict[hp][-1]
                                            ]
                                            pa_policy[hp] = [
                                                pa_control_policy_dict[hp][-1],
                                                pa_control_policy_dict[hp][-1]
                                            ]
                                            x_d = ts_control_policy_dict[hp][
                                                -1]
                                            x_d_val = hp
                                            x_d_flag = True
                                            hp_set.remove(hp)
                                            break
                    # Increase iteration step (for statistics at end)
                    iter_step += 1

            # Update policy match
            policy_match, key_list, policy_match_index = update_policy_match(
                ts_policy)

            # Account for agents that have finished; agent ID ordering also covers other finished agents
            if always_active == True:
                finished_ID = []
                for key in F_indicator:
                    if F_indicator[key] == True:
                        finished_ID.append(key)
                        current_node = ts_control_policy_dict[key][-1]
                        hp_nodes_avoid = []
                        for k in key_list:
                            hp_nodes_avoid.append(ts_policy[k][0])
                            hp_nodes_avoid.append(
                                ts_control_policy_dict[k][-1])
                        for fID in finished_ID[:-1]:
                            hp_nodes_avoid.append(
                                ts_control_policy_dict[fID][-1])
                        if current_node in hp_nodes_avoid:
                            local_set = ts_dict[key].g.neighbors(current_node)
                            for node in local_set:
                                if node not in hp_nodes_avoid:
                                    ts_control_policy_dict[key].append(node)
                                    break
                        else:
                            ts_control_policy_dict[key].append(current_node)

            # Append trajectories
            for key in ts_policy:
                agent_energy_dict[key].append(
                    pa_nom_dict[key].g.node[pa_policy[key][0]]['energy'])
                ts_control_policy_dict[key].append(ts_policy[key].pop(0))
                pa_policy_temp = list(pa_policy[key])
                pa_control_policy_dict[key].append(pa_policy_temp.pop(0))
                pa_policy[key] = tuple(pa_policy_temp)
            ts_write = policy_match.pop(0)
            traj_length += 1

            # publish waypoint to a csv file
            write_to_csv_iter(ts_dict, ts_write, key_list, time_wp)
            # Execute waypoint in crazyswarm lab testing
            if lab_testing:
                startWaypoint = timeit.default_timer()
                os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
                os.system(
                    "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_waypoint.py"
                )  # make sure executable
                os.chdir("/home/ryan/Desktop/pyTWTL/src")
                stopWaypoint = timeit.default_timer()
                print 'Waypoint time, should be ~2.0sec: ', stopWaypoint - startWaypoint

            # Update policy_match now that a trajectory has finalized and policy_match is empty
            if ts_policy:
                # Remove keys from policies that have terminated
                land_keys = []
                for key, val in ts_policy.items():
                    if len(val) == 0:
                        F_indicator[key] = True
                        land_keys.append(key)
                        del ts_policy[key]
                        del pa_policy[key]
                # publish to the land csv file when finished (for experiments)
                if land_keys:
                    if lab_testing:
                        write_to_land_file(land_keys)
                        os.chdir(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts"
                        )
                        os.system(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_land.py"
                        )  # make sure executable
                        os.chdir("/home/ryan/Desktop/pyTWTL/src")
                if not ts_policy:
                    running = False
                    break
                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)
                # Get agent priority based on lowest energy
                for key in key_list:
                    prev_states[key] = pa_control_policy_dict[key][-1]
                priority = get_priority(pa_nom_dict, pa_policy, prev_states,
                                        key_list)
            else:
                running = False

    # Print run time statistics
    stopOnline = timeit.default_timer()
    print 'Online run time for safe algorithm: ', stopOnline - startOnline
    stopFull = timeit.default_timer()
    print 'Full run time for safe algorithm: ', stopFull - startFull
    # Print other statistics from simulation
    print 'Number of iterations for run: ', iter_step
    print 'Average time per iteration: ', (stopOnline - startOnline) / iter_step
    print 'Number of full updates in run: ', traj_length
    print 'Average update time for single step: ', (stopOnline -
                                                    startOnline) / traj_length

    # Plot the energy for each agent and the system over the run
    plot_energy(agent_energy_dict)

    # Approximate tau as the nominal tau plus the extra steps taken (not exact, but gives insight)
    for key in pa_nom_dict:
        tau_dict[key] = tau_dict_nom[key] + len(
            ts_control_policy_dict[key]) - len(ts_policy_dict_nom[key])

    # Write the nominal and final control policies to a file
    for key in pa_nom_dict:
        write_to_control_policy_file(ts_policy_dict_nom[key], pa_policy_dict_nom[key], \
                tau_dict_nom[key], dfa_dict[key],ts_dict[key],ets_dict[key],\
                ts_control_policy_dict[key], pa_control_policy_dict[key], tau_dict[key], key)

    # Write the CSV files for experiments
    for key in pa_nom_dict:
        write_to_csv(ts_dict[key], ts_control_policy_dict[key], key, time_wp)
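
A minimal, self-contained sketch (illustrative only; plain networkx, none of these names are part of the code above) of how the n-hop neighborhood local_set used by the conflict checks above can be obtained: a breadth-first search truncated at num_hops returns every node reachable within that many transitions.

import networkx as nx

def n_hop_neighborhood(g, node, num_hops):
    # Nodes reachable from `node` in at most `num_hops` transitions, excluding `node` itself
    lengths = dict(nx.single_source_shortest_path_length(g, node, cutoff=num_hops))
    return set(lengths) - {node}

grid = nx.grid_2d_graph(4, 4)  # 4x4 grid graph as a stand-in transition system
print(sorted(n_hop_neighborhood(grid, (0, 0), 2)))
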
def main():

    Rho = namedtuple('Rho', ['lower', 'upper'])
    rhos = [Rho(lower=0.98, upper=1.04), Rho(lower=0.98, upper=1.04)]

#     # Case Study 1
#     with Timer('IJRR 2013 Case-Study 1'):
#         r1 = Ts.load('./robot_1.yaml')
#         r2 = Ts.load('./robot_2.yaml')
#         ts_tuple = (r1, r2)
#         formula = ('[]<>gather && [](r1gather -> X(!r1gather U r1upload)) '
#                   '&& [](r2gather -> X(!r2gather U r2upload))')
#         opt_prop = set(['gather'])
#         logger.info('Formula: %s', formula)
#         logger.info('opt_prop: %s', opt_prop)
#         prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
#                     lomap.multi_agent_optimal_run(ts_tuple, formula, opt_prop)
#         logger.info('Cost: %d', suffix_cycle_cost)
#         logger.info('Prefix length: %d', prefix_length)
#         # Find the controls that will produce this run
#         control_prefixes = []
#         control_suffix_cycles = []
#         for i in range(0, len(ts_tuple)):
#             ts = ts_tuple[i]
#             control_prefixes.append(ts.controls_from_run(prefixes[i]))
#             control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
#             logger.info('%s run prefix: %s', ts.name, prefixes[i])
#             logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
#             logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
#             logger.info('%s control suffix cycle: %s', ts.name,
#                                                     control_suffix_cycles[i])
#
#     logger.info('<><><> <><><> <><><>')

    # Case Study 2
    with Timer('IJRR 2013 Case-Study 2'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather && r2gather)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather','r2gather'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 3
    with Timer('IJRR 2013 Case-Study 3'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather && r2gather)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload)) '
                   '&& [](!(r1gather1 && r2gather1) && !(r1gather2 && r2gather2)'
                   '&& !(r1gather3 && r2gather3) && !(r1gather4 && r2gather4))')
        opt_prop = set(['r1gather','r2gather'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 4
    with Timer('IJRR 2013 Case-Study 4'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather4 && r2gather2)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather4','r2gather2'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
                      lomap.multi_agent_optimal_run(ts_tuple, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 4 w/ sync
    with Timer('IJRR 2013 Case-Study 4 (w/ sync)'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather4 && r2gather2)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather4','r2gather2'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])
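
For readability: in the Spin-style LTL syntax used by these case studies, [] is "always" (G), <> is "eventually" (F), X is "next", and U is "until", so the Case Study 2 specification above reads G F gather && G(gather -> (r1gather && r2gather)) && G(r1gather -> X(!r1gather U r1upload)) && G(r2gather -> X(!r2gather U r2upload)): the robots must gather simultaneously infinitely often, and after each gather a robot may not gather again until it has uploaded.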
Example #20
0
def test_srfs():
    ts = Ts(directed=False, multi=False)

    M, N = 10, 10

    ts.g.add_nodes_from([((i, j),
                          {'location': (i, j), 'prop': set(), 'label': ''})
                                          for i in range(M) for j in range(N)])

    for i in range(M):
        for j in range(N):
            if i > 0:
                ts.g.add_edge((i, j), (i-1, j), weight=1)
                if j > 0:
                    ts.g.add_edge((i, j), (i-1, j-1), weight=1)
                if j < N-1:
                    ts.g.add_edge((i, j), (i-1, j+1), weight=1)
            if i < N-1:
                ts.g.add_edge((i, j), (i+1, j), weight=1)
                if j > 0:
                    ts.g.add_edge((i, j), (i+1, j-1), weight=1)
                if j < N-1:
                    ts.g.add_edge((i, j), (i+1, j+1), weight=1)
            if j > 0:
                ts.g.add_edge((i, j), (i, j-1), weight=1)
            if j < N-1:
                ts.g.add_edge((i, j), (i, j+1), weight=1)

    ts.init[(9, 9)] = 1

    ts.g.node[(1, 1)]['prop'] = {'a'}
    ts.g.node[(1, 1)]['label'] = 'a'
    ts.g.node[(3, 8)]['prop'] = {'b'}
    ts.g.node[(3, 8)]['label'] = 'b'
    ts.g.node[(7, 2)]['prop'] = {'b'}
    ts.g.node[(7, 2)]['label'] = 'b'

    ts.g.node[(5, 5)]['prop'] = {'o'}
    ts.g.node[(5, 4)]['prop'] = {'o'}
    ts.g.node[(5, 3)]['prop'] = {'o'}
    ts.g.node[(5, 2)]['prop'] = {'o'}
    ts.g.node[(5, 6)]['prop'] = {'o'}
    ts.g.node[(5, 7)]['prop'] = {'o'}
    ts.g.node[(6, 6)]['prop'] = {'o'}
    ts.g.node[(7, 6)]['prop'] = {'o'}
    ts.g.node[(4, 4)]['prop'] = {'o'}
    ts.g.node[(3, 4)]['prop'] = {'o'}
    ts.g.node[(2, 4)]['prop'] = {'o'}

    prop_colors = {('a',):  'y', ('b',): 'g', ('o',): 'k'}

    draw_grid(ts, edgelabel='weight', prop_colors=prop_colors,
              current_node='init')
    plt.xlim(-1, M)
    plt.ylim(-1, N)
    plt.show()

    spec = 'G (F a && F b && !o)'
    buchi = Buchi()
    buchi.from_formula(spec)
    print('Created Buchi automaton of size', buchi.size())
#     buchi.visualize(draw='matplotlib')
#     plt.show()

    print
    for u, d in buchi.g.nodes_iter(data=True):
        print u, d
    print
    for u, v, d in buchi.g.edges_iter(data=True):
        print u, v, d

    pa = ts_times_buchi(ts, buchi)
    print('Created product automaton of size', pa.size())
#     pa.visualize(draw='matplotlib')
#     plt.show()

    compute_potentials(pa)
#     print
#     for u, d in pa.g.nodes_iter(data=True):
#         print u, d
    pa.max_potential = 1 + max([d['potential'] for _, d in pa.g.nodes_iter(data=True)])

    seed(1)

    horizon=2
    current_pa_state = next(pa.init.iterkeys())
    policy = None
    while True:
        current_ts_state, _ = current_pa_state

        neighborhood_rewards = generate_rewards(ts, current_ts_state)
        print 'rewards', neighborhood_rewards

        policy =  compute_receding_horizon_policy(pa, current_pa_state,
                                         neighborhood_rewards, horizon, policy)
#         policy =  compute_receding_horizon_policy_dp(pa, current_pa_state,
#                                          neighborhood_rewards, horizon, policy)

        draw_grid(ts, edgelabel='weight', prop_colors=prop_colors,
                  current_node=current_ts_state)
        plt.xlim(-1, M)
        plt.ylim(-1, N)

        for v, r in neighborhood_rewards.iteritems():
            c = plt.Circle(v, radius=0.05*r, color=(.8, .8, .8, .7))
            plt.gcf().gca().add_artist(c)

        plt.show()

        current_pa_state = policy[0]
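
The compute_potentials call above comes from the surrounding library; as a hedged, self-contained illustration of the underlying idea (assuming a node's potential is essentially its shortest distance to an accepting/goal state of the product automaton), the same quantity can be computed for every node at once with a multi-source Dijkstra on the reversed graph.

import networkx as nx

def distance_to_goal(g, goal_states, weight='weight'):
    # Distance from every node to its closest goal state; reversing the graph turns
    # "distance to a goal" into a standard multi-source shortest-path problem
    return nx.multi_source_dijkstra_path_length(g.reverse(copy=True),
                                                goal_states, weight=weight)

g = nx.DiGraph()
g.add_weighted_edges_from([('s0', 's1', 1), ('s1', 's2', 1), ('s0', 's2', 3)])
print(distance_to_goal(g, {'s2'}))  # {'s2': 0, 's1': 1, 's0': 2}
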
Example #21
0
def case1_synthesis(formulas, ts_files, alpha, gamma, radius, time_wp,
                    lab_testing):
    startFull = timeit.default_timer()
    startOff = timeit.default_timer()
    dfa_dict = {}
    for ind, f in enumerate(formulas):
        _, dfa_inf, bdd = twtl.translate(f, kind=DFAType.Infinity, norm=True)

        logging.debug('\nEnd of translate\n\n')
        logging.info('The bound of formula "%s" is (%d, %d)!', f, *bdd)
        logging.info(
            'Translated formula "%s" to infinity DFA of size (%d, %d)!', f,
            *dfa_inf.size())
        dfa_dict[ind + 1] = copy.deepcopy(
            dfa_inf)  # Note that the key is set to the agent number

    logging.debug('\n\nStart policy computation\n')

    ts_dict = {}
    ets_dict = {}
    for ind, ts_f in enumerate(ts_files):
        ts_dict[ind + 1] = Ts(directed=True, multi=False)
        ts_dict[ind + 1].read_from_file(ts_f)
        ets_dict[ind + 1] = expand_duration_ts(ts_dict[ind + 1])
    for ind in ts_dict:
        print 'Size of TS:', ets_dict[ind].size()
    # Get the nominal PA for each agent
    pa_nom_dict = {}
    norm_factor = {}
    for key in dfa_dict:
        logging.info('Constructing product automaton with infinity DFA!')
        pa = ts_times_fsa(ets_dict[key], dfa_dict[key])
        # Give length and weight attributes to all edges in pa
        nom_weight_dict = {}
        edges_all = nx.get_edge_attributes(ts_dict[key].g, 'edge_weight')
        max_edge = max(edges_all, key=edges_all.get)
        norm_factor[key] = edges_all[max_edge]
        for pa_edge in pa.g.edges():
            edge = (pa_edge[0][0], pa_edge[1][0], 0)
            nom_weight_dict[pa_edge] = edges_all[edge] / norm_factor[key]
        nx.set_edge_attributes(pa.g, 'edge_weight', nom_weight_dict)
        nx.set_edge_attributes(pa.g, 'weight', 1)
        logging.info('Product automaton size is: (%d, %d)', *pa.size())
        # Make a copy of the nominal PA to change
        pa_nom_dict[key] = copy.deepcopy(pa)

    for key in pa_nom_dict:
        print 'Size of PA:', pa_nom_dict[key].size()

    # Use alpha to blend time and edge_weight into a single cost, stored as a new
    # edge attribute over which the "shortest path" is computed
    for key in pa_nom_dict:
        weight_dict = {}
        time_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'weight')
        edge_weight = nx.get_edge_attributes(pa_nom_dict[key].g, 'edge_weight')
        for pa_edge in pa_nom_dict[key].g.edges():
            weight_dict[pa_edge] = alpha * time_weight[pa_edge] + (
                1 - alpha) * edge_weight[pa_edge]
        # Append the multi-objective cost to the edge attributes of the PA
        nx.set_edge_attributes(pa_nom_dict[key].g, 'new_weight', weight_dict)

    # Compute the energy (multi-objective cost function) for each agent's PA at every node
    startEnergy = timeit.default_timer()
    for key in pa_nom_dict:
        compute_energy(pa_nom_dict[key])
    stopEnergy = timeit.default_timer()
    print 'Run time (s) to compute the energy function for all PAs: ', stopEnergy - startEnergy

    # Compute the optimal path in Pa_Prime based on new_weight, project it onto the TS, and take it as the initial policy
    ts_policy_dict_nom = {}
    pa_policy_dict_nom = {}
    tau_dict_nom = {}
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key], tau_dict_nom[key] = \
                    compute_control_policy(pa_nom_dict[key], dfa_dict[key], dfa_dict[key].kind)
    for key in pa_nom_dict:
        ts_policy_dict_nom[key], pa_policy_dict_nom[key] = \
                    compute_control_policy3(pa_nom_dict[key], dfa_dict[key], pa_policy_dict_nom[key][0])
    # Perform initial check on nominal control policies
    for key in ts_policy_dict_nom:
        if ts_policy_dict_nom[key] is None:
            logging.info('No control policy found!')

    # set empty control policies that will be iteratively updated
    ts_control_policy_dict = {}
    pa_control_policy_dict = {}

    # Initialize policy variables
    for key in ts_policy_dict_nom:
        ts_control_policy_dict[key] = []
        pa_control_policy_dict[key] = []

    # Concatenate nominal policies for searching
    policy_match, key_list, policy_match_index = update_policy_match(
        ts_policy_dict_nom)

    # Initialize vars, give nominal policies
    iter_step = 0
    running = True
    traj_length = 0
    ts_policy = copy.deepcopy(ts_policy_dict_nom)
    pa_policy = copy.deepcopy(pa_policy_dict_nom)
    tau_dict = tau_dict_nom
    # Choose parameter for n-horizon local trajectory and information sharing,
    # must be at least 2
    num_hops = 3
    # Get agent priority based on lowest energy
    prev_priority = key_list
    prev_states = {}
    for key in ts_policy_dict_nom:
        prev_states[key] = pa_policy_dict_nom[key][0]
    priority = get_priority(pa_nom_dict, pa_policy_dict_nom, prev_states,
                            key_list, prev_priority)
    # Create Agent energy dictionary for post-processing
    agent_energy_dict = {}
    for key in ts_policy_dict_nom:
        agent_energy_dict[key] = []

    # Print time statistics
    stopOff = timeit.default_timer()
    print 'Offline run time for all initial setup: ', stopOff - startOff
    startOnline = timeit.default_timer()

    # Execute takeoff command for all crazyflies in lab testing
    if lab_testing:
        startTakeoff = timeit.default_timer()
        os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
        os.system(
            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_takeoff.py"
        )  # make sure file is an executable
        os.chdir("/home/ryan/Desktop/pyTWTL/src")
        stopTakeoff = timeit.default_timer()
        print 'Takeoff time, should be ~2.7sec: ', stopTakeoff - startTakeoff

    # Iterate through all policies sequentially
    while running:
        while policy_match:
            for p_ind, p_val in enumerate(priority):
                if p_ind < 1:
                    weighted_nodes = []
                    weighted_soft_nodes = {}
                    for i in range(num_hops - 1):
                        weighted_soft_nodes[i + 1] = []
                else:
                    # Get local neighborhood (n-hop) of nodes to search for a conflict
                    for k, key in enumerate(key_list):
                        if p_val == key:
                            node = policy_match[0][k]
                            break
                    local_set = get_neighborhood(node, ts_dict[p_val],
                                                 num_hops)
                    one_hop_set = ts_dict[p_val].g.neighbors(node)
                    # Assign hard constraint nodes in local neighborhood
                    weighted_nodes = []
                    for pty in priority[0:p_ind]:
                        for k, key in enumerate(key_list):
                            if pty == key:
                                prev_node = policy_match[0][k]
                                if prev_node in one_hop_set:
                                    weighted_nodes.append(prev_node)
                                # Check if downwash constraint needs to be added, mostly for physical testing
                                downwash_weight = downwash_check(k, ets_dict[key], policy_match[0], \
                                                                priority[0:k], key_list, radius)
                                if downwash_weight:
                                    for downwash_node in downwash_weight:
                                        if downwash_node not in weighted_nodes:
                                            weighted_nodes.append(
                                                downwash_node)
                                break
                    # Get soft constraint nodes from sharing n-hop trajectory
                    soft_nodes = {}
                    for pty in priority[0:p_ind]:
                        for k, key in enumerate(key_list):
                            if pty == key:
                                ts_length = len(ts_policy[key])
                                if ts_length >= num_hops:
                                    for i in range(num_hops - 1):
                                        if ts_policy[key][i + 1] in local_set:
                                            try:
                                                soft_nodes[i + 1]
                                                soft_nodes[i + 1].append(
                                                    ts_policy[key][i + 1])
                                            except KeyError:
                                                soft_nodes[i + 1] = [
                                                    ts_policy[key][i + 1]
                                                ]
                                else:
                                    for i in range(ts_length - 1):
                                        if ts_policy[key][i + 1] in local_set:
                                            try:
                                                soft_nodes[i + 1]
                                                soft_nodes[i + 1].append(
                                                    ts_policy[key][i + 1])
                                            except KeyError:
                                                soft_nodes[i + 1] = [
                                                    ts_policy[key][i + 1]
                                                ]
                                for i in range(num_hops - 1):
                                    try:
                                        soft_nodes[i + 1]
                                    except KeyError:
                                        soft_nodes[i + 1] = []
                    # Assign soft constraint nodes
                    weighted_soft_nodes = soft_nodes

                    # Update weights if transitioning between same two nodes
                    ts_prev_states = []
                    ts_index = []
                    if len(policy_match[0]) > 1 and traj_length >= 1:
                        for key in ts_control_policy_dict:
                            if len(ts_control_policy_dict[key]) == traj_length:
                                ts_prev_states.append(
                                    ts_control_policy_dict[key][-1])
                    if ts_prev_states:
                        for p_ind2, p_val2 in enumerate(priority[0:p_ind]):
                            if p_ind2 > 0:
                                for k_c, key in enumerate(key_list):
                                    if p_val2 == key:
                                        node = policy_match[0][k_c]
                                        break
                                # Check if the trajectories will cross each other in transition
                                cross_weight = check_intersect(k_c, ets_dict[key], ts_prev_states, policy_match[0], \
                                                                    priority[0:p_ind2], key_list, radius, time_wp)
                                if cross_weight:
                                    for cross_node in cross_weight:
                                        if cross_node not in weighted_nodes:
                                            weighted_nodes.append(cross_node)
                                    # Check if agents using same transition
                                    for p_ind3, p_val3 in enumerate(
                                            priority[0:p_ind2]):
                                        for k, key in enumerate(key_list):
                                            if p_val3 == key:
                                                if ts_prev_states[k] == node:
                                                    if policy_match[0][
                                                            k] == ts_prev_states[
                                                                k_c]:
                                                        temp_node = policy_match[
                                                            0][k]
                                                        if temp_node not in weighted_nodes:
                                                            weighted_nodes.append(
                                                                temp_node)
                                                        if node not in weighted_nodes:
                                                            weighted_nodes.append(
                                                                node)
                                                        break
                                        else:
                                            continue
                                        break
                                    else:
                                        continue
                                    break
                                else:
                                    # Check if agents using same transition
                                    for p_ind3, p_val3 in enumerate(
                                            priority[0:p_ind2]):
                                        for k, key in enumerate(key_list):
                                            if p_val3 == key:
                                                if ts_prev_states[k] == node:
                                                    if policy_match[0][
                                                            k] == ts_prev_states[
                                                                k_c]:
                                                        temp_node = policy_match[
                                                            0][k]
                                                        if temp_node not in weighted_nodes:
                                                            weighted_nodes.append(
                                                                temp_node)
                                                        if node not in weighted_nodes:
                                                            weighted_nodes.append(
                                                                node)
                                                        break
                                        else:
                                            continue
                                        break
                                    else:
                                        continue
                                    break
                # Compute the local horizon function so the receding horizon is applied at every step,
                # while also checking for termination
                if traj_length >= 1:
                    init_loc = pa_control_policy_dict[p_val][-1]
                    # Compute receding horizon shortest path
                    ts_policy[p_val], pa_policy[p_val] = local_horizon(pa_nom_dict[p_val], weighted_nodes,\
                                                            weighted_soft_nodes, num_hops, init_loc, gamma)
                    # Write updates to file
                    # iter_step += 1
                    # write_to_iter_file(ts_policy[p_val], ts_dict[p_val], ets_dict[p_val], p_val, iter_step)

                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)

            # Append trajectories
            for key in ts_policy:
                agent_energy_dict[key].append(
                    pa_nom_dict[key].g.node[pa_policy[key][0]]['energy'])
                ts_control_policy_dict[key].append(ts_policy[key].pop(0))
                pa_policy_temp = list(pa_policy[key])
                pa_control_policy_dict[key].append(pa_policy_temp.pop(0))
                pa_policy[key] = tuple(pa_policy_temp)
            ts_write = policy_match.pop(0)
            traj_length += 1
            # publish this waypoint to a csv file
            write_to_csv_iter(ts_dict, ts_write, key_list, time_wp)
            # Execute waypoint in crazyswarm lab testing
            if lab_testing:
                startWaypoint = timeit.default_timer()
                os.chdir("/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts")
                os.system(
                    "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_waypoint.py"
                )
                os.chdir("/home/ryan/Desktop/pyTWTL/src")
                stopWaypoint = timeit.default_timer()
                print 'Waypoint time, should be ~2.0sec: ', stopWaypoint - startWaypoint

            # Update policy_match now that a trajectory has finalized and policy_match is empty
            if ts_policy:
                # Remove keys from policies that have terminated
                land_keys = []
                for key, val in ts_policy.items():
                    if len(val) == 0:
                        land_keys.append(key)
                        del ts_policy[key]
                        del pa_policy[key]
                # publish to the land csv file for lab testing
                if land_keys:
                    if lab_testing:
                        write_to_land_file(land_keys)
                        os.chdir(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts"
                        )
                        os.system(
                            "/home/ryan/crazyswarm/ros_ws/src/crazyswarm/scripts/twtl_land.py"
                        )
                        os.chdir("/home/ryan/Desktop/pyTWTL/src")
                if not ts_policy:
                    running = False
                    break
                # Update policy match
                policy_match, key_list, policy_match_index = update_policy_match(
                    ts_policy)
                # Get agent priority based on lowest energy
                for key in key_list:
                    prev_states[key] = pa_control_policy_dict[key][-1]
                priority = get_priority(pa_nom_dict, pa_policy, prev_states,
                                        key_list, priority)
            else:
                running = False

    # Print run time statistics
    stopOnline = timeit.default_timer()
    print 'Online run time for safe algorithm: ', stopOnline - startOnline
    stopFull = timeit.default_timer()
    print 'Full run time for safe algorithm: ', stopFull - startFull

    # Plot energy statistics from the run
    plot_energy(agent_energy_dict)

    # Possibly just set the relaxation to the nominal + additional nodes added *** Change (10/28)
    for key in pa_nom_dict:
        tau_dict[key] = tau_dict_nom[key] + len(
            ts_control_policy_dict[key]) - len(ts_policy_dict_nom[key])

    # Write the nominal and final control policies to a file
    for key in pa_nom_dict:
        write_to_control_policy_file(ts_policy_dict_nom[key], pa_policy_dict_nom[key], \
                tau_dict_nom[key], dfa_dict[key],ts_dict[key],ets_dict[key],\
                ts_control_policy_dict[key], pa_control_policy_dict[key], tau_dict[key], key)
    # Write the CSV files for experiments
    for key in pa_nom_dict:
        write_to_csv(ts_dict[key], ts_control_policy_dict[key], key, time_wp)
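
A compact, self-contained sketch (hypothetical graph and attribute names, not the project's API) of two steps used in the synthesis above: blending the unit time cost and the normalized edge weight into a single new_weight through the convex combination with alpha, and computing a shortest path that avoids a set of constrained nodes by removing them from a working copy of the graph.

import networkx as nx

def blended_shortest_path(g, source, target, alpha, blocked=()):
    # Shortest path over alpha*time + (1 - alpha)*dist, skipping blocked nodes
    h = g.copy()
    h.remove_nodes_from(n for n in blocked if n not in (source, target))
    for u, v, d in h.edges(data=True):
        d['new_weight'] = alpha * d['time'] + (1.0 - alpha) * d['dist']
    return nx.dijkstra_path(h, source, target, weight='new_weight')

g = nx.Graph()
g.add_edge('a', 'b', time=1.0, dist=2.0)
g.add_edge('b', 'c', time=1.0, dist=2.0)
g.add_edge('a', 'd', time=3.0, dist=1.0)
g.add_edge('d', 'c', time=3.0, dist=1.0)
print(blended_shortest_path(g, 'a', 'c', alpha=0.5, blocked=['b']))  # ['a', 'd', 'c']
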
def prep_for_learning(ep_len, m, n, h, init_states, obstacles, pick_up_state,
                      delivery_state, rewards, rew_val, custom_flag,
                      custom_task):
    # Create the environment and get the TS #
    ts_start_time = timeit.default_timer()
    disc = 1
    TS, obs_mat, state_mat = create_ts(m, n, h)
    path = '../data/ts_' + str(m) + 'x' + str(n) + 'x' + str(h) + '_1Ag_1.txt'
    paths = [path]
    bases = {init_states[0]: 'Base1'}
    obs_mat = update_obs_mat(obs_mat, state_mat, m, obstacles, init_states[0])
    TS = update_adj_mat_3D(m, n, h, TS, obs_mat)
    create_input_file(TS, state_mat, obs_mat, paths[0], bases, disc, m, n, h,
                      0)
    ts_file = paths
    ts_dict = Ts(directed=True, multi=False)
    ts_dict.read_from_file(ts_file[0])
    ts = expand_duration_ts(ts_dict)
    ts_timecost = timeit.default_timer() - ts_start_time

    # Get the DFA #
    dfa_start_time = timeit.default_timer()
    pick_ups = pick_up_state[0][0] * n + pick_up_state[0][1]
    deliveries = delivery_state[0][0] * n + delivery_state[0][1]
    pick_up = str(pick_ups)  # Check later
    delivery = str(deliveries)
    tf = str((ep_len - 1) / 2)  # time bound
    if custom_flag == 1:
        phi = custom_task
    else:
        phi = '[H^1 r' + pick_up + ']^[0, ' + tf + '] * [H^1 r' + delivery + ']^[0,' + tf + ']'  # Construct the task according to pick-up/delivery
    _, dfa_inf, bdd = twtl.translate(
        phi, kind=DFAType.Infinity, norm=True
    )  # states and sim. time ex. phi = '([H^1 r47]^[0, 30] * [H^1 r31]^[0, 30])^[0, 30]'
    dfa_timecost = timeit.default_timer() - dfa_start_time  # DFAType.Normal for normal, DFAType.Infinity for relaxed

    # Get the PA #
    pa_start_time = timeit.default_timer()
    alpha = 1
    nom_weight_dict = {}
    weight_dict = {}
    pa_or = ts_times_fsa(ts, dfa_inf)  # Original pa
    edges_all = nx.get_edge_attributes(ts_dict.g, 'edge_weight')
    max_edge = max(edges_all, key=edges_all.get)
    norm_factor = edges_all[max_edge]
    for pa_edge in pa_or.g.edges():
        edge = (pa_edge[0][0], pa_edge[1][0], 0)
        nom_weight_dict[pa_edge] = edges_all[edge] / norm_factor
    nx.set_edge_attributes(pa_or.g, 'edge_weight', nom_weight_dict)
    nx.set_edge_attributes(pa_or.g, 'weight', 1)
    pa = copy.deepcopy(pa_or)  # copy the pa
    time_weight = nx.get_edge_attributes(pa.g, 'weight')
    edge_weight = nx.get_edge_attributes(pa.g, 'edge_weight')
    for pa_edge in pa.g.edges():
        weight_dict[pa_edge] = alpha * time_weight[pa_edge] + (
            1 - alpha) * edge_weight[pa_edge]
    nx.set_edge_attributes(pa.g, 'new_weight', weight_dict)
    pa_timecost = timeit.default_timer() - pa_start_time

    # Compute the energy of the states #
    energy_time = timeit.default_timer()
    compute_energy(pa)
    energy_dict = nx.get_node_attributes(pa.g, 'energy')
    energy_pa = []
    # Collect each PA node's energy value into a flat list
    for ind in range(len(pa.g.nodes())):
        energy_pa.append(pa.g.nodes([0])[ind][1].values()[0])

    # projection of pa on ts #
    init_state = [init_states[0][0] * n + init_states[0][1]]
    pa2ts = []
    for i in range(len(pa.g.nodes())):
        if pa.g.nodes()[i][0] != 'Base1':
            pa2ts.append(int(pa.g.nodes()[i][0].replace("r", "")))
        else:
            pa2ts.append(init_state[0])
            i_s = i  # Agent's initial location in pa
    energy_timecost = timeit.default_timer() - energy_time

    # TS adjacency matrix and source-target
    TS_adj = TS
    TS_s = []
    TS_t = []
    for i in range(len(TS_adj)):
        for j in range(len(TS_adj)):
            if TS_adj[i, j] != 0:
                TS_s.append(i)
                TS_t.append(j)

    # pa adjacency matrix and source-target
    pa_adj_st = nx.adjacency_matrix(pa.g)
    pa_adj = pa_adj_st.todense()
    pa_s = []  # source node
    pa_t = []  # target node
    for i in range(len(pa_adj)):
        for j in range(len(pa_adj)):
            if pa_adj[i, j] == 1:
                pa_s.append(i)
                pa_t.append(j)

    # PA rewards matrix
    rewards_ts = np.zeros(m * n)  #-0.25#
    rewards_pa = np.zeros(len(pa2ts))
    rewards_ts_indexes = []
    for i in range(len(rewards)):
        rewards_ts_indexes.append(
            rewards[i][0] * n + rewards[i][1]
        )  # rewards_ts_indexes[i] = rewards[i][0] * n + rewards[i][1]
        rewards_ts[rewards_ts_indexes[i]] = rew_val

    for i in range(len(rewards_pa)):
        rewards_pa[i] = rewards_ts[pa2ts[i]]

    # # Display some important info
    print('##### PICK-UP and DELIVERY MISSION #####' + "\n")
    print('Initial Location  : ' + str(init_states[0]) + ' <---> Region ' +
          str(init_state[0]))
    print('Pick-up Location  : ' + str(pick_up_state[0]) + ' <---> Region ' +
          pick_up)
    print('Delivery Location : ' + str(delivery_state[0]) + ' <---> Region ' +
          delivery)
    print('Reward Locations  : ' + str(rewards) + ' <---> Regions ' +
          str(rewards_ts_indexes) + "\n")
    print('State Matrix : ')
    print(state_mat)
    print("\n")
    print('Mission Duration  : ' + str(ep_len) + ' time steps')
    print('TWTL Task : ' + phi + "\n")
    print('Computational Costs : TS created in ' + str(ts_timecost) +
          ' seconds')
    # print('			TS created in ' + str(ts_timecost) + ' seconds')
    print('		       DFA created in ' + str(dfa_timecost) + ' seconds')
    print('		       PA created in ' + str(pa_timecost) + ' seconds')
    print('		       Energy of PA states calculated in ' +
          str(energy_timecost) + ' seconds')

    return (i_s, pa, pa_s, pa_t, pa2ts, energy_pa, rewards_pa, pick_up,
            delivery, pick_ups, deliveries, pa.g.nodes())
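
For reference, a brief sketch (illustrative values only) of the adjacency-to-edge-list conversion performed above for the TS and the PA: numpy's nonzero yields the same source/target index lists as the nested loops in a single call.

import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
src, tgt = np.nonzero(adj)  # row indices are sources, column indices are targets
print([(int(i), int(j)) for i, j in zip(src, tgt)])  # [(0, 1), (1, 0), (1, 2), (2, 1)]
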
Example #23
0
def main():

    Rho = namedtuple('Rho', ['lower', 'upper'])
    rhos = [Rho(lower=0.98, upper=1.04), Rho(lower=0.98, upper=1.04)]

#     # Case Study 1
#     with Timer('IJRR 2013 Case-Study 1'):
#         r1 = Ts.load('./robot_1.yaml')
#         r2 = Ts.load('./robot_2.yaml')
#         ts_tuple = (r1, r2)
#         formula = ('[]<>gather && [](r1gather -> X(!r1gather U r1upload)) '
#                   '&& [](r2gather -> X(!r2gather U r2upload))')
#         opt_prop = set(['gather'])
#         logger.info('Formula: %s', formula)
#         logger.info('opt_prop: %s', opt_prop)
#         prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
#                     lomap.multi_agent_optimal_run(ts_tuple, formula, opt_prop)
#         logger.info('Cost: %d', suffix_cycle_cost)
#         logger.info('Prefix length: %d', prefix_length)
#         # Find the controls that will produce this run
#         control_prefixes = []
#         control_suffix_cycles = []
#         for i in range(0, len(ts_tuple)):
#             ts = ts_tuple[i]
#             control_prefixes.append(ts.controls_from_run(prefixes[i]))
#             control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
#             logger.info('%s run prefix: %s', ts.name, prefixes[i])
#             logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
#             logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
#             logger.info('%s control suffix cycle: %s', ts.name,
#                                                     control_suffix_cycles[i])
#
#     logger.info('<><><> <><><> <><><>')

    # Case Study 2
    with Timer('IJRR 2013 Case-Study 2'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather && r2gather)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather', 'r2gather'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 3
    with Timer('IJRR 2013 Case-Study 3'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather && r2gather)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload)) '
                   '&& [](!(r1gather1 && r2gather1) && !(r1gather2 && r2gather2)'
                   '&& !(r1gather3 && r2gather3) && !(r1gather4 && r2gather4))')
        opt_prop = set(['r1gather', 'r2gather'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 4
    with Timer('IJRR 2013 Case-Study 4'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather4 && r2gather2)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather4', 'r2gather2'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
                      lomap.multi_agent_optimal_run(ts_tuple, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])

    logger.info('<><><> <><><> <><><>')

    # Case Study 4 w/ sync
    with Timer('IJRR 2013 Case-Study 4 (w/ sync)'):
        r1 = Ts.load('./robot_1.yaml')
        r2 = Ts.load('./robot_2.yaml')
        ts_tuple = (r1, r2)
        formula = ('[]<>gather && [](gather->(r1gather4 && r2gather2)) '
                   '&& [](r1gather -> X(!r1gather U r1upload)) '
                   '&& [](r2gather -> X(!r2gather U r2upload))')
        opt_prop = set(['r1gather4', 'r2gather2'])
        logger.info('Formula: %s', formula)
        logger.info('opt_prop: %s', opt_prop)
        prefix_length, prefixes, suffix_cycle_cost, suffix_cycles = \
         lomap.robust_multi_agent_optimal_run(ts_tuple, rhos, formula, opt_prop)
        logger.info('Cost: %d', suffix_cycle_cost)
        logger.info('Prefix length: %d', prefix_length)
        # Find the controls that will produce this run
        control_prefixes = []
        control_suffix_cycles = []
        for i in range(0, len(ts_tuple)):
            ts = ts_tuple[i]
            control_prefixes.append(ts.controls_from_run(prefixes[i]))
            control_suffix_cycles.append(ts.controls_from_run(suffix_cycles[i]))
            logger.info('%s run prefix: %s', ts.name, prefixes[i])
            logger.info('%s control prefix: %s', ts.name, control_prefixes[i])
            logger.info('%s suffix cycle: %s', ts.name, suffix_cycles[i])
            logger.info('%s control suffix cycle: %s', ts.name,
                                                    control_suffix_cycles[i])
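
# Editor's sketch (not in the original listing): a conventional entry point,
# assuming the module is meant to be run directly and `logger` is a standard
# logging-module logger configured elsewhere; adjust the level as needed.
if __name__ == '__main__':
    import logging
    logging.basicConfig(level=logging.INFO)
    main()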