Example #1
def rmpyl_observation_risk():
    prog = RMPyL()
    prog *= prog.observe(
        {
            'name': 'travel',
            'ctype': 'probabilistic',
            'domain': ['Short', 'Long'],
            'probability': [0.7, 0.3]
        },
        Episode(action='(cab-ride-long)',
                duration={
                    'ctype': 'uncontrollable_probabilistic',
                    'distribution': {
                        'type': 'uniform',
                        'lb': 5.0,
                        'ub': 11.0
                    }
                }),
        Episode(action='(cab-ride-short)',
                duration={
                    'ctype': 'uncontrollable_probabilistic',
                    'distribution': {
                        'type': 'uniform',
                        'lb': 5.0,
                        'ub': 10.0
                    }
                }))
    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=10.0)
    return prog
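A usage sketch (not from the original source): the program built above can be inspected and exported to a pTPN via the first_event/last_event properties and the to_ptpn method that appear in later examples; the output filename is a placeholder.

prog = rmpyl_observation_risk()
print(prog.first_event)   # first event of the program
print(prog.last_event)    # last event of the program
prog.to_ptpn(filename='observation_risk.tpn')  # placeholder filename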
Example #2
def make_fly_scan(hello,uav):
    """Subplan where one chooses which UAV should fly and which should scan."""
    p_fly_scan = RMPyL()
    p_fly_scan *= p_fly_scan.decide({'name':'Choose-FLY','domain':['H','U'],'utility':[5,7]},
                                    hello.fly()+uav.scan(),
                                    hello.scan()+uav.fly())
    return p_fly_scan
Example #3
def rmpyl_low_risk():
    prog = RMPyL()
    prog *= Episode(action='(cab-ride)',duration={'ctype':'uncontrollable_probabilistic',
                                                  'distribution':{'type':'uniform',
                                                                  'lb':5.0,'ub':10.0}})
    prog.add_overall_temporal_constraint(ctype='controllable',lb=5.0,ub=9.9)
    return prog
Example #4
def rmpyl_simple_observe(hello,uav):
    """Simple RMPyL example using an uncontrollable choice."""
    prog = RMPyL()
    prog *= prog.observe({'name':'UAV-crash','ctype':'probabilistic',
                          'domain':['OK','FAULT'],'probability':[0.99,0.01]},
                        uav.scan(),
                        uav.crash())
    return prog
Example #5
def rmpyl_simple_nested_choice(hello,uav):
    """Simple RMPyL example with nested choice."""
    prog = RMPyL()
    prog *= prog.decide({'name':'UAV-choice1','domain':['H','U'],'utility':[5,7]},
                         prog.decide({'name':'UAV-choice2','domain':['H','U'],'utility':[5,7]},
                            hello.fly(),uav.fly()),
                         uav.scan())
    return prog
Example #6
def rmpyl_simple_decide_looping(hello,uav):
    """Simple RMPyL example where we choose to fly loops with different UAVs."""
    prog = RMPyL()
    prog *= prog.decide({'name':'Choose-loop','domain':['H','U'],'utility':[1,1]},
                        prog.loop(episode_func=hello.fly,repetitions=3,
                                  run_utility=1,stop_utility=0),
                        prog.loop(episode_func=uav.scan,repetitions=2,
                                  run_utility=2,stop_utility=0))
    return prog
Example #7
def to_rmpyl(tcs):
    """
    Converts the temporal constraints into an RMPyL program, which can then
    be exported to a TPN.
    """
    prog = RMPyL()
    for tc in tcs:
        prog.add_temporal_constraint(tc)
    return prog
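A hedged usage sketch: to_rmpyl expects an iterable of TemporalConstraint objects; the events and bounds below are illustrative only.

e1 = Event(name='e1')  # illustrative events
e2 = Event(name='e2')
tcs = [TemporalConstraint(start=e1, end=e2, ctype='controllable', lb=0.0, ub=10.0)]
prog = to_rmpyl(tcs)  # program containing just these temporal constraints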
Example #8
def rmpyl_infeasible():
    prog = RMPyL()
    prog *= Episode(action='(cab-ride)',
                    duration={
                        'ctype': 'controllable',
                        'lb': 10,
                        'ub': 20
                    })
    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=5.0)
    return prog
Example #9
def rmpyl_original_verbose(hello,uav):
    """
    Implementation of the original RMPL using a more verbose syntax and adding
    a chance constraint.

    ##### Original RMPL

    class UAV {
        value on;
        value off;

        primitive method fly() [3,10];
        primitive method scan() [1,10];
    }

    class Main {
      UAV helo;
      UAV uav;

      method run () {
        [0, 18] sequence {
            parallel {
                sequence {
                    helo.scan();
                    helo.fly();
                }
                sequence {
                    uav.fly();
                    uav.scan();
                }
            }
            choose {
                with reward: 5 {helo.fly();}
                with reward: 7 {uav.fly();}
            }
        }
      }
    }
    """
    prog = RMPyL()
    prog.plan = prog.sequence(
                prog.parallel(
                    prog.sequence(
                        hello.scan(),
                        hello.fly()),
                    prog.sequence(
                        uav.fly(),
                        uav.scan())),
                prog.decide({'name':'UAV-choice','domain':['Hello','UAV'],'utility':[5,7]},
                            hello.fly(),
                            uav.fly()))
    overall_tc = prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=18.0)
    cc_time = ChanceConstraint(constraint_scope=[overall_tc],risk=0.1)
    prog.add_chance_constraint(cc_time)
    return prog
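A possible call site, assuming the UAV class used by the later rmpyl_uav examples (constructed as UAV('hello') and providing fly()/scan() methods).

hello = UAV('hello')  # UAV construction as in the rmpyl_uav examples below
uav = UAV('uav')
prog = rmpyl_original_verbose(hello, uav)  # program with a 10% risk bound on the [0, 18] deadline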
Example #10
def rmpyl_single_episode(hello,uav):
    """Extremely simple plan composed of a single primitive episode."""
    prog = RMPyL()
    prog.plan = uav.scan()
    healthy = StateVariable(name='Healthy',
                           domain_dict={'type':'finite-discrete',
                                        'domain':['True','False','Maybe']})

    assig_sc = AssignmentStateConstraint(scope=[healthy],values=['True'])
    prog.add_overall_state_constraint(assig_sc)
    return prog
Example #11
def rmpyl_low_risk():
    prog = RMPyL()
    prog *= Episode(action='(cab-ride)',
                    duration={
                        'ctype': 'uncontrollable_probabilistic',
                        'distribution': {
                            'type': 'uniform',
                            'lb': 5.0,
                            'ub': 10.0
                        }
                    })
    prog.add_overall_temporal_constraint(ctype='controllable', lb=5.0, ub=9.9)
    return prog
Example #12
def rmpyl_parallel_and_choice_user_defined_tcs(hello,uav):
    """Simple RMPyL example with parallel execution of actions on different choice
    branches with user-defined constraints."""
    prog = RMPyL()

    #Choice using the previous example as a subroutine to generate partial progs
    prog *= prog.decide({'name':'Choose-branch','domain':['1','2'],'utility':[1,2]},
                        rmpyl_parallel_user_defined_tcs(hello,uav),
                        rmpyl_parallel_user_defined_tcs(hello,uav))

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=70.0)

    return prog
Example #13
def rmpyl_parallel_choices(hello,uav):
    """Simple RMPyL example with parallel execution of choices."""
    uav2 = UAV(name='uav2')

    prog = RMPyL()
    prog *= prog.parallel(
                prog.observe({'name':'HELLO-OBS','domain':['FLY','SCAN','CRASH'],
                          'ctype':'probabilistic','probability':[0.50,0.49,0.01]},
                           hello.fly(),hello.scan(),hello.crash()),
                prog.observe({'name':'UAV-OBS','domain':['FLY','SCAN','CRASH'],
                          'ctype':'probabilistic','probability':[0.50,0.49,0.01]},
                           uav.fly(),uav.scan(),uav.crash()))*uav2.fly()
    return prog
Example #14
def rmpyl_observation_risk():
    prog = RMPyL()
    prog *= prog.observe(
                {'name':'travel','ctype':'probabilistic',
                 'domain':['Short','Long'],'probability':[0.7,0.3]},
                 Episode(action='(cab-ride-long)',duration={'ctype':'uncontrollable_probabilistic',
                                                             'distribution':{'type':'uniform',
                                                                             'lb':5.0,'ub':11.0}}),
                 Episode(action='(cab-ride-short)',duration={'ctype':'uncontrollable_probabilistic',
                                                            'distribution':{'type':'uniform',
                                                                            'lb':5.0,'ub':10.0}}))
    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=10.0)
    return prog
Example #15
def rmpyl_icaps14():
    """
    Example from (Santana & Williams, ICAPS14).
    """
    prog = RMPyL()
    prog *= prog.decide(
                {'name':'transport-choice','domain':['Bike','Car','Stay'],
                 'utility':[100,70,0]},
                 prog.observe(
                    {'name':'slip','domain':[True,False],
                     'ctype':'probabilistic','probability':[0.051,1.0-0.051]},
                     prog.sequence(Episode(action='(ride-bike)',
                                           duration={'ctype':'controllable','lb':15,'ub':25}),
                                   Episode(action='(change)',
                                           duration={'ctype':'controllable','lb':20,'ub':30})),
                      Episode(action='(ride-bike)',duration={'ctype':'controllable','lb':15,'ub':25})),
                 prog.observe(
                    {'name':'accident','domain':[True,False],
                     'ctype':'probabilistic','probability':[0.013,1.0-0.013]},
                     prog.sequence(Episode(action='(tow-vehicle)',
                                          duration={'ctype':'controllable','lb':30,'ub':90}),
                                  Episode(action='(cab-ride)',
                                          duration={'ctype':'controllable','lb':10,'ub':20})),
                     Episode(action='(drive)',duration={'ctype':'controllable','lb':10,'ub':20})),
                 Episode(action='(stay)'))

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=30.0)
    return prog
Example #16
def nominal_case(blocks):
    """
    Nominal case, where the robot observes what the human has already completed,
    and acts accordingly
    """
    agent='Baxter'
    manip='BaxterRight'

    prog = RMPyL(name='run()')

    prog *= prog.sequence(say('Should I start?'),
                          prog.observe({'name':'observe-human-%d'%(len(blocks)),
                                        'ctype':'uncontrollable',
                                        'domain':['YES','NO']},
                                        observe_and_act(prog,blocks,manip,agent),
                                        say('All done!')))
    return prog
Example #17
def rmpyl_parallel_user_defined_tcs(hello,uav):
    """Simple RMPyL example with parallel execution of actions with user-defined
    constraints."""
    prog = RMPyL()
    hello_flight = hello.fly()
    uav_flight = uav.fly()
    uav_scan = uav.scan()
    prog *= hello_flight+(uav_scan*uav_flight)

    tc1 = TemporalConstraint(start=uav_scan.end,end=hello_flight.start,
                             ctype='controllable',lb=2.0,ub=3.0)

    tc2 = TemporalConstraint(start=hello_flight.end,end=uav_flight.start,
                             ctype='controllable',lb=3.0,ub=4.0)
    for tc in [tc1,tc2]:
        prog.add_temporal_constraint(tc)

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=50.0)

    return prog
Example #18
def rmpyl_choice_risk():
    prog = RMPyL()
    prog *= prog.decide(
                {'name':'transport-choice','domain':['Bike','Car','Stay'],
                 'utility':[100,70,0]},
                 prog.observe(
                             {'name':'travel','ctype':'probabilistic',
                              'domain':['Short','Long'],'probability':[0.7,0.3]},
                              Episode(action='(cab-ride-long)',duration={'ctype':'uncontrollable_probabilistic',
                                                                          'distribution':{'type':'uniform',
                                                                                          'lb':5.0,'ub':11.0}}),
                              Episode(action='(cab-ride-short)',duration={'ctype':'uncontrollable_probabilistic',
                                                                         'distribution':{'type':'uniform',
                                                                                         'lb':5.0,'ub':10.0}})),
                 Episode(action='(drive-car)',
                                       duration={'ctype':'controllable','lb':6.0,'ub':8.0}),
                 Episode(action='(stay)'))

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=10.0)
    return prog
Example #19
def nominal_case(blocks,time_window=-1,dur_dict=None):
    """
    Nominal case, where the robot observes what the human has already completed,
    and acts accordingly
    """
    agent='Baxter'
    manip='BaxterRight'

    prog = RMPyL(name='run()')

    prog *= prog.sequence(say('Should I start?'),
                          prog.observe({'name':'ask-human',
                                        'ctype':'probabilistic',
                                        'domain':['YES','NO'],
                                        'probability':[0.9,0.1]},
                                        observe_decide_act(prog,blocks,manip,agent,dur_dict),
                                        say('All done!')))
    if time_window>0.0:
        prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=time_window)

    return prog
Example #20
def nominal_case(blocks, time_window=-1, dur_dict=None):
    """
    Nominal case, where the robot observes what the human has already completed,
    and acts accordingly
    """
    agent = 'Baxter'
    manip = 'BaxterRight'

    prog = RMPyL(name='run()')

    prog *= prog.sequence(
        say('Should I start?'),
        prog.observe(
            {
                'name': 'ask-human',
                'ctype': 'probabilistic',
                'domain': ['YES', 'NO'],
                'probability': [0.9, 0.1]
            }, observe_decide_act(prog, blocks, manip, agent, dur_dict),
            say('All done!')))
    if time_window > 0.0:
        prog.add_overall_temporal_constraint(ctype='controllable',
                                             lb=0.0,
                                             ub=time_window)

    return prog
Example #21
def _fake_plan_path(sites,start_site,goal_site,risk,velocities,duration_type,agent,**kwargs):
    """
    Fakes a chance-constrained path from a start location to a goal location.
    Specific parameters are given as keyword arguments.
    """
    start = sites[start_site]['coords']
    goal = sites[goal_site]['coords']

    line_dist = la.norm(goal-start)
    dist = line_dist*1.2 if risk<0.005 else line_dist*1.1
    lb_duration = dist/velocities['max']
    ub_duration = dist/velocities['avg']

    if duration_type=='uncontrollable_bounded':
        duration_dict={'ctype':'uncontrollable_bounded',
                       'lb':lb_duration,'ub':ub_duration}
    elif duration_type=='uniform':
        duration_dict={'ctype':'uncontrollable_probabilistic',
                       'distribution':{'type':'uniform','lb':lb_duration,'ub':ub_duration}}
    elif duration_type=='gaussian':
        duration_dict={'ctype':'uncontrollable_probabilistic',
                       'distribution':{'type':'gaussian',
                                       'mean':(lb_duration+ub_duration)/2.0,
                                       'variance':((ub_duration-lb_duration)**2)/36.0}}
    elif duration_type=='no_constraint':
        duration_dict={'ctype':'controllable','lb':0.0,'ub':float('inf')}
    else:
        raise ValueError('Duration type %s currently not supported in Fake Planner.'%duration_type)

    path_episode = Episode(duration=duration_dict,
                           action='(go-from-to %s %s %s)'%(agent,start_site,goal_site),
                           distance=dist,**kwargs)
    path_episode.properties['distance']=dist
    path_episode.properties['start_coords']=start
    path_episode.properties['goal_coords']=goal
    prog_path = RMPyL(); prog_path.plan = path_episode

    return prog_path
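A minimal call sketch (all site names, coordinates, and velocity values are illustrative): the function only reads sites[name]['coords'] and the 'max'/'avg' velocity entries.

import numpy as np

sites = {'base':   {'coords': np.array([0.0, 0.0])},
         'crater': {'coords': np.array([3.0, 4.0])}}
velocities = {'max': 1.0, 'avg': 0.5}
prog = _fake_plan_path(sites, 'base', 'crater', risk=0.001,
                       velocities=velocities, duration_type='uniform',
                       agent='rover1')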
Example #22
def pysat_planner(dom_file,prob_file,max_steps,duration_func):
    """
    Uses PySAT as the planner.
    """
    py_sat = PySAT(dom_file,prob_file,precompute_steps=max_steps,remove_static=True,
                   write_dimacs=False,verbose=True)

    domain,problem,task = model_parser(dom_file,prob_file,remove_static=True)

    print('\n##### Determining optimal plan length!\n')
    start = time.time()
    min_steps = len(task.goals-task.initial_state)
    plans = py_sat.plan(task.initial_state,task.goals,time_steps=max_steps,
                        find_shortest=True,min_steps=min_steps)
    elapsed = time.time()-start
    print('\n##### All solving took %.4f s'%(elapsed))

    if len(plans)>0:
        plan = plans[0]
        print('\n##### Plan found!\n')
        for t,action in enumerate(plan):
            print('%d: %s'%(t,action))

        prog = RMPyL(name='run()')
        if duration_func is not None:
            prog.plan = prog.sequence(*[Episode(start=Event(name='start-of-'+op),
                                                end=Event(name='end-of-'+op),
                                                action=op,
                                                duration=duration_func(op)) for op in plan])
        else:
            prog.plan = prog.sequence(*[Episode(start=Event(name='start-of-'+op),
                                                end=Event(name='end-of-'+op),
                                                action=op) for op in plan])
    else:
        prog = None

    return {'policy':None,'explicit':None,'performance':None,'rmpyl':prog}
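A hedged sketch of a duration_func argument: the planner passes each grounded operator string to it and expects a duration dictionary in the format used by Episode throughout these examples; the bounds and file names are placeholders.

def constant_duration(op):
    # every operator gets the same controllable duration (illustrative bounds)
    return {'ctype': 'controllable', 'lb': 1.0, 'ub': 5.0}

# result = pysat_planner('domain.pddl', 'problem.pddl', max_steps=20,
#                        duration_func=constant_duration)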
Example #23
def rmpyl_inconsistent_user_defined_tcs(hello,uav):
    """Simple RMPyL example with parallel execution of actions on different choice
    branches with user-defined constraints."""
    try:
        prog = RMPyL()
        hello_flights = [hello.fly() for i in range(2)] #two different hello flights
        uav_flights = [uav.fly() for i in range(2)] #two different uav flights

        #Choice using the previous example as a subroutine to generate partial plans
        prog *= prog.decide({'name':'Choose-branch','domain':['1','2'],'utility':[1,2]},
                            hello_flights[0]*uav_flights[0],
                            uav_flights[1]*hello_flights[1])

        tc = TemporalConstraint(start=hello_flights[0].end,end=hello_flights[1].start,
                                 ctype='controllable',lb=2.0,ub=3.0)

        #This is a constraint between disjoint plan branches. Therefore, it MUST
        #cause an error of empty constraint support
        prog.add_temporal_constraint(tc)
    except InconsistentSupportError:
        print('Empty support error correctly caught!')
        prog=None

    return prog
Example #24
def rmpyl_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    # prog *= hello.fly()
    prog.plan = prog.sequence(
        hello.scan(), hello.fly(), uav.fly(), uav.scan(),
        prog.decide(
            {
                'name': 'UAV-choice',
                'domain': ['Hello', 'UAV'],
                'utility': [5, 7]
            }, hello.fly(), uav.fly()))

    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=18.0)
    return prog
Example #25
def policy_to_rmpyl(G,
                    policy,
                    name='run()',
                    constraint_fields=['constraints'],
                    global_start=None,
                    global_end=None):
    """
    Returns an RMPyL program corresponding to the RAO* policy, which can
    be subsequently converted into a pTPN.
    """
    prog = RMPyL(name=name)
    constraints = set()
    prog *= _recursive_convert(G, prog, G.root, policy, constraints,
                               constraint_fields, global_end)
    if global_start is not None:
        constraints.add(
            TemporalConstraint(start=global_start,
                               end=prog.first_event,
                               ctype='controllable',
                               lb=0.0,
                               ub=float('inf')))
    _add_constraints(prog, constraints)
    return prog
Example #26
def rmpyl_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    # prog *= hello.fly()
    prog.plan = prog.sequence(
                    hello.scan(),
                    hello.fly(),
                    uav.fly(),
                    uav.scan(),
                    prog.decide({'name':'UAV-choice','domain':['Hello','UAV'],
                                 'utility':[5,7]},
                                 hello.fly(),
                                 uav.fly()))

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=18.0)
    return prog
Example #27
def rmpyl_choice_risk():
    prog = RMPyL()
    prog *= prog.decide(
        {
            'name': 'transport-choice',
            'domain': ['Bike', 'Car', 'Stay'],
            'utility': [100, 70, 0]
        },
        prog.observe(
            {
                'name': 'travel',
                'ctype': 'probabilistic',
                'domain': ['Short', 'Long'],
                'probability': [0.7, 0.3]
            },
            Episode(action='(cab-ride-long)',
                    duration={
                        'ctype': 'uncontrollable_probabilistic',
                        'distribution': {
                            'type': 'uniform',
                            'lb': 5.0,
                            'ub': 11.0
                        }
                    }),
            Episode(action='(cab-ride-short)',
                    duration={
                        'ctype': 'uncontrollable_probabilistic',
                        'distribution': {
                            'type': 'uniform',
                            'lb': 5.0,
                            'ub': 10.0
                        }
                    })),
        Episode(action='(drive-car)',
                duration={
                    'ctype': 'controllable',
                    'lb': 6.0,
                    'ub': 8.0
                }), Episode(action='(stay)'))

    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=10.0)
    return prog
Example #28
def rmpyl_nested_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    prog.plan = prog.sequence(
                    hello.scan(),
                    uav.scan(),
                    prog.decide(
                        {'name':'UAV-choice','domain':['Hello','UAV'],
                        'utility':[7,5]},
                        prog.sequence(
                            hello.fly(),
                            prog.observe(
                                {'name':'hello-success','domain':['Success','Failure'],
                                 'ctype':'probabilistic','probability':[0.8,0.2]},
                                prog.decide(
                                    {'name':'hello-assert-success',
                                     'domain':['Success'],
                                     'utility':[10]},
                                    hello.stop()),
                                prog.decide(
                                    {'name':'hello-assert-failure',
                                     'domain':['Failure'],
                                     'utility':[0]},
                                    hello.stop()))),
                         prog.sequence(
                            uav.fly(),
                            prog.observe(
                                {'name':'uav-success','domain':['Success','Failure'],
                                 'ctype':'probabilistic','probability':[0.95,0.05]},
                                prog.decide(
                                    {'name':'uav-assert-success',
                                     'domain':['Success'],
                                     'utility':[10]},
                                    uav.stop()),
                                prog.decide(
                                    {'name':'uav-assert-failure',
                                     'domain':['Failure'],
                                     'utility':[0]},
                                    uav.stop())))))
    return prog
Example #29
    py_sat = PySAT(dom_file,prob_file,precompute_steps=40,remove_static=True,verbose=True)
    domain,problem,task = model_parser(dom_file,prob_file,remove_static=True)

    start = time.time()
    # sat_plans = py_sat.plan(task.initial_state,task.goals,time_steps=30,find_shortest=True) #find optimal plan size
    sat_plans = py_sat.plan(task.initial_state,task.goals,time_steps=18,find_shortest=True) # find sub-optimal plan
    # print("---------plan: %d" % len(sat_plans))
    if len(sat_plans)>0:
        plan = sat_plans[0] # get the first plan (default returns a list with one plan)
        for t,op in enumerate(plan):
            print('%d: %s'%(t,op))

        elapsed = time.time()-start
        print('\n##### All solving took %.4f s'%(elapsed))

        prog = RMPyL(name='run()')
        pddl_episodes = [Episode(id=make_episode_id(t,op),
                                 start=Event(name='start-of-%d-%s'%(t,op)),
                                 end=Event(name='end-of-%d-%s'%(t,op)),
                                 action=op,
                                 duration=rss_duration_model_func(op)) for t,op in enumerate(plan)]
        prog.plan = prog.sequence(*pddl_episodes)
        # prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=2000.0)
        #Adds temporal window to the plan
        for t,op in enumerate(plan):
            bounds, tc_type = rss_time_window_model_func(op)
            for tc in time_window_constraints(tc_type,bounds,prog.first_event,prog.episode_by_id(make_episode_id(t,op))):
                prog.add_temporal_constraint(tc)

        #Dummy episodes that enable transmissions
        activation_episodes=[]
Example #30
def rmpyl_parallel_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    prog.plan = prog.parallel(
        prog.sequence(
            prog.decide(
                {
                    'name': 'hello-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, hello.fly(), hello.scan()),
            prog.decide(
                {
                    'name': 'hello-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, hello.fly(), hello.scan()),
            prog.decide(
                {
                    'name': 'hello-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, hello.fly(), hello.scan())),
        prog.sequence(
            prog.decide(
                {
                    'name': 'uav-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, uav.fly(), uav.scan()),
            prog.decide(
                {
                    'name': 'uav-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, uav.fly(), uav.scan()),
            prog.decide(
                {
                    'name': 'uav-action',
                    'domain': ['Fly', 'Scan'],
                    'utility': [0, 1]
                }, uav.fly(), uav.scan())))
    return prog
Example #31
    else:
        return prog.decide({'name':'loop-choice-'+str(repetitions),
                            'domain':['RUN','HALT'],
                            'utility':[loop_utility,stop_utility]},
                    prog.sequence(
                        action_func(),
                        prog.observe({'name':'observe-success-'+str(repetitions),
                                     'ctype':'uncontrollable','domain':['SUCCESS','FAIL']},
                            halt_func(),
                            try_try_again(prog,action_func,
                                          halt_func,loop_utility,stop_utility,
                                          repetitions-1))),
                    halt_func())


prog = RMPyL()
rob = Robot(name='ResilientRobot')

repetitions = int(sys.argv[1]) if len(sys.argv)==2 else 3

prog*= try_try_again(prog,rob.do_action,rob.stop,loop_utility=1,
                     stop_utility=0,repetitions=repetitions)
prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=10.0)


print('\n***** Start event\n')
print(prog.first_event)

print('\n***** Last event\n')
print(prog.last_event)
Example #32
def _fake_plan_path(sites, start_site, goal_site, risk, velocities,
                    duration_type, agent, **kwargs):
    """
    Fakes a chance-constrained path from a start location to a goal location.
    Specific parameters are given as keyword arguments.
    """
    start = sites[start_site]['coords']
    goal = sites[goal_site]['coords']

    line_dist = la.norm(goal - start)
    dist = line_dist * 1.2 if risk < 0.005 else line_dist * 1.1
    lb_duration = dist / velocities['max']
    ub_duration = dist / velocities['avg']

    if duration_type == 'uncontrollable_bounded':
        duration_dict = {
            'ctype': 'uncontrollable_bounded',
            'lb': lb_duration,
            'ub': ub_duration
        }
    elif duration_type == 'uniform':
        duration_dict = {
            'ctype': 'uncontrollable_probabilistic',
            'distribution': {
                'type': 'uniform',
                'lb': lb_duration,
                'ub': ub_duration
            }
        }
    elif duration_type == 'gaussian':
        duration_dict = {
            'ctype': 'uncontrollable_probabilistic',
            'distribution': {
                'type': 'gaussian',
                'mean': (lb_duration + ub_duration) / 2.0,
                'variance': ((ub_duration - lb_duration)**2) / 36.0
            }
        }
    elif duration_type == 'no_constraint':
        duration_dict = {
            'ctype': 'controllable',
            'lb': 0.0,
            'ub': float('inf')
        }
    else:
        raise ValueError(
            'Duration type %s currently not supported in Fake Planner.' %
            duration_type)

    path_episode = Episode(duration=duration_dict,
                           action='(go-from-to %s %s %s)' %
                           (agent, start_site, goal_site),
                           distance=dist,
                           **kwargs)
    path_episode.properties['distance'] = dist
    path_episode.properties['start_coords'] = start
    path_episode.properties['goal_coords'] = goal
    prog_path = RMPyL()
    prog_path.plan = path_episode

    return prog_path
Example #33
                  propagate_risk=True,
                  halt_on_violation=False,
                  verbose=1)

#Searches for the optimal policy
policy, explicit, performance = planner.search(b0)

#Converts policy to graphical SVG format
dot_policy = policy_to_dot(explicit, policy)
dot_policy.write('flightgear_policy.svg', format='svg')

#Converts optimal exploration policy into an RMPyL program
exploration_policy = policy_to_rmpyl(explicit, policy)

#The flight policy has the additional actions of taking off and landing.
flight_policy = RMPyL(name='run()')
flight_policy *= flight_policy.sequence(Episode(action='(takeoff plane)'),
                                        exploration_policy,
                                        Episode(action='(land plane)'))

#Eliminates probabilistic choices from the policy, since Pike (in fact, the
#Lisp tpn package) cannot properly handle them.
for obs in flight_policy.observations:
    if obs.type == 'probabilistic':
        obs.type = 'uncontrollable'
        del obs.properties['probability']

#Converts the program to a TPN
flight_policy.to_ptpn(filename='flightgear_rmpyl.tpn')

# Writes control program to pickle file
Example #34
def rmpyl_icaps14():
    """
    Example from (Santana & Williams, ICAPS14).
    """
    prog = RMPyL()
    prog *= prog.decide(
        {
            'name': 'transport-choice',
            'domain': ['Bike', 'Car', 'Stay'],
            'utility': [100, 70, 0]
        },
        prog.observe(
            {
                'name': 'slip',
                'domain': [True, False],
                'ctype': 'probabilistic',
                'probability': [0.051, 1.0 - 0.051]
            },
            prog.sequence(
                Episode(action='(ride-bike)',
                        duration={
                            'ctype': 'controllable',
                            'lb': 15,
                            'ub': 25
                        }),
                Episode(action='(change)',
                        duration={
                            'ctype': 'controllable',
                            'lb': 20,
                            'ub': 30
                        })),
            Episode(action='(ride-bike)',
                    duration={
                        'ctype': 'controllable',
                        'lb': 15,
                        'ub': 25
                    })),
        prog.observe(
            {
                'name': 'accident',
                'domain': [True, False],
                'ctype': 'probabilistic',
                'probability': [0.013, 1.0 - 0.013]
            },
            prog.sequence(
                Episode(action='(tow-vehicle)',
                        duration={
                            'ctype': 'controllable',
                            'lb': 30,
                            'ub': 90
                        }),
                Episode(action='(cab-ride)',
                        duration={
                            'ctype': 'controllable',
                            'lb': 10,
                            'ub': 20
                        })),
            Episode(action='(drive)',
                    duration={
                        'ctype': 'controllable',
                        'lb': 10,
                        'ub': 20
                    })), Episode(action='(stay)'))

    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=30.0)
    return prog
Example #35
def rmpyl_simple_verbose(hello,uav):
    """Simple RMPyL example using verbose syntax."""
    prog = RMPyL()
    prog *= prog.sequence(hello.scan(),uav.scan(),prog.parallel(hello.fly(),uav.fly()))
    return prog
Example #36
        """
        Returns the episode representing the rover sending data back to a satellite.
        """
        return Episode(duration={'ctype':'controllable','lb':5,'ub':30},
                       action='(relay %s)'%(self.name))

loc={'start':(8.751,-8.625),
     'minerals':(0.0,-10.0),
     'funny_rock':(-5.0,-2.0),
     'relay':(0.0,0.0),
     'alien_lair':(0.0,10.0)}


rov1 = Rover(name='spirit')

prog = RMPyL(name='run()')#name=run() is a requirement for Enterprise at the moment
prog *= prog.sequence(
            rov1.go_to(start=loc['start'],goal=loc['minerals'],risk=0.01),
            rov1.go_to(start=loc['minerals'],goal=loc['funny_rock'],risk=0.01),
            rov1.go_to(start=loc['funny_rock'],goal=loc['alien_lair'],risk=0.01),
            rov1.go_to(start=loc['alien_lair'],goal=loc['relay'],risk=0.01))
tc=prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=2000.0)
cc_time = ChanceConstraint(constraint_scope=[tc],risk=0.1)
prog.add_chance_constraint(cc_time)

#Option to export the RMPyL program to an Enterprise-compliant TPN.
prog.to_ptpn(filename='picard_rovers_rmpyl.tpn')

#Writes RMPyL program to pickle file.
with open('picard_rovers_rmpyl.pickle','wb') as f:
    print('Writing RMPyL program to pickle file.')
Example #37
def rmpyl_parallel_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    prog.plan = prog.parallel(
                    prog.sequence(
                        prog.decide({'name':'hello-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     hello.fly(),
                                     hello.scan()),
                        prog.decide({'name':'hello-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     hello.fly(),
                                     hello.scan()),
                        prog.decide({'name':'hello-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     hello.fly(),
                                     hello.scan())
                    ),
                    prog.sequence(
                        prog.decide({'name':'uav-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     uav.fly(),
                                     uav.scan()),
                        prog.decide({'name':'uav-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     uav.fly(),
                                     uav.scan()),
                        prog.decide({'name':'uav-action','domain':['Fly','Scan'],
                                     'utility':[0,1]},
                                     uav.fly(),
                                     uav.scan())

                    ))
    return prog
Example #38
def rmpyl_breakfast():
    """
    Example from (Levine & Williams, ICAPS14).
    """
    #Actions that Alice performs
    get_mug_ep = Episode(action='(get alice mug)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    get_glass_ep = Episode(action='(get alice glass)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})

    make_cofee_ep = Episode(action='(make-coffee alice)',duration={'ctype':'controllable','lb':3.0,'ub':5.0})
    pour_cofee_ep = Episode(action='(pour-coffee alice mug)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    pour_juice_glass = Episode(action='(pour-juice alice glass)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})

    get_bagel_ep = Episode(action='(get alice bagel)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    get_cereal_ep = Episode(action='(get alice cereal)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})

    toast_bagel_ep = Episode(action='(toast alice bagel)',duration={'ctype':'controllable','lb':3.0,'ub':5.0})
    add_cheese_bagel_ep = Episode(action='(add-cheese alice bagel)',duration={'ctype':'controllable','lb':1.0,'ub':2.0})
    mix_cereal_ep = Episode(action='(mix-cereal alice milk)',duration={'ctype':'controllable','lb':1.0,'ub':2.0})

    #Actions that the robot performs
    get_grounds_ep = Episode(action='(get grounds robot)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    get_juice_ep = Episode(action='(get juice robot)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    get_milk_ep = Episode(action='(get milk robot)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})
    get_cheese_ep = Episode(action='(get cheese robot)',duration={'ctype':'controllable','lb':0.5,'ub':1.0})

    prog = RMPyL()
    prog *= prog.sequence(
                prog.parallel(
                    prog.observe(
                        {'name':'observe-utensil','domain':['Mug','Glass'],'ctype':'uncontrollable'},
                        get_mug_ep,
                        get_glass_ep,
                        id='observe-utensil-ep'),
                    prog.decide(
                        {'name':'choose-beverage-ingredient','domain':['Grounds','Juice'],'utility':[0,0]},
                        get_grounds_ep,
                        get_juice_ep,
                        id='choose-beverage-ingredient-ep')),
                prog.observe(
                    {'name':'observe-alice-drink','domain':['Coffee','Juice'],'ctype':'uncontrollable'},
                    prog.sequence(make_cofee_ep,pour_cofee_ep),
                    pour_juice_glass,
                    id='observe-alice-drink-ep'),
                prog.parallel(
                    prog.observe(
                        {'name':'observe-food','domain':['Bagel','Cereal'],'ctype':'uncontrollable'},
                        get_bagel_ep,
                        get_cereal_ep,
                        id='observe-food-ep'),
                    prog.decide(
                        {'name':'choose-food-ingredient','domain':['Milk','Cheese'],'utility':[0,0]},
                        get_milk_ep,
                        get_cheese_ep,
                        id='choose-food-ingredient-ep'),
                    id='parallel-food-ep'),
                prog.observe(
                    {'name':'observe-alice-food','domain':['Bagel','Cereal'],'ctype':'uncontrollable'},
                    prog.sequence(toast_bagel_ep,add_cheese_bagel_ep),
                    mix_cereal_ep),
                id='breakfast-sequence')

    extra_tcs = [TemporalConstraint(start=prog.episode_by_id('breakfast-sequence').start,
                                  end=prog.episode_by_id('observe-utensil-ep').start,
                                  ctype='controllable',lb=0.0,ub=0.0),
                 TemporalConstraint(start=prog.episode_by_id('breakfast-sequence').start,
                                    end=prog.episode_by_id('choose-beverage-ingredient-ep').start,
                                    ctype='controllable',lb=0.2,ub=0.3),
                 TemporalConstraint(start=prog.episode_by_id('parallel-food-ep').start,
                                    end=prog.episode_by_id('observe-food-ep').start,
                                    ctype='controllable',lb=0.0,ub=0.0),
                 TemporalConstraint(start=prog.episode_by_id('parallel-food-ep').start,
                                    end=prog.episode_by_id('choose-food-ingredient-ep').start,
                                    ctype='controllable',lb=0.2,ub=0.3)]

    for tc in extra_tcs:
        prog.add_temporal_constraint(tc)

    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=7.0)
    prog.simplify_temporal_constraints()

    return prog
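A usage sketch mirroring the export steps in the rover and FlightGear snippets later in this collection; the filenames are placeholders, and pickling assumes the program object is picklable.

prog = rmpyl_breakfast()
prog.to_ptpn(filename='breakfast_rmpyl.tpn')  # export to a pTPN

import pickle
with open('breakfast_rmpyl.pickle', 'wb') as f:
    pickle.dump(prog, f)  # assumed picklable, mirroring the rover example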
Example #39
def rmpyl_infeasible():
    prog = RMPyL()
    prog *= Episode(action='(cab-ride)',duration={'ctype':'controllable','lb':10,'ub':20})
    prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=5.0)
    return prog
Example #40
def rmpyl_breakfast():
    """
    Example from (Levine & Williams, ICAPS14).
    """
    #Actions that Alice performs
    get_mug_ep = Episode(action='(get alice mug)',
                         duration={
                             'ctype': 'controllable',
                             'lb': 0.5,
                             'ub': 1.0
                         })
    get_glass_ep = Episode(action='(get alice glass)',
                           duration={
                               'ctype': 'controllable',
                               'lb': 0.5,
                               'ub': 1.0
                           })

    make_cofee_ep = Episode(action='(make-coffee alice)',
                            duration={
                                'ctype': 'controllable',
                                'lb': 3.0,
                                'ub': 5.0
                            })
    pour_cofee_ep = Episode(action='(pour-coffee alice mug)',
                            duration={
                                'ctype': 'controllable',
                                'lb': 0.5,
                                'ub': 1.0
                            })
    pour_juice_glass = Episode(action='(pour-juice alice glass)',
                               duration={
                                   'ctype': 'controllable',
                                   'lb': 0.5,
                                   'ub': 1.0
                               })

    get_bagel_ep = Episode(action='(get alice bagel)',
                           duration={
                               'ctype': 'controllable',
                               'lb': 0.5,
                               'ub': 1.0
                           })
    get_cereal_ep = Episode(action='(get alice cereal)',
                            duration={
                                'ctype': 'controllable',
                                'lb': 0.5,
                                'ub': 1.0
                            })

    toast_bagel_ep = Episode(action='(toast alice bagel)',
                             duration={
                                 'ctype': 'controllable',
                                 'lb': 3.0,
                                 'ub': 5.0
                             })
    add_cheese_bagel_ep = Episode(action='(add-cheese alice bagel)',
                                  duration={
                                      'ctype': 'controllable',
                                      'lb': 1.0,
                                      'ub': 2.0
                                  })
    mix_cereal_ep = Episode(action='(mix-cereal alice milk)',
                            duration={
                                'ctype': 'controllable',
                                'lb': 1.0,
                                'ub': 2.0
                            })

    #Actions that the robot performs
    get_grounds_ep = Episode(action='(get grounds robot)',
                             duration={
                                 'ctype': 'controllable',
                                 'lb': 0.5,
                                 'ub': 1.0
                             })
    get_juice_ep = Episode(action='(get juice robot)',
                           duration={
                               'ctype': 'controllable',
                               'lb': 0.5,
                               'ub': 1.0
                           })
    get_milk_ep = Episode(action='(get milk robot)',
                          duration={
                              'ctype': 'controllable',
                              'lb': 0.5,
                              'ub': 1.0
                          })
    get_cheese_ep = Episode(action='(get cheese robot)',
                            duration={
                                'ctype': 'controllable',
                                'lb': 0.5,
                                'ub': 1.0
                            })

    prog = RMPyL()
    prog *= prog.sequence(
        prog.parallel(
            prog.observe(
                {
                    'name': 'observe-utensil',
                    'domain': ['Mug', 'Glass'],
                    'ctype': 'uncontrollable'
                },
                get_mug_ep,
                get_glass_ep,
                id='observe-utensil-ep'),
            prog.decide(
                {
                    'name': 'choose-beverage-ingredient',
                    'domain': ['Grounds', 'Juice'],
                    'utility': [0, 0]
                },
                get_grounds_ep,
                get_juice_ep,
                id='choose-beverage-ingredient-ep')),
        prog.observe(
            {
                'name': 'observe-alice-drink',
                'domain': ['Coffee', 'Juice'],
                'ctype': 'uncontrollable'
            },
            prog.sequence(make_cofee_ep, pour_cofee_ep),
            pour_juice_glass,
            id='observe-alice-drink-ep'),
        prog.parallel(prog.observe(
            {
                'name': 'observe-food',
                'domain': ['Bagel', 'Cereal'],
                'ctype': 'uncontrollable'
            },
            get_bagel_ep,
            get_cereal_ep,
            id='observe-food-ep'),
                      prog.decide(
                          {
                              'name': 'choose-food-ingredient',
                              'domain': ['Milk', 'Cheese'],
                              'utility': [0, 0]
                          },
                          get_milk_ep,
                          get_cheese_ep,
                          id='choose-food-ingredient-ep'),
                      id='parallel-food-ep'),
        prog.observe(
            {
                'name': 'observe-alice-food',
                'domain': ['Bagel', 'Cereal'],
                'ctype': 'uncontrollable'
            }, prog.sequence(toast_bagel_ep, add_cheese_bagel_ep),
            mix_cereal_ep),
        id='breakfast-sequence')

    extra_tcs = [
        TemporalConstraint(
            start=prog.episode_by_id('breakfast-sequence').start,
            end=prog.episode_by_id('observe-utensil-ep').start,
            ctype='controllable',
            lb=0.0,
            ub=0.0),
        TemporalConstraint(
            start=prog.episode_by_id('breakfast-sequence').start,
            end=prog.episode_by_id('choose-beverage-ingredient-ep').start,
            ctype='controllable',
            lb=0.2,
            ub=0.3),
        TemporalConstraint(start=prog.episode_by_id('parallel-food-ep').start,
                           end=prog.episode_by_id('observe-food-ep').start,
                           ctype='controllable',
                           lb=0.0,
                           ub=0.0),
        TemporalConstraint(
            start=prog.episode_by_id('parallel-food-ep').start,
            end=prog.episode_by_id('choose-food-ingredient-ep').start,
            ctype='controllable',
            lb=0.2,
            ub=0.3)
    ]

    for tc in extra_tcs:
        prog.add_temporal_constraint(tc)

    prog.add_overall_temporal_constraint(ctype='controllable', lb=0.0, ub=7.0)
    prog.simplify_temporal_constraints()

    return prog
Example #41
    # sat_plans = py_sat.plan(task.initial_state,task.goals,time_steps=30,find_shortest=True) #find optimal plan size
    sat_plans = py_sat.plan(task.initial_state,
                            task.goals,
                            time_steps=18,
                            find_shortest=True)  # find sub-optimal plan
    # print("---------plan: %d" % len(sat_plans))
    if len(sat_plans) > 0:
        plan = sat_plans[
            0]  # get the first plan (default returns a list with one plan)
        for t, op in enumerate(plan):
            print('%d: %s' % (t, op))

        elapsed = time.time() - start
        print('\n##### All solving took %.4f s' % (elapsed))

        prog = RMPyL(name='run()')
        pddl_episodes = [
            Episode(id=make_episode_id(t, op),
                    start=Event(name='start-of-%d-%s' % (t, op)),
                    end=Event(name='end-of-%d-%s' % (t, op)),
                    action=op,
                    duration=rss_duration_model_func(op))
            for t, op in enumerate(plan)
        ]
        prog.plan = prog.sequence(*pddl_episodes)
        # prog.add_overall_temporal_constraint(ctype='controllable',lb=0.0,ub=2000.0)
        #Adds temporal window to the plan
        for t, op in enumerate(plan):
            bounds, tc_type = rss_time_window_model_func(op)
            for tc in time_window_constraints(
                    tc_type, bounds, prog.first_event,
Example #42
               verbose=True)

domain, problem, task = model_parser(dom_file, prob_file, remove_static=True)

start = time.time()
sat_plans = py_sat.plan(task.initial_state, task.goals, time_steps=18)
elapsed = time.time() - start
print('\n##### All solving took %.4f s' % (elapsed))

if len(sat_plans) > 0:
    plan = sat_plans[0]
    print('\n##### Plan found!\n')
    for t, action in enumerate(plan):
        print('%d: %s' % (t, action))

    prog = RMPyL(name='run()')
    prog.plan = prog.sequence(*[
        Episode(start=Event(name='start-of-' + op),
                end=Event(name='end-of-' + op),
                action=op,
                duration=rss_duration_func(op)) for op in plan
    ])
    prog.add_overall_temporal_constraint(ctype='controllable',
                                         lb=0.0,
                                         ub=2000.0)
    prog.to_ptpn(filename='rss_pysat_before_stnu_reform.tpn')

    paris = PARIS()
    risk_bound, sc_sched = paris.stnu_reformulation(prog,
                                                    makespan=True,
                                                    cc=0.001)
Example #43
def rmpyl_nested_uav():
    hello = UAV('hello')
    uav = UAV('uav')

    prog = RMPyL()
    prog.plan = prog.sequence(
        hello.scan(), uav.scan(),
        prog.decide(
            {
                'name': 'UAV-choice',
                'domain': ['Hello', 'UAV'],
                'utility': [7, 5]
            },
            prog.sequence(
                hello.fly(),
                prog.observe(
                    {
                        'name': 'hello-success',
                        'domain': ['Success', 'Failure'],
                        'ctype': 'probabilistic',
                        'probability': [0.8, 0.2]
                    },
                    prog.decide(
                        {
                            'name': 'hello-assert-success',
                            'domain': ['Success'],
                            'utility': [10]
                        }, hello.stop()),
                    prog.decide(
                        {
                            'name': 'hello-assert-failure',
                            'domain': ['Failure'],
                            'utility': [0]
                        }, hello.stop()))),
            prog.sequence(
                uav.fly(),
                prog.observe(
                    {
                        'name': 'uav-success',
                        'domain': ['Success', 'Failure'],
                        'ctype': 'probabilistic',
                        'probability': [0.95, 0.05]
                    },
                    prog.decide(
                        {
                            'name': 'uav-assert-success',
                            'domain': ['Success'],
                            'utility': [10]
                        }, uav.stop()),
                    prog.decide(
                        {
                            'name': 'uav-assert-failure',
                            'domain': ['Failure'],
                            'utility': [0]
                        }, uav.stop())))))
    return prog
Example #44
planner = RAOStar(cc_fp,node_name='id',cc=0.01,cc_type='overall',
                  terminal_prob=1.0,randomization=0.0,propagate_risk=True,
                  halt_on_violation=False,verbose=1)

#Searches for the optimal policy
policy,explicit,performance = planner.search(b0)

#Converts policy to graphical SVG format
dot_policy = policy_to_dot(explicit,policy)
dot_policy.write('flightgear_policy.svg',format='svg')

#Converts optimal exploration policy into an RMPyL program
exploration_policy = policy_to_rmpyl(explicit,policy)

#The flight policy has the additional actions of taking off and landing.
flight_policy = RMPyL(name='run()')
flight_policy *= flight_policy.sequence(Episode(action='(takeoff plane)'),
                                        exploration_policy,
                                        Episode(action='(land plane)'))

#Eliminates probabilistic choices from the policy, since Pike (in fact, the
#Lisp tpn package) cannot properly handle them.
for obs in flight_policy.observations:
    if obs.type=='probabilistic':
        obs.type = 'uncontrollable'
        del obs.properties['probability']

#Converts the program to a TPN
flight_policy.to_ptpn(filename='flightgear_rmpyl.tpn')

# Writes control program to pickle file
Example #45
def rmpyl_episode_ids(hello,uav):
    """Example of how episode ID's can be used to retrieve them."""
    prog = RMPyL()

    first_uav_seq = prog.sequence(uav.scan(),uav.fly(),id='uav-1-seq')
    second_uav_seq = prog.sequence(uav.scan(),uav.fly(),id='uav-2-seq')

    first_hello_seq = prog.sequence(hello.scan(),hello.fly(),id='hello-1-seq')
    second_hello_seq = prog.sequence(hello.scan(),hello.fly(),id='hello-2-seq')

    prog *= prog.parallel(prog.sequence(first_uav_seq,second_uav_seq,id='uav-seqs'),
                          prog.sequence(first_hello_seq,second_hello_seq,id='hello-seqs'),id='par-seqs')

    #This could have been accomplished much more easily by using the sequence
    #variables directly, but I wanted to show how episodes can be retrieved by
    #ID.
    tc1 = TemporalConstraint(start=prog.episode_by_id('uav-1-seq').end,
                            end=prog.episode_by_id('hello-2-seq').start,
                            ctype='controllable',lb=2.0,ub=3.0)

    tc2 = TemporalConstraint(start=prog.episode_by_id('hello-1-seq').end,
                            end=prog.episode_by_id('uav-2-seq').start,
                            ctype='controllable',lb=0.5,ub=1.0)

    prog.add_temporal_constraint(tc1)
    prog.add_temporal_constraint(tc2)

    return prog