Code example #1
def benchmark_a_star(verbose=0):

    num_worlds = 1
    num_problems = 1
    num_repeat = 5
    problem_bank = {}

    planner = Planner.get_HPlanner_v14()

    for i in range(num_worlds):
        # world = get_random_world(10, 10) # Default to 10 x 10
        # world.uncertainties = get_uncertainty_fun(world, num_step=100, a_prob=0.5)
        # world.uncertainties(world, 0)

        world = make_rand_nav_problem(10, 10)
        world.uncertainties(world, 0)

        # Get all solutions using A-star for navigating
        print_board(world)
        if verbose:
            print_state(world)

        world.a_star = True

        for j in range(num_problems):
            # Make a navigation problem
            random_loc = random.choice(world.loc_available.keys())
            world.goals["A1"] = [("navigate", "A1", random_loc)]
            world.settings["verbose"] = 0

            print("Goal: {}".format(world.goals["A1"]))

            for k in range(num_repeat):
                start = time.time()
                sol = planner.plan(world, "A1")[0]
                end = time.time()

                print("time:", end - start)
                print("problem: ", world.goals)
                print("solutiON; ", sol)

                print("solution length", len(sol.get_actions()))
                problem_bank[world.goals["A1"][0]] = end - start

                if (end - start) > 1:
                    print_board(world)
                    print("solution", solutions_b)
                    print("*** Re-run problem with verbose=3")
                    world.settings["verbose"] = 2
                    pyhop(world, "agent1", 3, all_solutions=True, amortize=False)  # only one solution
                    raw_input("Above problem took too long... {} seconds".format(end - start))
Code example #2
def benchmark_amortized(verbose=0):
    world = get_random_world(6, 5)

    print_board(world)

    start = time.time()
    solutions_a = pyhop(world, "agent1", verbose, all_solutions=True, amortize=False)  # all solutions, without amortization
    end = time.time()
    print("before:", end - start)
    print("num_recurse calls", get_num_recurse_calls())
    start = time.time()
    solutions_b = pyhop(world, "agent1", verbose, all_solutions=True, amortize=True)  # all solutions, with amortization
    end = time.time()
    print("after:", end - start)
    print("num_recurse calls", get_num_recurse_calls())

    print("solution_a size: ", len(solutions_a))
    print("solution_b size: ", len(solutions_b))
Code example #3
    def show_single_agent(cur_world, real_world, solution, agent, uncertainty=False):
        (actions, states) = solution

        # Take 1 step at a time
        step_counter = 0
        while len(actions) != 0:
            
            print("*** Old stuff ***")
            # 0: Info
            print('length of remaining plan: {}; \nlength of remaining states: {}'
                .format(len(actions), len(states)))
            print('\ttimestep: {}; \n\tactions: {};'.format(step_counter, actions))

            cur_action = actions.pop(0)
            next_state = states.pop(0)

            # 1: Generate possible Uncertainty to the real-world
            generate_uncertainty(real_world, a_prob=1, verbose=True)

            # 2: The agent gets an observation of the surrounding world and decides whether to replan
            replan = get_observation(agent, next_state, cur_action, real_world)

            # 3: Agent MIGHT need to re-plan.
            if replan:
                raw_input("Need to re-plan...")
                print('replanning')

                print_board(real_world)

                solutions = pyhop(real_world, agent, verbose=0, all_solutions=False)
                solution = solutions[0]
                
                # print('new solution', solution)

                if solution != False: 
                    (actions, states) = solution

                else:
                    print('no solution found for agent:{}, goal:{}'.format(agent, real_world.goals[agent]))
                    return

            else:
                # 4: (if not replan) Agent takes action
                # next_state = act(cur_world, cur_action) # This is the same as states[i]
                real_world = act(real_world, cur_action)

                # end: Info
                print('next state')
                print_board(next_state)
                print('real world')
                print_board(real_world)

                raw_input("Press Enter to continue...")

            step_counter += 1
Code example #4
def benchmark_compare_a_star(verbose=0):
    world = get_random_world(6, 5)

    print_board(world)

    # Get all solutions using Hierarchical Decompositions for navigating (Baseline3)
    world.settings["a-star"] = False
    start = time.time()
    solutions_a = pyhop(world, "agent1", verbose, all_solutions=True, amortize=False)  # all solutions, HTN navigation
    end = time.time()

    print("before:", end - start)
    print("num solutions found:", len(solutions_a))
    print("num_recurse calls", get_num_recurse_calls())

    # Get all solutions using A-star for navigating
    world.settings["a-star"] = True
    start = time.time()
    solutions_b = pyhop(world, "agent1", 0, all_solutions=True, amortize=False)  # only one solution
    end = time.time()
    print("after:", end - start)
    print("num solutions found:", len(solutions_b))
    print("num_recurse calls", get_num_recurse_calls())
Code example #5
def single_agent_benchmark():

    num_repeat = 7
    board_X = range(4, 6)
    board_Y = range(3, 5)
    world_gen_times = {}
    board_size_times = {}
    num_solutions_times = {}
    num_recurse_calls = {}

    for x in board_X:
        for y in board_Y:
            board_size = x * y
            print("board size: ", x, y)
            world_gen_sum = 0
            board_size_sum = 0

            for i in range(num_repeat):
                start = time.time()
                world = get_random_world(x, y)
                end = time.time()
                world_gen_sum += end - start

                print_board(world)

                start = time.time()
                solutions = pyhop(world, "agent1", verbose=0, all_solutions=True)
                end = time.time()
                board_size_sum += end - start
                num_recurse_calls[len(solutions)] = get_num_recurse_calls()
                num_solutions_times[len(solutions)] = end - start
                print("find {} solutions for board of size {}".format(len(solutions), board_size))
                print("num_recurse_calls", num_recurse_calls)
                print("num_solutions_times", num_solutions_times)

    # plot time with respect to the number of solutions found.
    od_num_solutions_times = collections.OrderedDict(sorted(num_solutions_times.items()))
    print("Ordered od_num_solutions_time", od_num_solutions_times)
    od_num_recurse_calls = collections.OrderedDict(sorted(num_recurse_calls.items()))
    print("Ordered od_num_recurse_calls", od_num_recurse_calls)

    plt.plot(od_num_solutions_times.keys(), od_num_solutions_times.values())
    plt.plot(od_num_recurse_calls.keys(), od_num_recurse_calls.values())
    plt.show()
Code example #6
    def show_single_agent_recurse(cur_world, real_world, solution, agent, uncertainty=False):
        (plan, states) = solution

        # Take 1 step at a time
        for (i, cur_action) in enumerate(plan):

            # 0: Info
            logging.info('length of plan: {}; length of states: {}'.format(len(plan), len(states)))
            logging.info('\ttimestep: {}; \n\tactions: {};'.format(i, plan[i:]))

            # 1: Generate possible Uncertainty to the real-world
            generate_uncertainty(real_world, a_prob=1, verbose=True)

            # 2: The agent gets an observation of the surrounding world and decides whether to replan
            replan = get_observation(agent, states[i], cur_action, real_world)

            # 3: Agent MIGHT need to re-plan.
            if replan:
                raw_input("Need to re-plan...")
                real_world = remove_traps(copy.deepcopy(real_world))
                print('remove_traps...')
                print_board(real_world)

                solutions = pyhop(real_world, 'agent1', verbose=3, all_solutions=False)
                solution = solutions[0]
                print('new solution', solution)

                if solution != False: 
                    show_single_agent(real_world, real_world, solution, agent)
                    return
                else:
                    print('no solution found for agent:{}, goal:{}'.format(agent, real_world.goals[agent]))
                    return
            # 4: (if not replan) Agent takes action
            next_state = act(cur_world, cur_action) # This is the same as states[i]
            real_world = act(real_world, cur_action)

            # end: Info
            print('next state')
            print_board(states[i])
            print('real world')
            print_board(real_world)

            raw_input("Press Enter to continue...")
Code example #7
File: convgraph.py Project: arithmetica/Galois-1
def get_conversion(start, start_ty, end, end_ty, existing, verbose=0):
    s = State('initial')

    # do we need existing?
    s.existing = {}
    s.files = {}
    s.tried_existing = set()

    for f1, f2 in conversions.keys():
        s.files[f1] = None
        s.files[f2] = None

    for k, v in existing.iteritems():
        s.existing[k] = v

    s.files[start_ty] = start

    x = pyhop(s, [('convert', start, start_ty, end, end_ty)], verbose=verbose)
    return x
Code example #8
    def step(self, agent_name):
        # 0: Info
        step_info = {}
        step_info['replan'] = False
        
        replan = False
        agent = self.agents[agent_name]
        solution = agent.get_solution()
        cur_step = agent.get_cur_step()

        # Get actions and states
        actions = solution.get_actions()
        states = solution.get_states()

        # If the agent is done and successful
        if cur_step == len(actions):
            print("Done")
            agent.done = True
            agent.add_history('done', 0)
            return (self.real_world, step_info)
        cur_action = actions[cur_step]
        next_state = states[cur_step]     

        step_info['cur_action'] = cur_action

        # 1: Generate possible Uncertainty to the real-world
        if hasattr(self.real_world, 'uncertainties'):
            # If the world comes with its own uncertainty function, apply that
            self.real_world.uncertainties(self.real_world, cur_step)
        elif self.PARAMS['uncertainty']:
            # Else, generate randomly
            new_uncertainties = generate_uncertainty(self.real_world, a_prob=self.PARAMS['uncertainty'], verbose=True)

        # 2: The agent gets an observation of the surrounding world and decides whether to replan (if not already replanning)
        if self.PARAMS['re_plan']:
            replan = get_observation(agent_name, None, cur_action, self.real_world)

        # 3: Agent MIGHT need to re-plan.
        if replan:
            step_info['replan'] = True

            print('agent {} is replanning; world is: '.format(agent_name))
            print_board(self.real_world)

            # When re-planning, reset the agent's "visited" set in the real world
            self.real_world.visited[agent_name] = set()
            results = pyhop(copy.deepcopy(self.real_world), agent_name, plantree=self.PARAMS['use_tree'])
            result = random.choice(results)

            if result == None or result == False:
                print('*** no solution found for agent:{}, goal:{}'.format(agent_name, self.real_world.goals[agent_name]))
                agent.add_history('None', sys.maxint)
                agent.done = True
                return None

            agent.set_solution(result)
            agent.cur_step = 0
            agent.global_step += 1
            agent.add_history('replan', 1)
            return (self.real_world, step_info)

        else:
            # 4: (if not replan) Agent takes action
            print('cur_action: ', cur_action)
            self.real_world = act(self.real_world, cur_action)
            agent.mental_world = act(agent.mental_world, cur_action)

            agent.add_history(cur_action, self.real_world.cost_func(self.real_world, cur_action))

            # end: Info
            print('next state')
            print_board(next_state)
            print('real world')
            print_board(self.real_world)


        agent.cur_step += 1
        agent.global_step += 1
        return (self.real_world, step_info)
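
step() advances one agent by a single action, or a single replanning attempt, per call, so a driver loop is needed to run a full episode. A minimal sketch is below; run_episode, sim, and max_steps are hypothetical names, and only what step() itself touches (the agents dict and each agent's done flag) is assumed to exist.

# Hypothetical driver for the step() method above; none of these names
# appear in the original source.
def run_episode(sim, max_steps=1000):
    for _ in range(max_steps):
        active = [name for (name, agent) in sim.agents.items() if not agent.done]
        if not active:
            break                    # every agent has finished or given up
        for agent_name in active:
            sim.step(agent_name)     # one action or one replan per call

Note that step() marks an agent done both on success and on a failed replan, so the loop above terminates in either case.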
Code example #9
File: testing_cost.py Project: ksenglee/meng_pyhop
"""
Test the various implementations of handling Cost.
- Brute Force Method for finding minimum Cost
- Using Heuristics (Sorting of preconditions) for finding min cost
- Using Branch and Bound
"""
from __future__ import print_function
from pyhop import *
import random, time
from random_rovers_world import *


if __name__ == "__main__":
	world = get_random_world(5, 5)
	print('')
	print('*** World Generated ***')
	print_state(world)
	print('')
	print('Board: ')
	print_board(world)
	# We argue that implementing heuristics for sorting decomposition is equivalent to a*
	
	pyhop(world, 'A1', 0, all_solutions=False, plantree=False, rand=False)
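
The docstring above names three strategies for handling cost. As a sketch of the third, branch and bound prunes any branch whose accumulated cost already matches or exceeds the best complete solution found so far. The tree interface below (cost, is_leaf, children()) is hypothetical, not pyhop's actual internals:

# Sketch of branch-and-bound minimum-cost search over an OR-tree of
# alternative decompositions; the node interface is hypothetical.
def branch_and_bound(root):
    best = [float('inf')]              # incumbent cost, shared via closure

    def search(node, cost_so_far):
        cost = cost_so_far + node.cost
        if cost >= best[0]:
            return None                # bound: cannot beat the incumbent
        if node.is_leaf:
            best[0] = cost             # new incumbent
            return [node]
        best_path = None
        for child in node.children():  # branch over alternative decompositions
            path = search(child, cost)
            if path is not None:       # non-None only when it improved the bound
                best_path = [node] + path
        return best_path

    return search(root, 0)

Sorting children by estimated cost before recursing tightens the bound sooner; that is the sense in which the comment above likens precondition sorting to A*.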
Code example #10
print("- Define state1: a on b, b on tale, c on table")

"""
A state is a collection of all of the state variables and their values. Every state variable in the domain should have a value.
"""

state1 = State('state1')
state1.pos={'a':'b', 'b':'table', 'c':'table'}
state1.clear={'c':True, 'b':False,'a':True}
state1.holding=False

print_state(state1)
print('')

print('- these should fail:')
pyhop(state1,[('pickup','a')], verbose=1)
pyhop(state1,[('pickup','b')], verbose=1)
print('- these should succeed:')
pyhop(state1,[('pickup','c')], verbose=1)
pyhop(state1,[('unstack','a','b')], verbose=1)
pyhop(state1,[('get','a')], verbose=1)
print('- this should fail:')
pyhop(state1,[('get','b')], verbose=1)
print('- this should succeed:')
pyhop(state1,[('get','c')], verbose=1)

print("""
****************************************
Run pyhop on two block-stacking problems, both of which start in state1.
The goal for the 2nd problem omits some of the conditions in the goal
of the 1st problem, but those conditions will need to be achieved
Code example #11
print("- Define state1: a on b, b on tale, c on table")
"""
A state is a collection of all of the state variables and their values. Every state variable in the domain should have a value.
"""

state1 = State('state1')
state1.pos = {'a': 'b', 'b': 'table', 'c': 'table'}
state1.clear = {'c': True, 'b': False, 'a': True}
state1.holding = False

print_state(state1)
print('')

print('- these should fail:')
pyhop(state1, [('pickup', 'a')], verbose=1)
pyhop(state1, [('pickup', 'b')], verbose=1)
print('- these should succeed:')
pyhop(state1, [('pickup', 'c')], verbose=1)
pyhop(state1, [('unstack', 'a', 'b')], verbose=1)
pyhop(state1, [('get', 'a')], verbose=1)
print('- this should fail:')
pyhop(state1, [('get', 'b')], verbose=1)
print('- this should succeed:')
pyhop(state1, [('get', 'c')], verbose=1)

print("""
****************************************
Run pyhop on two block-stacking problems, both of which start in state1.
The goal for the 2nd problem omits some of the conditions in the goal
of the 1st problem, but those conditions will need to be achieved
Code example #12
s1 = State('state1')
# Locations are in standard graph axes, as ordered pairs (x, y)
s1.locContents = bidict({(1,3):'b1', (2,3):'b2', (3,3):'b3', (4,3):'b4', (5,3):'b5',
                         (1,5):'b6', (2,5):'b7', (3,5):'b8', (4,5):'b9', (5,5):'b10'})  # e.g. (1,3) holds b1, (2,3) holds b2
# TODO: Definitely come up with alternate solution to available blocks list - probably block status
s1.blocksAvail = s1.locContents.values()
# Could maybe at some point replace this by just checking if the key exists in loc?
s1.locOccupied = {(x,y):False for x in range(1,6) for y in range(1,6)}
s1.locOccupied.update({loc:True for loc in s1.locContents.keys()}) # make sure these reflect the occupied locs
s1.locRobot = (2,2)
s1.holding = False 

print_state(s1)
print('')

print("- Define goal1:")

#g1 = Goal('goal1')
#g1.locContents = bidict({(1,1):'b1',(1,2):'b2',(1,3):'b3'})
#g1.locOccupied = {loc:False for loc in s1.locContents.keys()} #locContents.keys() gives all locs
#g1.locOccupied.update({loc:True for loc in g1.locContents.keys()}) 
#g1.locRobot = (2,2)

#print_goal(g1)
#print('')


result = pyhop(s1,[('createRect',(3,3),3,4)], verbose=1)

import ipdb
ipdb.set_trace()
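
A comment above asks whether locOccupied could be replaced by checking membership in locContents directly. A sketch of that alternative, assuming the same bidict layout and 5x5 board:

# Sketch answering the "could we just check if the key exists" comment above:
# derive occupancy from locContents instead of maintaining locOccupied.
def is_occupied(state, loc):
    return loc in state.locContents

def is_free(state, loc, width=5, height=5):
    # A cell is free if it is on the board and holds no block
    on_board = 1 <= loc[0] <= width and 1 <= loc[1] <= height
    return on_board and loc not in state.locContents

This removes the risk of locContents and locOccupied drifting out of sync, at the cost of an explicit bounds check.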
Code example #13
File: convgraph.py Project: arithmetica/Galois-1
    for k, v in existing.iteritems():
        s.existing[k] = v

    s.files[start_ty] = start

    x = pyhop(s, [('convert', start, start_ty, end, end_ty)], verbose=verbose)
    return x


if __name__ == "__main__":
    start_file = 'a'
    start_file_fmt = 'binary/gr'

    s = State('initial')
    s.existing = {}
    s.files = {}

    for f1, f2 in conversions.keys():
        s.files[f1] = None
        s.files[f2] = None

    s.files[start_file_fmt] = start_file

    s.existing[start_file_fmt] = start_file
    s.existing['other/format1'] = 'c'

    x = pyhop(s, [('convert', 'a', 'binary/gr', 'b', 'other/format')],
              verbose=2)
    if not x:
        print "conversion is unsupported"
Code example #14
# hasmortar: key is block id, value is whether that block has a layer of mortar
state1.hasmortar = {}
for blockid in state1.pos.keys():
    state1.hasmortar[blockid] = False

state1.mortaravailable = {}  # key is mortar id, value is True if available (False once used)
num_mortar = 5
for i in range(num_mortar):
    key = 'M' + str(i)
    state1.mortaravailable[key] = True

print_state(state1)
print('')

print('- these should fail:')
pyhop(state1, [('pickup', 'a')], verbose=1)
pyhop(state1, [('pickup', 'b')], verbose=1)
print('- these should succeed:')
pyhop(state1, [('pickup', 'c')], verbose=1)
pyhop(state1, [('unstack', 'a', 'b')], verbose=1)
pyhop(state1, [('get', 'a')], verbose=1)
print('- this should fail:')
pyhop(state1, [('get', 'b')], verbose=1)
print('- this should succeed:')
pyhop(state1, [('get', 'c')], verbose=1)

print(' - testing stack mortared')
print_state(state1)
goal1a = Goal('goal1a')
goal1a.pos = {'c': 'b', 'b': 'a', 'a': 'e', 'e': 'd', 'd': 'table'}
pyhop(state1, [('move_blocks', goal1a)], verbose=1)
Code example #15
    # Set uncertainty
    world.uncertainties = get_uncertainty_fun(world, None, None, 
        sequence=SEQ, randoms=RANDs)
    return world

# Here, we set the parameters necessary for generating a world.
CAPABILITIES = ["equipped_for_imaging", "equipped_for_rock_analysis", "equipped_for_soil_analysis"]
AGENTS = ['A1', 'A2', 'A3']
LANDER = "G"
LAB = "L"
NUM_ROCKS = 2
NUM_SOILS = 2
NUM_OBJECTIVES = 0


if __name__ == "__main__":
    world = get_random_world(5, 5)
    print('')
    print('*** World Generated ***')
    print_state(world)
    print('')
    print('Board: ')
    print_board(world)
    world.settings['a-star'] = True
    world.settings['verbose'] = False
    world.settings['sample'] = True
    pyhop(world, 'A1', 3, all_solutions=False, plantree=True, rand=False) # only one solution



Code example #16
File: goda-pyhop.py Project: allienson/GODA2Pyhop
'T2.22:_Post_data':True,\
'T2.2:_Process_data':False,\
'T2:_Process_qualification':False,\
'G9:_Qualification_is_collected':False,\
'G3:_Manual_data_is_sent':False,\
'T3.11:_Fetch_GPS':True,\
'T3.12:_Fetch_triangulation':False,\
'T3.1:_Fetch_geolocation':False,\
'T3.21:_Validate_data':True,\
'T3.22:_Post_data':True,\
'T3.2:_Process_data':False,\
'T3:_Track_line_locator':False,\
'G10:_Line_locations_tracked':False,\
'G4:_Automatic_data_is_sent':False,\
'G1:_Transport_info_is_shared':False}

pyhop(state, [\
('and_par', 'T1.2:_Process_data', 'T1.21:_Validate_data', 'T1.22:_Post_data'),\
('and_seq', 'T1:_Process_modification', 'T1.1:_Render_view', 'T1.2:_Process_data', 'T1.3:_Update_view'),\
('means_end', 'G8:_Modification_is_collected', 'T1:_Process_modification'),\
('and_seq', 'T2.2:_Process_data', 'T2.21:_Validate_data', 'T2.22:_Post_data'),\
('and_seq', 'T2:_Process_qualification', 'T2.1:_Render_view', 'T2.2:_Process_data'),\
('means_end', 'G9:_Qualification_is_collected', 'T2:_Process_qualification'),\
('and_par', 'G3:_Manual_data_is_sent', 'G8:_Modification_is_collected', 'G9:_Qualification_is_collected'),\
('xor', 'T3.1:_Fetch_geolocation', 'T3.11:_Fetch_GPS', 'T3.12:_Fetch_triangulation'),\
('and_seq', 'T3.2:_Process_data', 'T3.21:_Validate_data', 'T3.22:_Post_data'),\
('try_op', 'T3:_Track_line_locator', 'T3.1:_Fetch_geolocation', 'T3.2:_Process_data', 'skip'),\
('means_end', 'G10:_Line_locations_tracked', 'T3:_Track_line_locator'),\
('means_end', 'G4:_Automatic_data_is_sent', 'G10:_Line_locations_tracked'),\
('and_par', 'G1:_Transport_info_is_shared', 'G3:_Manual_data_is_sent', 'G4:_Automatic_data_is_sent'),\
], verbose=1)
Code example #17
File: mazeHTN.py Project: kevinhou168/mazeHTN
from __future__ import print_function
from pyhop import *
from maze_layout import *

import mazeHTN_methods
print('')
print_methods()

import mazeHTN_operators
print('')
print_operators()

# Establish maze layout
maze = Maze(10, 10, 0, 0)
maze.make_maze()

# Establish Domain and define traits
state1 = State('state1')
state1.x = {'me': 0}
state1.y = {'me': 0}
state1.xpath = {'me': [0]}
state1.ypath = {'me': [0]}
state1.count = {'me': 1}
state1.maze = maze
state1.goal_x = {'me': state1.maze.nx - 1}
state1.goal_y = {'me': state1.maze.ny - 1}

# Return results and record in .svg file
results = pyhop(state1, [('FindGoal', 'me')], verbose=2)
maze.write_svg('maze.svg', maze, results)
Code example #18
File: hanoi_3.py Project: james7132/RIP2014
from pyhop import *
import hanoi_domain
import sys

sys.setrecursionlimit(30000)

state = State("hanoi-state")
state.diskLocation = [1 for i in range(3)]

pyhop(state, [("move", 2, 1, 3, 2)], verbose=1)
Code example #19
File: hanoi_10.py Project: james7132/RIP2014
from pyhop import *
import hanoi_domain
import sys
sys.setrecursionlimit(30000)

state = State('hanoi-state')
state.diskLocation = [1 for i in range(0, 10)]

pyhop(state, [('move', 9, 1, 3, 2)], verbose=1)
Code example #20
File: task.py Project: GillinedUp/htn-xcom
def grenade_assault(state, a, t, goal):
    return assault(state, a, t, 'throw_grenade', goal)


def move_gen(steps):
    def move(state, a, t, goal):
        # Use the state passed in by the planner, not the global state1
        if state.ap[a] >= steps_to_ap(state.distance[a][t]) and \
                steps <= state.distance[a][t]:
            return [('walk', a, t, steps), ('act', a, t, goal)]
        return False

    return move


dist = 10

move_list = [move_gen(x) for x in range(dist)]

declare_methods('act', rifle_assault, grenade_assault, *move_list)

state1 = State('state1')
state1.weapons = {'ally': ['rifle', 'grenade'], 'enemy': ['rifle']}
state1.hp = {'ally': 20, 'enemy': 30}
state1.ap = {'ally': 15, 'enemy': 10}
state1.distance = {'ally': {'enemy': dist}}

goal1 = Goal('goal1')
goal1.hp = {'enemy': 0}

pyhop(state1, [('act', 'ally', 'enemy', goal1)], verbose=3)