def h_pg_levelsum(self, node: Node): """This heuristic uses a planning graph representation of the problem state space to estimate the sum of all actions that must be carried out from the current state in order to satisfy each individual goal condition. """ # requires implemented PlanningGraph class pg = PlanningGraph(self, node.state) pg_levelsum = pg.h_levelsum() return pg_levelsum
def setUp(self):
    super().setUp()
    self.ac_problem = air_cargo_p1()
    self.ac_pg_serial = PlanningGraph(self.ac_problem, self.ac_problem.initial).fill()
    # In(C1, P2) and In(C2, P1) have inconsistent support when they first
    # appear in the air cargo problem
    self.inconsistent_support_literals = [expr("In(C1, P2)"), expr("In(C2, P1)")]
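# The comment above names two literals with inconsistent support, but none of
# the snippets in this collection show the check itself. The following is a
# minimal sketch of that test between two literals in a layer; the names
# `parents` (the actions that achieve a literal in the previous action layer)
# and `parent_layer.is_mutex` are assumptions about the layer API, not
# confirmed by the code above.
def _inconsistent_support(self, literalA, literalB):
    # two literals are mutex if every pair of actions that could achieve
    # them is itself mutex in the previous action layer
    return all(self.parent_layer.is_mutex(a1, a2)
               for a1 in self.parents[literalA]
               for a2 in self.parents[literalB])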
def test_levelsum2(self):
    # make sure we can handle negations in the goals
    problem = HaveCakeProblem(
        FluentState([], [expr('Have(Cake)'), expr('Eaten(Cake)')]),
        [expr('Have(Cake)'), expr('~Eaten(Cake)')],
    )
    pg = PlanningGraph(problem, problem.initial)
    self.assertEqual(1, pg.h_levelsum())
def h_pg_levelsum(self, node: Node):
    '''This heuristic uses a planning graph representation of the problem
    state space to estimate the sum of all actions that must be carried out
    from the current state in order to satisfy each individual goal condition.
    '''
    pg = PlanningGraph(self, node.state)
    level = pg.h_levelsum()
    if level is False:
        # the goals cannot be reached from this state; report an infinite
        # estimate rather than a negative one, which would make the dead
        # end look maximally promising to the search
        return float("inf")
    return level
def test_levelsum3(self):
    # make sure we can handle multiple steps
    problem = HaveCakeProblem(
        FluentState([], [expr('Have(Cake)'), expr('Eaten(Cake)')]),
        [expr('Have(Cake)'), expr('Eaten(Cake)')],
    )
    pg = PlanningGraph(problem, problem.initial)
    # 1. bake  2. eat  3. bake
    self.assertEqual(3, pg.h_levelsum())
def h_pg_levelsum_cn(self, node: Node):
    """Variant of the pg levelsum heuristic for analysis purposes.
    Ignores the inconsistent_effects_mutex and interference_mutex checks.
    """
    # requires implemented PlanningGraph class
    pg = PlanningGraph(self, node.state,
                       inconsistent_effects_mutex=False,
                       interference_mutex=False)
    pg_levelsum = pg.h_levelsum()
    return pg_levelsum
def h_pg_setlevel(self, node): """ This heuristic uses a planning graph representation of the problem to estimate the level cost in the planning graph to achieve all of the goal literals such that none of them are mutually exclusive. See Also -------- Russell-Norvig 10.3.1 (3rd Edition) """ pg = PlanningGraph(self, node.state, serialize=True) score = pg.h_setlevel() return score
def h_pg_setlevel(self, node: Node):
    '''This heuristic uses a planning graph representation of the problem
    state space to estimate the minimum number of actions that must be
    carried out from the current state in order to satisfy all of the goal
    conditions.
    '''
    # TODO: Complete the implementation of this heuristic in the
    # PlanningGraph class
    pg = PlanningGraph(self, node.state)
    pg_setlevel = pg.h_setlevel()
    return pg_setlevel
def h_pg_levelsum(self, node): """ This heuristic uses a planning graph representation of the problem state space to estimate the sum of the number of actions that must be carried out from the current state in order to satisfy each individual goal condition. See Also -------- Russell-Norvig 10.3.1 (3rd Edition) """ pg = PlanningGraph(self, node.state, serialize=True, ignore_mutexes=True) score = pg.h_levelsum() return score
def h_pg_maxlevel(self, node): """ This heuristic uses a planning graph representation of the problem to estimate the maximum level cost out of all the individual goal literals. The level cost is the first level where a goal literal appears in the planning graph. See Also -------- Russell-Norvig 10.3.1 (3rd Edition) """ pg = PlanningGraph(self, node.state, serialize=True, ignore_mutexes=True) score = pg.h_maxlevel() return score
def h_pg_levelsum(self, node: Node): """This heuristic uses a planning graph representation of the problem state space to estimate the sum of all actions that must be carried out from the current state in order to satisfy each individual goal condition. """ # requires implemented PlanningGraph class time_in = time() pg = PlanningGraph(self, node.state) time_mid = time() pg_levelsum = pg.h_levelsum() time_out = time() print("LSH Timing: {} {} {}".format(time_in, time_mid, time_out)) print("LSH Create PG: {}".format(time_mid - time_in)) print("LSH Call LS: {}".format(time_out - time_mid)) return pg_levelsum
def setUp(self):
    self.cake_problem = have_cake()
    self.cake_pg = PlanningGraph(self.cake_problem, self.cake_problem.initial,
                                 serialize=False).fill()
    self.eat_action, self.bake_action = [
        a for a in self.cake_pg._actionNodes if not a.no_op
    ]
    no_ops = [a for a in self.cake_pg._actionNodes if a.no_op]
    self.null_action = make_node(
        Action(expr('Null()'), [set(), set()], [set(), set()]))

    # some independent nodes for testing mutexes
    at_here = expr('At(here)')
    at_there = expr('At(there)')
    self.pos_literals = [at_here, at_there]
    self.neg_literals = [~x for x in self.pos_literals]
    self.literal_layer = LiteralLayer(
        self.pos_literals + self.neg_literals, ActionLayer())
    self.literal_layer.update_mutexes()

    # independent actions for testing mutexes
    self.actions = [
        make_node(Action(expr('Go(here)'),
                         [set(), set()], [set([at_here]), set()])),
        make_node(Action(expr('Go(there)'),
                         [set(), set()], [set([at_there]), set()]))
    ]
    self.no_ops = [
        make_node(x) for x in chain(*(makeNoOp(l) for l in self.pos_literals))
    ]
    self.action_layer = ActionLayer(self.no_ops + self.actions, self.literal_layer)
    self.action_layer.update_mutexes()
    for action in self.no_ops + self.actions:
        self.action_layer.add_inbound_edges(action, action.preconditions)
        self.action_layer.add_outbound_edges(action, action.effects)
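# The setUp above builds literal and action layers only to exercise mutex
# checks that are not shown anywhere in this collection. As a reference
# point, here is a minimal sketch of the inconsistent-effects test between
# two action nodes; the attribute name `effects` (a set of literals) and the
# use of `~` for negation are assumptions consistent with the test code above.
def _inconsistent_effects(self, actionA, actionB):
    # two actions are mutex if an effect of one negates an effect of the other
    return any(~effect in actionB.effects for effect in actionA.effects)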
def setUp(self):
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
    # some independent nodes for testing mutexes
    self.na1 = PgNode_a(Action(expr('Go(here)'),
                               [[], []], [[expr('At(here)')], []]))
    self.na2 = PgNode_a(Action(expr('Go(there)'),
                               [[], []], [[expr('At(there)')], []]))
    self.na3 = PgNode_a(Action(expr('Noop(At(there))'),
                               [[expr('At(there)')], []], [[expr('At(there)')], []]))
    self.na4 = PgNode_a(Action(expr('Noop(At(here))'),
                               [[expr('At(here)')], []], [[expr('At(here)')], []]))
    self.na5 = PgNode_a(Action(expr('Reverse(At(here))'),
                               [[expr('At(here)')], []], [[], [expr('At(here)')]]))
    self.ns1 = PgNode_s(expr('At(here)'), True)
    self.ns2 = PgNode_s(expr('At(there)'), True)
    self.ns3 = PgNode_s(expr('At(here)'), False)
    self.ns4 = PgNode_s(expr('At(there)'), False)
    self.na1.children.add(self.ns1)
    self.ns1.parents.add(self.na1)
    self.na2.children.add(self.ns2)
    self.ns2.parents.add(self.na2)
    self.na1.parents.add(self.ns3)
    self.na2.parents.add(self.ns4)
def setUp(self):
    self.p = have_cake()  # create a new instance of the have-cake problem
    self.pg = PlanningGraph(self.p, self.p.initial)  # build a planning graph from its initial state
def setUp(self):
    self.cake_problem = have_cake()
    self.cake_pg = PlanningGraph(self.cake_problem, self.cake_problem.initial,
                                 serialize=False).fill()

    eat_action, bake_action = [
        a for a in self.cake_pg._actionNodes if not a.no_op
    ]
    no_ops = [a for a in self.cake_pg._actionNodes if a.no_op]
    # bake has the effect Have(Cake), which is the logical negation of the
    # effect ~Have(cake) from the persistence action ~NoOp::Have(cake)
    self.inconsistent_effects_actions = [bake_action, no_ops[3]]
    # the persistence action ~NoOp::Have(cake) has the effect ~Have(cake),
    # which is the logical negation of Have(cake) -- the precondition for
    # the Eat(cake) action
    self.interference_actions = [eat_action, no_ops[3]]
    # eat has precondition Have(cake) and bake has precondition ~Have(cake),
    # which are logical inverses, so eat & bake should be mutex at every
    # level of the planning graph where both actions appear
    self.competing_needs_actions = [eat_action, bake_action]

    self.ac_problem = air_cargo_p1()
    self.ac_pg_serial = PlanningGraph(self.ac_problem, self.ac_problem.initial).fill()
    # In(C1, P2) and In(C2, P1) have inconsistent support when they first
    # appear in the air cargo problem
    self.inconsistent_support_literals = [expr("In(C1, P2)"), expr("In(C2, P1)")]

    # some independent nodes for testing mutexes
    at_here = expr('At(here)')
    at_there = expr('At(there)')
    self.pos_literals = [at_here, at_there]
    self.neg_literals = [~x for x in self.pos_literals]
    self.literal_layer = LiteralLayer(
        self.pos_literals + self.neg_literals, ActionLayer())
    self.literal_layer.update_mutexes()

    # independent actions for testing mutexes
    self.actions = [
        make_node(Action(expr('Go(here)'),
                         [set(), set()], [set([at_here]), set()])),
        make_node(Action(expr('Go(there)'),
                         [set(), set()], [set([at_there]), set()]))
    ]
    self.no_ops = [
        make_node(x) for x in chain(*(makeNoOp(l) for l in self.pos_literals))
    ]
    self.action_layer = ActionLayer(self.no_ops + self.actions, self.literal_layer)
    self.action_layer.update_mutexes()
    for action in self.no_ops + self.actions:
        self.action_layer.add_inbound_edges(action, action.preconditions)
        self.action_layer.add_outbound_edges(action, action.effects)

    # competing needs tests -- build two copies of the planning graph: one
    # where A, B, and C are pairwise mutex, and another where they are not
    A, B, C = expr('A'), expr('B'), expr('C')
    self.fake_competing_needs_actions = [
        make_node(Action(expr('FakeAction(A)'),
                         [set([A]), set()], [set([A]), set()])),
        make_node(Action(expr('FakeAction(B)'),
                         [set([B]), set()], [set([B]), set()])),
        make_node(Action(expr('FakeAction(C)'),
                         [set([C]), set()], [set([C]), set()]))
    ]
    competing_layer = LiteralLayer([A, B, C], ActionLayer())
    for a1, a2 in combinations([A, B, C], 2):
        competing_layer.set_mutex(a1, a2)
    self.competing_action_layer = ActionLayer(competing_layer.parent_layer,
                                              competing_layer, False, True)
    for action in self.fake_competing_needs_actions:
        self.competing_action_layer.add(action)
        competing_layer |= action.effects
        competing_layer.add_outbound_edges(action, action.preconditions)
        self.competing_action_layer.add_inbound_edges(action, action.preconditions)
        self.competing_action_layer.add_outbound_edges(action, action.effects)

    not_competing_layer = LiteralLayer([A, B, C], ActionLayer())
    self.not_competing_action_layer = ActionLayer(not_competing_layer.parent_layer,
                                                  not_competing_layer, False, True)
    for action in self.fake_competing_needs_actions:
        self.not_competing_action_layer.add(action)
        not_competing_layer |= action.effects
        not_competing_layer.add_outbound_edges(action, action.preconditions)
        self.not_competing_action_layer.add_inbound_edges(action, action.preconditions)
        self.not_competing_action_layer.add_outbound_edges(action, action.effects)
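# The fake "competing" and "not competing" action layers above exist to
# exercise a competing-needs mutex check that is not shown in any snippet
# here. A minimal sketch of that check follows; `preconditions` (a set of
# literals) and `parent_layer.is_mutex` are assumed names consistent with
# the test code above, not confirmed API.
def _competing_needs(self, actionA, actionB):
    # two actions compete if any pair of their preconditions is mutex in
    # the previous literal layer
    return any(self.parent_layer.is_mutex(p1, p2)
               for p1 in actionA.preconditions
               for p2 in actionB.preconditions)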
def setUp(self):
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
def setUp(self):
    # print("in GH setup")
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
def p_run(arg):
    # PlanningGraph is constructed with (problem, state) everywhere else in
    # this collection, so `arg` is assumed here to be a (problem, state) pair
    problem, state = arg
    return PlanningGraph(problem, state).h_levelsum()
def h_pg_levelsum(self, node: Node):
    pg = PlanningGraph(self, node.state)
    pg_levelsum = pg.h_levelsum()
    return pg_levelsum
    This heuristic uses a planning graph representation of the problem state
    space to estimate the sum of all actions that must be carried out from
    the current state in order to satisfy each individual goal condition.
    """
    # requires implemented PlanningGraph class
    pg = PlanningGraph(self, node.state)
    pg_levelsum = pg.h_levelsum()
    return pg_levelsum

def h_ignore_preconditions(self, node: Node):
    '''This heuristic estimates the minimum number of actions that must be
    carried out from the current state in order to satisfy all of the goal
    conditions by ignoring the preconditions required for an action to be
    executed.
    '''
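# The snippet above is cut off after the h_ignore_preconditions docstring, so
# a minimal sketch of the missing body follows. With preconditions ignored,
# each unsatisfied goal fluent needs at least one action, so the count of
# unmet goals is a lower bound. The assumption that node.state is a 'T'/'F'
# string aligned with self.state_map is inferred from the air cargo snippet
# later in this collection, not confirmed elsewhere.
def h_ignore_preconditions(self, node: Node):
    # decode the T/F state string into the set of currently true fluents
    true_fluents = {f for f, v in zip(self.state_map, node.state) if v == 'T'}
    # count the goal fluents that still need to be achieved
    return sum(1 for g in self.goal if g not in true_fluents)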
        expr('At(P1, JFK)'), expr('At(P1, ATL)'), expr('At(P1, ORD)'),
        expr('At(P2, SFO)'), expr('At(P2, ATL)'), expr('At(P2, ORD)'),
    ]
    init = FluentState(pos, neg)
    goal = [expr('At(C1, JFK)'), expr('At(C2, SFO)'),
            expr('At(C3, JFK)'), expr('At(C4, SFO)')]
    return AirCargoProblem(cargos, planes, airports, init, goal)


if __name__ == '__main__':
    P1 = air_cargo_p1()
    # P1.result('TTFTTFTF', P1.actions('TTFTTFTF')[0])
    # print(P1.state_map)                  # print the initial state map of P1
    # for a in P1.actions_list:            # print all possible (concrete) actions of P1
    #     print(a)
    # print(P1.actions('TTTTTTTTTTTT'))    # all actions are possible
    # print(P1.actions('FFFFFFFFFFFF'))    # no actions are possible
    # print(P1.result('TTFTF', P1.actions('TTFTF')[0]))
    # print(P1.h_ignore_preconditions(Node('FFTFTTFF')))  # test the heuristic for this state
    print(P1.h_pg_levelsum(Node('FFTFTTFF')))  # test the heuristic for this state
    PG = PlanningGraph(P1, 'FFTFTTFF')
def h_pg_setlevel(self, node: Node):
    # uses the planning graph setlevel heuristic calculated
    # from this node to the goal
    pg = PlanningGraph(self, node.state)
    pg_setlevel = pg.h_setlevel()
    return pg_setlevel
def create_planning_graph(self, state):
    return PlanningGraph(self, state)
            expr('Eaten(Cake)'),
        ]
        return FluentState(pos, neg)

    def get_goal():
        return [
            expr('Have(Cake)'),
            expr('Eaten(Cake)'),
        ]

    return HaveCakeProblem(get_init(), get_goal())


if __name__ == '__main__':
    p = have_cake()
    pg = PlanningGraph(p, p.initial)
    [s.show() for s in pg.s_levels[0]]
    [a.show() for a in pg.a_levels[0]]
    print("**** Have Cake example problem setup ****")
    print("Initial state for this problem is {}".format(p.initial))
    print("Actions for this domain are:")
    for a in p.actions_list:
        print('   {}{}'.format(a.name, a.args))
    print("Fluents in this problem are:")
    for f in p.state_map:
        print('   {}'.format(f))
    print("Goal requirements for this problem are:")
    for g in p.goal:
        print('   {}'.format(g))
    print()
def setUp(self):
    # import pdb; pdb.set_trace()
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
def setUp(self):
    # print("\n===================== TestPlanningGraphLevels.....setUp")
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
def setUp(self):
    # print("\n===================== TestPlanningGraphHeuristics.....test_inconsistent_support_mutex")
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
def setUp(self):
    self.p = have_cake()
    self.pg = PlanningGraph(self.p, self.p.initial)
    print("Setup is good")
def h_pg_maxlevel(self, node: Node):
    # requires implemented PlanningGraph class
    pg = PlanningGraph(self, node.state)
    pg_maxlevel = pg.h_maxlevel()
    return pg_maxlevel