def level_sum(state, planning_problem):
    """
    Level-sum heuristic: the sum, over all goal propositions, of the first
    graph level at which each proposition appears in a relaxed (mutex-free)
    planning graph grown from `state`.

    Goal propositions already true in `state` appear at level 0 and
    contribute nothing, so the heuristic is 0 for a goal state.  If the
    relaxed graph reaches a fixed point before covering every goal
    proposition, the goal is unreachable and float('inf') is returned.
    """
    # Seed the level-0 proposition layer with the current state.
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)

    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)
    pg_cur = PlanGraphLevel()
    pg_cur.set_proposition_layer(prop_layer)
    graph = [pg_init]

    # Goals satisfied at level 0 contribute 0, so drop them up front.
    goal_set = planning_problem.goal.difference(frozenset(state))
    # BUGFIX: was `1 + len(goal_set.intersection(frozenset(state)))`, which
    # is always 1 (goal_set excludes state props) and made h(goal) != 0.
    cost = 0
    level = 1

    # BUGFIX: loop must continue while the goal is NOT yet satisfied
    # (the original was missing the `not`).
    while not planning_problem.is_goal_state(
            pg_cur.get_proposition_layer().get_propositions()):
        new_level = PlanGraphLevel()
        # Propositions persist from one level to the next; copy them over.
        for prop in pg_cur.get_proposition_layer().get_propositions():
            new_level.get_proposition_layer().add_proposition(prop)
        new_level.expand_without_mutex(pg_cur.get_proposition_layer())
        graph.append(new_level)
        pg_cur = new_level
        if is_fixed(graph, level - 1):
            # Fixed point reached without covering the goal: unreachable.
            return float('inf')
        # Goals appearing for the first time at this level cost `level` each.
        layer_props = frozenset(
            pg_cur.get_proposition_layer().get_propositions())
        cost += level * len(goal_set.intersection(layer_props))
        goal_set = goal_set.difference(layer_props)
        level += 1
    return cost
def graph_plan(self):
    """
    The Graphplan algorithm.

    Alternates between expanding the planning graph and calling the
    backward-search `extract` function.  Returns the extracted plan on
    success, or None when the graph reaches a fixed point with no plan
    (i.e. the problem is unsolvable).
    """
    # -- initialization --
    init_state = self.initial_state
    level = 0
    self.no_goods = [
    ]  # one no-good table per level; updated by the backward search
    self.no_goods.append([])
    # Create the first layer of the graph; it only has a proposition layer,
    # which consists of the initial state.
    prop_layer_init = PropositionLayer()
    for prop in init_state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)
    self.graph.append(pg_init)
    size_no_good = -1
    """
    While the layer does not contain all of the propositions in the goal state, or some of these propositions are mutex in the layer we, and we have not reached the fixed point, continue expanding the graph
    """
    # Expansion phase: grow the graph until the last layer contains all goal
    # propositions with no pairwise mutex between them.
    while self.goal_state_not_in_prop_layer(self.graph[level].get_proposition_layer().get_propositions()) or \
            self.goal_state_has_mutex(self.graph[level].get_proposition_layer()):
        if self.is_fixed(level):
            # We reached a fixed point in the graph before the goal became
            # achievable: nothing more to do, we failed!
            return None
        self.no_goods.append([])  # fresh no-good table for the new level
        level = level + 1
        pg_next = PlanGraphLevel()  # create new PlanGraphLevel object
        pg_next.expand(
            self.graph[level - 1]
        )  # expand from the previous level (implemented in PlanGraphLevel)
        self.graph.append(
            pg_next)  # append the new level to the plan graph
        size_no_good = len(
            self.no_goods[level])  # remember size of the no-good table
    # Extraction phase: all goal propositions are present and non-mutex in
    # the current level, so try to extract a plan by backward search.
    plan_solution = self.extract(self.graph, self.goal, level)
    while plan_solution is None:  # while we didn't extract a plan successfully
        # Extraction failed: grow the graph by one more level and retry.
        level = level + 1
        self.no_goods.append([])
        pg_next = PlanGraphLevel()  # create the next level of the graph
        pg_next.expand(
            self.graph[level - 1])  # by expanding the previous one
        self.graph.append(pg_next)
        plan_solution = self.extract(self.graph, self.goal,
                                     level)  # try to extract a plan again
        if plan_solution is None and self.is_fixed(
                level):  # if extraction failed and we reached a fixed point
            if len(self.no_goods[level - 1]) == len(self.no_goods[level]):
                # No-good table stopped growing at the fixed point: further
                # expansion can change nothing, so the problem is unsolvable.
                return None
            size_no_good = len(
                self.no_goods[level]
            )  # we didn't fail yet! update size of no good
    return plan_solution
def max_level(state, planning_problem):
    """
    Max-level heuristic: the number of levels a relaxed (mutex-free)
    planning graph grown from `state` must be expanded before all goal
    propositions appear.

    Returns 0 when `state` already satisfies the goal, and float('inf')
    when the relaxed graph reaches a fixed point before covering the goal
    (i.e. the goal is unreachable).
    """
    # Seed the level-0 proposition layer with the current state.
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)

    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)
    pg_cur = PlanGraphLevel()
    pg_cur.set_proposition_layer(prop_layer)
    graph = [pg_init]

    # BUGFIX: counter starts at 0 so a goal state gets h = 0 (was 1).
    level = 0

    # BUGFIX: loop must continue while the goal is NOT yet satisfied
    # (the original was missing the `not`).
    while not planning_problem.is_goal_state(
            pg_cur.get_proposition_layer().get_propositions()):
        new_level = PlanGraphLevel()
        # Propositions persist from one level to the next; copy them over.
        for prop in pg_cur.get_proposition_layer().get_propositions():
            new_level.get_proposition_layer().add_proposition(prop)
        # Expand from the previous layer, consistent with level_sum.  (The
        # original passed new_level's own layer, which holds the same
        # propositions at this point, so this is behavior-equivalent.)
        new_level.expand_without_mutex(pg_cur.get_proposition_layer())
        graph.append(new_level)
        pg_cur = new_level
        level += 1
        if is_fixed(graph, level - 1):
            # Fixed point reached without covering the goal: unreachable.
            return float('inf')
    return level