def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    "*** YOUR CODE HERE ***"
    # Implement the max-level heuristic:
    # expand the graph without mutexes until the goal is reached;
    # the heuristic value is the number of levels needed to reach the goal.
    pg = state  # in this variant a state is already a PlanGraphLevel
    Graph = [pg]
    Level = 0
    pgNext = pg
    isRepeat = False
    isGoal = False
    while not isGoal and not isRepeat:
        # expand the next level without mutexes
        pgPrev = pgNext
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(pgPrev)
        Graph.append(pgNext)
        Level += 1
        # Check whether the expansion has 'leveled off':
        # if isFixed() returns True, check whether the current level was already
        # reached earlier in the graph history. If so, we are looping and the
        # heuristic value should be 'inf'.
        if isFixed(Graph, Level):
            pgNextPropositions = pgNext.getPropositionLayer().getPropositions()
            isRepeat = True
            for Hist in range(Level - 1):
                HistLevelPropositions = Graph[Hist].getPropositionLayer().getPropositions()
                for prop in pgNextPropositions:
                    if prop not in HistLevelPropositions:
                        isRepeat = False
        isGoal = not problem.goalStateNotInPropLayer(pgNext.propositionLayer.propositions)
    h = Level
    if isRepeat and not isGoal:
        h = float('inf')
    return h
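# Many of the snippets in this section call an isFixed(graph, level) helper
# that is not shown here. A minimal sketch of it, assuming (as the inline
# layer-size checks in other variants suggest) that the graph has leveled off
# once two consecutive proposition layers have the same size:
def isFixed(graph, level):
    """Return True if the graph did not grow between level-1 and level."""
    if level == 0:
        return False
    return len(graph[level].getPropositionLayer().getPropositions()) == \
           len(graph[level - 1].getPropositionLayer().getPropositions())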
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    "*** YOUR CODE HERE ***"
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)  # update the new plan graph level with the proposition layer
    level = 0
    while not problem.isGoalState(pgInit.getPropositionLayer().getPropositions()):
        level += 1
        # expand to the next layer
        prevLayerSize = len(pgInit.getPropositionLayer().getPropositions())
        pgInit.expandWithoutMutex(pgInit)
        # check whether the expanded layer is the same layer as before
        if len(pgInit.getPropositionLayer().getPropositions()) == prevLayerSize:
            return float("inf")
    return level
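# Illustrative call site for the heuristic above, assuming states are lists of
# propositions as in that variant, and assuming the surrounding search
# framework exposes an aStarSearch(problem, heuristic) entry point (that name
# is an assumption, not part of this section):
#
#   h0 = maxLevel(problem.getStartState(), problem)   # heuristic at the root
#   plan = aStarSearch(problem, heuristic=maxLevel)   # hypothetical search call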
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)  # update the new plan graph level with the proposition layer
    level = 0
    sumLevel = 0
    currentGoals = set(copy.copy(problem.goal))
    while currentGoals:  # run until all goals are found or no solution is possible
        # check for newly achieved goals
        goalsInHand = set(pgInit.getPropositionLayer().getPropositions()) & currentGoals
        if goalsInHand:
            sumLevel += len(goalsInHand) * level
            currentGoals -= goalsInHand
            if not currentGoals:  # all goals reached; don't expand further
                break
        level += 1
        # expand to the next layer
        prevLayerSize = len(pgInit.getPropositionLayer().getPropositions())
        pgInit.expandWithoutMutex(pgInit)
        # check whether the expanded layer is the same layer as before
        if len(pgInit.getPropositionLayer().getPropositions()) == prevLayerSize:
            return float("inf")
    return sumLevel
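# A small worked example contrasting the two heuristics above. Suppose the
# relaxed graph (expanded without mutexes) makes goal g1 first appear in
# proposition layer 1 and goal g2 in layer 3 (the levels are hypothetical):
#
#   maxLevel = max(1, 3) = 3   # layers until all goals are present
#   levelSum = 1 + 3     = 4   # sum of first-appearance levels
#
# levelSum is typically more informative, but unlike maxLevel it is not
# admissible in general.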
def __init__(self, domain, problem):
    """
    Constructor
    """
    p = Parser(domain, problem)
    self.actions, self.propositions = p.parseActionsAndPropositions()
    # list of all the actions and list of all the propositions
    self.initialState, self.goal = p.pasreProblem()
    # the initial state and the goal state are lists of propositions
    self.createNoOps()  # creates noOps that are used to propagate existing propositions from one layer to the next
    PlanGraphLevel.setActions(self.actions)
    PlanGraphLevel.setProps(self.propositions)
    self._expanded = 0
def getStartState(self):
    "*** YOUR CODE HERE ***"
    # A state is a PlanGraphLevel; build a level holding the
    # initial-state propositions.
    propLayerInit = PropositionLayer()
    for prop in self.initialState:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    return pgInit
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    graph = []  # list of PlanGraphLevel objects
    graph.append(pgInit)
    level = 0
    while True:
        # check whether this level contains the goal
        # (goalStateNotInPropLayer returns True when the goal is absent,
        # so the break condition must be negated)
        if not problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
            break
        # otherwise, if the graph has leveled off without reaching the goal,
        # the goal cannot be reached at all
        elif isFixed(graph, level):
            return float('inf')
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[level])
        graph.append(pgNext)
        level += 1
    # we broke out of the loop, so the last level contains the goal
    return level
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    level = 0
    graph = []
    initial_prop_layer = PropositionLayer()
    for prop in state:
        initial_prop_layer.addProposition(prop)
    curr_graph_level = PlanGraphLevel()
    curr_graph_level.setPropositionLayer(initial_prop_layer)
    graph.append(curr_graph_level)
    while not problem.isGoalState(graph[level].getPropositionLayer().getPropositions()):
        if isFixed(graph, level):
            return float('inf')
        level += 1
        next_level = PlanGraphLevel()
        next_level.expandWithoutMutex(graph[level - 1])
        graph.append(next_level)
    return level
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    "*** YOUR CODE HERE ***"
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    level = 0
    graph = []
    graph.append(pgInit)
    while not problem.isGoalState(graph[level].getPropositionLayer().getPropositions()):
        # without this guard the loop would never terminate when the goal is
        # unreachable; the docstring requires float('inf') in that case
        if isFixed(graph, level):
            return float('inf')
        level += 1
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)
    return level
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    # This is adapted from the graphplan algorithm, with a small change in the
    # loop condition and with mutexes disabled.
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)
    graph = []
    sum_sub_goals = 0
    level = 0
    graph.append(pgInit)
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        if isFixed(graph, level):
            # we reached a fixed point in the graph without the goal: nothing more to do, we failed!
            return float("inf")
        if problem.isSubGoal(graph[level].getPropositionLayer().getPropositions()):
            # a sub-goal appears at this level: count it
            sum_sub_goals += 1
        level += 1
        pgNext = PlanGraphLevel()  # create a new PlanGraphLevel object
        pgNext.expandWithoutMutex(graph[level - 1])  # expand without mutexes
        graph.append(pgNext)  # append the new level to the plan graph
    sum_sub_goals += 1  # the final level, where the full goal appears, counts as well
    return sum_sub_goals
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    "*** YOUR CODE HERE ***"
    propLayerInit = PropositionLayer()  # create a new proposition layer
    # initialize the propositions
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)  # update the new plan graph level with the proposition layer
    currentLevel = pgInit
    level = 0
    sum = 0
    graph = list()
    graph.append(currentLevel)
    goals = copy.deepcopy(problem.goal)
    while len(goals) > 0:
        if isFixed(graph, level):
            return float("inf")
        layer = currentLevel.getPropositionLayer().getPropositions()
        for prop in layer:
            if prop in goals:
                sum = sum + level
                goals.remove(prop)
        nextLevel = PlanGraphLevel()
        nextLevel.expandWithoutMutex(graph[level])
        level = level + 1
        graph.append(nextLevel)
        currentLevel = nextLevel
    return sum
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    level = 0
    sum = 0
    graph = []
    goals = [goal for goal in problem.goal]
    initial_prop_layer = PropositionLayer()
    for prop in state:
        initial_prop_layer.addProposition(prop)
    initial_level = PlanGraphLevel()
    initial_level.setPropositionLayer(initial_prop_layer)
    graph.append(initial_level)
    while len(goals) > 0:
        if isFixed(graph, level):
            return float('inf')
        # iterate over a copy: removing from a list while iterating it skips elements
        for goal in goals[:]:
            if goal in graph[level].getPropositionLayer().getPropositions():
                sum += level
                goals.remove(goal)
        level += 1
        next_level = PlanGraphLevel()
        next_level.expandWithoutMutex(graph[level - 1])
        graph.append(next_level)
    return sum
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    # This is adapted from the graphplan algorithm, with a small change in the
    # loop condition and with mutexes disabled.
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)
    graph = []
    level = 0
    graph.append(pgInit)
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        if isFixed(graph, level):
            # we reached a fixed point in the graph without the goal: nothing more to do, we failed!
            return float("inf")
        level = level + 1
        pgNext = PlanGraphLevel()  # create a new PlanGraphLevel object
        pgNext.expandWithoutMutex(graph[level - 1])  # expand without mutexes
        graph.append(pgNext)  # append the new level to the plan graph
    return level
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    "*** YOUR CODE HERE ***"
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)  # update the new plan graph level with the proposition layer
    currentLevel = pgInit
    level = 0
    graph = list()
    graph.append(currentLevel)
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        newLevel = PlanGraphLevel()
        newLevel.expandWithoutMutex(graph[level])
        level += 1
        graph.append(newLevel)
        currentLevel = newLevel
        if isFixed(graph, level):
            return float("inf")
    return level
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    "*** YOUR CODE HERE ***"
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    level = 0
    sum = 0
    graph = []
    goals = [goal for goal in problem.goal]
    graph.append(pgInit)
    while len(goals) > 0:
        # guard against an unreachable goal, as the docstring requires
        if isFixed(graph, level):
            return float('inf')
        # iterate over a copy: removing from a list while iterating it skips elements
        for goal in goals[:]:
            if goal in graph[level].getPropositionLayer().getPropositions():
                sum += level
                goals.remove(goal)
        level += 1
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)
    return sum
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    level = 0
    propLayerInit = PropositionLayer()
    # add all propositions in the current state to the proposition layer
    for p in state:
        propLayerInit.addProposition(p)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    # the graph is a list of PlanGraphLevel objects
    graph = []
    graph.append(pgInit)
    # while the goal state is not in the proposition layer, keep expanding
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        # if the graph has not changed between expansions, we should halt
        if isFixed(graph, level):
            return float('inf')
        level += 1
        pgNext = PlanGraphLevel()
        # expand without mutexes (a relaxed version of the problem)
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)
    return level
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    def nextPlan(plan):
        next_plan = PlanGraphLevel()
        next_plan.expandWithoutMutex(plan)
        return next_plan, next_plan.getPropositionLayer().getPropositions()

    propLayerInit = PropositionLayer()
    # add all state propositions to the new proposition layer
    lmap(propLayerInit.addProposition, state)
    plan = PlanGraphLevel()
    plan.setPropositionLayer(propLayerInit)
    plan_propositions = plan.getPropositionLayer().getPropositions()
    # create a graph that will store all the plan levels
    graph = []
    graph.append(plan)
    goals_levels = dict()
    goal = problem.goal
    # initialize the goal levels
    for p in goal:
        goals_levels[p.getName()] = None
    # as long as some goal still maps to None, we haven't found its first level
    while None in goals_levels.values():
        # if the graph is fixed, we won't have a solution
        if isFixed(graph, len(graph) - 1):
            return float('inf')
        # for each goal proposition, check whether it appears in the current
        # plan's propositions
        for p in goal:
            # only record a level if we haven't assigned one yet
            if p in plan_propositions and goals_levels[p.getName()] is None:
                # set the current level as the first appearance of the prop
                goals_levels[p.getName()] = len(graph) - 1
        # create the next plan from the previous one
        plan, plan_propositions = nextPlan(plan)
        # store it in the graph
        graph.append(plan)
    return sum(goals_levels.values())
def graphPlan(self):
    """
    The graphplan algorithm.
    The code calls the extract function which you should complete below
    """
    # initialization
    initState = self.initialState
    level = 0
    self.noGoods = []  # make sure you update noGoods in your backward search!
    self.noGoods.append([])
    # create the first layer of the graph; note it only has a proposition layer,
    # which consists of the initial state.
    propLayerInit = PropositionLayer()
    for prop in initState:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    self.graph.append(pgInit)

    """
    While the layer does not contain all of the propositions in the goal state,
    or some of these propositions are mutex in the layer, and we have not
    reached the fixed point, continue expanding the graph
    """
    while self.goalStateNotInPropLayer(self.graph[level].getPropositionLayer().getPropositions()) or \
            self.goalStateHasMutex(self.graph[level].getPropositionLayer()):
        if self.isFixed(level):
            # we reached a fixed point in the graph without the goal: nothing more to do, we failed!
            return None
        self.noGoods.append([])
        level = level + 1
        pgNext = PlanGraphLevel()  # create a new PlanGraphLevel object
        pgNext.expand(self.graph[level - 1])  # calls the expand function, which you are implementing in the PlanGraph class
        self.graph.append(pgNext)  # appending the new level to the plan graph

    sizeNoGood = len(self.noGoods[level])  # remember the size of the nogood table
    # try to extract a plan since all of the goal propositions are in the
    # current graph level, and are not mutex
    plan = self.extract(self.graph, self.goal, level)
    while plan is None:  # while we didn't extract a plan successfully
        level = level + 1
        self.noGoods.append([])
        pgNext = PlanGraphLevel()  # create the next level of the graph by expanding
        pgNext.expand(self.graph[level - 1])
        self.graph.append(pgNext)
        plan = self.extract(self.graph, self.goal, level)  # try to extract a plan again
        if plan is None and self.isFixed(level):  # if we failed and reached a fixed point
            if sizeNoGood == len(self.noGoods[level]):
                # the size of the nogood table didn't change: there's nothing more to do. We failed.
                return None
            sizeNoGood = len(self.noGoods[level])  # we didn't fail yet! update the nogood table size
    return plan
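# A minimal driver sketch for the method above, assuming it belongs to a class
# named GraphPlan and that the domain/problem file names below are purely
# illustrative placeholders:
if __name__ == '__main__':
    gp = GraphPlan('dwrDomain.txt', 'dwrProblem.txt')  # hypothetical file names
    plan = gp.graphPlan()
    if plan is None:
        print('no plan found')
    else:
        # noOps only propagate propositions, so they are not real plan steps
        print('plan of %d actions' % len([a for a in plan if not a.isNoOp()]))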
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          # create a new proposition layer
    for prop in state:                          # update the proposition layer with the propositions of the state
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()                   # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   # update the new plan graph level with the proposition layer
    """
    def nextPlan(plan):
        next_plan = PlanGraphLevel()
        next_plan.expandWithoutMutex(plan)
        return next_plan, next_plan.getPropositionLayer().getPropositions()

    propLayerInit = PropositionLayer()
    # add all state propositions to the new proposition layer
    lmap(propLayerInit.addProposition, state)
    plan = PlanGraphLevel()
    plan.setPropositionLayer(propLayerInit)
    plan_propositions = plan.getPropositionLayer().getPropositions()
    # create a graph that will store all the plan levels
    graph = []
    graph.append(plan)
    # once we find the goal, we can stop
    while not problem.isGoalState(plan_propositions):
        # if the graph is fixed, we won't have a solution
        if isFixed(graph, len(graph) - 1):
            return float('inf')
        # create the next plan from the previous one
        plan, plan_propositions = nextPlan(plan)
        # store it in the graph
        graph.append(plan)
    return len(graph) - 1
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    level = 0
    propLayerInit = PropositionLayer()
    # add all propositions in the current state to the proposition layer
    for p in state:
        propLayerInit.addProposition(p)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    # the graph is a list of PlanGraphLevel objects
    graph = []
    graph.append(pgInit)
    # while the goal state is not in the proposition layer, keep expanding
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        # if the graph has not changed between expansions, we halt
        if isFixed(graph, level):
            return float('inf')
        level += 1
        pgNext = PlanGraphLevel()
        # expand without mutexes (a relaxed version of the problem)
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)
    return level
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    total = 0
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    g = [pgInit]
    level = 0
    # work on a copy of the goals: mutating problem.goal would corrupt the
    # problem for later heuristic calls
    goals = list(problem.goal)
    while len(goals) > 0:
        if isFixed(g, level):
            return float("inf")
        # iterate over a copy: removing from a list while iterating it skips elements
        for goal in goals[:]:
            if goal in g[level].getPropositionLayer().getPropositions():
                goals.remove(goal)
                total += level
        nextPlanGraphLevel = PlanGraphLevel()
        nextPlanGraphLevel.expandWithoutMutex(g[level])
        level += 1
        g.append(nextPlanGraphLevel)
    return total
def graphPlan(self):
    # The graphplan algorithm itself.
    # initialization
    initState = self.initialState
    level = 0
    self.noGoods = []
    self.noGoods.append([])
    # create the first layer of the graph, which consists only of the initial state
    propLayerInit = PropositionLayer()
    for prop in initState:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    self.graph.append(pgInit)
    # while the layer does not contain all propositions of the goal state
    # (or they are mutex), keep expanding the graph
    while self.goalStateNotInPropLayer(self.graph[level].getPropositionLayer().getPropositions()) or \
            self.goalStateHasMutex(self.graph[level].getPropositionLayer()):
        if self.isFixed(level):
            # we stop here because the graph reached a fixed point, so nothing more can be done
            return None
        self.noGoods.append([])
        level = level + 1  # update the level
        pgNext = PlanGraphLevel()  # create a new PlanGraphLevel object
        pgNext.expand(self.graph[level - 1])  # call the expand function
        self.graph.append(pgNext)  # attach the newly generated level to the graph
    sizeNoGood = len(self.noGoods[level])
    # try to extract a plan (all goal propositions are in this level and not mutex)
    plan = self.extract(self.graph, self.goal, level)
    while plan is None:  # repeat while we cannot find a plan
        level = level + 1
        self.noGoods.append([])
        pgNext = PlanGraphLevel()  # create the next level of the graph
        pgNext.expand(self.graph[level - 1])  # and expand it
        self.graph.append(pgNext)
        plan = self.extract(self.graph, self.goal, level)  # try to extract a plan again
        if plan is None and self.isFixed(level):  # if we failed and reached a fixed point
            if sizeNoGood == len(self.noGoods[level]):
                # if the size of noGoods didn't change, we have failed and there is no plan
                return None
            sizeNoGood = len(self.noGoods[level])  # otherwise we may still find a plan; update the noGoods size
    return plan
def getSuccessors(self, state):
    """
    For a given state, this should return a list of triples,
    (successor, action, stepCost), where 'successor' is a
    successor to the current state, 'action' is the action
    required to get there, and 'stepCost' is the incremental
    cost of expanding to that successor, 1 in our case.
    You might want to use this function:
    For a list of propositions l and action a,
    a.allPrecondsInList(l) returns true if the preconditions of a are in l
    """
    self._expanded += 1
    "*** YOUR CODE HERE ***"
    # Successors correspond to the actions applicable at the current level.
    # Build the action layer with pgNext.updateActionLayer, then for each
    # action build the level that would have been created had only that
    # action been selected.
    Successors = []
    pg = state
    previousPropositionLayer = pg.getPropositionLayer()
    previousLayerPropositions = previousPropositionLayer.propositions
    pgNext = PlanGraphLevel()
    pgNext.updateActionLayer(previousPropositionLayer)
    for Action in pgNext.actionLayer.actions:
        if Action.isNoOp():
            continue
        pgNextAction = PlanGraphLevel()
        pgNextAction.actionLayer.addAction(Action)
        for prop in previousLayerPropositions:
            pgNextAction.propositionLayer.addProposition(prop)
        for prop in Action.getAdd():
            new_prop = Proposition(prop.getName())
            new_prop.addProducer(Action)
            pgNextAction.propositionLayer.addProposition(new_prop)
        for prop in Action.getDelete():
            pgNextAction.propositionLayer.removePropositions(prop)
        pgNextAction.updateMutexProposition()
        Successors.append((pgNextAction, Action, 1))
    return Successors
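# Illustrative expansion of the start state, assuming `problem` is an instance
# of the planning-as-search class these methods belong to:
#
#   start = problem.getStartState()                  # a PlanGraphLevel
#   for successor, action, stepCost in problem.getSuccessors(start):
#       print(action, stepCost)                      # each applicable non-noOp action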
def expansionGenerator(state, problem):
    """
    Generates and yields the propositions in each level,
    until the graph becomes fixed.
    """
    propLayerInit = PropositionLayer()  # create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)  # update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()  # create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)  # update the new plan graph level with the proposition layer
    graph = [pgInit]
    count = 0
    while not isFixed(graph, count):
        props = graph[count].getPropositionLayer().getPropositions()
        yield count, props
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[count])
        graph.append(pgNext)
        count += 1
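# A possible consumer of the generator above: record the level at which each
# goal proposition first appears. The function name and the use of getName()
# as a dictionary key are illustrative, not part of the original assignment:
def firstAppearanceLevels(state, problem):
    remaining = list(problem.goal)
    first_seen = {}
    for level, props in expansionGenerator(state, problem):
        # iterate over a copy so removals don't skip elements
        for goal in remaining[:]:
            if goal in props:
                first_seen[goal.getName()] = level
                remaining.remove(goal)
        if not remaining:
            break
    return first_seen  # goals missing from the dict were never reached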
def __init__(self, domain, problem):
    # Class constructor
    self.independentActions = set()
    self.noGoods = []
    self.graph = []
    p = Parser(domain, problem)
    self.actions, self.propositions = p.parseActionsAndPropositions()
    # list of all the actions and all the propositions
    self.initialState, self.goal = p.pasreProblem()
    # the initial state and the goal (both are lists of propositions)
    self.createNoOps()  # creates the noOps used to propagate propositions from one layer to the next
    self.independent()  # creates the list of independent actions and updates self.independentActions
    PlanGraphLevel.setIndependentActions(self.independentActions)
    PlanGraphLevel.setActions(self.actions)
    PlanGraphLevel.setProps(self.propositions)
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()
    for p in state:
        propLayerInit.addProposition(p)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    graph = []  # list of PlanGraphLevel objects
    graph.append(pgInit)
    goals = problem.goal[:]
    level = 0
    sum_ = 0
    # keep expanding as long as we still have goal states we haven't seen
    while goals:
        if isFixed(graph, level):
            # the graph is fixed and the last expansion changed nothing, so the
            # goal state can't be reached: return infinity
            return float('inf')
        props = graph[level].getPropositionLayer().getPropositions()
        # iterate over a copy: removing from a list while iterating it skips elements
        for goal in goals[:]:
            if goal in props:
                # each goal state we run into is added to the sum at its first
                # level of appearance and removed from the goals left to see
                sum_ += level
                goals.remove(goal)
        pg = PlanGraphLevel()
        # expand using an easier version of the problem - without mutexes
        pg.expandWithoutMutex(graph[level])
        graph.append(pg)
        level += 1
    return sum_
def __init__(self, domain, problem):
    """
    Constructor
    """
    self.independentActions = []
    self.noGoods = []
    self.graph = []
    p = Parser(domain, problem)
    self.actions, self.propositions = p.parseActionsAndPropositions()
    # list of all the actions and list of all the propositions
    self.initialState, self.goal = p.pasreProblem()
    # the initial state and the goal state are lists of propositions
    self.createNoOps()  # creates noOps that are used to propagate existing propositions from one layer to the next
    self.independent()  # creates independent actions list and updates self.independentActions
    PlanGraphLevel.setIndependentActions(self.independentActions)
    PlanGraphLevel.setActions(self.actions)
    PlanGraphLevel.setProps(self.propositions)
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    A good place to start would be:
    propLayerInit = PropositionLayer()          #create a new proposition layer
    for prop in state:
        propLayerInit.addProposition(prop)      #update the proposition layer with the propositions of the state
    pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
    pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the proposition layer
    """
    propLayerInit = PropositionLayer()
    for p in state:
        propLayerInit.addProposition(p)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    graph = []  # list of PlanGraphLevel objects
    graph.append(pgInit)
    level = 0
    # keep expanding as long as we don't hit the goal state
    while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
        if isFixed(graph, level):
            # the graph is fixed and the last expansion changed nothing, so the
            # goal state can't be reached: return infinity
            return float('inf')
        pg = PlanGraphLevel()
        # expand using an easier version of the problem - without mutexes
        pg.expandWithoutMutex(graph[level])
        graph.append(pg)
        level += 1
    return level
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)
    graph = []  # list of PlanGraphLevel objects
    graph.append(pgInit)
    level = 0
    leftGoals = problem.goal.copy()
    level_sum = 0
    while True:
        # if leftGoals is empty, we have reached all the goals
        if len(leftGoals) == 0:
            break
        # otherwise, if the graph has leveled off without covering the goals,
        # the goal can't be reached
        elif isFixed(graph, level):
            return float('inf')
        props = graph[level].getPropositionLayer().getPropositions()
        # for each remaining goal that appears in this level's propositions,
        # add the level to the sum and drop it from the remaining goals
        # (iterate over a copy: removing while iterating a list skips elements)
        for goal in leftGoals[:]:
            if goal in props:
                level_sum += level
                leftGoals.remove(goal)
        pgTemp = PlanGraphLevel()
        pgTemp.expandWithoutMutex(graph[level])
        graph.append(pgTemp)
        level += 1
    return level_sum
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    newPropositionLayer = PropositionLayer()
    [newPropositionLayer.addProposition(p) for p in state]
    newPlanGraphLevel = PlanGraphLevel()
    newPlanGraphLevel.setPropositionLayer(newPropositionLayer)
    level = 0
    g = [newPlanGraphLevel]
    while problem.goalStateNotInPropLayer(g[level].getPropositionLayer().getPropositions()):
        if isFixed(g, level):
            return float("inf")
        level += 1
        nextPlanGraphLevel = PlanGraphLevel()
        nextPlanGraphLevel.expandWithoutMutex(g[level - 1])
        # append the newly expanded level (appending newPlanGraphLevel here
        # would discard every expansion)
        g.append(nextPlanGraphLevel)
    return level
def nextPlan(plan):
    next_plan = PlanGraphLevel()
    next_plan.expandWithoutMutex(plan)
    return next_plan, next_plan.getPropositionLayer().getPropositions()
def levelSum(state, problem):
    """
    The heuristic value is the sum of the levels at which the sub-goals first appear.
    If the goal is not reachable from the state, your heuristic should return float('inf')
    """
    "*** YOUR CODE HERE ***"
    # Implement the level-sum heuristic:
    # expand the graph without mutexes until the goal is reached;
    # the heuristic value is the sum of the levels at which each goal
    # proposition is first reached.
    pg = state  # in this variant a state is already a PlanGraphLevel
    Graph = [pg]
    Level = 0
    Sum = 0
    goal = copy.deepcopy(problem.goal)
    pgNext = pg
    isRepeat = False
    isGoal = False
    while not isGoal and not isRepeat:
        # expand the next level without mutexes
        pgPrev = pgNext
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(pgPrev)
        Graph.append(pgNext)
        Level += 1
        # Check whether the expansion has 'leveled off':
        # if isFixed() returns True, check whether the current level was already
        # reached earlier in the graph history. If so, we are looping and the
        # heuristic value should be 'inf'.
        if isFixed(Graph, Level):
            pgNextPropositions = pgNext.getPropositionLayer().getPropositions()
            isRepeat = True
            for Hist in range(Level - 1):
                HistLevelPropositions = Graph[Hist].getPropositionLayer().getPropositions()
                for prop in pgNextPropositions:
                    if prop not in HistLevelPropositions:
                        isRepeat = False
        to_delete = []
        for prop in goal:
            if prop in pgNext.propositionLayer.propositions:
                # record this goal's level and mark it for removal from the goal list
                Sum += Level
                to_delete.append(prop)
        for prop in to_delete:
            goal.remove(prop)
        isGoal = len(goal) == 0
    h = Sum
    if isRepeat and not isGoal:
        h = float('inf')
    return h