Ejemplo n.º 1
0
def levelSum(state, problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Seed level 0 of the planning graph with the state's propositions.
    base_layer = PropositionLayer()
    for proposition in state:
        base_layer.addProposition(proposition)
    base_level = PlanGraphLevel()
    base_level.setPropositionLayer(base_layer)

    levels = [base_level]
    depth = 0
    achieved = 0

    # Expand (mutexes disabled) until the full goal appears in some layer.
    while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
        # A fixed point without the goal means it can never be reached.
        if isFixed(levels, depth):
            return float("inf")
        # Count every layer in which a sub-goal is already satisfied.
        if problem.isSubGoal(levels[depth].getPropositionLayer().getPropositions()):
            achieved += 1
        depth += 1
        successor = PlanGraphLevel()
        successor.expandWithoutMutex(levels[depth - 1])
        levels.append(successor)

    # The terminating layer satisfies the complete goal; count it as well.
    return achieved + 1
Ejemplo n.º 2
0
def maxLevel(state, problem):
  """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  # Build level 0 of the planning graph from the state's propositions.
  propLayerInit = PropositionLayer()
  for prop in state:
    propLayerInit.addProposition(prop)
  pgInit = PlanGraphLevel()
  pgInit.setPropositionLayer(propLayerInit)

  graph = [pgInit]  # list of PlanGraphLevel objects
  level = 0

  # BUG FIX: the original broke out of the loop when goalStateNotInPropLayer
  # returned True - i.e. when the goal was still MISSING from the layer - so
  # it returned 0 immediately for any non-goal state. We must keep expanding
  # WHILE the goal is absent, and stop once it appears.
  while problem.goalStateNotInPropLayer(graph[level].getPropositionLayer().getPropositions()):
    # Graph leveled off without the goal: it is unreachable.
    if isFixed(graph, level):
      return float('inf')
    pgNext = PlanGraphLevel()
    pgNext.expandWithoutMutex(graph[level])
    graph.append(pgNext)
    level += 1

  # The loop exits at the first level whose layer contains the goal.
  return level
def levelSum(state, problem):
  """
  The heuristic value is the sum of sub-goals level they first appeared.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  # Level 0 of the relaxed planning graph holds the state's propositions.
  first_layer = PropositionLayer()
  for proposition in state:
    first_layer.addProposition(proposition)
  first_level = PlanGraphLevel()
  first_level.setPropositionLayer(first_layer)

  levels = [first_level]
  depth = 0
  total = 0
  remaining = copy.deepcopy(problem.goal)  # goals not yet seen in any layer

  while len(remaining) > 0:
    # Leveled off while goals are still missing: they can never appear.
    if isFixed(levels, depth):
      return float("inf")
    # Credit each goal with the first depth at which it shows up.
    for proposition in levels[depth].getPropositionLayer().getPropositions():
      if proposition in remaining:
        total += depth
        remaining.remove(proposition)
    successor = PlanGraphLevel()
    successor.expandWithoutMutex(levels[depth])
    depth += 1
    levels.append(successor)

  return total
Ejemplo n.º 4
0
def maxLevel(state, problem):
  """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')  
  A good place to start would be:
  propLayerInit = PropositionLayer()          #create a new proposition layer
  for prop in state:
    propLayerInit.addProposition(prop)        #update the proposition layer with the propositions of the state
  pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
  pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the the proposition layer
  """
  "*** YOUR CODE HERE ***"
  propLayerInit = PropositionLayer()          # create a new proposition layer
  for prop in state:
    propLayerInit.addProposition(prop)        # seed it with the state's propositions
  pgInit = PlanGraphLevel()                   # single graph level, expanded in place below
  pgInit.setPropositionLayer(propLayerInit)   # attach the seeded proposition layer

  level = 0;

  # NOTE(review): pgInit.expandWithoutMutex(pgInit) expands the level onto
  # itself; this assumes expandWithoutMutex reads the previous layer before
  # overwriting it - confirm against PlanGraphLevel's implementation.
  while not problem.isGoalState(pgInit.getPropositionLayer().getPropositions()):
    level += 1
    ## Expand to the next layer (in place).
    prevLayerSize = len(pgInit.getPropositionLayer().getPropositions())
    pgInit.expandWithoutMutex(pgInit)
    ## If the layer did not grow, treat the graph as leveled off and the
    ## goal as unreachable. NOTE(review): this size check assumes layers
    ## only ever grow during expansion - TODO confirm.
    if len(pgInit.getPropositionLayer().getPropositions()) == prevLayerSize:
      return float("inf")

  return level
Ejemplo n.º 5
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Build the first layer from the propositions of the given state.
    start_layer = PropositionLayer()
    for proposition in state:
        start_layer.addProposition(proposition)
    start_level = PlanGraphLevel()
    start_level.setPropositionLayer(start_layer)

    levels = [start_level]
    depth = 0

    # Keep expanding (mutexes disabled) while the goal is missing.
    while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
        if isFixed(levels, depth):
            # Fixed point reached without the goal: it is unreachable.
            return float("inf")
        expanded = PlanGraphLevel()
        expanded.expandWithoutMutex(levels[depth])
        levels.append(expanded)
        depth += 1

    return depth
Ejemplo n.º 6
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Level 0 holds exactly the propositions of the given state.
    layer0 = PropositionLayer()
    for p in state:
        layer0.addProposition(p)
    level0 = PlanGraphLevel()
    level0.setPropositionLayer(layer0)

    plan_graph = [level0]
    depth = 0

    # Relaxed (mutex-free) expansion until a layer satisfies the goal.
    while not problem.isGoalState(plan_graph[depth].getPropositionLayer().getPropositions()):
        if isFixed(plan_graph, depth):
            return float('inf')  # graph leveled off without the goal
        successor = PlanGraphLevel()
        successor.expandWithoutMutex(plan_graph[depth])
        plan_graph.append(successor)
        depth += 1

    return depth
Ejemplo n.º 7
0
def levelSum(state, problem):
  """
  The heuristic value is the sum of sub-goals level they first appeared.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  total = 0
  propLayerInit = PropositionLayer()

  for prop in state:
    propLayerInit.addProposition(prop)

  pgInit = PlanGraphLevel()
  pgInit.setPropositionLayer(propLayerInit)
  g = [pgInit]
  level = 0

  # BUG FIX: work on a local copy. The original removed items directly from
  # problem.goal, corrupting the shared problem object for later calls.
  goals = list(problem.goal)

  while goals:
    if isFixed(g, level):
      return float("inf")

    props = g[level].getPropositionLayer().getPropositions()
    # BUG FIX: iterate over a snapshot. Removing from the list being
    # iterated skips the element after each removal, so adjacent goals
    # appearing at the same level were credited at a later level.
    for goal in list(goals):
      if goal in props:
        goals.remove(goal)
        total += level

    nextPlanGraphLevel = PlanGraphLevel()
    nextPlanGraphLevel.expandWithoutMutex(g[level])
    level += 1
    g.append(nextPlanGraphLevel)
  return total
def maxLevel(state, problem):
  """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  # Build level 0 from the state's propositions.
  base_layer = PropositionLayer()
  for proposition in state:
    base_layer.addProposition(proposition)
  base = PlanGraphLevel()
  base.setPropositionLayer(base_layer)

  levels = [base]
  depth = 0

  while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
    # Expand one more mutex-free level, then test for a fixed point.
    expanded = PlanGraphLevel()
    expanded.expandWithoutMutex(levels[depth])
    depth += 1
    levels.append(expanded)
    if isFixed(levels, depth):
      return float("inf")  # leveled off before reaching the goal

  return depth
Ejemplo n.º 9
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer = PropositionLayer()
    for proposition in state:
        prop_layer.addProposition(proposition)

    root = PlanGraphLevel()
    root.setPropositionLayer(prop_layer)

    levels = [root]
    depth = 0

    # Grow the relaxed graph until some layer satisfies the whole goal.
    while not problem.isGoalState(levels[depth].getPropositionLayer().getPropositions()):
        if isFixed(levels, depth):
            return float('inf')  # no new propositions: goal unreachable
        nxt = PlanGraphLevel()
        nxt.expandWithoutMutex(levels[depth])
        levels.append(nxt)
        depth += 1

    return depth
Ejemplo n.º 10
0
def maxLevel(state, problem):
  """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  # Seed the planning graph with the current state's propositions.
  seed_layer = PropositionLayer()
  for proposition in state:
    seed_layer.addProposition(proposition)
  seed = PlanGraphLevel()
  seed.setPropositionLayer(seed_layer)

  levels = [seed]
  depth = 0

  # Relaxed expansion (mutexes disabled) until the goal is present.
  while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
    if isFixed(levels, depth):
      # The graph leveled off, so no new proposition can ever appear.
      return float('inf')
    nxt = PlanGraphLevel()
    nxt.expandWithoutMutex(levels[depth])
    levels.append(nxt)
    depth += 1

  return depth
Ejemplo n.º 11
0
def levelSum(state, problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """

    level = 0
    total = 0
    graph = []
    goals = list(problem.goal)  # local copy; problem.goal stays untouched

    initial_prop_layer = PropositionLayer()
    for prop in state:
        initial_prop_layer.addProposition(prop)

    initial_level = PlanGraphLevel()
    initial_level.setPropositionLayer(initial_prop_layer)
    graph.append(initial_level)

    while goals:
        if isFixed(graph, level):
            # graph leveled off with goals pending -> unreachable
            return float('inf')

        props = graph[level].getPropositionLayer().getPropositions()
        # BUG FIX: iterate over a snapshot. Removing from the list while
        # iterating it skips the element right after each removal, so goals
        # first appearing at the same level could be credited too late.
        for goal in list(goals):
            if goal in props:
                total += level
                goals.remove(goal)

        level += 1
        next_level = PlanGraphLevel()
        next_level.expandWithoutMutex(graph[level - 1])
        graph.append(next_level)

    return total
Ejemplo n.º 12
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)

    level = 0
    graph = [pgInit]

    while not problem.isGoalState(
            graph[level].getPropositionLayer().getPropositions()):
        # BUG FIX: the original had no fixed-point check, so an unreachable
        # goal made this loop run forever instead of returning float('inf')
        # as the docstring requires.
        if isFixed(graph, level):
            return float('inf')
        level += 1
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)

    return level
Ejemplo n.º 13
0
def levelSum(state, problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()
    for prop in state:
        propLayerInit.addProposition(prop)
    # BUG FIX: the graph level was created INSIDE the loop above, so it was
    # rebuilt on every proposition and left undefined for an empty state.
    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)

    level = 0
    total = 0
    graph = [pgInit]
    goals = [goal for goal in problem.goal]

    while goals:
        # BUG FIX: without a fixed-point check the loop never terminates
        # when the goal is unreachable; the docstring requires float('inf').
        if isFixed(graph, level):
            return float('inf')

        props = graph[level].getPropositionLayer().getPropositions()
        # BUG FIX: iterate a snapshot - removing from the list being
        # iterated silently skips the element following each removal.
        for goal in list(goals):
            if goal in props:
                total += level
                goals.remove(goal)

        level += 1
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(graph[level - 1])
        graph.append(pgNext)

    return total
Ejemplo n.º 14
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Layer 0 is exactly the set of propositions in the current state.
    origin_layer = PropositionLayer()
    for proposition in state:
        origin_layer.addProposition(proposition)

    origin = PlanGraphLevel()
    origin.setPropositionLayer(origin_layer)
    plan_graph = [origin]  # one PlanGraphLevel per depth
    depth = 0

    # Expand the relaxed graph until the goal appears in a layer.
    while problem.goalStateNotInPropLayer(plan_graph[depth].getPropositionLayer().getPropositions()):
        if isFixed(plan_graph, depth):
            # No change between consecutive expansions: the goal can never
            # appear, so report unreachability.
            return float('inf')
        depth += 1
        successor = PlanGraphLevel()
        successor.expandWithoutMutex(plan_graph[depth - 1])
        plan_graph.append(successor)

    return depth
Ejemplo n.º 15
0
def levelSum(state, problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """

    level = 0
    total = 0
    graph = []
    goals = list(problem.goal)  # local copy of the outstanding goals

    initial_prop_layer = PropositionLayer()
    for prop in state:
        initial_prop_layer.addProposition(prop)

    initial_level = PlanGraphLevel()
    initial_level.setPropositionLayer(initial_prop_layer)
    graph.append(initial_level)

    while goals:
        if isFixed(graph, level):
            # leveled off with goals still pending -> unreachable
            return float('inf')

        props = graph[level].getPropositionLayer().getPropositions()
        # BUG FIX: iterate over a snapshot. Calling remove() on the list
        # being iterated skips the next element, so a goal could be counted
        # at a later level than the one where it first appeared.
        for goal in list(goals):
            if goal in props:
                total += level
                goals.remove(goal)

        level += 1
        next_level = PlanGraphLevel()
        next_level.expandWithoutMutex(graph[level - 1])
        graph.append(next_level)

    return total
Ejemplo n.º 16
0
def levelSum(state, problem):
  """
  The heuristic value is the sum of sub-goals level they first appeared.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  # Level 0 of the (in-place) planning graph holds the state's propositions.
  layer = PropositionLayer()
  for proposition in state:
    layer.addProposition(proposition)
  graph_level = PlanGraphLevel()
  graph_level.setPropositionLayer(layer)

  depth = 0
  total = 0
  pending = set(copy.copy(problem.goal))  # goals whose first level is unknown

  while pending:
    # Every goal appearing for the first time contributes the current depth.
    found = set(graph_level.getPropositionLayer().getPropositions()) & pending
    if found:
      total += len(found) * depth
      pending -= found

    depth += 1

    # Expand in place; if the layer size does not grow, the graph leveled
    # off and the remaining goals are unreachable.
    size_before = len(graph_level.getPropositionLayer().getPropositions())
    graph_level.expandWithoutMutex(graph_level)
    if len(graph_level.getPropositionLayer().getPropositions()) == size_before:
      return float("inf")

  return total
Ejemplo n.º 17
0
def maxLevel(state, problem):
  """The heuristic value is the number of layers required to expand all
  goal propositions. If the goal is not reachable from the state, the
  heuristic returns float('inf')."""
  depth = 0
  # Put every proposition of the current state into the first layer.
  start_layer = PropositionLayer()
  for proposition in state:
    start_layer.addProposition(proposition)

  start = PlanGraphLevel()
  start.setPropositionLayer(start_layer)
  # The graph is simply a list of PlanGraphLevel objects.
  levels = [start]

  # Keep expanding while the goal is missing from the newest layer.
  while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
    if isFixed(levels, depth):
      # The graph stopped changing between expansions: unreachable goal.
      return float('inf')
    depth += 1
    nxt = PlanGraphLevel()
    # Expand without mutexes (relaxed version of the problem).
    nxt.expandWithoutMutex(levels[depth - 1])
    levels.append(nxt)

  return depth
Ejemplo n.º 18
0
def maxLevel(state, problem):
    """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')  
  A good place to start would be:
  propLayerInit = PropositionLayer()          #create a new proposition layer
  for prop in state:
    propLayerInit.addProposition(prop)        #update the proposition layer with the propositions of the state
  pgInit = PlanGraphLevel()                   #create a new plan graph level (level is the action layer and the propositions layer)
  pgInit.setPropositionLayer(propLayerInit)   #update the new plan graph level with the the proposition layer
  """
    "*** YOUR CODE HERE ***"

    # Max-level heuristic: expand the graph without mutexes until the goal
    # is reached; the heuristic value is the number of levels needed.

    # NOTE(review): `state` is stored directly as graph level 0 and later
    # passed to expandWithoutMutex; every other block in this file wraps the
    # state in a PropositionLayer/PlanGraphLevel first. Confirm that the
    # caller passes something expandWithoutMutex accepts.
    pg = state

    Graph = []
    Graph.append(pg)
    Level = 0

    pgNext = pg
    isRepeat = False
    isGoal = False
    # NOTE(review): the initial level is never tested for the goal, so a
    # state that already satisfies the goal still expands once and returns
    # at least 1 - confirm this offset is intended.
    while not isGoal and not isRepeat:

        # Expand the next level without mutexes.
        pgPrev = pgNext
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(pgPrev)

        Graph.append(pgNext)
        Level += 1

        # If isFixed() reports a 'levels-off' state, check whether the
        # current level's propositions were all already present at some
        # earlier level of the graph history; if so, we are looping and the
        # heuristic value should be 'inf'.

        if isFixed(Graph, Level):

            pgNextPropositions = pgNext.getPropositionLayer().getPropositions()
            isRepeat = True
            for Hist in range(Level - 1):
                HistLevelPropositions = Graph[Hist].getPropositionLayer(
                ).getPropositions()
                for prop in pgNextPropositions:
                    if prop not in HistLevelPropositions:
                        isRepeat = False

        # Goal test on the freshly expanded level.
        isGoal = not problem.goalStateNotInPropLayer(
            pgNext.propositionLayer.propositions)

    # Reached the goal at `Level`, unless we only stopped due to a repeat.
    h = Level
    if isRepeat and not isGoal:
        h = float('inf')

    return h
Ejemplo n.º 19
0
def expansionGenerator(state, problem):
    """
    Generates and yields the propositions in each level,
    Until the graph becomes fixed.
    """
    # Level 0 contains exactly the propositions of the given state.
    seed_layer = PropositionLayer()
    for proposition in state:
        seed_layer.addProposition(proposition)
    seed = PlanGraphLevel()
    seed.setPropositionLayer(seed_layer)

    levels = [seed]
    index = 0

    # Yield (level number, propositions) pairs until a fixed point.
    while not isFixed(levels, index):
        yield index, levels[index].getPropositionLayer().getPropositions()
        nxt = PlanGraphLevel()
        nxt.expandWithoutMutex(levels[index])
        levels.append(nxt)
        index += 1
Ejemplo n.º 20
0
def levelSum(state, problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    propLayerInit = PropositionLayer()
    for p in state:
        propLayerInit.addProposition(p)

    pgInit = PlanGraphLevel()
    pgInit.setPropositionLayer(propLayerInit)

    graph = [pgInit]  # list of PlanGraphLevel objects
    goals = problem.goal[:]  # local copy; problem.goal itself is untouched
    level = 0
    sum_ = 0

    # keep expanding as long as we still have goal states we didn't see
    while goals:
        if isFixed(graph, level):
            # graph leveled off with goals still pending -> unreachable
            return float('inf')

        props = graph[level].getPropositionLayer().getPropositions()
        # BUG FIX: iterate over a snapshot of the goal list. Removing from
        # the list while iterating it skips the element following each
        # removal, so a goal could be credited at a later (wrong) level.
        for goal in list(goals):
            if goal in props:
                sum_ += level
                goals.remove(goal)

        pg = PlanGraphLevel()
        # expanding using an easier version of the problem - without mutexes
        pg.expandWithoutMutex(graph[level])
        graph.append(pg)
        level += 1

    # NOTE(review): the original adds the final level once more here;
    # preserved as-is - confirm this offset is intentional.
    sum_ += level

    return sum_
Ejemplo n.º 21
0
def levelSum(state, problem):
  """
  The heuristic value is the sum of sub-goals level they first appeared.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  propLayerInit = PropositionLayer()
  for prop in state:
    propLayerInit.addProposition(prop)
  pgInit = PlanGraphLevel()
  pgInit.setPropositionLayer(propLayerInit)

  graph = [pgInit]  # list of PlanGraphLevel objects
  level = 0
  leftGoals = problem.goal.copy()  # goals not yet seen in any layer
  level_sum = 0

  while leftGoals:
    # the graph leveled off while goals remain: they are unreachable
    if isFixed(graph, level):
      return float('inf')
    props = graph[level].getPropositionLayer().getPropositions()
    # BUG FIX: iterate over a snapshot. Removing from leftGoals while
    # iterating it skips the next goal, which would then be credited at a
    # later level than the one where it first appeared.
    for goal in list(leftGoals):
      if goal in props:
        level_sum += level
        leftGoals.remove(goal)

    pgTemp = PlanGraphLevel()
    pgTemp.expandWithoutMutex(graph[level])
    graph.append(pgTemp)
    level += 1
  # adding last level to the sum (preserved from the original), and return it
  level_sum += level
  return level_sum
Ejemplo n.º 22
0
def maxLevel(state, problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    first = PropositionLayer()
    for proposition in state:
        first.addProposition(proposition)

    root = PlanGraphLevel()
    root.setPropositionLayer(first)

    levels = [root]  # the plan graph, one PlanGraphLevel per entry
    depth = 0

    # Expand until the newest layer contains the goal state.
    while problem.goalStateNotInPropLayer(levels[depth].getPropositionLayer().getPropositions()):
        if isFixed(levels, depth):
            # Expansion no longer changes the graph; the goal is unreachable.
            return float('inf')
        nxt = PlanGraphLevel()
        # Relaxed expansion: mutex constraints are ignored.
        nxt.expandWithoutMutex(levels[depth])
        levels.append(nxt)
        depth += 1

    return depth
Ejemplo n.º 23
0
def maxLevel(state, problem):
  """
  The heuristic value is the number of layers required to expand all goal propositions.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
  newPropositionLayer = PropositionLayer()
  # Plain loop instead of a side-effect list comprehension.
  for p in state:
    newPropositionLayer.addProposition(p)

  newPlanGraphLevel = PlanGraphLevel()
  newPlanGraphLevel.setPropositionLayer(newPropositionLayer)

  level = 0
  g = [newPlanGraphLevel]

  while problem.goalStateNotInPropLayer(g[level].getPropositionLayer().getPropositions()):
    if isFixed(g, level):
      return float("inf")

    level += 1
    nextPlanGraphLevel = PlanGraphLevel()
    nextPlanGraphLevel.expandWithoutMutex(g[level - 1])
    # BUG FIX: the original appended newPlanGraphLevel (the INITIAL level)
    # instead of the freshly expanded nextPlanGraphLevel, so the graph never
    # advanced and every expansion result was discarded.
    g.append(nextPlanGraphLevel)

  return level
Ejemplo n.º 24
0
 def nextPlan(plan):
     """Expand *plan* one level without mutexes.

     Returns a (new_level, propositions) pair: the freshly expanded
     PlanGraphLevel and the contents of its proposition layer.
     """
     next_plan = PlanGraphLevel()
     next_plan.expandWithoutMutex(plan)
     return next_plan, next_plan.getPropositionLayer().getPropositions()
Ejemplo n.º 25
0
def levelSum(state, problem):
    """
  The heuristic value is the sum of sub-goals level they first appeared.
  If the goal is not reachable from the state your heuristic should return float('inf')
  """
    "*** YOUR CODE HERE ***"

    # Level-sum heuristic: expand the graph without mutexes until every goal
    # proposition has appeared; the heuristic value is the sum of the levels
    # at which each goal proposition was first reached.

    # NOTE(review): `state` is stored directly as graph level 0 and later
    # passed to expandWithoutMutex; other blocks in this file wrap the state
    # in a PropositionLayer/PlanGraphLevel first. Confirm the caller passes
    # something expandWithoutMutex accepts.
    pg = state

    Graph = []
    Graph.append(pg)
    Level = 0
    Sum = 0
    # Deep copy so removing satisfied goals never mutates problem.goal.
    goal = copy.deepcopy(problem.goal)

    pgNext = pg
    isRepeat = False
    isGoal = False
    # NOTE(review): level 0 (the state itself) is never scanned for goals,
    # so goals already true in the state are credited at level 1 - confirm
    # this offset is intended.
    while not isGoal and not isRepeat:

        # Expand the next level without mutexes.
        pgPrev = pgNext
        pgNext = PlanGraphLevel()
        pgNext.expandWithoutMutex(pgPrev)

        Graph.append(pgNext)
        Level += 1

        # If isFixed() reports a 'levels-off' state, check whether the
        # current level's propositions were all already present at some
        # earlier level of the graph history; if so, we are looping and the
        # heuristic value should be 'inf'.

        if isFixed(Graph, Level):
            pgNextPropositions = pgNext.getPropositionLayer().getPropositions()
            isRepeat = True
            for Hist in range(Level - 1):
                HistLevelPropositions = Graph[Hist].getPropositionLayer(
                ).getPropositions()
                for prop in pgNextPropositions:
                    if prop not in HistLevelPropositions:
                        isRepeat = False

        # Collect goals first satisfied at this level; deferring the removal
        # avoids mutating `goal` while iterating it.
        to_delete = []
        for prop in goal:
            if prop in pgNext.propositionLayer.propositions:
                # credit this goal with the current level, then drop it
                Sum += Level
                to_delete.append(prop)
        for prop in to_delete:
            goal.remove(prop)
        isGoal = len(goal) == 0

    h = Sum
    if isRepeat and not isGoal:
        h = float('inf')

    return h