Esempio n. 1
0
def max_level(state, planning_problem):
    """
    Max-level heuristic: the number of plan-graph levels that must be
    expanded (without mutexes) before every goal proposition appears in
    a proposition layer.
    Returns float('inf') when the goal is unreachable from `state`.
    """
    # Level 0 holds only a proposition layer built from the current state.
    initial_layer = PropositionLayer()
    for proposition in state:
        initial_layer.add_proposition(proposition)
    first_level = PlanGraphLevel()
    first_level.set_proposition_layer(initial_layer)

    levels = [first_level]
    depth = 0
    while True:
        current_props = levels[depth].get_proposition_layer().get_propositions()
        if planning_problem.is_goal_state(current_props):
            return depth
        # A fixed point before reaching the goal means the goal is unreachable.
        if is_fixed(levels, depth):
            return float('inf')
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(levels[depth])
        levels.append(next_level)
        depth += 1
Esempio n. 2
0
def max_level(state, planning_problem):
    """
    Max-level heuristic: number of mutex-free plan-graph expansions needed
    until the proposition layer satisfies the goal.
    Returns float('inf') if a fixed point is reached first (unreachable goal).
    """
    base_layer = PropositionLayer()
    for proposition in state:
        base_layer.add_proposition(proposition)

    current_level = PlanGraphLevel()
    current_level.set_proposition_layer(base_layer)

    history = [current_level]
    depth = 0
    props = current_level.get_proposition_layer().get_propositions()
    while not planning_problem.is_goal_state(props):
        # Hitting a fixed point without the goal means it can never appear.
        if is_fixed(history, depth):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(current_level)
        current_level = successor
        history.append(current_level)
        depth += 1
        props = current_level.get_proposition_layer().get_propositions()
    return depth
Esempio n. 3
0
def max_level(state, planning_problem):
    """
    Counts how many plan-graph levels must be expanded (ignoring mutexes)
    before all goal propositions appear; float('inf') if that never happens.
    """
    layer0 = PropositionLayer()
    for p in state:
        layer0.add_proposition(p)
    level0 = PlanGraphLevel()
    level0.set_proposition_layer(layer0)

    levels = [level0]
    depth = 0
    while not planning_problem.is_goal_state(
            levels[-1].get_proposition_layer().get_propositions()):
        # Fixed point without the goal -> the goal is unreachable.
        if is_fixed(levels, depth):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(levels[-1])
        levels.append(successor)
        depth += 1
    return depth
def max_level(state, planning_problem):
    """
    Number of no-mutex expansions of the planning graph needed before the
    proposition layer contains every goal proposition.
    Returns float('inf') when a fixed point is hit first (goal unreachable).
    """
    start_layer = PropositionLayer()
    for p in state:
        start_layer.add_proposition(p)
    current = PlanGraphLevel()
    current.set_proposition_layer(start_layer)

    levels = [current]
    count = 0
    props = state
    while not planning_problem.is_goal_state(props):
        if is_fixed(levels, count):
            return float('inf')
        count += 1
        previous = current
        current = PlanGraphLevel()
        current.expand_without_mutex(previous)
        props = current.get_proposition_layer().get_propositions()
        levels.append(current)
    return count
Esempio n. 5
0
def level_sum(state, planning_problem):
    """
    Level-sum heuristic: adds up, over all goal propositions, the first
    level at which each one appears in the (mutex-free) planning graph.
    Returns float('inf') if some goal proposition never appears.
    """
    base = PropositionLayer()
    for proposition in state:
        base.add_proposition(proposition)
    level0 = PlanGraphLevel()
    level0.set_proposition_layer(base)
    levels = [level0]
    depth = 0

    total = 0
    # Goals already satisfied at level 0 contribute nothing to the sum.
    pending = set(planning_problem.goal) - set(state)

    while planning_problem.goal_state_not_in_prop_layer(
            levels[depth].get_proposition_layer().get_propositions()):
        if is_fixed(levels, depth):
            return float('inf')

        depth += 1
        successor = PlanGraphLevel()
        successor.expand_without_mutex(levels[depth - 1])
        levels.append(successor)

        # Credit every goal that shows up here for the first time.
        for proposition in levels[depth].get_proposition_layer().get_propositions():
            if proposition in pending:
                pending.remove(proposition)
                total += depth

    return total
Esempio n. 6
0
def max_level(state, planning_problem):
    """
    Max-level heuristic: index of the first plan-graph level whose
    proposition layer contains all goal propositions, or float('inf')
    when the graph reaches a fixed point without the goal.
    """
    start = PropositionLayer()
    for p in state:
        start.add_proposition(p)
    root = PlanGraphLevel()
    root.set_proposition_layer(start)

    levels = [root]
    idx = 0
    while planning_problem.goal_state_not_in_prop_layer(
            levels[idx].get_proposition_layer().get_propositions()):
        # Stuck at a fixed point: the missing goals will never appear.
        if is_fixed(levels, idx):
            return float('inf')
        idx += 1
        nxt = PlanGraphLevel()
        nxt.expand_without_mutex(levels[idx - 1])
        levels.append(nxt)
    return idx
Esempio n. 7
0
def level_sum(state, planning_problem):
    """
    Level-sum heuristic: the sum over all goal propositions of the first
    graph level at which each appears; float('inf') when the graph stops
    growing before every goal shows up.
    """
    layer = PropositionLayer()
    for proposition in state:
        layer.add_proposition(proposition)
    graph_level = PlanGraphLevel()
    graph_level.set_proposition_layer(layer)

    depth = 0
    total = 0
    # Goals already true in `state` first appear at level 0, so they add 0
    # and can be dropped from the pending set immediately.
    pending = planning_problem.goal - state
    while not planning_problem.is_goal_state(state):
        # The single level is expanded in place; progress is detected by
        # comparing the proposition set before and after.
        graph_level.expand_without_mutex(graph_level)
        expanded = graph_level.get_proposition_layer().get_propositions()
        if state == expanded:  # fixed point: goal unreachable
            return float('inf')
        state = expanded
        depth += 1
        for proposition in state:
            if proposition in pending:
                total += depth
                pending -= {proposition}
    return total
Esempio n. 8
0
def level_sum(state, planning_problem):
    """
    Sum of the first-appearance levels of every goal proposition in a
    mutex-free planning graph; float('inf') if the graph reaches a fixed
    point while some goals are still missing.
    """
    initial = PropositionLayer()
    for proposition in state:
        initial.add_proposition(proposition)
    root = PlanGraphLevel()
    root.set_proposition_layer(initial)

    levels = [root]
    total = 0
    pending = list(planning_problem.goal)
    while True:
        depth = len(levels) - 1
        current = levels[-1].get_proposition_layer().get_propositions()
        # Credit each goal with the level where it first shows up.
        for proposition in current:
            if proposition in pending:
                total += depth
                pending.remove(proposition)
        if not pending:
            return total
        if is_fixed(levels, len(levels) - 1):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(levels[-1])
        levels.append(successor)
Esempio n. 9
0
def max_level(state, planning_problem):
    """
    Max-level heuristic: how many in-place expansions of the planning
    graph are needed until the proposition layer satisfies the goal.
    Returns float('inf') when expansion stops adding propositions first.
    """
    layer = PropositionLayer()
    for proposition in state:
        layer.add_proposition(proposition)
    level = PlanGraphLevel()
    level.set_proposition_layer(layer)

    depth = 0
    while not planning_problem.is_goal_state(state):
        # Expand the single level in place; compare proposition sets to
        # detect a fixed point.
        level.expand_without_mutex(level)
        expanded = level.get_proposition_layer().get_propositions()
        if state == expanded:  # no growth: goal unreachable
            return float('inf')
        state = expanded
        depth += 1
    return depth
def level_sum(state, planning_problem):
    """
    Level-sum heuristic: every goal proposition contributes the index of
    the first plan-graph level containing it; float('inf') if the graph
    reaches a fixed point while goals remain unsatisfied.
    """
    base_layer = PropositionLayer()
    for proposition in state:
        base_layer.add_proposition(proposition)
    root = PlanGraphLevel()
    root.set_proposition_layer(base_layer)
    pending_goals = list(planning_problem.goal)
    depth = 0
    total = 0
    levels = [root]
    # Expand the graph until all goals have been seen or it stops growing.
    while True:
        satisfied = []
        for goal in pending_goals:
            if goal in levels[depth].proposition_layer.get_propositions():
                total += depth
                satisfied.append(goal)
        for goal in satisfied:
            pending_goals.remove(goal)
        if len(pending_goals) == 0:
            return total
        if is_fixed(levels, depth):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(levels[depth])
        levels.append(successor)
        depth += 1
def max_level(state, planning_problem):
    """
    Max-level heuristic: index of the first level whose proposition layer
    satisfies the goal; float('inf') when a fixed point comes first.
    """
    # Build level 0 from the propositions of the current state.
    layer0 = PropositionLayer()
    for proposition in state:
        layer0.add_proposition(proposition)
    root = PlanGraphLevel()
    root.set_proposition_layer(layer0)

    depth = 0
    levels = [root]
    while True:
        if planning_problem.is_goal_state(
                levels[depth].proposition_layer.get_propositions()):
            return depth
        if is_fixed(levels, depth):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(levels[depth])
        levels.append(successor)
        depth += 1
Esempio n. 12
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Fixes vs. the previous version: the accumulator was named `sum`,
    # shadowing the builtin; the goal list was fully copied on every
    # iteration; and one extra (useless) expansion ran after the last
    # goal had already been credited.
    level = 0
    total = 0
    pending_goals = set(planning_problem.goal)  # goals not yet seen in any layer
    # Level 0 contains exactly the propositions of the current state.
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)
    current_props = pg_init.get_proposition_layer().get_propositions()
    plans_list = [pg_init]
    while pending_goals:
        if is_fixed(plans_list, level):
            # Fixed point with goals still missing: unreachable.
            return float('inf')
        # Credit each newly appearing goal with the current level index.
        for goal in list(pending_goals):
            if goal in current_props:
                total += level
                pending_goals.remove(goal)
        if not pending_goals:
            break  # all goals credited; no need to expand again
        next_pg = PlanGraphLevel()
        next_pg.expand_without_mutex(pg_init)
        pg_init = next_pg
        plans_list.append(pg_init)
        level += 1
        current_props = pg_init.get_proposition_layer().get_propositions()
    return total
Esempio n. 13
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # BUG FIXES vs. the previous version:
    #  * the while condition was inverted (`is_goal_state` instead of
    #    `not is_goal_state`), so the loop only ran while the goal was
    #    already satisfied;
    #  * `expand_without_mutex` was given a PropositionLayer instead of
    #    the previous PlanGraphLevel (all sibling implementations pass
    #    the previous level);
    #  * the counter started at 1, so a state already satisfying the goal
    #    reported 1 instead of 0.
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)

    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)

    graph = [pg_init]
    level = 0
    while not planning_problem.is_goal_state(
            graph[level].get_proposition_layer().get_propositions()):
        if is_fixed(graph, level):
            return float('inf')  # fixed point reached: goal unreachable
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[level])
        graph.append(next_level)
        level += 1
    return level
Esempio n. 14
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # BUG FIXES vs. the previous version:
    #  * inverted while condition (`is_goal_state` ran the loop only while
    #    the goal was already satisfied);
    #  * `expand_without_mutex` was fed a PropositionLayer, not the
    #    previous PlanGraphLevel;
    #  * the total started at 1 rather than 0 (the added intersection term
    #    was always empty since goal_set already excluded `state`).
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)

    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)
    graph = [pg_init]

    # Goals already true at level 0 contribute 0 to the sum.
    goal_set = planning_problem.goal.difference(frozenset(state))
    cost = 0
    level = 0
    while goal_set:
        if is_fixed(graph, level):
            return float('inf')  # graph stopped growing with goals missing
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[level])
        graph.append(next_level)
        level += 1
        props = frozenset(
            graph[level].get_proposition_layer().get_propositions())
        # Each goal appearing for the first time contributes this level.
        cost += level * len(goal_set.intersection(props))
        goal_set = goal_set.difference(props)
    return cost
Esempio n. 15
0
def level_sum(state, planning_problem):
    """
    Level-sum heuristic: each goal proposition contributes the first level
    at which it appears in the plan graph; float('inf') when the graph
    hits a fixed point before all goals appear.
    """
    layer0 = PropositionLayer()
    for proposition in state:
        layer0.add_proposition(proposition)
    current = PlanGraphLevel()
    current.set_proposition_layer(layer0)

    goals = planning_problem.goal
    depth = 0
    levels = [current]
    first_appearance = {}  # goal proposition -> level where it first showed up
    total = 0

    # Goals already present in the starting state appear at level 0.
    for goal in goals:
        if goal in state and goal not in first_appearance:
            first_appearance[goal] = depth
            total += depth

    while not planning_problem.is_goal_state(state):
        if is_fixed(levels, depth):
            return float('inf')
        depth += 1
        previous = current
        current = PlanGraphLevel()
        current.expand_without_mutex(previous)
        state = current.get_proposition_layer().get_propositions()
        levels.append(current)
        # Record goals newly reached at this depth.
        for goal in goals:
            if goal in state and goal not in first_appearance:
                first_appearance[goal] = depth
                total += depth

    return total
Esempio n. 16
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # BUG FIX: each goal used to be credited `level + 1`, off by one from
    # the documented "level at which the sub-goal first appeared" (a goal
    # already true at level 0 must contribute 0, as the sibling
    # implementations do). The goal-collection step is also factored into
    # a helper instead of being duplicated after the loop.
    level = 0
    graph = []
    level_sum_counter = 0
    goals_set = set(planning_problem.goal)

    def _credit_new_goals(at_level):
        """Return the sum of `at_level` over goals first appearing there."""
        credited = 0
        for prop in graph[at_level].get_proposition_layer().get_propositions():
            if prop in goals_set:
                credited += at_level
                goals_set.remove(prop)
        return credited

    # Level 0 holds only a proposition layer built from the current state.
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)
    graph.append(pg_init)
    while not planning_problem.is_goal_state(
            graph[level].get_proposition_layer().get_propositions()):
        level_sum_counter += _credit_new_goals(level)
        if is_fixed(graph, level):
            return float('inf')  # fixed point: remaining goals unreachable
        level += 1
        pg_next = PlanGraphLevel()
        pg_next.expand_without_mutex(graph[level - 1])
        graph.append(pg_next)
    # The final (goal-satisfying) layer may introduce goals too.
    level_sum_counter += _credit_new_goals(level)
    return level_sum_counter
Esempio n. 17
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # BUG FIXES vs. the previous version:
    #  * the accumulator added len(found_goals) instead of
    #    layer_num * len(found_goals), so it counted goals rather than
    #    summing the levels at which they first appeared;
    #  * goals first appearing in the final (goal-satisfying) layer were
    #    never credited because the loop exited before collecting them;
    #  * the accumulator `sum` shadowed the builtin.
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)

    total = 0
    layer_num = 0
    left_goals = set(planning_problem.goal)
    graph = [pg_init]
    while left_goals:
        props = graph[layer_num].get_proposition_layer().get_propositions()
        # Credit every goal that appears here for the first time.
        found_goals = {goal for goal in left_goals if goal in props}
        total += layer_num * len(found_goals)
        left_goals -= found_goals
        if not left_goals:
            break
        if is_fixed(graph, layer_num):
            return float('inf')  # graph stopped growing with goals missing
        layer_num += 1
        new_pg = PlanGraphLevel()
        new_pg.expand_without_mutex(graph[layer_num - 1])
        graph.append(new_pg)
    return total
    def graph_plan(self):
        """
        Run the GraphPlan algorithm.

        Phase 1 expands the planning graph (with mutexes, via `expand`)
        until every goal proposition is present and mutex-free in the top
        proposition layer. Phase 2 repeatedly calls `extract` to search
        for a plan, expanding one more level after each failed attempt.

        Returns the extracted plan, or None when the graph reaches a fixed
        point and the no-good table stops growing (no plan exists).
        The code calls the extract function which you should complete below.
        """
        # initialization
        init_state = self.initial_state
        level = 0
        self.no_goods = [
        ]  # make sure you update noGoods in your backward search!
        self.no_goods.append([])
        # create first layer of the graph, note it only has a proposition layer which consists of the initial state.
        prop_layer_init = PropositionLayer()
        for prop in init_state:
            prop_layer_init.add_proposition(prop)
        pg_init = PlanGraphLevel()
        pg_init.set_proposition_layer(prop_layer_init)
        self.graph.append(pg_init)
        size_no_good = -1  # NOTE(review): assigned below but never read in this method
        """
        While the layer does not contain all of the propositions in the goal state,
        or some of these propositions are mutex in the layer we,
        and we have not reached the fixed point, continue expanding the graph
        """

        while self.goal_state_not_in_prop_layer(self.graph[
                                                    level].get_proposition_layer().get_propositions()) or \
                self.goal_state_has_mutex(
                    self.graph[level].get_proposition_layer()):
            if self.is_fixed(level):
                return None
                # this means we stopped the while loop above because we reached a fixed point in the graph.
                #  nothing more to do, we failed!

            self.no_goods.append([])
            level = level + 1
            pg_next = PlanGraphLevel()  # create new PlanGraph object
            pg_next.expand(
                self.graph[level - 1]
            )  # calls the expand function, which you are implementing in the PlanGraph class
            self.graph.append(
                pg_next)  # appending the new level to the plan graph

            size_no_good = len(
                self.no_goods[level])  # remember size of nogood table

        plan_solution = self.extract(self.graph, self.goal, level)
        # try to extract a plan since all of the goal propositions are in current graph level, and are not mutex

        while plan_solution is None:  # while we didn't extract a plan successfully
            level = level + 1
            self.no_goods.append([])
            pg_next = PlanGraphLevel(
            )  # create next level of the graph by expanding
            pg_next.expand(
                self.graph[level -
                           1])  # create next level of the graph by expanding
            self.graph.append(pg_next)
            plan_solution = self.extract(self.graph, self.goal,
                                         level)  # try to extract a plan again
            if plan_solution is None and self.is_fixed(
                    level):  # if failed and reached fixed point
                if len(self.no_goods[level - 1]) == len(self.no_goods[level]):
                    # if size of nogood didn't change, means there's nothing more to do. We failed.
                    return None
                size_no_good = len(
                    self.no_goods[level]
                )  # we didn't fail yet! update size of no good
        return plan_solution
Esempio n. 19
0
 def __init__(self):
     """
     Create an empty plan-graph level, holding one (initially empty)
     action layer and one (initially empty) proposition layer.
     """
     self.action_layer = ActionLayer()  # see action_layer.py
     self.proposition_layer = PropositionLayer()  # see proposition_layer.py