Example #1
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    level = 0
    total = 0  # renamed from 'sum', which shadowed the builtin
    remaining_goals = list(planning_problem.goal)

    # Build level 0 of the relaxed plan graph from the current state's propositions.
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_current = PlanGraphLevel()
    pg_current.set_proposition_layer(prop_layer_init)
    current_props = pg_current.get_proposition_layer().get_propositions()
    plans_list = [pg_current]

    while remaining_goals:
        # If the graph has levelled off before all sub-goals appeared,
        # the goal is unreachable.
        if is_fixed(plans_list, level):
            return float('inf')
        # Credit every sub-goal that first appears at this level with the
        # level index (goals already true at level 0 contribute 0).
        for goal in remaining_goals[:]:
            if goal in current_props:
                total += level
                remaining_goals.remove(goal)
        next_pg = PlanGraphLevel()
        next_pg.expand_without_mutex(pg_current)
        pg_current = next_pg
        plans_list.append(pg_current)
        level += 1
        current_props = pg_current.get_proposition_layer().get_propositions()
    return total
Example #2
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Seed level 0 of the relaxed plan graph with the current state.
    initial_layer = PropositionLayer()
    for proposition in state:
        initial_layer.add_proposition(proposition)
    level_zero = PlanGraphLevel()
    level_zero.set_proposition_layer(initial_layer)

    graph = [level_zero]
    depth = 0
    # Expand (ignoring mutexes) until every goal proposition is present.
    while planning_problem.goal_state_not_in_prop_layer(
            graph[depth].get_proposition_layer().get_propositions()):
        if is_fixed(graph, depth):
            # Graph levelled off without reaching the goal: unreachable.
            return float('inf')
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[depth])
        graph.append(next_level)
        depth += 1
    return depth
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Level 0 holds only the propositions of the current state.
    base_layer = PropositionLayer()
    for proposition in state:
        base_layer.add_proposition(proposition)
    current = PlanGraphLevel()
    current.set_proposition_layer(base_layer)

    graph = [current]
    num_levels = 0
    # Grow the relaxed graph until the tracked proposition set satisfies the goal.
    while not planning_problem.is_goal_state(state):
        if is_fixed(graph, num_levels):
            return float('inf')
        num_levels += 1
        successor = PlanGraphLevel()
        successor.expand_without_mutex(current)
        current = successor
        state = current.get_proposition_layer().get_propositions()
        graph.append(current)
    return num_levels
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    layer_zero = PropositionLayer()
    for proposition in state:
        layer_zero.add_proposition(proposition)
    root = PlanGraphLevel()
    root.set_proposition_layer(layer_zero)

    graph = [root]
    depth = 0
    total = 0
    # Goals already true in the state first appear at level 0 and contribute 0.
    pending_goals = set(planning_problem.goal) - set(state)

    while planning_problem.goal_state_not_in_prop_layer(
            graph[depth].get_proposition_layer().get_propositions()):
        if is_fixed(graph, depth):
            return float('inf')

        successor = PlanGraphLevel()
        successor.expand_without_mutex(graph[depth])
        graph.append(successor)
        depth += 1

        # Credit each pending goal with the first level it shows up at.
        for proposition in graph[depth].get_proposition_layer().get_propositions():
            if proposition in pending_goals:
                pending_goals.remove(proposition)
                total += depth

    return total
Example #5
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    start_props = PropositionLayer()
    for proposition in state:
        start_props.add_proposition(proposition)
    root_level = PlanGraphLevel()
    root_level.set_proposition_layer(start_props)

    graph = [root_level]
    total = 0
    pending = list(planning_problem.goal)
    while True:
        current_level = len(graph) - 1
        layer_props = graph[-1].get_proposition_layer().get_propositions()
        # Record the first level each still-pending sub-goal appears at.
        for sub_goal in list(pending):
            if sub_goal in layer_props:
                total += current_level
                pending.remove(sub_goal)
        if not pending:
            # Every sub-goal accounted for.
            return total
        if is_fixed(graph, current_level):
            # Graph levelled off: the goal is unreachable.
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(graph[-1])
        graph.append(successor)
Example #6
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    first_layer = PropositionLayer()
    for proposition in state:
        first_layer.add_proposition(proposition)
    base = PlanGraphLevel()
    base.set_proposition_layer(first_layer)

    graph = [base]
    depth = 0
    while True:
        latest_props = graph[-1].get_proposition_layer().get_propositions()
        # Reached the goal: its depth is the heuristic value.
        if planning_problem.is_goal_state(latest_props):
            return depth
        # Levelled off without the goal: unreachable.
        if is_fixed(graph, depth):
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(graph[-1])
        graph.append(successor)
        depth += 1
Example #7
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_current = PlanGraphLevel()
    pg_current.set_proposition_layer(prop_layer_init)
    heuristic = 0
    # Snapshot the proposition set so the fixed-point test compares values,
    # not two references to the same mutable list.
    state = set(state)
    while not planning_problem.is_goal_state(state):
        # BUG FIX: the original called pg_init.expand_without_mutex(pg_init),
        # expanding a level from itself in place, and then compared
        # `state == new_state` where both could alias the layer's internal
        # proposition list — so the fixed-point test could misfire.
        # Expand into a fresh level and compare value snapshots instead.
        pg_next = PlanGraphLevel()
        pg_next.expand_without_mutex(pg_current)
        new_state = set(pg_next.get_proposition_layer().get_propositions())
        if state == new_state:  # fixed point: the goal is unreachable
            return float('inf')
        pg_current = pg_next
        state = new_state
        heuristic += 1
    return heuristic
Example #8
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_current = PlanGraphLevel()
    pg_current.set_proposition_layer(prop_layer_init)

    level, heuristic = 0, 0
    # Sub-goals already satisfied first appear at level 0 and so contribute 0;
    # removing them up front makes the level-0 crediting loop unnecessary.
    remaining = planning_problem.goal - state
    state = set(state)
    while not planning_problem.is_goal_state(state):
        # BUG FIX: the original called pg_init.expand_without_mutex(pg_init),
        # expanding a level from itself in place, and compared `state ==
        # new_state` where both could alias the same internal proposition
        # list, so the fixed-point test could misfire. Expand into a fresh
        # level and compare value snapshots instead.
        pg_next = PlanGraphLevel()
        pg_next.expand_without_mutex(pg_current)
        new_state = set(pg_next.get_proposition_layer().get_propositions())
        if state == new_state:  # fixed point: the goal is unreachable
            return float('inf')
        pg_current = pg_next
        state = new_state
        level += 1
        # Credit each sub-goal with the first level it appears at.
        for prop in state:
            if prop in remaining:
                heuristic += level
                remaining -= {prop}
    return heuristic
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    initial_props = PropositionLayer()
    for prop in state:
        initial_props.add_proposition(prop)
    first_level = PlanGraphLevel()
    first_level.set_proposition_layer(initial_props)

    pending_goals = list(planning_problem.goal)
    level = 0
    total = 0
    graph = [first_level]
    # Run the relaxed graphplan expansion.
    while True:
        current_props = graph[level].proposition_layer.get_propositions()
        # Charge each newly-appearing sub-goal with the current level index.
        satisfied = [goal for goal in pending_goals if goal in current_props]
        for goal in satisfied:
            total += level
            pending_goals.remove(goal)
        if not pending_goals:
            return total
        if is_fixed(graph, level):
            return float('inf')
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[level])
        graph.append(next_level)
        level += 1
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # Level 0: just the propositions of the current state.
    start_layer = PropositionLayer()
    for prop in state:
        start_layer.add_proposition(prop)
    start_level = PlanGraphLevel()
    start_level.set_proposition_layer(start_layer)

    graph = [start_level]
    level = 0
    # Expand (ignoring mutexes) until the goal propositions all appear.
    while not planning_problem.is_goal_state(
            graph[level].proposition_layer.get_propositions()):
        if is_fixed(graph, level):
            # Levelled off before reaching the goal: unreachable.
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(graph[level])
        graph.append(successor)
        level += 1
    return level
Example #11
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    # First layer of the graph: only a proposition layer, built from the state.
    first_prop_layer = PropositionLayer()
    for prop in state:
        first_prop_layer.add_proposition(prop)
    root = PlanGraphLevel()
    root.set_proposition_layer(first_prop_layer)

    graph = [root]
    depth = 0
    while True:
        props_here = graph[depth].get_proposition_layer().get_propositions()
        if planning_problem.is_goal_state(props_here):
            return depth
        if is_fixed(graph, depth):
            # The graph levelled off: the goal can never be reached.
            return float('inf')
        successor = PlanGraphLevel()  # fresh PlanGraph level
        successor.expand_without_mutex(graph[depth])
        graph.append(successor)
        depth += 1
Example #12
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    layer = PropositionLayer()
    for prop in state:
        layer.add_proposition(prop)
    current_level = PlanGraphLevel()
    current_level.set_proposition_layer(layer)

    levels = [current_level]
    props = current_level.get_proposition_layer().get_propositions()
    depth = 0
    # Expand without mutexes until the goal propositions are all present.
    while not planning_problem.is_goal_state(props):
        if is_fixed(levels, depth):
            # Levelled off before the goal appeared: unreachable.
            return float('inf')
        successor = PlanGraphLevel()
        successor.expand_without_mutex(current_level)
        current_level = successor
        levels.append(current_level)
        depth += 1
        props = current_level.get_proposition_layer().get_propositions()
    return depth
Example #13
0
def max_level(state, planning_problem):
    """
    The heuristic value is the number of layers required to expand all goal propositions.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)

    graph = [pg_init]
    level = 0
    # BUG FIX: the original looped WHILE the goal was already satisfied
    # (inverted condition), passed a PropositionLayer where
    # expand_without_mutex expects a PlanGraphLevel, and started counting at
    # 1 (a state that already satisfies the goal must cost 0).
    while not planning_problem.is_goal_state(
            graph[level].get_proposition_layer().get_propositions()):
        if is_fixed(graph, level):
            # Levelled off before reaching the goal: unreachable.
            return float('inf')
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[level])
        graph.append(next_level)
        level += 1
    return level
Example #14
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer = PropositionLayer()
    for prop in state:
        prop_layer.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer)

    graph = [pg_init]
    level = 0
    cost = 0
    # Goals already true in the state first appear at level 0 and so cost 0.
    goal_set = planning_problem.goal.difference(frozenset(state))

    # BUG FIX: the original looped WHILE the goal was satisfied (inverted
    # condition), passed a PropositionLayer to expand_without_mutex (which
    # expects a PlanGraphLevel), and initialized cost to 1 plus an
    # intersection that is empty by construction. Expand until every
    # remaining sub-goal has appeared, charging each with its first level.
    while goal_set:
        if is_fixed(graph, level):
            # Graph levelled off with sub-goals still missing: unreachable.
            return float('inf')
        next_level = PlanGraphLevel()
        next_level.expand_without_mutex(graph[level])
        graph.append(next_level)
        level += 1

        new_props = frozenset(
            next_level.get_proposition_layer().get_propositions())
        # Every goal first appearing at this level contributes the level index.
        cost += level * len(goal_set.intersection(new_props))
        goal_set = goal_set.difference(new_props)

    return cost
Example #15
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    base_layer = PropositionLayer()
    for prop in state:
        base_layer.add_proposition(prop)
    current = PlanGraphLevel()
    current.set_proposition_layer(base_layer)

    goals = planning_problem.goal
    depth = 0
    graph = [current]
    first_seen = {}  # sub-goal -> level at which it first appeared
    total = 0

    # Sub-goals already true at level 0 contribute 0 to the sum.
    for goal in goals:
        if goal in state and goal not in first_seen:
            first_seen[goal] = depth
            total += depth

    while not planning_problem.is_goal_state(state):
        if is_fixed(graph, depth):
            # Graph levelled off before the goal appeared: unreachable.
            return float('inf')
        depth += 1
        previous = current
        current = PlanGraphLevel()
        current.expand_without_mutex(previous)
        state = current.get_proposition_layer().get_propositions()
        graph.append(current)
        # Record each sub-goal the first time it shows up.
        for goal in goals:
            if goal in state and goal not in first_seen:
                first_seen[goal] = depth
                total += depth

    return total
Example #16
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)

    total = 0
    layer_num = 0
    left_goals = set(planning_problem.goal)
    graph = [pg_init]
    # BUG FIX: the original added len(found_goals) — a *count*, not a level —
    # to the sum, and its loop condition exited before crediting sub-goals
    # that first appear at the final layer. Loop until no sub-goals remain,
    # charging each with the index of the layer where it first appears.
    while left_goals:
        if is_fixed(graph, layer_num):
            # Levelled off with sub-goals still missing: unreachable.
            return float('inf')

        props_here = graph[layer_num].get_proposition_layer().get_propositions()
        found_goals = {goal for goal in left_goals if goal in props_here}
        total += layer_num * len(found_goals)
        left_goals -= found_goals
        if not left_goals:
            break

        layer_num += 1
        new_pg = PlanGraphLevel()
        new_pg.expand_without_mutex(graph[layer_num - 1])
        graph.append(new_pg)

    return total
Example #17
0
def level_sum(state, planning_problem):
    """
    The heuristic value is the sum of sub-goals level they first appeared.
    If the goal is not reachable from the state your heuristic should return float('inf')
    """
    level = 0
    total = 0
    goals_left = set(planning_problem.goal)

    # First layer of the graph: only a proposition layer, built from the state.
    prop_layer_init = PropositionLayer()
    for prop in state:
        prop_layer_init.add_proposition(prop)
    pg_init = PlanGraphLevel()
    pg_init.set_proposition_layer(prop_layer_init)
    graph = [pg_init]

    # BUG FIX: the original charged each sub-goal `level + 1`, overestimating
    # every sub-goal by one — a sub-goal already true in the state (level 0)
    # must contribute 0. It also duplicated the counting loop after the while.
    while goals_left:
        # Credit sub-goals first appearing at this level with the level index.
        for prop in graph[level].get_proposition_layer().get_propositions():
            if prop in goals_left:
                total += level
                goals_left.remove(prop)
        if not goals_left:
            break
        if is_fixed(graph, level):
            # Levelled off with sub-goals still missing: unreachable.
            return float('inf')
        level += 1
        pg_next = PlanGraphLevel()
        pg_next.expand_without_mutex(graph[level - 1])
        graph.append(pg_next)
    return total
Example #18
0
class PlanGraphLevel(object):
    """
    A class for representing a level in the plan graph.
    For each level i, the PlanGraphLevel consists of the actionLayer and propositionLayer at this level in this order!
    """
    independent_actions = set()  # updated to the independent_actions of the problem (graph_plan.py line 32)
    actions = []  # updated to the actions of the problem (graph_plan.py line 33 and planning_problem.py line 36)
    props = []  # updated to the propositions of the problem (graph_plan.py line 34 and planning_problem.py line 36)

    @staticmethod
    def set_independent_actions(independent_actions):
        PlanGraphLevel.independent_actions = independent_actions

    @staticmethod
    def set_actions(actions):
        PlanGraphLevel.actions = actions

    @staticmethod
    def set_props(props):
        PlanGraphLevel.props = props

    def __init__(self):
        """
        Constructor
        """
        self.action_layer = ActionLayer()  # see action_layer.py
        self.proposition_layer = PropositionLayer()  # see proposition_layer.py

    def get_proposition_layer(self):  # returns the proposition layer
        return self.proposition_layer

    def set_proposition_layer(self, prop_layer):  # sets the proposition layer
        self.proposition_layer = prop_layer

    def get_action_layer(self):  # returns the action layer
        return self.action_layer

    def set_action_layer(self, action_layer):  # sets the action layer
        self.action_layer = action_layer

    def update_action_layer(self, previous_proposition_layer):
        """
        Updates the action layer given the previous proposition layer (see proposition_layer.py).
        An action is added if all its preconditions are in the previous
        propositions layer and no two of its preconditions are pairwise mutex
        there. all_actions is the set of all actions (including noOps) in the
        domain.
        """
        all_actions = PlanGraphLevel.actions
        for action in all_actions:
            if not previous_proposition_layer.all_preconds_in_layer(action):
                continue
            preconds = action.get_pre()
            # Short-circuit on the first mutex precondition pair; the
            # original materialised every pair before testing any of them.
            has_mutex_pair = any(
                previous_proposition_layer.is_mutex(cond1, cond2)
                for cond1 in preconds
                for cond2 in preconds if cond1 != cond2)
            if not has_mutex_pair:
                self.action_layer.add_action(action)

    def update_mutex_actions(self, previous_layer_mutex_proposition):
        """
        Updates the mutex set in self.action_layer, given the mutex
        propositions from the previous layer. An action is *not* mutex with
        itself, and each unordered pair is recorded only once.
        """
        current_layer_actions = self.action_layer.get_actions()
        for action1 in current_layer_actions:
            for action2 in current_layer_actions:
                if action1 != action2 \
                        and mutex_actions(action1, action2,
                                          previous_layer_mutex_proposition) \
                        and Pair(action1, action2) not in self.action_layer.get_mutex_actions():
                    self.action_layer.add_mutex_actions(action1, action2)

    def update_proposition_layer(self):
        """
        Updates the propositions in the current proposition layer, given the
        current action layer, and fills in each proposition's producers list.
        Note that the same proposition in different layers must be a distinct
        instance (producers lists differ between layers).
        """
        # name -> the Proposition instance created for THIS layer. O(1)
        # lookup; the original scanned dict.values() and re-indexed key/value
        # lists on every add-effect, which was quadratic in the layer size.
        props_by_name = dict()
        current_layer_actions = self.action_layer.get_actions()
        for action in current_layer_actions:
            for proposition in action.get_add():
                name = proposition.get_name()
                if name not in props_by_name:
                    new_prop = Proposition(name)
                    self.proposition_layer.add_proposition(new_prop)
                    props_by_name[name] = new_prop
                props_by_name[name].add_producer(action)

    def update_mutex_proposition(self):
        """
        Updates the mutex propositions in the current proposition layer using
        the mutex actions of the current action layer. Each unordered pair is
        recorded only once.
        """
        current_layer_propositions = self.proposition_layer.get_propositions()
        current_layer_mutex_actions = self.action_layer.get_mutex_actions()
        for prop1 in current_layer_propositions:
            for prop2 in current_layer_propositions:
                if prop1 != prop2 \
                        and mutex_propositions(prop1, prop2,
                                               current_layer_mutex_actions) \
                        and Pair(prop1, prop2) not in self.proposition_layer.get_mutex_props():
                    self.proposition_layer.add_mutex_prop(prop1, prop2)

    def expand(self, previous_layer):
        """
        Builds this level from the previous one. Order matters: the action
        layer depends on the previous propositions, the action mutexes on the
        previous proposition mutexes, and the proposition layer and its
        mutexes on the freshly computed actions.
        """
        previous_proposition_layer = previous_layer.get_proposition_layer()
        previous_layer_mutex_proposition = previous_proposition_layer.get_mutex_props()

        self.update_action_layer(previous_proposition_layer)
        self.update_mutex_actions(previous_layer_mutex_proposition)
        self.update_proposition_layer()
        self.update_mutex_proposition()

    def expand_without_mutex(self, previous_layer):
        """
        Questions 11 and 12
        Same as expand() but skips all mutex bookkeeping; used by the
        relaxed-graph heuristics.
        """
        previous_layer_proposition = previous_layer.get_proposition_layer()
        self.update_action_layer(previous_layer_proposition)
        self.update_proposition_layer()
Example #19
0
class PlanGraphLevel(object):
    """
    A class for representing a level in the plan graph.
    For each level i, the PlanGraphLevel consists of the actionLayer and propositionLayer at this level in this order!
    """
    # Problem-wide data shared by every level; populated once via the static
    # setters below (see graph_plan.py / planning_problem.py).
    independent_actions = set(
    )  # updated to the independent_actions of the problem (graph_plan.py line 32)
    actions = [
    ]  # updated to the actions of the problem (graph_plan.py line 33 and planning_problem.py line 36)
    props = [
    ]  # updated to the propositions of the problem (graph_plan.py line 34 and planning_problem.py line 36)

    @staticmethod
    def set_independent_actions(independent_actions):
        PlanGraphLevel.independent_actions = independent_actions

    @staticmethod
    def set_actions(actions):
        PlanGraphLevel.actions = actions

    @staticmethod
    def set_props(props):
        PlanGraphLevel.props = props

    def __init__(self):
        """
        Constructor: starts with an empty action layer and proposition layer.
        """
        self.action_layer = ActionLayer()  # see action_layer.py
        self.proposition_layer = PropositionLayer()  # see proposition_layer.py

    def get_proposition_layer(self):  # returns the proposition layer
        return self.proposition_layer

    def set_proposition_layer(self, prop_layer):  # sets the proposition layer
        self.proposition_layer = prop_layer

    def get_action_layer(self):  # returns the action layer
        return self.action_layer

    def set_action_layer(self, action_layer):  # sets the action layer
        self.action_layer = action_layer

    def update_action_layer(self, previous_proposition_layer):
        """
        Updates the action layer given the previous proposition layer.

        An action (including noOps) is added when all of its preconditions
        appear in the previous proposition layer and no two of those
        preconditions are mutex there.
        """
        all_actions = PlanGraphLevel.actions

        def admissible_act(action):
            # Every precondition must be present in the previous layer...
            if not previous_proposition_layer.all_preconds_in_layer(action):
                return False
            # ...and no unordered pair of preconditions may be mutex.
            props = action.get_pre()
            for i in range(len(props)):
                for j in range(i + 1, len(props)):
                    if previous_proposition_layer.is_mutex(props[i], props[j]):
                        return False
            return True

        for act in all_actions:
            if admissible_act(act):
                self.action_layer.add_action(act)

    def update_mutex_actions(self, previous_layer_mutex_proposition):
        """
        Updates the mutex set in self.action_layer, given the mutex
        propositions from the previous layer.
        Note that an action is *not* mutex with itself, so only distinct
        pairs (i < j) are tested.
        """
        current_layer_actions = list(self.action_layer.get_actions())

        for i in range(len(current_layer_actions)):
            for j in range(i + 1, len(current_layer_actions)):
                if mutex_actions(current_layer_actions[i],
                                 current_layer_actions[j],
                                 previous_layer_mutex_proposition):
                    self.action_layer.add_mutex_actions(
                        current_layer_actions[i], current_layer_actions[j])

    def update_proposition_layer(self):
        """
        Updates the propositions in the current proposition layer, given the
        current action layer, and records each proposition's producers.
        The same proposition at different levels may have different producer
        lists, so a fresh Proposition instance is created per level.
        """
        current_layer_actions = self.action_layer.get_actions()

        updated_props = {}

        # Map every added proposition to the list of actions that produce it.
        for act in current_layer_actions:
            for prop in act.get_add():
                if prop not in updated_props:
                    updated_props[prop] = list()
                updated_props[prop].append(act)

        # Add a fresh Proposition instance per effect, with its own copy of
        # the producers list.
        for proposition in updated_props:
            new_prop = Proposition(proposition.get_name())
            new_prop.set_producers(updated_props[proposition][:])
            self.proposition_layer.add_proposition(new_prop)

    def update_mutex_proposition(self):
        """
        Updates the mutex propositions in the current proposition layer:
        every distinct pair (i < j) that mutex_propositions reports as mutex
        under the current layer's mutex actions is recorded.
        """
        current_layer_propositions = list(
            self.proposition_layer.get_propositions())
        current_layer_mutex_actions = list(
            self.action_layer.get_mutex_actions())

        for i in range(len(current_layer_propositions)):
            for j in range(i + 1, len(current_layer_propositions)):
                if mutex_propositions(current_layer_propositions[i],
                                      current_layer_propositions[j],
                                      current_layer_mutex_actions):
                    self.proposition_layer.add_mutex_prop(
                        current_layer_propositions[i],
                        current_layer_propositions[j])

    def expand(self, previous_layer):
        """
        Expands this level from previous_layer.

        The four updates must run in this order: actions need the previous
        propositions, action mutexes need the previous proposition mutexes,
        the new propositions need the actions, and proposition mutexes need
        both the new propositions and the action mutexes.
        """
        previous_proposition_layer = previous_layer.get_proposition_layer()
        previous_layer_mutex_proposition = previous_proposition_layer.get_mutex_props(
        )
        # update the action layer with previous prop layer
        self.update_action_layer(previous_proposition_layer)
        # update the mutex action in cur layer
        self.update_mutex_actions(previous_layer_mutex_proposition)
        # update the proposition layer
        self.update_proposition_layer()
        # finally update proposition mutexes
        self.update_mutex_proposition()

    def expand_without_mutex(self, previous_layer):
        """
        Questions 11 and 12
        Expands this level from previous_layer while skipping all mutex
        bookkeeping.  You don't have to use this function.
        """
        # BUG FIX: previously the whole PlanGraphLevel object was passed to
        # update_action_layer, which expects a PropositionLayer (it calls
        # all_preconds_in_layer / is_mutex on it).
        previous_layer_proposition = previous_layer.get_proposition_layer()

        self.update_action_layer(previous_layer_proposition)
        self.update_proposition_layer()

    def graph_plan(self):
        """
        The graphplan algorithm: expand the graph until the goal appears
        mutex-free, then repeatedly try to extract a plan, expanding further
        on failure until a fixed point proves no plan exists.

        NOTE(review): this method reads self.initial_state, self.goal,
        self.graph, self.no_goods and calls self.extract / self.is_fixed /
        self.goal_state_not_in_prop_layer / self.goal_state_has_mutex, none
        of which are defined on PlanGraphLevel — it appears to have been
        pasted here from the GraphPlan/planning-problem class; confirm and
        relocate it.
        """
        # initialization
        init_state = self.initial_state
        level = 0
        self.no_goods = [
        ]  # make sure you update noGoods in your backward search!
        self.no_goods.append([])
        # create first layer of the graph, note it only has a proposition layer which consists of the initial state.
        prop_layer_init = PropositionLayer()
        for prop in init_state:
            prop_layer_init.add_proposition(prop)
        pg_init = PlanGraphLevel()
        pg_init.set_proposition_layer(prop_layer_init)
        self.graph.append(pg_init)
        size_no_good = -1
        """
        While the layer does not contain all of the propositions in the goal state,
        or some of these propositions are mutex in the layer,
        and we have not reached the fixed point, continue expanding the graph
        """

        while self.goal_state_not_in_prop_layer(self.graph[
                                                    level].get_proposition_layer().get_propositions()) or \
                self.goal_state_has_mutex(
                    self.graph[level].get_proposition_layer()):
            if self.is_fixed(level):
                return None
                # this means we stopped the while loop above because we reached a fixed point in the graph.
                #  nothing more to do, we failed!

            self.no_goods.append([])
            level = level + 1
            pg_next = PlanGraphLevel()  # create new PlanGraph object
            pg_next.expand(
                self.graph[level - 1]
            )  # calls the expand function, which you are implementing in the PlanGraph class
            self.graph.append(
                pg_next)  # appending the new level to the plan graph

            size_no_good = len(
                self.no_goods[level])  # remember size of nogood table

        plan_solution = self.extract(self.graph, self.goal, level)
        # try to extract a plan since all of the goal propositions are in current graph level, and are not mutex

        while plan_solution is None:  # while we didn't extract a plan successfully
            level = level + 1
            self.no_goods.append([])
            pg_next = PlanGraphLevel(
            )  # create next level of the graph by expanding
            pg_next.expand(
                self.graph[level -
                           1])  # create next level of the graph by expanding
            self.graph.append(pg_next)
            plan_solution = self.extract(self.graph, self.goal,
                                         level)  # try to extract a plan again
            if plan_solution is None and self.is_fixed(
                    level):  # if failed and reached fixed point
                if len(self.no_goods[level - 1]) == len(self.no_goods[level]):
                    # if size of nogood didn't change, means there's nothing more to do. We failed.
                    return None
                size_no_good = len(
                    self.no_goods[level]
                )  # we didn't fail yet! update size of no good
        return plan_solution
class PlanGraphLevel(object):
    """
    A class for representing a level in the plan graph.
    For each level i, the PlanGraphLevel consists of the actionLayer and propositionLayer at this level in this order!
    """
    # Problem-wide data shared by every level; populated once via the static
    # setters below (see graph_plan.py / planning_problem.py).
    independent_actions = set()  # updated to the independent_actions of the problem (graph_plan.py line 32)
    actions = []  # updated to the actions of the problem (graph_plan.py line 33 and planning_problem.py line 36)
    props = []  # updated to the propositions of the problem (graph_plan.py line 34 and planning_problem.py line 36)

    @staticmethod
    def set_independent_actions(independent_actions):
        PlanGraphLevel.independent_actions = independent_actions

    @staticmethod
    def set_actions(actions):
        PlanGraphLevel.actions = actions

    @staticmethod
    def set_props(props):
        PlanGraphLevel.props = props

    def __init__(self):
        """
        Constructor: starts with an empty action layer and proposition layer.
        """
        self.action_layer = ActionLayer()  # see action_layer.py
        self.proposition_layer = PropositionLayer()  # see proposition_layer.py

    def get_proposition_layer(self):  # returns the proposition layer
        return self.proposition_layer

    def set_proposition_layer(self, prop_layer):  # sets the proposition layer
        self.proposition_layer = prop_layer

    def get_action_layer(self):  # returns the action layer
        return self.action_layer

    def set_action_layer(self, action_layer):  # sets the action layer
        self.action_layer = action_layer

    def update_action_layer(self, previous_proposition_layer):
        """
        Updates the action layer given the previous proposition layer.

        An action (including noOps) is added when all of its preconditions
        are present in the previous proposition layer and no two of them are
        mutex there.
        """
        all_actions = PlanGraphLevel.actions
        for action in all_actions:
            if not previous_proposition_layer.all_preconds_in_layer(action):
                continue
            preconds = action.get_pre()
            mutex_found = False
            # Check every unordered pair of preconditions for a mutex.
            for i in range(len(preconds) - 1):
                for j in range(i + 1, len(preconds)):
                    if previous_proposition_layer.is_mutex(preconds[i],
                                                           preconds[j]):
                        mutex_found = True
                        break
                if mutex_found:
                    break
            if not mutex_found:
                self.action_layer.add_action(action)

    def update_mutex_actions(self, previous_layer_mutex_proposition):
        """
        Updates the mutex set in self.action_layer, given the mutex
        propositions from the previous layer.
        Note that an action is *not* mutex with itself.
        """
        current_layer_actions = list(self.action_layer.get_actions())
        for action1 in current_layer_actions:
            for action2 in current_layer_actions:
                if action1 != action2 and mutex_actions(
                        action1, action2, previous_layer_mutex_proposition):
                    self.action_layer.add_mutex_actions(action1, action2)

    def update_proposition_layer(self):
        """
        Updates the propositions in the current proposition layer, given the
        current action layer, and records each proposition's producers.
        The same proposition at different levels may have different producer
        lists, so a fresh Proposition instance is created per level.
        """
        current_layer_actions = self.action_layer.get_actions()
        # BUG FIX: the old code used dict.fromkeys(names, set()), which binds
        # every key to ONE shared set object — each proposition then reported
        # every producing action in the layer as its own producer.  Each name
        # now gets an independent set.
        producers_by_name = dict()
        for action in current_layer_actions:
            for prop in action.get_add():
                producers_by_name.setdefault(prop.get_name(), set()).add(action)
        for name, producers in producers_by_name.items():
            new_prop = Proposition(name)
            new_prop.set_producers(list(producers))
            self.proposition_layer.add_proposition(new_prop)

    def update_mutex_proposition(self):
        """
        Updates the mutex propositions in the current proposition layer,
        based on the mutex actions of the current action layer.
        """
        current_layer_propositions = \
            list(self.proposition_layer.get_propositions())
        current_layer_mutex_actions = self.action_layer.get_mutex_actions()
        for prop1 in current_layer_propositions:
            for prop2 in current_layer_propositions:
                if prop1 != prop2 and mutex_propositions(
                        prop1, prop2, current_layer_mutex_actions):
                    self.proposition_layer.add_mutex_prop(prop1, prop2)

    def expand(self, previous_layer):
        """
        Expands this level from previous_layer.

        The four updates must run in this order: actions need the previous
        propositions, action mutexes need the previous proposition mutexes,
        the new propositions need the actions, and proposition mutexes need
        both the new propositions and the action mutexes.
        """
        previous_proposition_layer = previous_layer.get_proposition_layer()
        previous_layer_mutex_proposition = previous_proposition_layer.get_mutex_props()
        # build action layer
        self.update_action_layer(previous_proposition_layer)
        # mutex to actions
        self.update_mutex_actions(previous_layer_mutex_proposition)
        # set new prop layer and mutexes
        self.update_proposition_layer()
        self.update_mutex_proposition()

    def expand_without_mutex(self, previous_layer):
        """
        Questions 11 and 12
        Expands this level from previous_layer while skipping all mutex
        bookkeeping.  You don't have to use this function.
        """
        previous_proposition_layer = previous_layer.get_proposition_layer()
        # build action layer
        self.update_action_layer(previous_proposition_layer)
        # set new prop layer
        self.update_proposition_layer()
class PlanGraphLevel(object):
    """
    A class for representing a level in the plan graph.
    For each level i, the PlanGraphLevel consists of the actionLayer and propositionLayer at this level in this order!
    """
    # Problem-wide data shared by every level; populated once via the static
    # setters below (see graph_plan.py / planning_problem.py).
    independent_actions = set(
    )  # updated to the independent_actions of the problem (graph_plan.py line 32)
    actions = [
    ]  # updated to the actions of the problem (graph_plan.py line 33 and planning_problem.py line 36)
    props = [
    ]  # updated to the propositions of the problem (graph_plan.py line 34 and planning_problem.py line 36)

    @staticmethod
    def set_independent_actions(independent_actions):
        PlanGraphLevel.independent_actions = independent_actions

    @staticmethod
    def set_actions(actions):
        PlanGraphLevel.actions = actions

    @staticmethod
    def set_props(props):
        PlanGraphLevel.props = props

    def __init__(self):
        """
        Constructor: starts with an empty action layer and proposition layer.
        """
        self.action_layer = ActionLayer()  # see action_layer.py
        self.proposition_layer = PropositionLayer()  # see proposition_layer.py

    def get_proposition_layer(self):  # returns the proposition layer
        return self.proposition_layer

    def set_proposition_layer(self, prop_layer):  # sets the proposition layer
        self.proposition_layer = prop_layer

    def get_action_layer(self):  # returns the action layer
        return self.action_layer

    def set_action_layer(self, action_layer):  # sets the action layer
        self.action_layer = action_layer

    def update_action_layer(self, previous_proposition_layer):
        """
        Updates the action layer given the previous proposition layer.

        An action (including noOps) is added when all of its preconditions
        appear in the previous proposition layer and no two of them are
        mutex there.
        """
        all_actions = PlanGraphLevel.actions
        for action in all_actions:
            if previous_proposition_layer.all_preconds_in_layer(action):
                pre = action.get_pre()
                # True when some pair of distinct preconditions is mutex in
                # the previous layer (renamed from the misleading 'to_add').
                has_mutex_pair = False
                for prop1 in pre:
                    for prop2 in pre:
                        if prop1 != prop2 and previous_proposition_layer.is_mutex(
                                prop1, prop2):
                            has_mutex_pair = True
                if not has_mutex_pair:
                    self.action_layer.add_action(action)

    def update_mutex_actions(self, previous_layer_mutex_proposition):
        """
        Updates the mutex set in self.action_layer, given the mutex
        propositions from the previous layer.
        Note that an action is *not* mutex with itself.
        """
        current_layer_actions = self.action_layer.get_actions()
        for act1 in current_layer_actions:
            for act2 in current_layer_actions:
                if act1 != act2 and mutex_actions(
                        act1, act2, previous_layer_mutex_proposition):
                    self.action_layer.add_mutex_actions(act1, act2)

    def update_proposition_layer(self):
        """
        Updates the propositions in the current proposition layer, given the
        current action layer, and records each proposition's producers.
        The same proposition at different levels may have different producer
        lists, so a fresh Proposition instance is created per level.
        """
        current_layer_actions = self.action_layer.get_actions()
        prop_dict = dict()
        # Map every added proposition to the list of actions producing it.
        for act in current_layer_actions:
            add_prop_list = act.get_add()
            for prop in add_prop_list:
                if prop not in prop_dict:
                    prop_dict[prop] = [act]
                else:
                    prop_dict[prop].append(act)
        for prop, act_list in prop_dict.items():
            # BUG FIX: the old code mutated the shared Proposition instance
            # coming from the action's add-list and inserted it directly, so
            # the same object (and its producers list) leaked across levels —
            # exactly what this method's contract forbids.  Create a fresh
            # instance per level instead.
            new_prop = Proposition(prop.get_name())
            new_prop.set_producers(act_list)
            self.proposition_layer.add_proposition(new_prop)

    def update_mutex_proposition(self):
        """
        Updates the mutex propositions in the current proposition layer,
        based on the mutex actions of the current action layer.
        """
        current_layer_propositions = self.proposition_layer.get_propositions()
        current_layer_mutex_actions = self.action_layer.get_mutex_actions()
        for prop1 in current_layer_propositions:
            for prop2 in current_layer_propositions:
                if prop1 != prop2 and mutex_propositions(
                        prop1, prop2, current_layer_mutex_actions):
                    self.proposition_layer.add_mutex_prop(prop1, prop2)

    def expand(self, previous_layer):
        """
        Expands this level from previous_layer.

        The four updates must run in this order: actions need the previous
        propositions, action mutexes need the previous proposition mutexes,
        the new propositions need the actions, and proposition mutexes need
        both the new propositions and the action mutexes.
        """
        previous_proposition_layer = previous_layer.get_proposition_layer()
        previous_layer_mutex_proposition = previous_proposition_layer.get_mutex_props(
        )
        self.update_action_layer(previous_proposition_layer)
        self.update_mutex_actions(previous_layer_mutex_proposition)
        self.update_proposition_layer()
        self.update_mutex_proposition()

    def expand_without_mutex(self, previous_layer):
        """
        Questions 11 and 12
        Expands this level from previous_layer while skipping all mutex
        bookkeeping.  You don't have to use this function.
        """
        previous_layer_proposition = previous_layer.get_proposition_layer()
        self.update_action_layer(previous_layer_proposition)
        self.update_proposition_layer()