Example #1
0
def run_test_plan(world_spec, plan, expression_spec, result, sets=None):
    """Execute a plan step by step against a world and verify the outcome.

    Each plan step is a ``(precondition, effect, substitutions)`` triple.
    The precondition is grounded with the step's substitutions and checked
    against the current world before the (equally grounded) effect is
    applied.  The test passes when the final world models
    ``expression_spec`` with truth value ``result``.

    Returns True on success, False on any failure or exception.
    Increments the module-level counters ``run`` and ``passed`` as a side
    effect.
    """
    global run, passed
    run += 1
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if sets is None:
        sets = {}
    try:
        world = make_world(world_spec, sets)
        for i, (pre, eff, subst) in enumerate(plan, start=1):
            run += 1
            preexp = make_expression(pre)
            effexp = make_expression(eff)
            # Ground both expressions with this step's variable bindings.
            for var, val in subst:
                preexp = substitute(preexp, var, val)
                effexp = substitute(effexp, var, val)
            if not models(world, preexp):
                print("precondition failed at step", i)
                return False
            world = apply(world, effexp)
            passed += 1
        exp = make_expression(expression_spec)
        if models(world, exp) == result:
            passed += 1
            return True
    except Exception:
        traceback.print_exc()
    return False
Example #2
0
def run_test_simple(world_spec, expression_spec, result, sets=None):
    """Check that a freshly built world models an expression as expected.

    Builds the world from ``world_spec`` and ``sets``, evaluates
    ``expression_spec`` against it, and passes when the truth value equals
    ``result``.

    Returns True on success, False on any failure or exception.
    Increments the module-level counters ``run`` and ``passed``.
    """
    global run, passed
    run += 1
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if sets is None:
        sets = {}
    try:
        world = make_world(world_spec, sets)
        exp = make_expression(expression_spec)
        if models(world, exp) == result:
            passed += 1
            return True
    except Exception:
        traceback.print_exc()
    return False
Example #3
0
def run_test_substitute(world_spec, expression_spec, subst, result, sets=None):
    """Substitute variable bindings into an expression and test the result.

    The original body was a copy-paste of ``run_test_apply`` and referenced
    an undefined ``action_spec`` (a guaranteed NameError) while ignoring the
    ``subst`` parameter entirely.  This version grounds ``expression_spec``
    with each ``(variable, value)`` pair from ``subst`` — mirroring the way
    ``run_test_plan`` uses ``substitute`` — and checks that the world models
    the grounded expression with truth value ``result``.

    Returns True on success, False on any failure or exception.
    Increments the module-level counters ``run`` and ``passed``.
    """
    global run, passed
    run += 1
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if sets is None:
        sets = {}
    try:
        world = make_world(world_spec, sets)
        exp = make_expression(expression_spec)
        # Ground the expression with every provided binding.
        for var, val in subst:
            exp = substitute(exp, var, val)
        if models(world, exp) == result:
            passed += 1
            return True
    except Exception:
        traceback.print_exc()
    return False
Example #4
0
def plan(domain, problem, useheuristic=True):
    """Search for a plan that solves ``problem`` within ``domain`` via A*.

    When ``useheuristic`` is true the custom ``SuperHeuristic`` guides the
    search; otherwise ``pathfinding.default_heuristic`` is used, which makes
    it easy to compare the two.
    """
    # Collect every object, applying the typing hierarchy.
    objects = mergeObjs(domain, problem)

    # Ground the domain's actions over the collected objects.
    groundActions(domain.actions, objects)

    goal_expression = expressions.make_expression(problem.goal)

    def reached_goal(node):
        """True when the node's state models the goal expression."""
        return expressions.models(node.state, goal_expression)

    def guided_heuristic(node, edge):
        """Heuristic wrapper around SuperHeuristic for this problem."""
        return SuperHeuristic(node, edge, problem.init, problem.goal,
                              objects, reached_goal)  # pathfinding.default_heuristic

    initial_world = expressions.make_world(problem.init, objects)
    start_node = PlanNode("init", initial_world, objects)
    chosen = guided_heuristic if useheuristic else pathfinding.default_heuristic
    return pathfinding.astar(start_node, chosen, reached_goal)
def run_test_planp(world_spec, plan, expression_spec, result, sets=None):
    """Execute a plan (grounded via ``sub``) and verify the final world.

    Like ``run_test_plan`` but each step's precondition and effect are
    grounded by the helper ``sub(spec, subst)`` before parsing, and the
    effect tree is dumped with ``printNAryTree`` for debugging.  On a
    mismatch between the evaluated expression and ``result``, the inputs
    are printed to aid diagnosis.

    Returns True on success, False on any failure or exception.
    Increments the module-level counters ``run`` and ``passed``.
    """
    global run, passed
    run += 1
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if sets is None:
        sets = {}
    try:
        world = make_world(world_spec, sets)
        for i, (pre, eff, subst) in enumerate(plan, start=1):
            run += 1
            preexp = make_expression(sub(pre, subst))
            effexp = make_expression(sub(eff, subst))
            printNAryTree(effexp.getRoot())

            if not models(world, preexp):
                print("precondition failed at step", i, preexp)
                return False
            world = apply(world, effexp)
            passed += 1
        exp = make_expression(expression_spec)
        res = models(world, exp)
        if res == result:
            passed += 1
            return True
        else:
            # Dump the full test case so the failure can be reproduced.
            print('Not pass planp')
            print('world')
            print(world_spec)
            print('plan')
            print(plan)
            print('exp')
            print(expression_spec)
            print('result')
            print(result)
            print('set')
            print(sets)
    except Exception:
        print('error 5')
        traceback.print_exc()
    return False
def run_test_apply(world_spec, action_spec, expression_spec, result, sets=None):
    """Apply one action to a world and verify the resulting state.

    Builds the world, applies the action parsed from ``action_spec``, then
    checks that the new world models ``expression_spec`` with truth value
    ``result``.  On a mismatch, the inputs are printed for diagnosis.

    Returns True on success, False on any failure or exception.
    Increments the module-level counters ``run`` and ``passed``.
    """
    global run, passed
    run += 1
    # A fresh dict per call avoids the shared-mutable-default pitfall.
    if sets is None:
        sets = {}
    try:
        world = make_world(world_spec, sets)
        action = make_expression(action_spec)
        world1 = apply(world, action)
        exp = make_expression(expression_spec)
        if models(world1, exp) == result:
            passed += 1
            return True
        else:
            # Dump the full test case so the failure can be reproduced.
            print('Not pass apply')
            print(world_spec)
            print(action_spec)
            print(expression_spec)
            print(sets)
            print(result)
    except Exception:
        print('error 2')
        traceback.print_exc()

    return False
Example #7
0
 def set_initialStates(self):
     """Build the initial world from the problem's initial states.

     Constructs ``self.world`` with ``expressions.make_world`` from
     ``self.initialStates`` and ``self.domainTypes``.
     # assumes both attributes were populated by the parser — TODO confirm
     """
     self.world = expressions.make_world(self.initialStates, self.domainTypes)
Example #8
0
def plan(domain, problem, useheuristic=True):
    """
    Find a solution to a planning problem in the given domain 
    
    The parameters domain and problem are exactly what is returned from pddl.parse_domain and pddl.parse_problem. If useheuristic is true,
    a planning heuristic (developed in task 4) should be used, otherwise use pathfinding.default_heuristic. This allows you to compare 
    the effect of your heuristic vs. the default one easily.
    
    The return value of this function should be a 4-tuple, with the exact same elements as returned by pathfinding.astar:
       - A plan, which is a sequence of graph.Edge objects that have to be traversed to reach a goal state from the start. Each Edge object represents an action, 
         and the edge's name should be the name of the action, consisting of the name of the operator the action was derived from, followed by the parenthesized 
         and comma-separated parameter values e.g. "move(agent-1,sq-1-1,sq-2-1)"
       - distance is the number of actions in the plan (i.e. each action has cost 1)
       - visited is the total number of nodes that were added to the frontier during the execution of the algorithm 
       - expanded is the total number of nodes that were expanded (i.e. whose neighbors were added to the frontier)
    """
    # Index layout of the parsed tuples (per the pddl parser):
    # domain[0] = pddl_types, domain[1] = pddl_constants,
    # domain[2] = pddl_predicates, domain[3] = pddl_actions
    # problem[0] = pddl_objects, problem[1] = pddl_init_exp,
    # problem[2] = pddl_goal_exp

    def heuristic(state, action):
        """Calculates a heuristic following some basic principles of Fast-Forward algorithm"""
        # initial state is a neighbor of a previous state that A* needs to evaluate
        props_layer = state
        # relaxed plan has layers, each with and action layer and a propositions layer
        relaxed_plan_graph = [[[], props_layer]]

        # extend one action layer and proposition layer at a time while goal is not reached
        while not isgoal(props_layer):
            # next action layer: get "relaxed" neighbors whose Add Lists have a real effect and ignoring Delete Lists
            actions_layer = props_layer.get_neighbors(True)
            # next props layer: start with propositions in current layer and add new ones generated by each new action
            next_props_layer = set(props_layer.world.atoms)
            for next_action in actions_layer:
                next_props_layer = next_props_layer.union(
                    next_action.target.world.atoms)

            # stop if next propositions layer did not add any new propositions, otherwise continue in the loop
            # (fixpoint reached: the relaxed graph cannot grow any further)
            if props_layer.world.atoms.issuperset(next_props_layer):
                break

            # new propositional layer
            new_world = expressions.World(next_props_layer,
                                          props_layer.world.sets)
            props_layer = graph.ExpressionNode(new_world, props_layer.actions,
                                               props_layer.preceding_action)
            # add new actions and props layer to relaxed plan
            relaxed_plan_graph.append([actions_layer, props_layer])

        # extract relaxed plan size and return it as the heuristic value
        return extract_plan_size(relaxed_plan_graph)

    def extract_plan_size(rpg):
        """Extract relaxed plan size based on the number of actions required to complete it"""
        goal = problem[2]
        final_state = rpg[-1][1]

        # if the world in final proposition layer does not contain the goal, return magic large number as h ...
        # (the relaxed graph reached a fixpoint without the goal, i.e. unreachable)
        if not isgoal(final_state):
            return 1000

        # find the layer where each sub-goal appears for the first time on the relaxed planning graph
        first_goal_levels = {}
        add_first_goal_levels(rpg, goal, first_goal_levels)
        # obtain maximum level number where a goal was found
        first_goal_levels_max = max(first_goal_levels.keys())

        # backtrack starting on the last proposition layer we need to consider
        logger.debug("Goal Levels: %s" % first_goal_levels)
        for i in range(first_goal_levels_max, 0, -1):
            logger.debug("BACKTRACKING i: %s" % i)
            # if there is at least one sub-goal on level i
            if i in first_goal_levels:
                add_first_action_levels(rpg, first_goal_levels, i)
        logger.debug("Action-Goal Levels: %s" % first_goal_levels)

        # heuristic value: total number of sub-goals collected across all levels
        h = sum(len(goals) for goals in first_goal_levels.values())

        return h

    def add_first_goal_levels(rpg, goal, first_goal_levels):
        """Find the layer where each sub-goal appears for the first time on the relaxed planning graph"""
        # handle special case when the goal is not a conjunction of atoms, but one atom
        if isinstance(goal, expressions.Atom):
            goal = expressions.And([goal])

        # for each sub-goal in goal
        for sub_goal in goal.operands:
            level = 0
            # for each layer in relaxed planning graph
            for layer in rpg:
                # if the world in this propositional layer models this sub-goal, add sub-goal to that level
                if layer[1].world.models(sub_goal):
                    if level in first_goal_levels:
                        first_goal_levels[level] = first_goal_levels[
                            level].union({sub_goal})
                    else:
                        first_goal_levels[level] = {sub_goal}
                    # break to guarantee we always only use only the first appearance
                    break
                level += 1

    def add_first_action_levels(rpg, first_goal_levels, layer):
        """Find the layer where each action whose effect is a sub-goal appears for the first time on the relaxed
        planning graph. Then consider its preconditions as new sub-goals and add them to preceding layers of
        first_goal_levels that will eventually be reached by the backtracking process to also process their
        preconditions"""
        for sub_goal in first_goal_levels[layer]:
            # for each layer in the relaxed planning graph
            for layer_index in range(len(rpg)):
                # for each action on this layer of the relaxed planning graph
                for action in rpg[layer_index][0]:
                    # determine if this action introduces sub_goal for the first time
                    previous_props_layer = rpg[layer_index - 1][1]
                    next_props_layer = action.target
                    if next_props_layer.world.models(
                            sub_goal
                    ) and not previous_props_layer.world.models(sub_goal):
                        # each precondition must now be considered a sub-goal
                        preconditions = action.target.preceding_action.expression.operands[
                            0]
                        logger.debug("\tACTION: %s" % action.name)
                        logger.debug("\tPRECONS: %s" % preconditions)
                        # find the layer where each sub-goal appears for the first time on the relaxed planning graph
                        add_first_goal_levels(rpg, preconditions,
                                              first_goal_levels)
                        # break to guarantee we always only use only the first appearance
                        break

    def isgoal(state):
        """Check is goal is reached"""
        return state.world.models(problem[2])

    # get the sets variable required to make a n initial world
    world_sets = build_world_sets(domain[1], problem[0], domain[0])

    # get all expanded expressions for all actions
    expanded_expressions = []

    # for each action in the domain
    for action in domain[3]:
        substitutions_per_action = []
        # for each group of params of the same type for this action
        for parameter_type in action.parameters:
            logger.debug("Action: %s - Param Type: %s - Params: %s" %
                         (action.name, parameter_type,
                          action.parameters[parameter_type]))
            # for each param in each group of params of the same type for this action
            for parameter in action.parameters[parameter_type]:
                substitutions_per_param = []
                # for each ground param as taken from world_sets based on type
                for ground_param in world_sets[parameter_type]:
                    logger.debug("\tParam: %s, Ground Param: %s" %
                                 (parameter, ground_param))
                    substitutions_per_param.append([parameter, ground_param])
                substitutions_per_action.append(substitutions_per_param)
        # expand the action with all possible substitutions
        expanded_expressions.extend(
            expand_action(action, substitutions_per_action))

    # create the initial world with pddl_init_exp and world_sets and the start node for astar
    logger.info("Grounded Actions: %s" % len(expanded_expressions))
    world = expressions.make_world(problem[1], world_sets)
    start = graph.ExpressionNode(world, expanded_expressions, None)

    return pathfinding.astar(
        start, heuristic if useheuristic else pathfinding.default_heuristic,
        isgoal)