Esempio n. 1
0
def compile(domain, rank=False, alloutcome=False, verbose=False):
    """
    Compile a non-deterministic domain into a set of deterministic domains.

    Writes one pddl file per compiled deterministic domain, plus a
    '<name>.prob' file (json: non-deterministic action names and their
    outcome counts) and a '<name>.acts' file (json: mapping of compiled
    action names back to the original names), into a fresh directory
    under /tmp/safe-planner.

    @arg domain: a parsed (possibly non-deterministic) domain object
    @arg rank: passed through to 'compilation' to rank compiled outcomes
    @arg alloutcome: force an all-outcome compilation
    @arg verbose: print progress information
    @return the path of the directory containing the generated files
    """

    # a domain without probabilistic/non-deterministic requirements is
    # treated as deterministic and compiled with alloutcome=True
    if not (':probabilistic-effects' in domain.requirements or\
            ':non-deterministic' in domain.requirements):
        if verbose:
            print(
                color.fg_yellow(
                    '-- the \':probabilistic-effects\' requirement is not present'
                ))
            print(
                color.fg_yellow(
                    '-- \'{}\' is assumed as a deterministic domain'.format(
                        domain.name)))
        (deterministic_domains, nd_actions,
         map_actions) = compilation(domain, rank, alloutcome=True)
    else:
        (deterministic_domains, nd_actions,
         map_actions) = compilation(domain, rank, alloutcome)

    ## create the directory for compiled deterministic domains
    # a microsecond timestamp keeps concurrent runs from colliding
    domains_dir = '/tmp/safe-planner/{}{}/'.format(
        domain.name, str(int(time.time() * 1000000)))
    if not os.path.exists(domains_dir): os.makedirs(domains_dir)

    ## create deterministic domains files
    # NOTE: use a distinct loop variable so the original 'domain' argument
    # is not shadowed -- it is still needed for the file names below
    # (the compiled domains carry the same name, so output is unchanged)
    for i, det_domain in enumerate(deterministic_domains):
        pddl_file = '%s/%s%03d.pddl' % (domains_dir, det_domain.name, i + 1)
        with open(pddl_file, 'w') as f:
            # 'with' closes the file; no explicit close() needed
            f.write(pddl.to_pddl(det_domain))

    ## write nd_actions to a file
    prob_file = '{}/{}.prob'.format(domains_dir, domain.name)
    with open(prob_file, 'w') as f:
        json.dump(nd_actions, f)

    ## write map_actions to a file
    map_file = '{}/{}.acts'.format(domains_dir, domain.name)
    with open(map_file, 'w') as f:
        json.dump(map_actions, f)

    if verbose:
        print('{} deterministic domains generated in \'{}\''.format(
            str(len(deterministic_domains)), domains_dir))

    return domains_dir
Esempio n. 2
0
def send_json_plan(plan_json_file, actions_json_file):
    """Publish the base path of the plan json file on the 'plan' ROS topic.

    Also resets the module-level list of successfully executed action ids.
    ('actions_json_file' is currently unused but kept for the caller's
    interface.)
    """

    # reset the ids of successfully executed actions
    global action_ids
    action_ids = []

    # strip two extensions (e.g. '<name>.plan.json' -> '<name>') to get
    # the common base path of the json files
    base = os.path.splitext(plan_json_file)[0]
    plan_json_file = os.path.splitext(base)[0]

    # latched publisher so late subscribers still receive the path
    pub = rospy.Publisher('plan', String, queue_size=10, latch=True)
    rospy.init_node('tamp_interface', anonymous=True)
    pub.publish(plan_json_file)

    header = color.fg_yellow('\n - path to the json files: ')
    print(header + '{}'.format(plan_json_file))
    return
Esempio n. 3
0
def compilation(domain, rank=False, alloutcome=False, probability=0.4):
    '''Compile a non-deterministic domain object into deterministic domains.

    @arg domain: a domain object whose actions may carry probabilistic
         and/or oneof (non-deterministic) effects
    @arg rank: if True, order compiled outcomes ascending by probability /
         effect length instead of the default descending order
    @arg alloutcome: if True, emit a single all-outcome domain instead of
         the Cartesian product of single-outcome domains
    @arg probability: threshold used only when rank=True to decide whether
         the original deterministic effect is ranked first
    @return a tuple (deterministic_domains, nd_actions, map_actions):
         - deterministic_domains: a list of Domain objects
         - nd_actions: OrderedDict mapping (extended) action names to the
           number of their possible outcomes
         - map_actions: dict mapping compiled action names back to the
           original action names
    '''

    ## NOTE: currently, it is supposed that !!AN ACTION!! does not have both
    ##       probabilistic and non-deterministic effects simultaneously;
    ##       but instead it must have either probabilistic or non-deterministic effects.

    ## list of non-deterministic/probabilistic actions: key - name; value - number of possible outcomes
    nd_actions = OrderedDict()

    ## a mapping of actions names:
    # for non-deterministic actions an extended name is created and
    # for deterministic actions the mapping is to their original names
    map_actions = dict()

    ## a list of all possible deterministic actions
    deterministic_actions = list()

    for action in domain.actions:

        ## a list of all possible effects separately
        deterministic_effects = list()

        ## split action probabilistic effects into a list of deterministic effects
        ## make all possible combination of probabilistic effects
        probabilistic_effects = list()
        for prob_eff_lst in action.probabilistic:
            # rank by the highest probability
            if rank:
                prob_eff_lst = tuple(
                    sorted(prob_eff_lst, key=lambda x: x[0], reverse=False))
            # if probabilities already sum to 1 no neutral outcome is needed
            if sum([eff[0] for eff in prob_eff_lst]) == 1:
                probabilistic_effects.append(prob_eff_lst)
            elif rank:
                # pad with an empty Effect at the given threshold probability
                # and keep the tuple sorted ascending by probability
                probabilistic_effects.append(\
                    tuple(sorted(tuple([(probability, Effect())])+prob_eff_lst, key=lambda x: x[0], reverse=False)))
            else:
                # pad with a zero-probability empty Effect (neutral outcome)
                probabilistic_effects.append(prob_eff_lst +
                                             tuple([(0, Effect())]))

        # one deterministic effect per combination of probabilistic outcomes;
        # each combination is merged with the action's base effects
        for prob_eff in list(product(*probabilistic_effects)):
            if prob_eff:
                literals_lst, forall_lst, when_lst = [], [], []
                for eff in prob_eff:
                    literals_lst.extend(eff[1].literals)
                    forall_lst.extend(eff[1].forall)
                    when_lst.extend(eff[1].when)
                eff = Effect(action.effects.literals+tuple(literals_lst), \
                        action.effects.forall+tuple(forall_lst), \
                        action.effects.when+tuple(when_lst))
                if eff: deterministic_effects.append(eff)

        ## split action oneof effects into a list of deterministic effects
        ## make all possible combination of oneof effects
        oneof_effects = list()
        if rank:
            for oneof_eff_lst in action.oneof:
                # oneof_effects.append(tuple(sorted(oneof_eff_lst, key=lambda x: x.__len__(), reverse=True)))
                oneof_effects.append(tuple(reversed(oneof_eff_lst)))
        else:
            oneof_effects = action.oneof

        # same combination/merge scheme as for the probabilistic effects,
        # but oneof outcomes carry no probability value
        for oneof_eff in list(product(*oneof_effects)):
            if oneof_eff:
                literals_lst, forall_lst, when_lst = [], [], []
                for eff in oneof_eff:
                    literals_lst.extend(eff.literals)
                    forall_lst.extend(eff.forall)
                    when_lst.extend(eff.when)
                eff = Effect(action.effects.literals+tuple(literals_lst), \
                        action.effects.forall+tuple(forall_lst), \
                        action.effects.when+tuple(when_lst))
                if eff: deterministic_effects.append(eff)

        ## include also the action (deterministic) effects if the total probability is less than 1.0
        ## or if there is only one probabilistic/oneof effect
        if action.effects and (len(deterministic_effects) == 0
                               or len(deterministic_effects) == 1):
            if rank and (action.oneof or \
                         action.probabilistic and action.probabilistic[0][0][0] < probability):
                deterministic_effects.insert(0, action.effects)
            else:
                deterministic_effects.extend([action.effects])

        ## if there is only one probabilistic/oneof effect, then we also need a neutral
        ## effect, i.e., we add the action preconditions (literals only) as an action effect
        if not action.effects and len(deterministic_effects) == 1:
            # deterministic_effects.extend([Effect()])
            # ideally empty effect should be added, however, some classical planners
            # will fail when an actions has no effect, so, we add the precondition as
            # the action's effect, that is, no effect will apply
            # first, make sure ignoring equalities in preconditions
            precond_lts = tuple(lit for lit in action.preconditions.literals
                                if '=' not in str(lit))
            if rank and action.probabilistic and action.probabilistic[0][0][
                    0] < probability:
                deterministic_effects.insert(0, Effect(literals=precond_lts))
            else:
                deterministic_effects.extend([Effect(literals=precond_lts)])

        ## rank based on the number (length) of effects (EFF)
        ## Descending: reverse=True (rank:False) (default), Ascending: reverse=False (rank=True)
        ## Descending: large to small number of effects
        ## Ascending: small to large number of effects
        deterministic_effects = tuple(
            sorted(deterministic_effects,
                   key=lambda x: len(x),
                   reverse=not rank))

        ## add current action into map_actions
        map_actions[action.name] = action.name

        ## all possible compiled deterministic actions for current action
        deterministic_action = []

        ## if current action is non-deterministic/probabilistic
        if len(deterministic_effects) > 1:
            ## add action to nd_actions
            nd_actions[action.name] = len(deterministic_effects)
            for i, effect in enumerate(deterministic_effects):
                ## create a new action name for each outcome and add them to mappings
                nd_actions['%s_%s' %
                           (action.name, i)] = len(deterministic_effects)
                map_actions['%s_%s' % (action.name, i)] = action.name
                ## create the action object with the new names
                deterministic_action.append(Action(name='%s_%s'%(action.name,i), \
                        parameters=tuple(zip(action.types, action.arg_names)), \
                        preconditions=action.preconditions, \
                        effects=effect))
        ## if action is already deterministic; then only create an action object for it
        else:
            deterministic_action.append(Action(name=action.name, \
                    parameters=tuple(zip(action.types, action.arg_names)), \
                    preconditions=action.preconditions, \
                    effects=deterministic_effects[0]))
        ## add the compiled actions into deterministic_actions as a tuple
        deterministic_actions.append(tuple(deterministic_action))

    ## test if the length of Cartesian product might exceed the max number (1000)
    if not alloutcome:
        domains_product_len = 1
        for det_actions in deterministic_actions:
            domains_product_len *= len(det_actions)

        # switch to all-outcome
        if domains_product_len > MAX_DOMAINS:
            print(
                color.fg_yellow(
                    '-- the possible combination of single-outcome domains is {}'
                    .format(domains_product_len)))
            print(
                color.fg_yellow(
                    '-- that exceeds the allowed MAX_DOMAINS [{}]'.format(
                        MAX_DOMAINS)))
            print(color.fg_yellow('-- switch to all-outcome compilation\n'))
            alloutcome = True

        # rise a warning to switch to all-outcome
        elif domains_product_len > MAX_DOMAINS / 4:
            print(
                color.fg_green(
                    '-- the possible combination of single-outcome domains is {}'
                    .format(domains_product_len)))
            print(
                color.fg_green(
                    '-- this degrades dramatically the planning performance'))
            print(
                color.fg_green(
                    '-- recommended switching to all-outcome by giving the parameter \'-a\'\n'
                ))

    ## make all possible combination of deterministic actions
    if alloutcome:
        # include only all-outcome compilation
        deterministic_actions = [tuple(set().union(*deterministic_actions))]
    else:
        ## include add all-outcome determinization at the end of the list of domains
        deterministic_actions = list(product(*deterministic_actions)) + [
            tuple(set().union(*deterministic_actions))
        ]

    ## return a list of deterministic domains
    # each compiled domain reuses the original domain's metadata but drops
    # the non-deterministic requirements
    return ([Domain(name = domain.name, \
                requirements = tuple(set(domain.requirements) - set([':probabilistic-effects',':non-deterministic'])), \
                types = domain.types, \
                predicates = domain.predicates, \
                derived_predicates = domain.derived_predicates, \
                constants = domain.constants, \
                actions = tuple(actions)) for actions in deterministic_actions],
            nd_actions,
            map_actions)
Esempio n. 4
0
###############################################################################
# NOTE(review): this snippet appears truncated by the scraper -- the body of
# the final 'if' statement is missing, so it is not valid Python as shown.
if __name__ == '__main__':

    import pddlparser

    args = parse()

    # parse the input (possibly non-deterministic) domain file
    domain = pddlparser.PDDLParser.parse(args.domain)

    ## if problem is also in the file
    if type(domain) == tuple: domain = domain[0]

    # compile into deterministic domains under a tmp directory
    domains_dir = compile(domain, args.rank, args.all)

    if domains_dir is None:
        print(color.fg_yellow('-- successfully parsed: ') + args.domain)
        exit()

    deterministic_domains = OrderedDict()
    nd_actions = OrderedDict()

    ## parse deterministic pddl domains
    # print([os.path.join(domains_dir, file) for file in os.listdir(domains_dir)])
    for domain in sorted(
        [os.path.join(domains_dir, file) for file in os.listdir(domains_dir)]):
        ## read the probabilistic actions names
        if domain.endswith('.prob'):
            with open(domain) as f:
                nd_actions = json.load(f)
        ## read the deterministic domains
        if domain.endswith('.pddl'):
Esempio n. 5
0
###############################################################################
if __name__ == '__main__':

    import pddlparser

    args = parse()

    # parse the given (possibly non-deterministic) domain file
    domain = pddlparser.PDDLParser.parse(args.domain)

    ## a problem may be bundled in the same file; keep only the domain
    if type(domain) == tuple:
        domain = domain[0]

    # compile into deterministic domains written under a tmp directory
    domains_dir = compile(domain, args.rank, args.all)

    # a None result means the input was deterministic and only parsed
    if domains_dir is None:
        print(color.fg_yellow('-- successfully parsed: ') + args.domain)
        exit()

    deterministic_domains = OrderedDict()
    nd_actions = OrderedDict()

    ## load every generated file in sorted order
    file_paths = [os.path.join(domains_dir, f) for f in os.listdir(domains_dir)]
    for domain in sorted(file_paths):
        if domain.endswith('.prob'):
            ## the probabilistic actions names (json)
            with open(domain) as f:
                nd_actions = json.load(f)
        if domain.endswith('.pddl'):
            ## a compiled deterministic pddl domain
            deterministic_domains[domain] = pddlparser.PDDLParser.parse(domain)
Esempio n. 6
0
    # NOTE(review): fragment of a larger script -- 'args', 'planner', 'color'
    # and 'concurrent_executions' are presumably bound in the enclosing
    # module; confirm against the original file.
    # make a policy given domain and problem
    policy = planner.Planner(args.domain, args.problem, args.planners,
                             args.rank, args.verbose)

    # transform the produced policy into a contingency plan and print it
    plan = policy.plan()
    # take the first sub-path of the plan for execution analysis
    path = policy.get_paths(policy.plan())[0]
    policy.print_plan(plan=path)

    #############################
    # get possible concurrent and joint executions
    single_executions, joint_executions = concurrent_executions(
        policy, path, args.agents)

    if args.verbose:
        print(color.fg_yellow('----------------------------------'))
        print(color.fg_yellow('-- possible concurrent executions'))
        print(color.fg_yellow('----------------------------------'))
        for i, single_execution in enumerate(single_executions):
            print(color.fg_yellow('-- execution_{}'.format(str(i))))
            # each plan level maps to a tuple (actions, outcomes)
            for level, (actions, outcomes) in sorted(single_execution.items()):
                # for level, (actions, outcomes) in sorted(merge_dict(single_execution,joint_executions).items()):
                print('{} : {} {}'.format(str(level),
                                          ' '.join(map(str,
                                                       actions)), outcomes))

        print(color.fg_yellow('-- joint executions'))
        for level, (actions, outcomes) in joint_executions.items():
            print('{} : {} {}'.format(str(level), ' '.join(map(str, actions)),
                                      outcomes))
Esempio n. 7
0
def make_plan(domain,
              problem,
              planners=None,
              agents=None,
              temporal_actions=None,
              rank=False,
              verbose=False):
    """Build a policy for (domain, problem) and derive a multi-agent plan.

    Makes a policy, converts it into a contingency plan, computes possible
    concurrent/joint executions, emits json and graphviz artifacts, and
    prints planning statistics.

    @arg domain: path to the pddl domain file
    @arg problem: path to the pddl problem file
    @arg planners: list of external planner names (defaults to ['optic-clp'])
    @arg agents: list of agents for the concurrency analysis (defaults to [])
    @arg temporal_actions: temporal actions for the json plan (defaults to [])
    @arg rank: rank compiled outcomes (passed to the planner)
    @arg verbose: print detailed progress and open graphical viewers
    @return a tuple (policy, plan, plan_json_file, actions_json_file)
    """

    import planner
    import dot_plan
    import dot_ma_plan

    # create default values per call: mutable defaults ('planners=[...]')
    # would be shared and possibly mutated across calls
    if planners is None: planners = ['optic-clp']
    if agents is None: agents = []
    if temporal_actions is None: temporal_actions = []

    # make a policy given domain and problem
    policy = planner.Planner(domain=domain,
                             problem=problem,
                             planners=planners,
                             rank=rank,
                             verbose=verbose)

    # transform the produced policy into a contingency plan and print it
    plan = policy.plan()
    policy.print_plan(plan=plan)

    #############################
    # get possible concurrent and joint executions
    single_executions, joint_executions = concurrent_executions(
        policy, plan, agents)

    if verbose:
        print(color.fg_yellow('----------------------------------'))
        print(color.fg_yellow('-- possible concurrent executions'))
        print(color.fg_yellow('----------------------------------'))
        for i, single_execution in enumerate(single_executions):
            print(color.fg_yellow('-- execution_{}'.format(str(i))))
            for level, (actions, outcomes) in sorted(single_execution.items()):
                # for level, (actions, outcomes) in sorted(merge_dict(single_execution,joint_executions).items()):
                print('{} : {} {}'.format(str(level),
                                          ' '.join(map(str,
                                                       actions)), outcomes))

        print(color.fg_yellow('-- joint executions'))
        for level, (actions, outcomes) in joint_executions.items():
            print('{} : {} {}'.format(str(level), ' '.join(map(str, actions)),
                                      outcomes))

    #############################
    # refine and separate the concurrent executions into concurrent clusters
    main_list = concurrent_subplans(policy, plan, agents)

    if verbose:
        print(color.fg_yellow('\n----------------------------------'))
        print(color.fg_yellow('-- actual multi-agent plan'))
        print(color.fg_yellow('----------------------------------'))

        for i, (key, subplans) in enumerate(main_list.items()):
            print(
                color.fg_yellow(
                    '---------------------------------- block_{}'.format(
                        str(i))))
            for j, subplan in enumerate(subplans):
                if (len(subplans)) > 1:
                    print(color.fg_beige('-- subplan_{}'.format(str(j))))
                for k, (actions, outcomes) in subplan.items():
                    print('{} -- {} {}'.format(k, ' '.join(map(str, actions)),
                                               outcomes))

    #############################
    # convert the plan into a concurrent plan in json files
    plan_json_file, actions_json_file = json_ma_plan(policy, agents,
                                                     temporal_actions)

    print(color.fg_yellow('-- plan_json_file:') + plan_json_file)
    print(color.fg_yellow('-- actions_json_file:') + actions_json_file)
    print(
        color.fg_yellow('-- graphical plan_json_file:') + plan_json_file +
        '.dot')
    os.system('lua lua/json_multiagent_plan.lua %s &' % plan_json_file)
    if verbose: os.system('xdot %s.dot &' % plan_json_file)

    # generate a graph of the policy as a dot file in graphviz
    dot_file = dot_plan.gen_dot_plan(plan=plan,
                                     domain_file=domain,
                                     problem_file=problem)
    print(color.fg_yellow('-- plan in dot file: ') + dot_file)
    # transform the plan into a parallel plan
    dot_file, tred_dot_file = dot_ma_plan.parallel_plan(policy,
                                                        verbose=verbose)
    print(color.fg_yellow('-- precedence graph: ') + dot_file)
    print(color.fg_yellow('-- transitive reduction: ') + tred_dot_file)
    if verbose: os.system('xdot %s &' % tred_dot_file)

    # print out resulting info
    print('\nPlanning domain: %s' % policy.domain_file)
    print('Planning problem: %s' % policy.problem_file)
    # NOTE(review): sys.argv slicing assumes a specific CLI layout -- confirm
    print('Arguments: %s' % ' '.join(sys.argv[3:]))
    print('Policy length: %i' % len(policy.policy))
    print('Plan length: %i' % (len(plan) - 1))
    print('Compilation time: %.3f s' % policy.compilation_time)
    print('Planning time: %.3f s' % policy.planning_time)
    print('Planning iterations (all-outcome): %i' %
          policy.alloutcome_planning_call)
    print('Total number of replannings (single-outcome): %i' %
          policy.singleoutcome_planning_call)
    print('Total number of unsolvable states: %i' %
          len(policy.unsolvable_states))

    return (policy, plan, plan_json_file, actions_json_file)
        ## NOTE(review): orphaned fragment -- the 'else:' below has no
        ## matching 'if' in view and this code does not attach to the
        ## preceding function; it was presumably scraped from a different
        ## '__main__' section. Verify against the original file.
        ## if problem is also in the file
        if type(domain) == tuple:
            domain, problem = domain[0], domain[1]
            # merge the domain constants into the problem's initial objects
            problem.initial_state.objects = planner.mergeDict(problem.initial_state.objects, domain.constants)
        else:
            problem = None
    ## if both args (domain, problem) are given
    else:
        domain = pddlparser.PDDLParser.parse(args.domain)
        problem = pddlparser.PDDLParser.parse(args.problem)
        problem.initial_state.objects = planner.mergeDict(problem.initial_state.objects, domain.constants)


    ## print out in string
    if args.all: 
        print(color.fg_yellow('-- domain in string'))
        print(domain)
    if args.all and problem is not None: 
        print(color.fg_yellow('-- problem in string'))
        print(problem)

    ## print out in pddl
    print(color.fg_yellow('-- domain in pddl '))
    print(pddl.to_pddl(domain))
    if problem is not None: 
        print(color.fg_yellow('-- problem in pddl '))
        print(pddl.to_pddl(problem))

    ## print out pddl files
    if args.all: 
        print(color.fg_yellow('-- domain pddl file: ') + pddl.pddl(domain))
#################################################################
if __name__ == '__main__':

    import planner
    import color

    args = parse()

    # build a policy for the given planning task
    policy = planner.Planner(args.domain, args.problem, args.planners,
                             args.rank, args.verbose)

    # derive a contingency plan from the policy and display it
    plan = policy.plan()
    policy.print_plan(plan=plan)


    #############################
    # turn the plan into a parallel (partially ordered) plan
    dot_file, tred_dot_file = parallel_plan(policy, verbose=args.verbose)
    print(color.fg_yellow('-- graphviz file: ') + dot_file)
    print(color.fg_yellow('-- transitive reduction: ') + tred_dot_file)

    # summary statistics, printed in a fixed order
    stats = [
        ('\nPlanning domain: %s', policy.domain_file),
        ('Planning problem: %s', policy.problem_file),
        ('Policy length: %i', len(policy.policy)),
        ('Planning time: %.3f s', policy.planning_time),
        ('Planning iterations (all-outcome): %i', policy.alloutcome_planning_call),
        ('Total number of replannings (single-outcome): %i', policy.singleoutcome_planning_call),
        ('Total number of unsolvable states: %i', len(policy.unsolvable_states)),
    ]
    for fmt, value in stats:
        print(fmt % value)
Esempio n. 10
0
def action_execution_verification(action_msg):
    """ROS callback: verify an executed action against the current plan.

    Walks the plan level by level; when the received action id matches a
    planned action, it applies the action on success (shutting down when
    the goal is reached) or, on failure, injects the reported violations
    into the state, replans with 'make_plan' and re-sends the new plan.

    @arg action_msg: message with fields 'id', 'succeed' and 'monitors'
         (each monitor carrying 'predicate' and 'arguments')
    Side effects: mutates the module globals 'state', 'plan', 'action_ids';
    may unregister 'sub_proc' and shut down the ROS node.
    """

    global args, domain, problem, state, plan, action_ids

    #############################
    ## simulate and execute the plan
    for level, step in plan.items():

        ## if all actions in the plan are visited
        if step == 'GOAL':
            ## check if goal is achieved then terminate planner
            if state.is_true(problem.goals):
                # goal state is achieved
                # (sic: 'fg_voilet' -- presumably spelled so in the color module)
                print(color.fg_voilet('@ GOAL'))
                sub_proc.unregister()
                rospy.signal_shutdown('finished')

        else:
            # unfold step into a tuple of actions and outcomes
            (actions, outcomes) = step

            for action in actions:
                ## find an action matching the received action
                # the action id is the underscore-joined signature
                if '_'.join(action.sig) == action_msg.id:

                    # if action was already visited
                    if '_'.join(action.sig) in action_ids: return

                    # add action's id to the visited action_ids
                    action_ids.append('_'.join(action.sig))

                    # check if action succeeded
                    if action_msg.succeed:
                        ## print out succeeded action
                        print(color.fg_yellow(' + ') + str(action))
                        # apply action to the state and update the state
                        state = state.apply(action)

                        ## check if goal is achieved then terminate planner
                        if state.is_true(problem.goals):
                            # goal state is achieved
                            print(color.fg_voilet('@ GOAL'))
                            sub_proc.unregister()
                            rospy.signal_shutdown('finished')
                    else:
                        ## print out failed action
                        print(color.fg_red(' - ') + str(action))
                        for monitor in action_msg.monitors:
                            print(
                                color.fg_red('   ---- {} {}'.format(
                                    monitor.predicate, monitor.arguments[0])))

                        ## update the state with the action violence and make a re-plane
                        ## convert the predicates frozenset to a list and update the state
                        ## i.e., remove ('collision_free', 'left_arm')
                        state_predicates = list(state.predicates)

                        # for each reported violation, swap the '*_free'
                        # predicate for the matching '*_detected' one
                        for monitor in action_msg.monitors:
                            if 'collision' in monitor.predicate:
                                if ('collision_free', monitor.arguments[0]
                                    ) in state_predicates:
                                    state_predicates.remove(
                                        ('collision_free',
                                         monitor.arguments[0]))
                                if not ('collision_detected', monitor.
                                        arguments[0]) in state_predicates:
                                    state_predicates.append(
                                        ('collision_detected',
                                         monitor.arguments[0]))
                            elif 'admittance' in monitor.predicate:
                                if ('admittance_free', monitor.arguments[0]
                                    ) in state_predicates:
                                    state_predicates.remove(
                                        ('admittance_free',
                                         monitor.arguments[0]))
                                if not ('admittance_detected', monitor.
                                        arguments[0]) in state_predicates:
                                    state_predicates.append(
                                        ('admittance_detected',
                                         monitor.arguments[0]))

                        ## convert back to frozenset
                        state.predicates = frozenset(state_predicates)

                        #############################
                        ## create a new pddl problem file at /tmp/safe-planner
                        problem_pddl = pddl.pddl(problem, state=state)

                        ## call planner to make an initial policy given the domain, problem and planners
                        (policy, plan, plan_json_file, actions_json_file) = \
                            make_plan(domain = domain, \
                                      problem = problem_pddl, \
                                      planners = args.planners, \
                                      agents = args.agents, \
                                      temporal_actions = args.temporal_actions, \
                                      rank=False, \
                                      verbose=args.verbose)

                        #############################
                        ## call the execution engine giving the initial plan
                        # plan_json_file actions_json_file
                        send_json_plan(plan_json_file, actions_json_file)

                    return
    return
Esempio n. 11
0
def main():
    """Command-line entry point of the planner.

    Parses arguments, builds a policy, prints the derived contingency
    plan, and optionally emits sub-paths, graphviz and json artifacts
    before printing planning statistics.
    """
    ## parse arguments
    parser = parse()
    args = parser.parse_args()
    # identity test ('is None') is the idiomatic None check
    if args.domain is None:
        parser.print_help()
        sys.exit()

    ## make a policy given domain and problem
    policy = planner.Planner(args.domain, args.problem, args.planners,
                             args.safe_planner, args.rank, args.all_outcome,
                             args.verbose)

    ## transform the produced policy into a contingency plan
    plan = policy.plan()

    ## print out the plan in a readable form
    policy.print_plan(del_effect_inc=True, det_effect_inc=False)

    ## print out sub-paths in the plan
    if args.path:
        paths = policy.get_paths(plan)
        policy.print_paths(paths=paths, del_effect_inc=True)
        # for path in paths:
        #     policy.print_plan(plan=path, del_effect_inc=True)
        ## generate graphs of sub-paths too
        if args.dot:
            for i, path in enumerate(paths):
                dot_file = dot_plan.gen_dot_plan(plan=path)
                print(
                    color.fg_yellow('-- path{} dot file: ').format(str(i +
                                                                       1)) +
                    dot_file)
                # os.system('xdot %s &' % dot_file)
            dot_file = dot_plan.gen_dot_plan(plan=paths[0])
            # os.system('xdot %s &' % dot_file)
            print('')

    ## generate a graph of the policy as a dot file in graphviz
    if args.dot:
        plan = policy.plan(tree=True)
        dot_file = dot_plan.gen_dot_plan(plan=plan,
                                         del_effect=True,
                                         domain_file=args.domain,
                                         problem_file=args.problem)
        print(color.fg_yellow('-- dot file: ') + dot_file + '\n')
        # os.system('xdot %s &' % dot_file)
        # os.system('dot -T pdf %s > %s.pdf &' % (dot_file, dot_file))
        # os.system('evince %s.pdf &' % dot_file)

    ## transform the policy into a multi-agent json plan
    # NOTE: there are deliberately two 'args.json' sections -- this one
    # emits the multi-agent plan, the next one the plain json plan
    if args.json:
        import json_ma_plan
        import dot_ma_plan
        json_output = json_ma_plan.json_ma_plan(policy, verbose=args.verbose)
        if json_output is not None:
            plan_json_file, actions_json_file = json_output
            print(
                color.fg_yellow('-- plan_json_file:') + plan_json_file +
                color.fg_red(' [EXPERIMENTAL!]'))
            print(
                color.fg_yellow('-- actions_json_file:') + actions_json_file +
                color.fg_red(' [EXPERIMENTAL!]'))
            os.system('cd lua && lua json_multiagent_plan.lua ../%s &' %
                      plan_json_file)
            print(
                color.fg_yellow('-- plan_json_dot_file:') +
                ('%s.dot' % plan_json_file) + color.fg_red(' [EXPERIMENTAL!]'))
            # transform the plan into a parallel plan
            dot_file, tred_dot_file = dot_ma_plan.parallel_plan(
                policy, verbose=args.verbose)
            print(color.fg_yellow('-- graphviz file: ') + dot_file)
            print(color.fg_yellow('-- transitive reduction: ') + tred_dot_file)
            # os.system('xdot %s.dot &' % plan_json_file)

    ## transform the policy into a (single-agent) json file
    if args.json:
        import json_plan
        plan = policy.plan(tree=False)
        json_file, plan_json = json_plan.json_plan(policy)
        print(
            color.fg_yellow('\n-- json file: ') + json_file +
            color.fg_red(' [EXPERIMENTAL!]'))
        print(
            color.fg_yellow('-- try: ') + 'lua json_plan.lua ' + json_file +
            color.fg_red(' [EXPERIMENTAL!]\n'))

    if args.store:
        stat_file = policy.log_performance(plan)
        print(color.fg_yellow('-- planner performance: ') + stat_file)

    # print out resulting info
    if args.problem is not None:
        print('\nPlanning domain: %s' % policy.domain_file)
        print('Planning problem: %s' % policy.problem_file)
        print('Arguments: %s' % ' '.join(sys.argv[3:]))
    else:
        print('Planning problem: %s' % policy.domain_file)
        print('Arguments: %s' % ' '.join(sys.argv[2:]))
    print('Policy length: %i' % len(policy.policy))
    print('Plan length: %i' % (len(plan) - 1))
    print('Compilation time: %.3f s' % policy.compilation_time)
    print('Planning time: %.3f s' % policy.planning_time)
    print('Planning iterations (all-outcome): %i' %
          policy.alloutcome_planning_call)
    print('Total number of replannings (single-outcome): %i' %
          policy.singleoutcome_planning_call)
    print('Total number of unsolvable states: %i' %
          len(policy.unsolvable_states))
Esempio n. 12
0
                  outfile,
                  sort_keys=False,
                  indent=4)

    return json_file, plan_json


###############################################################################
if __name__ == '__main__':

    import color
    import planner

    args = parse()

    # build a policy for the given task and derive a plan from it
    policy = planner.Planner(args.domain, args.problem, args.planner,
                             args.verbose)
    plan = policy.plan()
    policy.print_plan()

    # serialize the plan to json and report the result
    json_file_path, plan_json = json_plan(policy)

    print(color.fg_yellow('-- json plan object\n') + str(plan_json))
    print(color.fg_yellow('-- json file\n') + str(json_file_path))