Code example #1
0
def test_symbol_classification_in_gripper():
    """ Check the fluent/static symbol counts on the sample Gripper problem. """
    problem = gripper.create_sample_problem()

    fluents, statics = approximate_symbol_fluency(problem)
    assert len(fluents) == 4 and len(statics) == 3

    # Including builtins adds "=" and "!=" to the static set
    fluents, statics = approximate_symbol_fluency(problem, include_builtin=True)
    assert len(fluents) == 4 and len(statics) == 5
Code example #2
0
def compute_static_atoms(problem):
    """ Compute a list with all of the atoms and predicates from the problem that are static """
    fluents, _ = approximate_symbol_fluency(problem)
    fluent_names = {symbol.name for symbol in fluents}

    static_atoms, static_predicates = set(), set()
    for atom in state_as_atoms(problem.init):
        name = atom[0]
        # total-cost bookkeeping is never considered static
        if name == 'total-cost':
            continue
        if name in fluent_names:
            continue
        static_atoms.add(atom)
        static_predicates.add(name)

    # The object predicate is always static
    static_predicates.add('object')
    for atom in types_as_atoms(problem.language):
        static_atoms.add(atom)
        static_predicates.add(atom[0])

    return static_atoms, static_predicates
Code example #3
0
def test_symbol_classification_with_nested_effect_heads():
    """ Symbols appearing only *nested* inside an effect head must stay static:
    only the outermost symbol of the head (here, 'clear') becomes fluent. """
    lang = generate_fstrips_bw_language(nblocks=3)
    problem = create_fstrips_problem(lang,
                                     domain_name='blocksworld',
                                     problem_name='test-instance')
    block, place, clear, loc, table = lang.get('block', 'place', 'clear',
                                               'loc', 'table')

    x = lang.variable('x', 'block')
    # Effect head is clear(loc(x)): 'clear' is the affected symbol, while 'loc'
    # only appears nested within the head and hence should remain static.
    problem.action('dummy-action', [x],
                   precondition=loc(x) == table,
                   effects=[AddEffect(clear(loc(x)))])

    fluent, static = approximate_symbol_fluency(problem, include_builtin=True)
    # Fixed failure message: the previous one contradicted the assertion, claiming
    # 'loc' should have been detected as fluent when the test expects it static.
    assert loc in static and clear in fluent, \
        "'clear' should be classified as fluent (it heads an effect), while 'loc', " \
        "which only appears nested inside the effect head, should remain static"
Code example #4
0
def test_symbol_classification(instance_file, domain_file):
    """ Run the symbol classification procedure over a handful of fully-parsed
    standard benchmarks and compare against known fluent/static counts. """
    problem = reader().read_problem(domain_file, instance_file)
    fluents, statics = approximate_symbol_fluency(problem)

    # Expected (num. fluents, num. statics) per domain (including total-cost terms!)
    expected = {
        "grid-visit-all": (2, 1),
        "Trucks": (6, 4),
        "BLOCKS": (5, 0),
        "gripper-strips": (4, 3),
        "elevators-sequencedstrips": (4, 6),
        "sokoban-sequential": (3, 3),
        "parking": (5, 0),
        "transport": (3, 3),
        "spider": (15, 6),
        "counters-fn": (1, 1),
        "settlers": (26, 23),
        "nurikabe": (9, 3),
    }
    nfluents, nstatics = expected[problem.domain_name]

    # Sanity check: expected fluent + static counts must add up to the total number of symbols
    total_symbols = len(set(get_symbols(problem.language, include_builtin=False)))
    assert total_symbols == nfluents + nstatics
    assert (len(fluents), len(statics)) == (nfluents, nstatics)
Code example #5
0
def test_symbol_classification_in_parcprinter():
    """ Check the fluent/static symbol counts on the small ParcPrinter task. """
    task = parcprinter.create_small_task()
    fluents, statics = approximate_symbol_fluency(task)
    assert len(fluents) == 4 and len(statics) == 3
Code example #6
0
 def __init__(self, problem):
     """ Set up the walker, precomputing the static symbols of the given problem. """
     super().__init__()
     # Only the static half of the fluent/static classification is needed here
     _, self.static_symbols = approximate_symbol_fluency(problem)
     self.nested_symbols = {}
     self.problem = problem
Code example #7
0
def run_on_problem(problem,
                   reachability,
                   max_horizon,
                   grounding,
                   smtlib_filename=None,
                   solver_name='z3',
                   print_full_model=False):
    """ Generate a lifted SMT encoding for the given problem, solve it, and return the decoded plan.

    Note that invoking this method might perform several modifications and simplifications to the given problem
    and its language.

    :param problem: The planning problem to solve. Modified in place (universal effects compiled
        away, trivial simplifications applied, unused symbols removed).
    :param reachability: Reachability analysis mode. Currently unused — see the commented-out
        call below.
    :param max_horizon: The single horizon value for which the theory is generated.
    :param grounding: If equal to 'full', the generated SMT theory is fully grounded.
    :param smtlib_filename: If not None, the translated theory is dumped to this SMTLIB file.
    :param solver_name: Name of the SMT solver backend to use (default: 'z3').
    :param print_full_model: Whether to print the full model when decoding the plan.
    :return: The decoded plan, or a falsy value if none was found.
    :raises TransformationError: If some generated formula unexpectedly has free variables.
    """
    with resources.timing("Preprocessing problem", newline=True):
        # The encoding expects a problem without universally-quantified effects, so let's compile them away
        problem = compile_universal_effects_away(problem, inplace=True)

        # Let's also apply some trivial simplifications
        simplifier = Simplify(problem, problem.init)
        problem = simplifier.simplify(inplace=True, remove_unused_symbols=True)

        # Compute which symbols are static
        _, statics = approximate_symbol_fluency(problem)

    # ATM we disable reachability, as it's not being used for the lifted encoding
    # do_reachability_analysis(problem, reachability)

    # Let's just fix one single horizon value, for the sake of testing
    horizon = max_horizon

    # Ok, down to business: let's generate the theory, which will be represented as a set of first-order sentences
    # in a different Tarski FOL (smtlang):
    with resources.timing("Generating theory", newline=True):
        encoding = FullyLiftedEncoding(problem, statics)
        smtlang, formulas, comments = encoding.generate_theory(horizon=horizon)

    if grounding == 'full':
        comments, formulas = ground_smt_theory(smtlang, comments, formulas,
                                               horizon)

    # Some sanity check: All formulas must be sentences!
    for formula in formulas:
        freevars = free_variables(formula)
        if freevars:
            raise TransformationError(
                f'Formula {formula} has unexpected free variables: {freevars}')

    # Once we have the theory in Tarski format, let's just translate it into PySMT format:
    with resources.timing("Translating theory to pysmt", newline=True):
        anames = set(a.name for a in problem.actions.values())

        translator_class = choose_translator_based_on_theory(smtlang)
        translator = translator_class(smtlang, statics, anames)
        translated = translator.translate(formulas)

        # Let's simplify the sentences for further clarity
        translated = translator.simplify(translated)

    # Some optional debugging statements:
    # _ = [print(f"{i}. {s}") for i, s in enumerate(formulas)]
    # _ = [print(f"{i}. {s.serialize()}") for i, s in enumerate(translated)]
    # _ = [print(f"{i}. {to_smtlib(s, daggify=False)}") for i, s in enumerate(translated)]

    # Try some built-in quantifier elimination?
    # translated = [qelim(t, solver_name="z3", logic="LIA") for t in translated]

    # Dump the SMT theory
    if smtlib_filename is not None:
        with resources.timing(f"Writing theory to file \"{smtlib_filename}\"",
                              newline=True):
            with open(smtlib_filename, "w") as f:
                translator.print_as_smtlib(translated, comments, f)

    with resources.timing("Solving theory", newline=True):
        # Theories involving set symbols need the SMTLIB-level solver and a richer logic
        if Theory.SETS in smtlang.theories:
            smtlib_solver = solve_smtlib(translated,
                                         solver_name,
                                         logic="QF_UFLIAFS")
            plan = decode_satlib_model(smtlib_solver, horizon, translator,
                                       print_full_model)
        else:
            model = solve(translated, solver_name)
            plan = decode_smt_model(model, horizon, translator,
                                    print_full_model)
    if plan:
        print(f"Found length-{len(plan)} plan:")
        print('\n'.join(map(str, plan)))
    return plan