コード例 #1
0
def gen_heuristic_test(dom,
                       prob,
                       search_class,
                       heuristic_class,
                       h_values_plan,
                       plan_length=None):
    """Parse, ground and solve a PDDL task, then check heuristic values.

    *dom* and *prob* are raw PDDL strings.  After running the search,
    the heuristic value of every state along the plan is compared with
    the expected ``h_values_plan`` (reference values obtained from Fast
    Downward, A* with the LM-cut heuristic).  If *plan_length* is given,
    the plan's length is asserted as well.
    """
    pddl_parser = Parser('')
    pddl_parser.domInput = dom
    pddl_parser.probInput = prob

    parsed_domain = pddl_parser.parse_domain(False)
    parsed_problem = pddl_parser.parse_problem(parsed_domain, False)

    task = grounding.ground(parsed_problem)

    heuristic = heuristic_class(task)
    plan = search_class(task, heuristic)
    if plan_length:
        assert len(plan) == plan_length

    # Walk the plan and compare each state's heuristic estimate with the
    # Fast Downward reference values.
    actual = [h for h in _gen_h_values(task.initial_state, plan, heuristic)]
    assert h_values_plan == actual
コード例 #2
0
ファイル: pyperplan.py プロジェクト: marirsg2/pyperplan
def _ground(problem):
    """Ground *problem* into a task, logging progress and result sizes."""
    name = problem.name
    logging.info("Grounding start: {}".format(name))
    task = grounding.ground(problem)
    logging.info("Grounding end: {}".format(name))
    # Report how large the grounded task turned out to be.
    logging.info("{} Variables created".format(len(task.facts)))
    logging.info("{} Operators created".format(len(task.operators)))
    return task
コード例 #3
0
def get_ground_task(domain_filepath, problem_filepath):
    """Parse the given PDDL domain/problem files and return the grounded task."""
    pddl_parser = Parser(domain_filepath, problem_filepath)
    parsed_dom = pddl_parser.parse_domain()
    parsed_prob = pddl_parser.parse_problem(parsed_dom)
    return grounding.ground(parsed_prob)
コード例 #4
0
def _ground(problem):
    """Return the grounded planning task for *problem*.

    Unlike the other ``_ground`` variants in this codebase, this one has
    all of its progress/statistics logging commented out, so grounding
    runs silently.
    """
    return grounding.ground(problem)
コード例 #5
0
def _ground(problem):
    """Ground *problem*, logging start/end and the resulting task size."""
    logging.info('Grounding start: {0}'.format(problem.name))
    grounded = grounding.ground(problem)
    logging.info('Grounding end: {0}'.format(problem.name))
    # Size statistics of the grounded task.
    logging.info('{0} Variables created'.format(len(grounded.facts)))
    logging.info('{0} Actions created'.format(len(grounded.actions)))
    return grounded
コード例 #6
0
ファイル: pddl_planner.py プロジェクト: Gin93/111
def _ground(problem):
    """Convert a parsed PDDL problem into a grounded task.

    Logs the start and end of grounding plus how many variables (facts)
    and actions the grounded task contains.
    """
    prob_name = problem.name
    logging.info('Grounding start: {0}'.format(prob_name))
    task = grounding.ground(problem)
    logging.info('Grounding end: {0}'.format(prob_name))
    logging.info('{0} Variables created'.format(len(task.facts)))
    logging.info('{0} Actions created'.format(len(task.actions)))
    return task
コード例 #7
0
ファイル: grd_task.py プロジェクト: aabinks/gep_grd
    def load_hypotheses(self, file_name):
        """Create a separate planning problem for each hypothesis.

        Each line of *file_name* holds one hypothesis as a comma separated
        list of atoms.  For every hypothesis a PDDL problem file is
        generated and parsed into a grounded planning task (using the
        third party pyperplan machinery).

        :param file_name: hyps file name
        :return: None (populates ``self.hyps_set``)
        """
        # Read and parse the hypotheses file: one hypothesis per line.
        with open(file_name) as instream:
            for idx, raw_line in enumerate(instream):
                hyp = GrdHypothesis()
                hyp.atoms = [tok.strip() for tok in raw_line.strip().split(',')]
                hyp.name = idx
                self.hyps_set.append(hyp)

        # Build the planning task that corresponds to each hypothesis.
        for idx in range(len(self.hyps_set)):
            # Generate the problem file whose goal is the hypothesis.
            hyp_problem_name = os.path.join(self.destination_folder_name,
                                            'hyp_%d_problem.pddl' % idx)

            # pyperplan does not support action costs, so the
            # '(= (total-cost) 0)' statement is removed from the file if
            # it exists.
            self.hyps_set[idx].generate_pddl_for_hyp_plan(
                hyp_problem_name, self.full_template_file_name, True)

            # Parse and ground the generated planning problem.
            hyp_planning_problem = grd_planning_gen.parse_problem_file(
                hyp_problem_name, self.full_domain_file_name)
            hyp_planning_task = grounding.ground(hyp_planning_problem)

            # For the first hypothesis only, also keep a grounded task for
            # which the relevance check is NOT performed, plus the static
            # predicates of the problem.
            # TODO: decide if there is a more elegant way - and see how
            # helpful the relevance check may be.
            if idx == 0:
                self.planningTaskForExploration = grounding.ground(
                    hyp_planning_problem, False)
                self.static_predicates = grounding.get_statis_predicates(
                    hyp_planning_problem)

            self.hyps_set[idx].set_task(hyp_planning_task)
            self.hyps_set[idx].set_name(idx)
コード例 #8
0
ファイル: pyperplan.py プロジェクト: PhiPhiper/pyperplan
def _ground(problem):
    """Ground *problem* and return a task with deterministically ordered
    operators."""
    logging.info('Grounding start: {0}'.format(problem.name))
    grounded = grounding.ground(problem)
    # Sort the operators so the grounding result is reproducible from
    # run to run.
    grounded.operators.sort()
    logging.info('Grounding end: {0}'.format(problem.name))
    logging.info('{0} Variables created'.format(len(grounded.facts)))
    logging.info('{0} Operators created'.format(len(grounded.operators)))
    return grounded
コード例 #9
0
ファイル: test_lm_cut.py プロジェクト: AAIR-lab/GHN
def test_lm_cut_blocksworld_initial_state():
    """LM-cut value of the blocksworld initial state must be 6."""
    pddl_parser = Parser("")
    pddl_parser.domInput = blocks_dom
    pddl_parser.probInput = blocks_problem_1

    parsed_dom = pddl_parser.parse_domain(False)
    parsed_prob = pddl_parser.parse_problem(parsed_dom, False)

    task = grounding.ground(parsed_prob)

    # Evaluate the heuristic on the root search node only.
    root = make_root_node(task.initial_state)
    assert LmCutHeuristic(task)(root) == 6.0
コード例 #10
0
ファイル: test_lm_cut.py プロジェクト: KDercksen/SPML
def test_lm_cut_blocksworld_initial_state():
    """The LM-cut estimate for the blocksworld start state equals 6."""
    parser = Parser('')
    parser.domInput = blocks_dom
    parser.probInput = blocks_problem_1

    # Parse domain and problem straight from the in-memory strings.
    problem = parser.parse_problem(parser.parse_domain(False), False)
    task = grounding.ground(problem)

    h = LmCutHeuristic(task)
    assert h(make_root_node(task.initial_state)) == 6.0
コード例 #11
0
 def __init__(self, domain_file, problem_file):
     """
         domain - domain pddl file
         problem - problem pddl file
     """

     def _load_template(path):
         # Read a template file and normalise it to stripped lines.
         with open(path) as fd:
             return '\n'.join([ln.strip() for ln in fd.readlines()])

     # Load the domain/problem templates, then parse and ground the task.
     self.domain_template = _load_template(DOM_TEMPL)
     self.prob_template = _load_template(PROB_TEMPL)
     pddl_parser = Parser(domain_file, problem_file)
     parsed_dom = pddl_parser.parse_domain()
     self.task = grounding.ground(pddl_parser.parse_problem(parsed_dom))
コード例 #12
0
ファイル: test_grounding.py プロジェクト: KDercksen/SPML
def test_ground():
    """
    The predicate "car_color" does not occur in any operator, so after
    grounding it must appear neither as a variable (fact) nor inside any
    operator's preconditions or effects.
    """
    task = grounding.ground(standard_problem)

    # The unused predicate must not show up as a grounded fact ...
    assert all(not fact.startswith("car_color") for fact in task.facts)

    # ... nor in any operator's preconditions, add effects or delete
    # effects.
    for op in task.operators:
        for fact_group in (op.preconditions, op.add_effects, op.del_effects):
            assert not any(f.startswith("car_color") for f in fact_group)
コード例 #13
0
def test_ground():
    """
    Grounding must drop the statically irrelevant predicate "car_color":
    it may appear neither among the task's facts nor in any operator's
    preconditions, add effects or delete effects.
    """
    task = grounding.ground(standard_problem)

    unwanted = "car_color"
    assert not [v for v in task.facts if v.startswith(unwanted)]

    for op in task.operators:
        for group in (op.preconditions, op.add_effects, op.del_effects):
            assert not [f for f in group if f.startswith(unwanted)]
コード例 #14
0
def run_model(data_path, out_path):
    """Ground the model at *data_path*, run MAP inference, dump results.

    Writes POSITIVEREVIEW.txt, ACCEPTABLE.txt and PRESENTS.txt under
    *out_path*, one inferred truth value per ground atom.
    """
    rules, hard_rules, _, atoms = ground(data_path)
    results = map_inference(rules, hard_rules)

    # review atoms are keyed by (review, paper) pairs.
    with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as out:
        for (review, paper), (vid, _) in atoms['review'].items():
            print("'%s'\t'%s'\t%f" % (review, paper, results[vid]), file=out)

    # acceptable atoms are keyed by a single paper constant.
    with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as out:
        for paper, (vid, _) in atoms['acceptable'].items():
            print("'%s'\t%f" % (paper, results[vid]), file=out)

    # presents atoms are keyed by a single author constant.
    with open(ojoin(out_path, 'PRESENTS.txt'), 'w') as out:
        for author, (vid, _) in atoms['presents'].items():
            print("'%s'\t%f" % (author, results[vid]), file=out)
コード例 #15
0
def gen_heuristic_test(dom, prob, search_class, heuristic_class, h_values_plan,
                       plan_length=None):
    """Ground a PDDL task given as strings, solve it, and validate that
    the heuristic values along the found plan match ``h_values_plan``.

    The reference values were taken from Fast Downward (A* with LM-cut).
    An optional ``plan_length`` additionally pins the plan's length.
    """
    p = Parser('')
    p.domInput = dom
    p.probInput = prob
    parsed_problem = p.parse_problem(p.parse_domain(False), False)

    task = grounding.ground(parsed_problem)
    heuristic = heuristic_class(task)
    plan = search_class(task, heuristic)
    if plan_length:
        assert len(plan) == plan_length

    # The heuristic value of every state visited by the plan must match
    # the Fast Downward reference values.
    assert h_values_plan == list(_gen_h_values(task.initial_state, plan,
                                               heuristic))
コード例 #16
0
ファイル: run_experiments.py プロジェクト: gfarnadi/FairPSL
def runExperiment(dataPath, resultPath):
    """Run the PSL vs. FairPSL comparison on datasets 1..3.

    For each dataset and each fairness measure code the experiment
    records the fairness score and accuracy of plain MAP inference and
    of fair MAP inference at several epsilon values, then writes the
    whole report to *resultPath*.
    """
    epsilons = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5]
    fairMeasureCodes = ['RD', 'RR', 'RC']
    separator = '---------------------------' + '\n'
    parts = []
    for i in range(1, 4):
        print(i)
        parts.append('dataset No.' + str(i) + '\n')
        parts.append(separator)
        parts.append(separator)
        rules, hard_rules, counts, atoms = ground(dataPath + str(i) + '/')
        for code in fairMeasureCodes:
            print(code)
            # Baseline: plain MAP inference.  Its score/accuracy do not
            # depend on epsilon, but are repeated once per epsilon so the
            # report columns line up with the FairPSL rows below.
            results = map_inference(rules, hard_rules)
            accuracyScore = accuracy(dataPath + str(i) + '/', results, atoms)
            score = evaluate(results, counts, code)

            parts.append('----------' + code + '---------------' + '\n')
            parts.append('----------PSL--------------' + '\n')
            acc_row = ''
            for _ in epsilons:
                parts.append(str(score) + '\t')
                acc_row += str(accuracyScore) + '\t'

            parts.append('\n' + acc_row + '\n' +
                         '----------FairPSL----------' + '\n')
            acc_row = ''
            for epsilon in epsilons:
                print(epsilon)
                results = fair_map_inference(rules, hard_rules, counts,
                                             epsilon, code)
                accuracyScore = accuracy(dataPath + str(i) + '/', results,
                                         atoms)
                acc_row += str(accuracyScore) + '\t'
                parts.append(str(evaluate(results, counts, code)) + '\t')
            parts.append('\n')
            parts.append(acc_row + '\n')
        parts.append(separator)
        parts.append(separator)
    with open(resultPath, 'w') as f:
        print(''.join(parts), file=f)
コード例 #17
0
def test_regression():
    """Regression tests 05-08 for grounding actions that mix constants
    and free variables.

    Each PDDL domain/problem pair is parsed from in-memory strings and
    grounded; problems 5 and 6 are additionally rebuilt programmatically
    ("coded input") so that the parsed and coded groundings can be
    compared operator-by-operator.
    """
    parser = Parser("")

    def parse_problem(domain, problem):
        # Feed the PDDL strings directly into the parser (no files).
        parser.domInput = domain
        parser.probInput = problem
        domain = parser.parse_domain(False)
        return parser.parse_problem(domain, False)

    prob_05 = """
    ;; See domain file for description of this test.

    (define (problem regression-test-05)
      (:domain regression-test)
      (:objects y - object)
      (:init)
      (:goal (the-predicate x y)))
    """

    dom_05 = """
    ;; Expected behaviour: plan of length one found
    ;; Observed behaviour (r265): plan of length zero found

    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate x ?x)
      )
    )
    """

    prob_06 = """
    ;; See domain file for description of this test.

    (define (problem regression-test-06)
      (:domain regression-test)
      (:objects y - object)
      (:init)
      (:goal (the-predicate y y)))

    """
    dom_06 = """
    ;; Expected behaviour: planner proves that no plan exists
    ;; Observed behaviour (r265): plan of length one found

    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate x ?x)
      )
    )
    """

    # problem / domain 07 contains a different action compared
    # to the actions of domain 5 & 6
    prob_07 = prob_06

    dom_07 = """
    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants y - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate y ?x)
      )
    )
    """

    # action of problem / domain 8 differs only in the variable name compared
    # to the actions of problem 5 and 6: After grounding there should be no
    # difference between the grounded actions
    prob_08 = prob_05

    dom_08 = """
    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?z - object)
       :precondition (and)
       :effect (the-predicate x ?z)
      )
    )
    """

    parsed_problem5 = parse_problem(dom_05, prob_05)
    parsed_problem6 = parse_problem(dom_06, prob_06)
    parsed_problem7 = parse_problem(dom_07, prob_07)
    parsed_problem8 = parse_problem(dom_08, prob_08)

    # coded input:
    # Hand-built equivalents of problems 5/6, bypassing the parser.
    type_object = Type("object", None)
    types = {"object": type_object}
    predicates = {
        "the_predicate":
        Predicate("the-predicate", [("v1", type_object), ("v2", type_object)])
    }
    constants = {"x": type_object}
    actions = {
        "theaction":
        get_action(
            "theaction",
            [("?x", [type_object])],
            [],
            [
                Predicate("the-predicate", [("x", type_object),
                                            ("?x", type_object)])
            ],
            [],
        )
    }
    domain = Domain("regression-test", types, predicates, actions, constants)
    problem5 = Problem(
        "regression-test-05",
        domain,
        {"y": type_object},
        [],
        [Predicate("the-predicate", [("x", type_object), ("y", type_object)])],
    )
    problem6 = Problem(
        "regression-test-06",
        domain,
        {"y": type_object},
        [],
        [Predicate("the-predicate", [("y", type_object), ("y", type_object)])],
    )

    # Ground both the parsed and the coded variants.
    parsed_task5 = grounding.ground(parsed_problem5)
    coded_task5 = grounding.ground(problem5)
    parsed_task6 = grounding.ground(parsed_problem6)
    coded_task6 = grounding.ground(problem6)
    parsed_task7 = grounding.ground(parsed_problem7)
    parsed_task8 = grounding.ground(parsed_problem8)

    # (operators_a, operators_b, should_compare_equal)
    expected = [
        (parsed_task5.operators, coded_task5.operators, True),
        (parsed_task6.operators, coded_task6.operators, True),
        (parsed_task5.operators, coded_task6.operators, False),
        (parsed_task5.operators, parsed_task7.operators, False),
        (parsed_task5.operators, parsed_task8.operators, True),
    ]

    for operator1, operator2, expected_result in expected:
        assert compare_operators(operator1, operator2) == expected_result
コード例 #18
0
def test_operators():
    """Check grounding of several hand-built actions.

    Uses module-level fixtures (``types``, ``objects``, ``initial_state``,
    ``goal_state``, the ``predicate_*`` objects and ``action_drive_car``)
    and asserts that each action grounds to the expected named operator.
    """

    # action with signature with 2 types
    action_drive_vehicle = get_action(
        "DRIVE-VEHICLE",
        [
            ("vehicle", [types["car"], types["truck"]]),
            ("orig", [types["city"]]),
            ("dest", [types["city"]]),
        ],
        [predicate_veh_orig],
        [predicate_veh_dest],
        [predicate_veh_orig],
    )

    # action with predicate in add & delete list
    action_add_delete = get_action(
        "STAY",
        [("car", [types["car"]]), ("in", [types["city"]])],
        [predicate_in],
        [predicate_in],
        [predicate_in],
    )

    # action with constant input
    action_constant = get_action(
        "CONSTANT-ACTION",
        [("my_car", [types["my_car"]]), ("city", [types["city"]])],
        [],
        [
            Predicate("in", [("basel", [types["city"]]),
                             ("switzerland", [types["country"]])])
        ],
        [],
    )

    # action with only delete effects
    action_only_delete = get_action(
        "LEAVE",
        [("car", [types["car"]]), ("in", [types["city"]])],
        [predicate_in],
        [],
        [predicate_in],
    )

    # action with delete effect which does not occur in precondition
    action_delete = get_action(
        "DELETE",
        [("car", [types["car"]]), ("orig", [types["city"]]),
         ("dest", [types["city"]])],
        [],
        [predicate_car_orig],
        [predicate_car_dest],
    )

    # Map each type to its objects, then ground every action against the
    # (partial) initial state.
    type_map = grounding._create_type_map(objects)

    grounded_initial_state = grounding._get_partial_state(initial_state)

    grounded_drive_car = list(
        grounding._ground_action(action_drive_car, type_map, [],
                                 grounded_initial_state))
    grounded_drive_vehicle = list(
        grounding._ground_action(action_drive_vehicle, type_map, [],
                                 grounded_initial_state))
    grounded_add_delete = list(
        grounding._ground_action(action_add_delete, type_map, [],
                                 grounded_initial_state))
    grounded_only_delete = list(
        grounding._ground_action(action_only_delete, type_map, [],
                                 grounded_initial_state))
    grounded_delete = list(
        grounding._ground_action(action_delete, type_map, [],
                                 grounded_initial_state))

    # Full-problem grounding exercising the constant-carrying action.
    domain = Domain(
        "test_domain",
        types,
        {
            "in":
            Predicate("in", [("city", types["city"]),
                             ("country", types["country"])])
        },
        {"action-constant": action_constant},
        {"my_car": types["car"]},
    )

    problem = Problem("test_problem", domain, objects, initial_state,
                      goal_state)
    # NOTE(review): `task` is not used by the assertions below — it only
    # checks that grounding the full problem does not raise.
    task = grounding.ground(problem)

    # (expected operator name, grounded operators it must appear in)
    expected = [
        ("(DRIVE-CAR red_car freiburg basel)", grounded_drive_car),
        ("(DRIVE-VEHICLE blue_truck freiburg basel)", grounded_drive_vehicle),
        ("(STAY red_car freiburg)", grounded_add_delete),
        ("(LEAVE red_car freiburg)", grounded_only_delete),
        ("(DELETE red_car freiburg basel)", grounded_delete),
    ]

    for operator, grounded_operators in expected:
        assert any(op.name == operator for op in grounded_operators)
コード例 #19
0
ファイル: run_fpsl_pulp.py プロジェクト: gfarnadi/FairPSL
import os, sys
SCRIPTDIR = os.path.dirname(__file__)

ENGINDIR = os.path.join(SCRIPTDIR, '..', '..', 'engines')
sys.path.append(os.path.abspath(ENGINDIR))
from fpsl_pulp import fair_map_inference

PROBLEMDIR = os.path.join(SCRIPTDIR, '..', '..', 'problems', 'paper_review')
sys.path.append(os.path.abspath(PROBLEMDIR))
from grounding import ground

from os.path import join as ojoin

if __name__ == '__main__':
    data_path = ojoin(PROBLEMDIR, 'data', '1')
    rules, hard_rules, counts, atoms = ground(data_path)
    results = fair_map_inference(rules,
                                 hard_rules,
                                 counts,
                                 0.1,
                                 'RC',
                                 solver='gurobi')
    out_path = ojoin('output', 'fpsl_pulp')

    reviews = atoms['review']
    with open(ojoin(out_path, 'POSITIVEREVIEW.txt'), 'w') as f:
        for (review, paper), (vid, _) in reviews.items():
            print("'%s'\t'%s'\t%f" % (review, paper, results[vid]), file=f)

    acceptable = atoms['acceptable']
    with open(ojoin(out_path, 'ACCEPTABLE.txt'), 'w') as f:
コード例 #20
0
ファイル: test_grounding.py プロジェクト: KDercksen/SPML
def test_operators():
    """Check grounding of several hand-built actions (nose ``yield`` style).

    Relies on module-level fixtures (``types``, ``objects``,
    ``initial_state``, ``goal_state``, the ``predicate_*`` objects,
    ``action_drive_car`` and the ``operator_grounded`` check function).
    """

    # action with signature with 2 types
    action_drive_vehicle = get_action("DRIVE-VEHICLE",
                                      [("vehicle", [types["car"],
                                                    types["truck"]]),
                                       ("orig", [types["city"]]),
                                       ("dest", [types["city"]])],
                                      [predicate_veh_orig],
                                      [predicate_veh_dest],
                                      [predicate_veh_orig])

    # action with predicate in add & delete list
    action_add_delete = get_action("STAY", [("car", [types["car"]]),
                                            ("in", [types["city"]])],
                                   [predicate_in], [predicate_in],
                                   [predicate_in])

    # action with constant input
    action_constant = get_action("CONSTANT-ACTION",
                                 [("my_car", [types["my_car"]]),
                                  ("city", [types["city"]])],
                                 [],
                                 [Predicate("in", [("basel", [types["city"]]),
                                                   ("switzerland",
                                                    [types["country"]])])], [])

    # action with only delete effects
    action_only_delete = get_action("LEAVE",
                                    [("car", [types["car"]]),
                                     ("in", [types["city"]])],
                                    [predicate_in], [], [predicate_in])

    # action with delete effect which does not occur in precondition
    action_delete = get_action("DELETE", [("car", [types["car"]]),
                                          ("orig", [types["city"]]),
                                          ("dest", [types["city"]])],
                               [], [predicate_car_orig], [predicate_car_dest])

    # Map each type to its objects, then ground every action against the
    # (partial) initial state.
    type_map = grounding._create_type_map(objects)

    grounded_initial_state = grounding._get_partial_state(initial_state)

    grounded_drive_car = list(
        grounding._ground_action(action_drive_car, type_map, [],
                                 grounded_initial_state))
    grounded_drive_vehicle = list(
        grounding._ground_action(action_drive_vehicle, type_map, [],
                                 grounded_initial_state))
    grounded_add_delete = list(
        grounding._ground_action(action_add_delete, type_map, [],
                                 grounded_initial_state))
    grounded_only_delete = list(
        grounding._ground_action(action_only_delete, type_map, [],
                                 grounded_initial_state))
    grounded_delete = list(
        grounding._ground_action(action_delete, type_map, [],
                                 grounded_initial_state))

    # Full-problem grounding exercising the constant-carrying action.
    domain = Domain("test_domain", types,
                    {"in": Predicate("in", [("city", types["city"]),
                                            ("country", types["country"])])},
                    {"action-constant": action_constant},
                    {"my_car": types["car"]})

    problem = Problem("test_problem", domain, objects, initial_state,
                      goal_state)
    task = grounding.ground(problem)
    # NOTE(review): `grounded_constant` is assigned but never checked by
    # the expected list below.
    grounded_constant = task.operators

    # (expected operator name, grounded operators it must appear in)
    expected = [("(DRIVE-CAR red_car freiburg basel)", grounded_drive_car),
                ("(DRIVE-VEHICLE blue_truck freiburg basel)",
                 grounded_drive_vehicle),
                ("(STAY red_car freiburg)", grounded_add_delete),
                ("(LEAVE red_car freiburg)", grounded_only_delete),
                ("(DELETE red_car freiburg basel)", grounded_delete)]

    # nose-style generative test: yield one check per expected operator.
    for operator, grounded_operators in expected:
        yield operator_grounded, operator, grounded_operators
コード例 #21
0
ファイル: test_grounding.py プロジェクト: KDercksen/SPML
def test_regression():
    """Regression tests 05-08 for grounding actions that mix constants
    and free variables (nose ``yield`` style).

    Each PDDL domain/problem pair is parsed from in-memory strings and
    grounded; problems 5 and 6 are additionally rebuilt programmatically
    ("coded input") so that the parsed and coded groundings can be
    compared operator-by-operator.
    """
    parser = Parser('')

    def parse_problem(domain, problem):
        # Feed the PDDL strings directly into the parser (no files).
        parser.domInput = domain
        parser.probInput = problem
        domain = parser.parse_domain(False)
        return parser.parse_problem(domain, False)

    prob_05 = """
    ;; See domain file for description of this test.

    (define (problem regression-test-05)
      (:domain regression-test)
      (:objects y - object)
      (:init)
      (:goal (the-predicate x y)))
    """

    dom_05 = """
    ;; Expected behaviour: plan of length one found
    ;; Observed behaviour (r265): plan of length zero found

    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate x ?x)
      )
    )
    """

    prob_06 = """
    ;; See domain file for description of this test.

    (define (problem regression-test-06)
      (:domain regression-test)
      (:objects y - object)
      (:init)
      (:goal (the-predicate y y)))

    """
    dom_06 = """
    ;; Expected behaviour: planner proves that no plan exists
    ;; Observed behaviour (r265): plan of length one found

    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate x ?x)
      )
    )
    """

    # problem / domain 07 contains a different action compared
    # to the actions of domain 5 & 6
    prob_07 = prob_06

    dom_07 = """
    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants y - object)

      (:action theaction
       :parameters (?x - object)
       :precondition (and)
       :effect (the-predicate y ?x)
      )
    )
    """

    # action of problem / domain 8 differs only in the variable name compared
    # to the actions of problem 5 and 6: After grounding there should be no
    # difference between the grounded actions
    prob_08 = prob_05

    dom_08 = """
    (define (domain regression-test)
      (:requirements :typing) ;; work around problem in regression test #4.
      (:predicates (the-predicate ?v1 ?v2 - object))
      (:constants x - object)

      (:action theaction
       :parameters (?z - object)
       :precondition (and)
       :effect (the-predicate x ?z)
      )
    )
    """

    parsed_problem5 = parse_problem(dom_05, prob_05)
    parsed_problem6 = parse_problem(dom_06, prob_06)
    parsed_problem7 = parse_problem(dom_07, prob_07)
    parsed_problem8 = parse_problem(dom_08, prob_08)

    #coded input:
    # Hand-built equivalents of problems 5/6, bypassing the parser.
    type_object = Type("object", None)
    types = {"object": type_object}
    predicates = {"the_predicate": Predicate("the-predicate",
                                             [("v1", type_object),
                                              ("v2", type_object)])}
    constants = {"x": type_object}
    actions = {"theaction": get_action("theaction",
                                       [("?x", [type_object])], [],
                                       [Predicate("the-predicate",
                                        [("x", type_object),
                                         ("?x", type_object)])], [])}
    domain = Domain("regression-test", types, predicates, actions, constants)
    problem5 = Problem("regression-test-05", domain, {"y": type_object}, [],
                       [Predicate("the-predicate", [("x", type_object),
                                                    ("y", type_object)])])
    problem6 = Problem("regression-test-06", domain, {"y": type_object}, [],
                       [Predicate("the-predicate", [("y", type_object),
                                                    ("y", type_object)])])

    # Ground both the parsed and the coded variants.
    parsed_task5 = grounding.ground(parsed_problem5)
    coded_task5 = grounding.ground(problem5)
    parsed_task6 = grounding.ground(parsed_problem6)
    coded_task6 = grounding.ground(problem6)
    parsed_task7 = grounding.ground(parsed_problem7)
    parsed_task8 = grounding.ground(parsed_problem8)

    # (operators_a, operators_b, should_compare_equal)
    # NOTE(review): the third entry expects True here, while another
    # version of this same test (assert-based) expects False for the
    # task5-vs-coded-task6 pair — confirm which result this project's
    # grounding (with/without relevance analysis) actually produces.
    expected = [(parsed_task5.operators, coded_task5.operators, True),
                (parsed_task6.operators, coded_task6.operators, True),
                (parsed_task5.operators, coded_task6.operators, True),
                (parsed_task5.operators, parsed_task7.operators, False),
                (parsed_task5.operators, parsed_task8.operators, True)]

    # nose-style generative test: yield one comparison per expected pair.
    for operator1, operator2, expected_result in expected:
        yield compare_operators, operator1, operator2, expected_result