Example #1
    def test_alternative_partial_order(self):
        class OrderedPair:
            """Pair (a,b) where (a,b) are natural numbers.
      Partial order is defined by (a,b) <= (c,d) iff a<=c and b<=d."""
            def __init__(self, x, y):
                self.x: int = x
                self.y: int = y

            def __eq__(self, other):
                return self.x == other.x and self.y == other.y

            def __le__(self, other):
                return self.x <= other.x and self.y <= other.y

            def __hash__(self):
                return hash((self.x, self.y))

        default = Case('default', OrderedPair(0, 0), outcome=0)
        case1 = Case('1', OrderedPair(1, 0), outcome=1)
        case2 = Case('2', OrderedPair(0, 1), outcome=0)
        case3 = Case('3', OrderedPair(2, 1), outcome=0)
        cb = (case1, case2, case3)
        clf = Aacbr(default_case=default)
        clf.fit(cb)
        assert set(clf.casebase_active) == set(cb + (default, ))
        test = [
            OrderedPair(2, 0),
            OrderedPair(0, 2),
            OrderedPair(20, 20),
            OrderedPair(1, 1),
            OrderedPair(0, 0)
        ]
        expected_output = [1, 0, 0, 1, 0]
        predictions = clf.predict(test)
        assert expected_output == predictions
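The component-wise order used here is a genuine partial order, not a total one: OrderedPair(2, 0) and OrderedPair(0, 2) are incomparable, which is what distinguishes this test from the subset-of-factors ordering used in the other examples. A minimal sketch, assuming the OrderedPair class above is in scope (not part of the test suite):

# Sketch: some pairs are incomparable under the component-wise order.
p, q = OrderedPair(2, 0), OrderedPair(0, 2)
assert not (p <= q) and not (q <= p)           # neither is below the other
assert OrderedPair(1, 1) <= OrderedPair(2, 1)  # a comparable pair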
Example #2
    def test_graph_drawing(self, tmp_path):
        """Checks if a graph is created."""
        cb = self.example_cb2
        clf = Aacbr().fit(cb)
        clf.draw_graph(output_dir=tmp_path)
        output_path = tmp_path / "graph.png"
        assert output_path.exists()
        assert output_path.is_file()
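A rough usage sketch outside pytest, assuming only what the test exercises: draw_graph accepts an output_dir, writes graph.png into it, and fit returns the fitted classifier. The output directory name is hypothetical:

from pathlib import Path

cb = [Case('default', set(), outcome=0), Case('1', {'a'}, outcome=1)]
out = Path("aacbr_graphs")  # hypothetical output directory
out.mkdir(exist_ok=True)
Aacbr().fit(cb).draw_graph(output_dir=out)
print((out / "graph.png").exists())  # True if the drawing succeeded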
Example #3
    def test_inconsistent(self):
        # Even though this is already tested in test_cases.py, Aacbr
        # should expose its own interface to it.
        case1 = Case('1', {'a', 'b'}, outcome=0)
        case2 = Case('2', {'a', 'b'}, outcome=1)
        cb = [case1, case2]
        clf = Aacbr().fit(cb)
        assert clf.past_case_attacks(case1, case2)
        assert clf.past_case_attacks(case2, case1)
        assert clf.inconsistent_attacks(case1, case2)
        assert clf.inconsistent_attacks(case2, case1)
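Since case1 and case2 carry identical factors but opposite outcomes, each attacks the other; the test shows that such an inconsistent pair is reported both by the general past_case_attacks predicate and by the dedicated inconsistent_attacks one.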
Example #4
    def test_attack(self, cb):
        clf = Aacbr()
        clf.fit(cb)
        if cb == self.example_cb or cb == self.example_cb2:
            list_of_attacks = ((self.case1, self.default),
                               (self.case2, self.case1))
        else:
            raise Exception("Undefined test")
        for pair in product(cb, repeat=2):
            assert clf.past_case_attacks(pair[0], pair[1]) == (
                pair in list_of_attacks
            ), f"Violated by pair {pair}. Expected {pair in list_of_attacks}."
Example #5
    def test_argumentation_framework(self):
        cb = self.example_cb
        # newcase = self.case3
        newcase = Case('4', {'a', 'd'}, outcome=1)
        expected_output = newcase.outcome
        clf = Aacbr().fit(cb)
        framework = clf.give_argumentation_framework(newcase)
        arguments, attacks = framework
        assert arguments == set(cb + (newcase, ))
        expected_attacks = {(self.case2, self.case1),
                            (self.case1, self.default)}
        assert attacks == expected_attacks
Example #6
    def test_scikit_learning_like_api_with_case_input(self):
        # It would be nice to have it compatible with the scikit-learn API:
        # https://scikit-learn.org/stable/developers/develop.html#apis-of-scikit-learn-objects
        train_data = self.example_cb2
        test_data = [
            Case('new1', {'a'}),
            Case('new2', {'a', 'b'}),
            Case('new3', {'a', 'c'}),
            Case('new4', {'a', 'b', 'c', 'd'}),
            Case('new5', set())
        ]
        expected_output = [1, 0, 1, 0, 0]
        clf = Aacbr()
        predicted_output = clf.fit(train_data).predict(test_data)
        assert expected_output == predicted_output
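As with scikit-learn estimators, fit evidently returns the fitted classifier itself; that is what makes the clf.fit(train_data).predict(test_data) chaining here (and in later examples) work.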
Example #7
    def test_default_case_in_casebase(self):
        default = Case('default', {'a'}, outcome=0)
        case1 = Case('1', {'a', 'b'}, outcome=0)
        case2 = Case('2', {'a', 'b'}, outcome=1)
        cb = [default, case1, case2]
        clf = Aacbr().fit(cb)
        assert clf.default_case == default
Example #8
    def test_default_case_in_arguments(self):
        default = Case('default', {'a'}, outcome=0)
        case1 = Case('1', {'a', 'b'}, outcome=0)
        case2 = Case('2', {'a', 'b'}, outcome=1)
        cb = [case1, case2]
        clf = Aacbr(default_case=default).fit(cb)
        assert clf.default_case == default
Example #9
    def test_attack_new_case(self, cb):
        new = Case('new', {'a', 'd'})
        clf = Aacbr()
        clf.fit(cb)
        assert not clf.new_case_attacks(new, self.default)
        if self.case1 in cb:
            assert not clf.new_case_attacks(new, self.case1)
        if self.case2 in cb:
            assert clf.new_case_attacks(new, self.case2)
        if self.case3 in cb:
            assert clf.new_case_attacks(new, self.case3)
Example #10
    def test_scikit_learning_like_api_with_characterisation_input(self):
        case0 = Case("0", set(), outcome=0)
        cb = [case0, self.case1, self.case2, self.case3]
        train_X = [c.factors for c in cb]
        train_Y = [c.outcome for c in cb]
        test_data = [
            Case('new1', {'a'}),
            Case('new2', {'a', 'b'}),
            Case('new3', {'a', 'c'}),
            Case('new4', {'a', 'b', 'c', 'd'}),
            Case('new5', set())
        ]
        test_X = [c.factors for c in test_data]
        expected_output = [1, 0, 1, 0, 0]
        clf = Aacbr()
        clf.fit(train_X, train_Y)
        assert set(clf.casebase_active) == set(cb + [self.default])
        predicted_output = clf.predict(test_X)
        assert expected_output == predicted_output
Example #11
    def test_scikit_learning_like_api_with_characterisation_input2(self):
        train_X = [set(), {'a'}, {'a', 'b'}, {'a', 'b', 'c'}]
        train_Y = [0, 1, 0, 0]
        test_X = [{'a'}, {'a', 'b'}, {'a', 'c'}, {'a', 'b', 'c', 'd'}, set()]
        expected_output = [1, 0, 1, 0, 0]
        clf = Aacbr()
        clf.fit(train_X, train_Y)

        default = Case('default', set(), outcome=0)
        case0 = Case("0", set(), outcome=0)
        case1 = Case('1', {'a'}, outcome=1)
        case2 = Case('2', {'a', 'b'}, outcome=0)
        case3 = Case('3', {'a', 'b', 'c'}, outcome=0)
        cb = [case0, case1, case2, case3]

        assert set(clf.casebase_active) == set(cb + [default])

        predicted_output = clf.predict(test_X)
        assert expected_output == predicted_output
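The assertion on casebase_active suggests that, when fed raw characterisations and labels, the classifier builds Case objects internally: one per training pair with string ids '0', '1', ..., plus a separate 'default' case with an empty characterisation and the default outcome.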
Example #12
    def test_inconsistent_IO(self):
        default = Case('default', set(), outcome=0)
        case1 = Case('1', {'a'}, outcome=1)
        case2 = Case('2', {'a'}, outcome=0)
        case3 = Case('3', {'a', 'b'}, outcome=0)
        case4 = Case('4', {'a', 'b'}, outcome=1)
        cb = [default, case1, case2, case3, case4]
        train_data = cb
        test_data = [
            Case('new1', {'a'}),
            Case('new2', {'a', 'b'}),
            Case('new3', {'a', 'c'}),
            Case('new4', {'a', 'b', 'c', 'd'}),
            Case('new5', set()),
            Case('new6', {'a', 'c', 'd'})
        ]
        expected_output = [1, 1, 1, 1, 0, 1]
        clf = Aacbr(cautious=False)
        predicted_output = clf.fit(train_data).predict(test_data)
        assert expected_output == predicted_output
Example #13
    def test_remove_spikes(self):
        default = Case('default', set(), outcome=0)
        case1 = Case('1', {'a'}, outcome=1)
        case2 = Case('2', {'a', 'b'}, outcome=0)
        case3 = Case('3', {'b'}, outcome=0)
        case4 = Case('4', {'c'}, outcome=0)
        case5 = Case('5', {'a', 'c'}, outcome=1)
        case6 = Case('6', {'a', 'b', 'c'}, outcome=0)
        cb = (default, case1, case2, case3, case4, case5, case6)
        filtered_cb = {default, case1, case2}
        clf = Aacbr().fit(cb, remove_spikes=True)
        assert set(clf.casebase_active) == filtered_cb
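"Spikes" are apparently cases whose attacks never reach the default argument: case3 and case4 attack nothing at all, and the chain case6 -> case5 -> case4 is likewise cut off from the default, so after fitting with remove_spikes=True only default, case1 (which attacks the default) and case2 (which attacks case1) stay active.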
Example #14
    def test_scikit_learning_like_api_with_case_input_cautious(self):
        default = Case('default', set(), outcome=0)
        case1 = Case('1', {'a'}, outcome=1)
        case2 = Case('2', {'a', 'b'}, outcome=0)
        case3 = Case('3', {'c'}, outcome=1)
        case4 = Case('4', {'c', 'd'}, outcome=0)
        case5 = Case('5', {'a', 'b', 'c'}, outcome=1)
        cb = [default, case1, case2, case3, case4, case5]
        train_data = cb
        test_data = [
            Case('new1', {'a'}),
            Case('new2', {'a', 'b'}),
            Case('new3', {'a', 'c'}),
            Case('new4', {'a', 'b', 'c', 'd'}),
            Case('new5', set()),
            Case('new6', {'a', 'c', 'd'})
        ]
        expected_output = [1, 0, 1, 0, 0, 1]
        clf = Aacbr(cautious=True)
        predicted_output = clf.fit(train_data).predict(test_data)
        assert expected_output == predicted_output
        assert set(clf.casebase_active) == set(
            [default, case1, case2, case3, case4])
        # The non-cautious classifier trained on the same data disagrees on 'new4':
        clf_noncautious = Aacbr(cautious=False)
        expected_output = [1, 0, 1, 1, 0, 1]
        predicted_output = clf_noncautious.fit(train_data).predict(test_data)
        assert expected_output == predicted_output, \
            "Non-cautious is not giving the expected result!"
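Cautious mode evidently prunes the casebase while fitting: case5 never makes it into casebase_active, and the two variants then disagree on 'new4'. This matches the cautious AA-CBR construction from the literature, in which a case is only added if the casebase built so far does not already classify it correctly; that reading is an assumption here, not something the test itself states.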
Example #15
    def test_conciseness(self):
        cb = self.example_cb2
        clf = Aacbr()
        clf.fit(cb)
        assert clf.past_case_attacks(self.case2, self.case1)
        assert not clf.past_case_attacks(
            self.case3, self.case1
        ), "case3 attacks case1 even though case2 already does, violating conciseness."
Example #16
def run_test_from_files(aacbr_type, test):
    # TODO: change the interface used in the lines below in the future
    # -- it should not be inside Aacbr
    cautious = (aacbr_type == "cautious")
    casebase = load_cases(TEST_PATH_PREFIX + test["casebase"])
    clf = Aacbr(outcome_def=test["outcomes"]["default"],
                outcome_nondef=test["outcomes"]["nondefault"],
                outcome_unknown=test["outcomes"]["undecided"],
                cautious=cautious)
    clf.fit(casebase)
    casebase_active_ids = {case.id for case in clf.casebase_active}
    assert casebase_active_ids == set(test["casebase_expected"][aacbr_type])

    for newcase_spec in test["newcases"]:
        newcase = Case(id=newcase_spec["id"],
                       factors=set(newcase_spec["factors"]))
        result = clf.predict([newcase])
        # prediction = result[1][0]["Prediction"]
        prediction = result[0]
        assert prediction == newcase_spec["outcome_expected"][
            aacbr_type], f"Failed for {newcase_spec}, in type {aacbr_type}" f"Failed on test {test}"
Example #17
    def test_grounded_extension(self):
        default = Case('default', set(), outcome=0)
        case1 = Case('1', {'a'}, outcome=1)
        case2 = Case('2', {'a', 'b'}, outcome=0)
        case3 = Case('3', {'a', 'b', 'c'}, outcome=0)
        case4 = Case('4', {'c'}, outcome=1)
        example_cb = (default, case1, case2, case3, case4)

        new = Case('new', {'a', 'c'})
        new2 = Case('new2', {'a', 'b'})
        ge = {case1, case4, new}
        ge2 = {case2, default, new2}

        clf = Aacbr()
        clf.fit(example_cb)
        assert clf.grounded_extension(new_case=new) == ge
        assert clf.grounded_extension(new_case=new2) == ge2
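The grounded extension is the standard one from abstract argumentation: start from the unattacked arguments and repeatedly add every argument they defend. For new = {'a', 'c'}, the new case attacks case2 and case3 (their factors are not contained in new's), so case1 and case4 stand and defeat the default, giving {case1, case4, new}; for new2 = {'a', 'b'}, new2 attacks case3 and case4, case2 defeats case1, and the default ends up defended, giving {case2, default, new2}.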
Example #18
    def test_default_case_implicit(self):
        case1 = Case('1', {'a', 'b'}, outcome=0)
        case2 = Case('2', {'a', 'b'}, outcome=1)
        cb = [case1, case2]
        clf = Aacbr().fit(cb)
        assert clf.default_case == Case("default", set(), clf.outcome_def)
Example #19
def setup(request):
    test = request.param
    setup_result = Aacbr(test["outcomes"]["default"],
                         test["outcomes"]["nondefault"],
                         test["outcomes"]["undecided"])
    return test, setup_result
Example #20
    def test_aacbr_methods(self, cb):
        clf = Aacbr().fit(cb)
        assert clf.casebase_initial == cb
Example #21
    def test_initialisation(self, cb):
        clf = Aacbr().fit(cb)
        assert isinstance(clf, Aacbr)