Example #1
    def test_copy(self):
        eprint(">> Continuum.copy(self)")

        for i in range(self.__nb_unit_test):

            # Emptyset
            c_ = Continuum()
            c = c_.copy()

            self.assertTrue(c.is_empty())
            self.assertEqual(c.get_min_value(), None)
            self.assertEqual(c.get_max_value(), None)
            self.assertEqual(c.min_included(), None)
            self.assertEqual(c.max_included(), None)

            # Non empty
            min = random.uniform(self.__min_value, self.__max_value)
            max = random.uniform(min, self.__max_value)
            min_included = random.choice([True, False])
            max_included = random.choice([True, False])

            c_ = Continuum(min, max, min_included, max_included)
            c = c_.copy()

            self.assertFalse(c.is_empty())
            self.assertEqual(c.get_min_value(), min)
            self.assertEqual(c.get_max_value(), max)
            self.assertEqual(c.min_included(), min_included)
            self.assertEqual(c.max_included(), max_included)
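
For readers without the pylfit sources at hand, here is a minimal, self-contained sketch of the Continuum interface this test assumes (empty-set constructor, full four-argument constructor, bound accessors that return None on the empty set, and copy). Illustration only; pylfit's real class may differ in details.

# Minimal sketch of the Continuum interface assumed by the test above.
# Illustration only: pylfit's real class may differ in details such as
# how degenerate bounds like [x,x[ are handled.
class ContinuumSketch:
    def __init__(self, min_value=None, max_value=None,
                 min_included=None, max_included=None):
        args = [min_value, max_value, min_included, max_included]
        if all(a is None for a in args):  # no argument: the empty set
            self._empty = True
            return
        if any(a is None for a in args):  # partial arguments are refused
            raise ValueError("give all four arguments or none")
        if max_value < min_value:
            raise ValueError("max_value must be >= min_value")
        # ]x,x[, [x,x[ and ]x,x] contain nothing: collapse to the empty set
        self._empty = (min_value == max_value
                       and not (min_included and max_included))
        if not self._empty:
            self._min, self._max = min_value, max_value
            self._min_in, self._max_in = min_included, max_included

    def is_empty(self):
        return self._empty

    def get_min_value(self):  # bound accessors return None on the empty set
        return None if self._empty else self._min

    def get_max_value(self):
        return None if self._empty else self._max

    def min_included(self):
        return None if self._empty else self._min_in

    def max_included(self):
        return None if self._empty else self._max_in

    def copy(self):
        if self._empty:
            return ContinuumSketch()
        return ContinuumSketch(self._min, self._max,
                               self._min_in, self._max_in)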
Example #2
    def test_to_string(self):
        eprint(">> Continuum.to_string(self)")

        for i in range(self.__nb_unit_test):
            c = Continuum()

            self.assertEqual(c.to_string(), u"\u2205")

            c = Continuum.random(self.__min_value, self.__max_value)

            if c.is_empty():
                self.assertEqual(c.to_string(), u"\u2205")
                continue  # the bound getters return None on the empty set, so skip the checks below

            out = ""

            if c.min_included():
                out += "["
            else:
                out += "]"

            out += str(c.get_min_value()) + "," + str(c.get_max_value())

            if c.max_included():
                out += "]"
            else:
                out += "["

            self.assertEqual(c.to_string(), out)
            self.assertEqual(c.__str__(), out)
            self.assertEqual(c.__repr__(), out)
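
The loop above reconstructs the bracket convention character by character. A compact restatement of that convention as a standalone helper (names are illustrative):

# Bracket convention checked above: an included bound gets the bracket
# facing inward, an excluded bound faces outward; u"\u2205" for the empty set.
def render(min_v, max_v, min_in, max_in):
    left = "[" if min_in else "]"
    right = "]" if max_in else "["
    return left + str(min_v) + "," + str(max_v) + right

assert render(1.0, 2.0, True, False) == "[1.0,2.0["
assert render(1.0, 2.0, False, False) == "]1.0,2.0["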
    def test_copy(self):
        eprint(">> ContinuumRule.copy(self)")

        for i in range(self.__nb_unit_test):
            head_var_id = random.randint(0, self.__max_variables)
            head_domain = Continuum.random(self.__min_value, self.__max_value)

            size = random.randint(1, self.__max_variables)
            body = []
            locked = []
            for j in range(size):
                var_id = random.randint(0, self.__max_variables)
                domain = Continuum.random(self.__min_value, self.__max_value)
                if var_id not in locked:
                    body.append((var_id, domain))
                    locked.append(var_id)

            r_ = ContinuumRule(head_var_id, head_domain, body)
            r = r_.copy()

            self.assertEqual(r.get_head_variable(), head_var_id)
            self.assertEqual(r.get_head_value(), head_domain)
            for e in body:
                self.assertTrue(e in r.get_body())
            for e in r.get_body():
                self.assertTrue(e in body)

            self.assertEqual(r.get_head_variable(), r_.get_head_variable())
            self.assertEqual(r.get_head_value(), r_.get_head_value())
            for e in r_.get_body():
                self.assertTrue(e in r.get_body())
            for e in r.get_body():
                self.assertTrue(e in r_.get_body())
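
A matching minimal sketch of the ContinuumRule accessors used by this test (head variable, head value, body of (variable, Continuum) pairs, copy); again an illustration, not pylfit's implementation.

# Minimal ContinuumRule sketch matching the accessors used above
# (illustration only; the pylfit class is richer).
class ContinuumRuleSketch:
    def __init__(self, head_variable, head_value, body=None):
        self._head_variable = head_variable
        self._head_value = head_value
        # body: list of (variable_id, Continuum) conditions, one per variable
        self._body = list(body) if body is not None else []

    def get_head_variable(self):
        return self._head_variable

    def get_head_value(self):
        return self._head_value

    def get_body(self):
        return list(self._body)

    def copy(self):
        return ContinuumRuleSketch(self._head_variable, self._head_value,
                                   self._body)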
    def _check_rules_and_predictions(self, dataset, expected_string_rules):
        expected_string_rules = [
            s.strip() for s in expected_string_rules.strip().split("\n")
            if len(s.strip()) > 0
        ]

        expected_rules = []
        for string_rule in expected_string_rules:
            expected_rules.append(
                Rule.from_string(string_rule, dataset.features,
                                 dataset.targets))

        #eprint(expected_rules)

        output = GULA.fit(dataset)

        #eprint(output)

        for r in expected_rules:
            if r not in output:
                eprint("Missing rule: ", r)
            self.assertTrue(r in output)

        for r in output:
            if r not in expected_rules:
                eprint("Additional rule: ", r)
            self.assertTrue(r in expected_rules)

        model = DMVLP(dataset.features, dataset.targets, output)

        expected = set((tuple(s1), tuple(s2)) for s1, s2 in dataset.data)
        predicted = set()

        for s1 in model.feature_states():
            prediction = model.predict([s1])
            for s2 in prediction[tuple(s1)]:
                predicted.add((tuple(s1), tuple(s2)))

        eprint()
        done = 0
        for s1, s2 in expected:
            done += 1
            eprint("\rChecking transitions ", done, "/", len(expected), end='')
            self.assertTrue((s1, s2) in predicted)

        done = 0
        for s1, s2 in predicted:
            done += 1
            eprint("\rChecking transitions ",
                   done,
                   "/",
                   len(predicted),
                   end='')
            self.assertTrue((s1, s2) in expected)
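
The two inclusion loops at the end amount to set equality between expected and predicted transitions; the loop form is kept so mismatches can be reported before an assertion fires. For reference, the same check with set operations (standalone sketch):

# Equivalent set-based check (loses the per-transition progress reporting):
def symmetric_mismatch(expected, predicted):
    """Return (missing, extra) transitions between two sets."""
    return expected - predicted, predicted - expected

missing, extra = symmetric_mismatch({(1, 2)}, {(1, 2), (3, 4)})
assert missing == set() and extra == {(3, 4)}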
    def test_constructor_empty(self):
        eprint(">> ContinuumRule.__init__(self, head_variable, head_value)")

        for i in range(self.__nb_unit_test):
            var_id = random.randint(0, self.__max_variables)
            domain = Continuum.random(self.__min_value, self.__max_value)
            c = ContinuumRule(var_id, domain)

            self.assertEqual(c.get_head_variable(), var_id)
            self.assertEqual(c.get_head_value(), domain)
            self.assertEqual(c.get_body(), [])
Example #6
    def test_constructor_empty(self):
        eprint(">> Continuum.__init__(self)")

        for i in range(self.__nb_unit_test):
            c = Continuum()

            self.assertTrue(c.is_empty())
            self.assertEqual(c.get_min_value(), None)
            self.assertEqual(c.get_max_value(), None)
            self.assertEqual(c.min_included(), None)
            self.assertEqual(c.max_included(), None)
    def test_static_random(self):
        eprint(">> ContinuumRule.random(min_value, max_value)")

        for i in range(self.__nb_unit_test):

            variables, domains = self.random_system()

            # rule characteristics
            var = random.randint(0, len(variables) - 1)
            var_domain = domains[var]
            val = Continuum.random(var_domain.get_min_value(),
                                   var_domain.get_max_value())
            min_size = random.randint(0, len(variables))
            max_size = random.randint(min_size, len(variables))
            r = ContinuumRule.random(var, val, variables, domains, min_size,
                                     max_size)

            # Check head
            self.assertEqual(r.get_head_variable(), var)
            self.assertEqual(r.get_head_value(), val)

            # Check body
            self.assertTrue(r.size() >= min_size)
            self.assertTrue(r.size() <= max_size)

            appears = []
            for b_var, b_val in r.get_body():  # don't shadow the head's var/val used below
                self.assertTrue(b_var >= 0 and b_var < len(variables))
                self.assertTrue(domains[b_var].includes(b_val))

                self.assertFalse(b_var in appears)
                appears.append(b_var)

            # min > max
            min_size = random.randint(0, len(variables))
            max_size = random.randint(-100, min_size - 1)
            self.assertRaises(ValueError, ContinuumRule.random, var, val,
                              variables, domains, min_size, max_size)

            # min > nb variables
            min_size = random.randint(len(variables) + 1, len(variables) + 100)
            max_size = random.randint(min_size, len(variables) + 100)
            self.assertRaises(ValueError, ContinuumRule.random, var, val,
                              variables, domains, min_size, max_size)

            # max > nb variables
            min_size = random.randint(0, len(variables))
            max_size = random.randint(len(variables) + 1, len(variables) + 100)
            self.assertRaises(ValueError, ContinuumRule.random, var, val,
                              variables, domains, min_size, max_size)
Example #8
    def test_constructor_full(self):
        eprint(
            ">> Continuum.__init__(self, min_value=None, max_value=None, min_included=None, max_included=None)"
        )

        for i in range(self.__nb_unit_test):

            # Valid continuum
            #-----------------
            min = random.uniform(self.__min_value, self.__max_value)
            max = random.uniform(min, self.__max_value)
            min_included = random.choice([True, False])
            max_included = random.choice([True, False])

            c = Continuum(min, max, min_included, max_included)

            self.assertFalse(c.is_empty())
            self.assertEqual(c.get_min_value(), min)
            self.assertEqual(c.get_max_value(), max)
            self.assertEqual(c.min_included(), min_included)
            self.assertEqual(c.max_included(), max_included)

            # Implicit emptyset
            #-------------------
            min = random.uniform(self.__min_value, self.__max_value)

            c = Continuum(min, min, False, False)

            self.assertTrue(c.is_empty())
            self.assertEqual(c.get_min_value(), None)
            self.assertEqual(c.get_max_value(), None)
            self.assertEqual(c.min_included(), None)
            self.assertEqual(c.max_included(), None)

            # Invalid Continuum
            #--------------------
            max = random.uniform(self.__min_value, min - 0.001)

            self.assertRaises(ValueError, Continuum, min, max, min_included,
                              max_included)

            # Invalid number of arguments
            #-------------------------------

            self.assertRaises(ValueError, Continuum, min)
            self.assertRaises(ValueError, Continuum, min, max)
            self.assertRaises(ValueError, Continuum, min, max, min_included)
            # Note: four arguments is the valid arity; this call raises only
            # because max < min still holds from the invalid-continuum case above.
            self.assertRaises(ValueError, Continuum, min, max, min_included,
                              max_included)
Example #9
    def test_size(self):
        eprint(">> Continuum.size(self)")

        for i in range(self.__nb_unit_test):

            # empty set
            c = Continuum()
            self.assertEqual(c.size(), 0.0)

            # regular
            c = Continuum.random(self.__min_value, self.__max_value)

            if not c.is_empty():
                self.assertEqual(c.size(),
                                 c.get_max_value() - c.get_min_value())
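
The assertion pins down size() as the plain interval length max - min, independent of bound inclusion (an excluded endpoint has measure zero). A standalone restatement:

# size() as asserted above: interval length, independent of bound inclusion
def continuum_size(min_v, max_v, empty=False):
    return 0.0 if empty else max_v - min_v

assert continuum_size(None, None, empty=True) == 0.0
assert continuum_size(1.5, 4.0) == 2.5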
Example #10
    def _check_rules(self, model, expected_string_rules):
        expected_string_rules = [s.strip() for s in expected_string_rules.strip().split("\n") if len(s.strip()) > 0 ]

        expected_rules = []
        for string_rule in expected_string_rules:
            expected_rules.append(Rule.from_string(string_rule, model.features, model.targets))

        for r in expected_rules:
            if r not in model.rules:
                eprint("Missing rule: ", r.logic_form(model.features, model.targets), " (", r.to_string(),")")
            self.assertTrue(r in model.rules)

        for r in model.rules:
            if r not in expected_rules:
                eprint("Additional rule: ", r.logic_form(model.features, model.targets), " (", r.to_string(),")")
            self.assertTrue(r in expected_rules)
Example #11
    def test_fit(self):
        print(">> Probabilizer.fit(variables, values, transitions, complete, synchronous_independant)")

        # No transitions
        p = self.random_program(self.__nb_features, self.__nb_targets, self.__nb_values, self.__body_size)
        p_ = Probabilizer.fit([], p.get_features(), p.get_targets())
        self.assertEqual(p_.get_features(), p.get_features())
        self.assertEqual(p_.get_targets(), [(i[0],[]) for i in p.get_targets()])
        rules = []
        self.assertEqual(p_.get_rules(),rules)

        for i in range(self.__nb_unit_test):
            #eprint(i,"/",self.__nb_unit_test)

            p = self.random_program(self.__nb_features, self.__nb_targets, self.__nb_values, self.__body_size)
            t = self.random_independant_transitions(p)

            eprint("Input: ",t)
            t = Probabilizer.encode_transitions_set(t, p.get_features(), p.get_targets())
            #eprint(t)

            p_ = Probabilizer.fit(t, p.get_features(), p.get_targets())

            #eprint(p_.logic_form())

            probability_encoded_input = Probabilizer.encode(t)
            probability_encoded_targets = Probabilizer.conclusion_values(p.get_targets(), probability_encoded_input)

            probability_encoded_input = [([p_.get_features()[var][1][i[var]] for var in range(len(i))],j) for (i,j) in probability_encoded_input]

            eprint(probability_encoded_input)

            # Only original transitions are produced from observed states
            for s1, s2 in probability_encoded_input:
                successors = Synchronous.next(p_, s1)  # renamed from 'next' to avoid shadowing the builtin
                #eprint(s2)
                #eprint(successors)
                #eprint(conclusion_values)
                successors = [tuple(s) for s in successors]
                eprint(s1)
                eprint(successors)
                eprint(s2)
                self.assertTrue(s2 in successors)
                for s in successors:
                    self.assertTrue((s1, s) in probability_encoded_input)
Example #12
    def test_static_random(self):
        eprint(">> Continuum.random(min_value, max_value)")

        for i in range(self.__nb_unit_test):

            # Valid values
            min = random.uniform(self.__min_value, self.__max_value)
            max = random.uniform(min, self.__max_value)

            c = Continuum.random(min, max)

            self.assertTrue(c.get_min_value() >= min
                            and c.get_min_value() <= max)
            self.assertTrue(isinstance(c.min_included(), bool))
            self.assertFalse(c.is_empty())

            # with min size
            min_size = 0
            while min_size == 0:
                min_size = random.uniform(-100, 0)

            self.assertRaises(ValueError, Continuum.random, min, max, min_size)

            min_size = random.uniform(0, self.__max_value)

            if min_size >= (max - min):
                self.assertRaises(ValueError, Continuum.random, min, max,
                                  min_size)

            c = Continuum.random(min, max)

            self.assertTrue(c.get_min_value() >= min
                            and c.get_min_value() <= max)
            self.assertTrue(isinstance(c.min_included(), bool))
            self.assertFalse(c.is_empty())

            # Invalid values
            min = random.uniform(self.__min_value, self.__max_value)
            max = random.uniform(self.__min_value, min - 0.001)

            self.assertRaises(ValueError, Continuum.random, min, max)
Example #13
    def test_includes(self):
        eprint(">> Continuum.includes(self, element)")

        for i in range(self.__nb_unit_test):

            # bad argument type
            c = Continuum.random(self.__min_value, self.__max_value)

            self.assertRaises(TypeError, c.includes, "test")

            # float argument
            #----------------

            # empty set includes nothing
            c = Continuum()
            value = random.uniform(self.__min_value, self.__max_value)
            self.assertFalse(c.includes(value))

            c = Continuum.random(self.__min_value, self.__max_value)

            # Before min
            value = c.get_min_value()
            while value == c.get_min_value():
                value = random.uniform(c.get_min_value() - 100.0,
                                       c.get_min_value())

            self.assertFalse(c.includes(value))

            # on min bound
            self.assertEqual(c.includes(c.get_min_value()), c.min_included())

            # Inside
            value = c.get_min_value()
            while value == c.get_min_value() or value == c.get_max_value():
                value = random.uniform(c.get_min_value(), c.get_max_value())

            self.assertTrue(c.includes(value))

            # on max bound
            self.assertEqual(c.includes(c.get_max_value()), c.max_included())

            # after max bound
            value = c.get_max_value()
            while value == c.get_max_value():
                value = random.uniform(c.get_max_value(),
                                       c.get_max_value() + 100.0)

            self.assertFalse(c.includes(value))

            # int argument
            #--------------

            # empty set includes nothing
            c = Continuum()
            value = random.randint(int(self.__min_value),
                                   int(self.__max_value))
            self.assertFalse(c.includes(value))

            c = Continuum.random(self.__min_value, self.__max_value)

            while int(c.get_max_value()) - int(c.get_min_value()) <= 1:
                min = random.uniform(self.__min_value, self.__max_value)
                max = random.uniform(min, self.__max_value)

                c = Continuum.random(min, max)

            #eprint(c.to_string())

            # Before min
            value = random.randint(int(c.get_min_value() - 100),
                                   int(c.get_min_value()) - 1)

            self.assertFalse(c.includes(value))

            # on min bound
            self.assertEqual(c.includes(c.get_min_value()), c.min_included())

            # Inside
            value = random.randint(
                int(c.get_min_value()) + 1,
                int(c.get_max_value()) - 1)

            #eprint(value)

            self.assertTrue(c.includes(value))

            # on max bound
            self.assertEqual(c.includes(c.get_max_value()), c.max_included())

            # after max bound
            value = random.randint(
                int(c.get_max_value()) + 1, int(c.get_max_value() + 100))

            self.assertFalse(c.includes(value))

            # continuum argument
            #--------------------

            # 0) c is empty set
            c = Continuum()
            c_ = Continuum()
            self.assertTrue(c.includes(c_))  # empty set VS empty set

            c_ = Continuum.random(self.__min_value, self.__max_value)
            while c_.is_empty():
                c_ = Continuum.random(self.__min_value, self.__max_value)
            self.assertFalse(c.includes(c_))  # empty set VS non empty

            # 1) c is non empty
            c = Continuum.random(self.__min_value, self.__max_value)

            self.assertTrue(c.includes(Continuum()))  # non empty VS empty set
            self.assertTrue(c.includes(c))  # includes itself

            # 1.1) Lower bound over
            c_ = Continuum.random(c.get_min_value(), self.__max_value)
            while c_.is_empty():
                c_ = Continuum.random(c.get_min_value(), self.__max_value)

            value = c.get_min_value()
            while value == c.get_min_value():
                value = random.uniform(c.get_min_value() - 100,
                                       c.get_min_value())

            c_.set_lower_bound(value, random.choice([True, False]))
            self.assertFalse(c.includes(c_))

            # 1.2) on min bound
            c_ = Continuum.random(c.get_min_value(), self.__max_value)
            while c_.is_empty():
                c_ = Continuum.random(c.get_min_value(), self.__max_value)
            c_.set_lower_bound(c.get_min_value(), random.choice([True, False]))

            if not c.min_included() and c_.min_included():  # one value over
                self.assertFalse(c.includes(c_))

            # 1.3) upper bound over
            c_ = Continuum.random(self.__min_value, c.get_max_value())
            while c_.is_empty():
                c_ = Continuum.random(self.__min_value, c.get_max_value())

            value = c.get_max_value()
            while value == c.get_max_value():
                value = random.uniform(c.get_max_value(),
                                       c.get_max_value() + 100)

            c_.set_upper_bound(value, random.choice([True, False]))

            self.assertFalse(c.includes(c_))

            # 1.4) on upper bound
            c_ = Continuum.random(self.__min_value, c.get_max_value())
            while c_.is_empty():
                c_ = Continuum.random(self.__min_value, c.get_max_value())
            c_.set_upper_bound(c.get_max_value(), random.choice([True, False]))

            if not c.max_included() and c_.max_included():  # one value over
                self.assertFalse(c.includes(c_))

            # 1.5) inside
            min = c.get_min_value()
            while min == c.get_min_value():
                min = random.uniform(c.get_min_value(), c.get_max_value())
            max = c.get_max_value()
            while max == c.get_max_value():
                max = random.uniform(min, c.get_max_value())
            c_ = Continuum(min, max, random.choice([True, False]),
                           random.choice([True, False]))

            self.assertTrue(c.includes(c_))
            self.assertFalse(c_.includes(c))
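
This test fully specifies includes() for scalar arguments: values strictly inside are included, values beyond a bound are not, and a value exactly on a bound is included iff that bound is. A self-contained sketch of that decision rule:

# Scalar membership test as specified by the assertions above (sketch).
def continuum_includes(min_v, max_v, min_in, max_in, x, empty=False):
    if empty:
        return False      # the empty set includes nothing
    if x < min_v or x > max_v:
        return False      # outside the bounds
    if x == min_v:
        return min_in     # on the lower bound: depends on inclusion
    if x == max_v:
        return max_in     # on the upper bound: depends on inclusion
    return True           # strictly inside

assert continuum_includes(0.0, 1.0, False, True, 0.0) is False
assert continuum_includes(0.0, 1.0, False, True, 1.0) is True
assert continuum_includes(0.0, 1.0, False, True, 0.5) is True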
Example #14
    def test_fit(self):
        print(">> LFkT.fit(variables, values, time_series)")

        # No transitions
        p = self.random_program(self.__nb_features, self.__nb_targets,
                                self.__nb_values, self.__body_size)

        min_body_size = 0
        max_body_size = random.randint(min_body_size, len(p.get_features()))
        delay_original = random.randint(2, self.__max_delay)

        features = []
        targets = p.get_targets()

        for d in range(1, delay_original + 1):
            features += [(var + "_" + str(d), vals)
                         for var, vals in p.get_features()]

        p = LogicProgram.random(features, targets, min_body_size,
                                max_body_size)
        p_ = LFkT.fit([], p.get_features(), p.get_targets())
        self.assertEqual(p_.get_features(), p.get_features())
        self.assertEqual(p_.get_targets(), p.get_targets())
        self.assertEqual(p_.get_rules(), [])

        for i in range(self.__nb_unit_test):
            #eprint("\rTest ", i+1, "/", self.__nb_unit_test, end='')

            # Generate transitions
            p = self.random_program(self.__nb_features, self.__nb_targets,
                                    self.__nb_values, self.__body_size)

            min_body_size = 0
            max_body_size = random.randint(min_body_size,
                                           len(p.get_features()))
            delay_original = random.randint(2, self.__max_delay)

            features = []
            targets = p.get_features()

            for d in range(0, delay_original):
                features += [(var + "_t-" + str(d + 1), vals)
                             for var, vals in p.get_features()]

            p = LogicProgram.random(features, targets, min_body_size,
                                    max_body_size)

            cut = len(targets)
            time_series = [[
                list(s[cut * (d - 1):cut * d])
                for d in range(1, delay_original + 1)
            ] for s in p.feature_states()]

            #eprint(delay_original)
            #eprint(p)
            #eprint(p.states())
            #eprint(time_series)
            #exit()

            time_serie_size = delay_original + 2

            for serie in time_series:
                while len(serie) < time_serie_size:
                    serie_end = serie[-delay_original:]
                    #eprint(serie_end)
                    serie_end = list(itertools.chain.from_iterable(serie_end))
                    serie.append(Synchronous.next(p, serie_end)[0])

            #eprint(p.logic_form())
            #for s in time_series:
            #    eprint(s)

            p_ = LFkT.fit(time_series, targets, targets)
            rules = p_.get_rules()

            #eprint(p_.logic_form())

            for variable in range(len(targets)):
                for value in range(len(targets[variable][1])):
                    #eprint("var="+str(variable)+", val="+str(value))
                    pos, neg, delay = LFkT.interprete(time_series, targets,
                                                      targets, variable, value)

                    #eprint("pos: ", pos)

                    # Each positive is explained
                    for s in pos:
                        cover = False
                        for r in rules:
                            if r.get_head_variable() == variable \
                               and r.get_head_value() == value \
                               and r.matches(s):
                                cover = True
                        #if not cover:
                        #    eprint(p_)
                        #    eprint(s)
                        self.assertTrue(cover)  # One rule cover the example

                    #eprint("neg: ", neg)

                    # No negative is covered
                    for s in neg:
                        cover = False
                        for r in rules:
                            if r.get_head_variable() == variable \
                               and r.get_head_value() == value \
                               and r.matches(s):
                                cover = True
                        self.assertFalse(cover)  # no rule covers the example

                    # All rules are minimal
                    for r in rules:
                        if r.get_head_variable() == variable and r.get_head_value() == value:
                            for (var, val) in r.get_body():
                                r.remove_condition(var)  # Try remove condition

                                conflict = False
                                for s in neg:
                                    if r.matches(s):  # covers a negative example
                                        conflict = True
                                        break

                                # DEBUG:
                                if not conflict:
                                    eprint("not minimal " + r.to_string())
                                    eprint(neg)

                                self.assertTrue(conflict)
                                r.add_condition(var, val)  # cancel the removal
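
The last block above is the standard minimality check for learned rules: remove one condition, and if the weakened rule still avoids every negative example, the original rule was not minimal. The same idea on a toy rule representation (plain dicts, no pylfit types):

# Minimality check on a toy rule: the body is a dict {variable: value};
# a rule matches a state (a tuple) when every condition holds.
def matches(body, state):
    return all(state[v] == val for v, val in body.items())

def is_minimal(body, negatives):
    # removing any single condition must make the rule cover a negative
    for v in list(body):
        weakened = {k: x for k, x in body.items() if k != v}
        if not any(matches(weakened, s) for s in negatives):
            return False
    return True

assert is_minimal({0: 1, 1: 1}, [(0, 1), (1, 0)]) is True
assert is_minimal({0: 1, 1: 1}, [(0, 0)]) is False  # one condition suffices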
    ]

    # 1: Parameters
    #---------------
    lfit_methods = ["gula", "pride", "brute-force", "synchronizer"]
    baseline_methods = ["baseline"]
    experiements = ["scalability", "accuracy", "explanation"]
    observations = ["all_from_init_states", "random_transitions"]

    if len(sys.argv) < 9 or (sys.argv[1]
                             not in lfit_methods + baseline_methods) or (
                                 sys.argv[6]
                                 not in experiements) or (sys.argv[7]
                                                          not in observations):
        eprint(
            "Please give the experiment to perform as parameter: gula/pride/brute-force/synchronizer/baseline and min_var, max_var, max_var_general, run_tests, scalability/accuracy/explanation, all_from_init_states/random_transitions, time_out"
        )
        exit()

    # membership in the method lists was already validated above
    algorithm = sys.argv[1]

    min_var = int(sys.argv[2])
    max_var = int(sys.argv[3])
    max_var_general = int(sys.argv[4])
    run_tests = int(sys.argv[5])
    experiement = SCALABILITY_EXPERIEMENT
    mode = "all_from_init_states"

    if sys.argv[6] == "scalability":
        experiement = SCALABILITY_EXPERIEMENT
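
For reference, the argv layout these checks enforce; the script name below is hypothetical and the values are illustrative:

# Hypothetical invocation (script name assumed, argument values illustrative):
#   python benchmark_script.py gula 3 6 10 5 scalability all_from_init_states 60
#
# sys.argv[1]  algorithm        one of gula/pride/brute-force/synchronizer/baseline
# sys.argv[2]  min_var          int
# sys.argv[3]  max_var          int
# sys.argv[4]  max_var_general  int
# sys.argv[5]  run_tests        int
# sys.argv[6]  experiment       one of scalability/accuracy/explanation
# sys.argv[7]  observations     all_from_init_states or random_transitions
# sys.argv[8]  time_out         per the usage message above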
def evaluate_accuracy_on_bn_benchmark(algorithm,
                                      benchmark,
                                      semantics,
                                      run_tests,
                                      train_size,
                                      mode,
                                      benchmark_name,
                                      full_transitions=None):
    """
        Evaluate accuracy of an algorithm
        over a given benchmark with a given number/proporsion
        of training samples.

        Args:
            algorithm: Class
                Class of the algorithm to be tested
            benchmark: DMVLP
                benchmark model to be tested
            semantics: Class
                Class of the semantics to be tested
            train_size: float in [0,1] or int
                Size of the training set in proportion (float in [0,1])
            mode: string
                "all_from_init_states": training contains all transitions from its initials states
                "random": training contains random transitions, 80%/20% train/test then train is reduced to train_size
            benchmark_name: string
                for csv output.
        Returns:
        train_set_size: int
        test_set_size: int
        accuracy: float
            Average accuracy score.
        csv_output: String
            csv string format of all tests run statistiques.
    """
    csv_output = ""

    # 0) Extract logic program
    #-----------------------
    #eprint(benchmark.to_string())

    # 1) Generate transitions
    #-------------------------------------

    # Boolean network benchmarks only have rules for value 1; if none match, the next value is 0
    #default = [[0] for v in benchmark.targets]
    if full_transitions is None:
        eprint(">>> Generating benchmark transitions...")
        full_transitions = [
            (np.array(feature_state),
             np.array(["0" if x == "?" else "1" for x in target_state]))
            for feature_state in benchmark.feature_states()
            for target_state in benchmark.predict([feature_state], semantics)[
                tuple(feature_state)]
        ]
    full_transitions_grouped = {
        tuple(s1): set(
            tuple(s2_) for s1_, s2_ in full_transitions
            if tuple(s1) == tuple(s1_))
        for s1, s2 in full_transitions
    }
    #eprint("Transitions: ", full_transitions)
    #eprint("Grouped: ", full_transitions_grouped)

    #eprint(benchmark.to_string())
    #eprint(semantics.states(P))
    #eprint(full_transitions)

    # 2) Prepare scores containers
    #---------------------------
    results_time = []
    results_score = []

    # 3) Average over several tests
    #-----------------------------
    for run in range(run_tests):

        # 3.1 Split train/test sets on initial states
        #----------------------------------------------
        all_feature_states = list(full_transitions_grouped.keys())
        random.shuffle(all_feature_states)

        # Test set: all transitions from the last 20% of feature states
        test_begin = max(1, int(0.8 * len(all_feature_states)))
        test_feature_states = all_feature_states[test_begin:]

        test = []
        for s1 in test_feature_states:
            test.extend([(list(s1), list(s2))
                         for s2 in full_transitions_grouped[s1]])
        random.shuffle(test)

        # Train set
        # All transitions from the first train_size % of feature states (above 80% this overlaps the test set)
        if mode == "all_from_init_states":
            train_end = max(1, int(train_size * len(all_feature_states)))
            train_feature_states = all_feature_states[:train_end]
            train = []
            for s1 in train_feature_states:
                train.extend([(list(s1), list(s2))
                              for s2 in full_transitions_grouped[s1]])
            random.shuffle(train)
        # Random train_size % of transitions from the feature states not in test set
        elif mode == "random_transitions":
            train_feature_states = all_feature_states[:test_begin]
            train = []
            for s1 in train_feature_states:
                train.extend([(list(s1), list(s2))
                              for s2 in full_transitions_grouped[s1]])
            random.shuffle(train)
            train_end = int(max(1, train_size * len(train)))
            train = train[:train_end]
        else:
            raise ValueError("Wrong mode requested")

        #eprint("train: ", train)
        #eprint("test: ", test)
        #exit()

        # DBG
        if run == 0:
            eprint(">>> Start Training on " + str(len(train)) + "/" +
                   str(len(full_transitions)) + " transitions (" +
                   str(round(100 * len(train) / len(full_transitions), 2)) +
                   "%)")

        eprint(">>>> run: " + str(run + 1) + "/" + str(run_tests), end='')

        train_dataset = StateTransitionsDataset([(np.array(s1), np.array(s2))
                                                 for (s1, s2) in train],
                                                benchmark.features,
                                                benchmark.targets)
        test_dataset = StateTransitionsDataset([(np.array(s1), np.array(s2))
                                                for s1, s2 in test],
                                               benchmark.features,
                                               benchmark.targets)

        # 3.2) Learn from training set
        #------------------------------------------

        if algorithm == "gula" or algorithm == "pride":
            # possibilities
            start = time.time()
            model = WDMVLP(features=benchmark.features,
                           targets=benchmark.targets)
            model.compile(algorithm=algorithm)
            model.fit(dataset=train_dataset)
            #model = algorithm.fit(train, benchmark.features, benchmark.targets, supported_only=True)
            end = time.time()

            results_time.append(round(end - start, 3))

        # 3.4) Evaluate on accuracy of domain prediction on test set
        #------------------------------------------------------------

        # csv format of results
        expected_train_size = train_size
        expected_test_size = 0.2
        real_train_size = round(len(train) / (len(full_transitions)), 2)
        real_test_size = round(len(test) / (len(full_transitions)), 2)

        if mode == "random_transitions":
            expected_train_size = round(train_size * 0.8, 2)

        common_settings = \
        semantics + "," +\
        benchmark_name + "," +\
        str(len(benchmark.features)) + "," +\
        str(len(full_transitions)) + "," +\
        mode + "," +\
        str(expected_train_size) + "," +\
        str(expected_test_size) + "," +\
        str(real_train_size) + "," +\
        str(real_test_size) + "," +\
        str(len(train)) + "," +\
        str(len(test))

        if algorithm == "gula" or algorithm == "pride":
            accuracy = accuracy_score(model=model, dataset=test_dataset)
            print(algorithm + "," + common_settings + "," + str(accuracy))
            eprint(" accuracy: " + str(round(accuracy * 100, 2)) + "%")
            results_score.append(accuracy)

        if algorithm == "baseline":
            csv_output_settings = csv_output
            predictions = {
                tuple(s1): {
                    variable:
                    {value: random.uniform(0.0, 1.0)
                     for value in values}
                    for (variable, values) in test_dataset.targets
                }
                for s1 in test_feature_states
            }
            #eprint(prediction)
            accuracy = accuracy_score_from_predictions(predictions=predictions,
                                                       dataset=test_dataset)
            print("baseline_random," + common_settings + "," + str(accuracy))
            eprint()
            eprint(">>>>> accuracy: " + str(round(accuracy * 100, 2)) +
                   "% (baseline_random)")

            #if algorithm == "always_0.0":
            predictions = {
                tuple(s1): {
                    variable: {value: 0.0
                               for value in values}
                    for (variable, values) in test_dataset.targets
                }
                for s1 in test_feature_states
            }
            #eprint(predictions)
            accuracy = accuracy_score_from_predictions(predictions=predictions,
                                                       dataset=test_dataset)
            print("baseline_always_0.0," + common_settings + "," +
                  str(accuracy))
            eprint(">>>>> accuracy: " + str(round(accuracy * 100, 2)) +
                   "% (baseline_always_0.0)")

            #if algorithm == "always_0.5":
            predictions = {
                tuple(s1): {
                    variable: {value: 0.5
                               for value in values}
                    for (variable, values) in test_dataset.targets
                }
                for s1 in test_feature_states
            }
            #eprint(predictions)
            accuracy = accuracy_score_from_predictions(predictions=predictions,
                                                       dataset=test_dataset)
            print("baseline_always_0.5," + common_settings + "," +
                  str(accuracy))
            eprint(">>>>> accuracy: " + str(round(accuracy * 100, 2)) +
                   "% (baseline_always_0.5)")

            #if algorithm == "always_1.0":
            predictions = {
                tuple(s1): {
                    variable: {value: 1.0
                               for value in values}
                    for (variable, values) in test_dataset.targets
                }
                for s1 in test_feature_states
            }
            #eprint(predictions)
            accuracy = accuracy_score_from_predictions(predictions=predictions,
                                                       dataset=test_dataset)
            print("baseline_always_1.0," + common_settings + "," +
                  str(accuracy))
            eprint(">>>>> accuracy: " + str(round(accuracy * 100, 2)) +
                   "% (baseline_always_1.0)")

    # 4) Average scores
    #-------------------
    if algorithm in ["gula", "pride"]:
        accuracy = sum(results_score) / run_tests
        run_time = sum(results_time) / run_tests

        eprint(">>> AVG accuracy: " + str(round(accuracy * 100, 2)) + "%")
def evaluate_scalability_on_bn_benchmark(algorithm,
                                         benchmark,
                                         benchmark_name,
                                         semantics,
                                         run_tests,
                                         train_size=None,
                                         full_transitions=None):
    """
        Evaluate accuracy and explainability of an algorithm
        over a given benchmark with a given number/proporsion
        of training samples.

        Args:
            algorithm: Class
                Class of the algorithm to be tested
            benchmark: String
                Label of the benchmark to be tested
            semantics: String
                Semantics to be tested
            train_size: float in [0,1] or int
                Size of the training set in proportion (float in [0,1])
                or explicit (int)
    """

    # 0) Extract logic program
    #-----------------------
    P = benchmark
    #eprint(P)
    #eprint(semantics)

    # 1) Generate transitions
    #-------------------------------------

    # Boolean network benchmarks only have rules for value 1; if none match, the next value is 0
    if full_transitions is None:
        eprint("Generating benchmark transitions ...")
        full_transitions = [
            (np.array(feature_state),
             np.array(["0" if x == "?" else "1" for x in target_state]))
            for feature_state in benchmark.feature_states()
            for target_state in benchmark.predict([feature_state], semantics)[
                tuple(feature_state)]
        ]
    #eprint(full_transitions)

    # 2) Prepare scores containers
    #---------------------------
    results_time = []

    # 3) Average over several tests
    #-----------------------------
    for run in range(run_tests):

        # 3.1 Split train/test sets
        #-----------------------
        random.shuffle(full_transitions)
        train = full_transitions
        test = []

        # Complete, Proportion or explicit?
        if train_size is not None:
            if isinstance(train_size, float):  # percentage
                last_obs = max(int(train_size * len(full_transitions)), 1)
            else:  # exact number of transitions
                last_obs = train_size
            train = full_transitions[:last_obs]
            test = full_transitions[last_obs:]

        # DBG
        if run == 0:
            eprint(">>> Start Training on " + str(len(train)) + "/" +
                   str(len(full_transitions)) + " transitions (" +
                   str(round(100 * len(train) / len(full_transitions), 2)) +
                   "%)")

        eprint(">>>> run: " + str(run + 1) + "/" + str(run_tests), end='')

        dataset = StateTransitionsDataset(train, benchmark.features,
                                          benchmark.targets)

        # csv format of results
        if train_size is not None:
            expected_train_size = train_size
        else:
            expected_train_size = 1.0
        real_train_size = round(len(train) / (len(full_transitions)), 2)

        common_settings = \
        algorithm + "," +\
        semantics + "," +\
        benchmark_name + "," +\
        str(len(benchmark.features)) + "," +\
        str(len(full_transitions)) + "," +\
        "random_transitions" + "," +\
        str(expected_train_size) + "," +\
        str(real_train_size) + "," +\
        str(len(train))

        # 3.2) Learn from training set
        #-------------------------

        # Define a timeout
        signal.signal(signal.SIGALRM, handler)
        signal.alarm(TIME_OUT)
        run_time = -2
        try:
            start = time.time()

            if algorithm in ["gula", "pride", "brute-force"]:
                model = WDMVLP(features=benchmark.features,
                               targets=benchmark.targets)
            elif algorithm in ["synchronizer"]:
                model = CDMVLP(features=benchmark.features,
                               targets=benchmark.targets)
            else:
                eprint("Error, algorithm not accepted: " + algorithm)
                exit()

            model.compile(algorithm=algorithm)
            model.fit(dataset)

            signal.alarm(0)
            end = time.time()
            run_time = end - start
            results_time.append(run_time)
        except TimeoutException:
            signal.alarm(0)
            eprint(" TIME OUT")
            print(common_settings + "," + "-1")
            return len(train), -1

        #signal.alarm(0)

        print(common_settings + "," + str(run_time))
        eprint(" " + str(round(run_time, 3)) + "s")

    # 4) Average scores
    #-------------------
    avg_run_time = sum(results_time) / run_tests

    eprint(">> AVG Run time: " + str(round(avg_run_time, 3)) + "s")

    return len(train), avg_run_time
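
The timeout machinery relies on a handler and a TimeoutException defined elsewhere in the script. A minimal, self-contained version of that SIGALRM pattern (POSIX-only, so it will not run on Windows):

import signal
import time

class TimeoutException(Exception):
    pass

def handler(signum, frame):
    # raised inside the interrupted code, unwinding to the except clause
    raise TimeoutException()

TIME_OUT = 1  # seconds, illustrative

signal.signal(signal.SIGALRM, handler)
signal.alarm(TIME_OUT)
try:
    time.sleep(2)    # stand-in for model.fit(dataset)
    signal.alarm(0)  # cancel the alarm on success
except TimeoutException:
    signal.alarm(0)
    print("TIME OUT")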
def evaluate_explanation_on_bn_benchmark(algorithm,
                                         benchmark,
                                         expected_model,
                                         run_tests,
                                         train_size,
                                         mode,
                                         benchmark_name,
                                         semantics_name,
                                         full_transitions=None):
    """
        Evaluate accuracy of an algorithm
        over a given benchmark with a given number/proporsion
        of training samples.

        Args:
            algorithm: Class
                Class of the algorithm to be tested
            benchmark: DMVLP
                benchmark model to be tested
            expected_model: WDMVLP
                optimal WDMVLP that model the transitions of the benchmark.
            train_size: float in [0,1] or int
                Size of the training set in proportion (float in [0,1])
            mode: string
                "all_from_init_states": training contains all transitions from its initials states
                "random": training contains random transitions, 80%/20% train/test then train is reduced to train_size
            benchmark_name: string
                for csv output.
            benchmark_name: string
                for csv output.
        Returns:
        train_set_size: int
        test_set_size: int
        accuracy: float
            Average accuracy score.
        csv_output: String
            csv string format of all tests run statistiques.
    """
    csv_output = ""

    # 0) Extract logic program
    #-----------------------
    #eprint(benchmark.to_string())

    # 1) Generate transitions
    #-------------------------------------

    # Boolean network benchmarks only have rules for value 1; if none match, the next value is 0
    #default = [[0] for v in benchmark.targets]
    if full_transitions is None:
        eprint(">>> Generating benchmark transitions...")
        full_transitions = [
            (np.array(feature_state),
             np.array(["0" if x == "?" else "1" for x in target_state]))
            for feature_state in benchmark.feature_states()
            for target_state in benchmark.predict([feature_state], semantics_name)[
                tuple(feature_state)]
        ]
    full_transitions_grouped = {
        tuple(s1): set(
            tuple(s2_) for s1_, s2_ in full_transitions
            if tuple(s1) == tuple(s1_))
        for s1, s2 in full_transitions
    }
    #eprint("Transitions: ", full_transitions)
    #eprint("Grouped: ", full_transitions_grouped)

    #eprint(benchmark.to_string())
    #eprint(semantics.states(P))
    #eprint(full_transitions)

    # 2) Prepare scores containers
    #---------------------------
    results_time = []
    results_score = []

    # 3) Average over several tests
    #-----------------------------
    for run in range(run_tests):

        # 3.1 Split train/test sets on initial states
        #----------------------------------------------
        all_feature_states = list(full_transitions_grouped.keys())
        random.shuffle(all_feature_states)

        # Test set: all transitions from the last 20% of feature states
        test_begin = max(1, int(0.8 * len(all_feature_states)))
        test_feature_states = all_feature_states[test_begin:]

        test = []
        for s1 in test_feature_states:
            test.extend([(list(s1), list(s2))
                         for s2 in full_transitions_grouped[s1]])
        random.shuffle(test)

        # Train set
        # All transitions from the first train_size % of feature states (above 80% this overlaps the test set)
        if mode == "all_from_init_states":
            train_end = max(1, int(train_size * len(all_feature_states)))
            train_feature_states = all_feature_states[:train_end]
            train = []
            for s1 in train_feature_states:
                train.extend([(list(s1), list(s2))
                              for s2 in full_transitions_grouped[s1]])
            random.shuffle(train)
        # Random train_size % of transitions from the feature states not in test set
        elif mode == "random_transitions":
            train_feature_states = all_feature_states[:test_begin]
            train = []
            for s1 in train_feature_states:
                train.extend([(list(s1), list(s2))
                              for s2 in full_transitions_grouped[s1]])
            random.shuffle(train)
            train_end = int(max(1, train_size * len(train)))
            train = train[:train_end]
        else:
            raise ValueError("Wrong mode requested")

        #eprint("train: ", train)
        #eprint("test: ", test)
        #exit()

        # DBG
        if run == 0:
            eprint(">>> Start Training on " + str(len(train)) + "/" +
                   str(len(full_transitions)) + " transitions (" +
                   str(round(100 * len(train) / len(full_transitions), 2)) +
                   "%)")

        eprint(">>>> run: " + str(run + 1) + "/" + str(run_tests), end='')

        train_dataset = StateTransitionsDataset([(np.array(s1), np.array(s2))
                                                 for (s1, s2) in train],
                                                benchmark.features,
                                                benchmark.targets)

        # 3.2) Learn from training set
        #------------------------------------------

        if algorithm == "gula" or algorithm == "pride":
            # possibilities
            start = time.time()
            model = WDMVLP(features=benchmark.features,
                           targets=benchmark.targets)
            model.compile(algorithm=algorithm)
            model.fit(dataset=train_dataset)
            #model = algorithm.fit(train, benchmark.features, benchmark.targets, supported_only=True)
            end = time.time()

            results_time.append(round(end - start, 3))

        # 3.4) Evaluate on accuracy of domain prediction on test set
        #------------------------------------------------------------
        test_dataset = StateTransitionsDataset([(np.array(s1), np.array(s2))
                                                for s1, s2 in test],
                                               benchmark.features,
                                               benchmark.targets)

        # csv format of results
        expected_train_size = train_size
        expected_test_size = 0.2
        real_train_size = round(len(train) / (len(full_transitions)), 2)
        real_test_size = round(len(test) / (len(full_transitions)), 2)

        if mode == "random_transitions":
            expected_train_size = round(train_size * 0.8, 2)

        common_settings = \
        semantics_name + "," +\
        benchmark_name + "," +\
        str(len(benchmark.features)) + "," +\
        str(len(full_transitions)) + "," +\
        mode + "," +\
        str(expected_train_size) + "," +\
        str(expected_test_size) + "," +\
        str(real_train_size) + "," +\
        str(real_test_size) + "," +\
        str(len(train)) + "," +\
        str(len(test))

        if algorithm == "gula" or algorithm == "pride":
            score = explanation_score(model=model,
                                      expected_model=expected_model,
                                      dataset=test_dataset)
            print(algorithm + "," + common_settings + "," + str(score))
            results_score.append(score)
            eprint(" explanation score: " + str(round(score * 100, 2)) + "%")

        if algorithm == "baseline":
            eprint()

            # Perfect prediction random rule
            predictions = {tuple(s1): {variable: {value: (proba, \
            (int(proba*100), random_rule(var_id,val_id,test_dataset.features,test_dataset.targets)),\
            (100 - int(proba*100), random_rule(var_id,val_id,test_dataset.features,test_dataset.targets)) )\
            for val_id, value in enumerate(values) for proba in [int(val_id in set(test_dataset.targets[var_id][1].index(s2[var_id]) for s1_, s2 in test_dataset.data if tuple(s1_)==s1))]}\
            for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            for s1 in test_feature_states}

            score = explanation_score_from_predictions(
                predictions=predictions,
                expected_model=expected_model,
                dataset=test_dataset)
            print("baseline_perfect_predictions_random_rules," +
                  common_settings + "," + str(score))
            eprint(">>>>> explanation score: " + str(round(score * 100, 2)) +
                   "% (baseline_perfect_predictions_random_rules)")

            # Perfect prediction empty_program":
            predictions = {tuple(s1): {variable: {value: (proba, \
            (int(proba*100), None),\
            (100 - int(proba*100), None) )\
            for val_id, value in enumerate(values) for proba in [int(val_id in set(test_dataset.targets[var_id][1].index(s2[var_id]) for s1_, s2 in test_dataset.data if tuple(s1_)==s1))]}\
            for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            for s1 in test_feature_states}

            score = explanation_score_from_predictions(
                predictions=predictions,
                expected_model=expected_model,
                dataset=test_dataset)
            print("baseline_perfect_predictions_no_rules," + common_settings +
                  "," + str(score))
            eprint(">>>>> explanation score: " + str(round(score * 100, 2)) +
                   "% (baseline_perfect_predictions_no_rules)")

            # Perfect prediction most general rule
            predictions = {tuple(s1): {variable: {value: (proba, \
            (int(proba*100), Rule(var_id, val_id, len(test_dataset.features))),\
            (100 - int(proba*100), Rule(var_id, val_id, len(test_dataset.features))) )\
            for val_id, value in enumerate(values) for proba in [int(val_id in set(test_dataset.targets[var_id][1].index(s2[var_id]) for s1_, s2 in test_dataset.data if tuple(s1_)==s1))]}\
            for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            for s1 in test_feature_states}

            score = explanation_score_from_predictions(
                predictions=predictions,
                expected_model=expected_model,
                dataset=test_dataset)
            print("baseline_perfect_predictions_most_general_rules," +
                  common_settings + "," + str(score))
            eprint(">>>>> explanation score: " + str(round(score * 100, 2)) +
                   "% (baseline_perfect_predictions_most_general_rules)")

            # Perfect prediction most specific rule:
            predictions = {tuple(s1): {variable: {value: (proba, \
            (int(proba*100), most_specific_matching_rule),\
            (100 - int(proba*100), most_specific_matching_rule) )\
            for val_id, value in enumerate(values)\
            for proba in [int(val_id in set(test_dataset.targets[var_id][1].index(s2[var_id]) for s1_, s2 in test_dataset.data if tuple(s1_)==s1))] \
            for most_specific_matching_rule in [Rule(var_id,val_id,len(test_dataset.features),[(cond_var,cond_val) for cond_var,cond_val in enumerate(GULA.encode_state(s1,test_dataset.features))])]}\
            for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            for s1 in test_feature_states}

            score = explanation_score_from_predictions(
                predictions=predictions,
                expected_model=expected_model,
                dataset=test_dataset)
            print("baseline_perfect_predictions_most_specific_rules," +
                  common_settings + "," + str(score))
            eprint(">>>>> explanation score: " + str(round(score * 100, 2)) +
                   "% (baseline_perfect_predictions_most_specific_rules)")

            # Random prediction

            # random prediction and rules
            #predictions = {tuple(s1): {variable: {value: (proba, \
            #(int(proba*100), random_rule(var_id,val_id,test_dataset.features,test_dataset.targets)),\
            #(100 - int(proba*100), random_rule(var_id,val_id,test_dataset.features,test_dataset.targets)) )\
            #for val_id, value in enumerate(values) for proba in [round(random.uniform(0.0,1.0),2)]}\
            #for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            #for s1 in test_feature_states}

            #score = explanation_score_from_predictions(predictions=predictions, expected_model=expected_model, dataset=test_dataset)
            #print("baseline_random_predictions_random_rules," + common_settings + "," + str(score))
            #eprint(">>>>> explanation score: " + str(round(score * 100,2)) + "% (baseline_random_predictions_random_rules)")

            # empty_program":
            #predictions = {tuple(s1): {variable: {value: (proba, \
            #(int(proba*100), None),\
            #(100 - int(proba*100), None) )\
            #for val_id, value in enumerate(values) for proba in [round(random.uniform(0.0,1.0),2)]}\
            #for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            #for s1 in test_feature_states}

            #score = explanation_score_from_predictions(predictions=predictions, expected_model=expected_model, dataset=test_dataset)
            #print("baseline_random_predictions_no_rules," + common_settings + "," + str(score))
            #eprint(">>>>> explanation score: " + str(round(score * 100,2)) + "% (baseline_random_predictions_no_rules)")

            # random prediction and most general rule
            #predictions = {tuple(s1): {variable: {value: (proba, \
            #(int(proba*100), Rule(var_id, val_id, len(test_dataset.features))),\
            #(100 - int(proba*100), Rule(var_id, val_id, len(test_dataset.features))) )\
            #for val_id, value in enumerate(values) for proba in [round(random.uniform(0.0,1.0),2)]}\
            #for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            #for s1 in test_feature_states}

            #score = explanation_score_from_predictions(predictions=predictions, expected_model=expected_model, dataset=test_dataset)
            #print("baseline_random_predictions_most_general_rules," + common_settings + "," + str(score))
            #eprint(">>>>> explanation score: " + str(round(score * 100,2)) + "% (baseline_random_predictions_most_general_rules)")

            # random prediction and most specific rule:
            #predictions = {tuple(s1): {variable: {value: (proba, \
            #(int(proba*100), most_specific_matching_rule),\
            #(100 - int(proba*100), most_specific_matching_rule) )\
            #for val_id, value in enumerate(values)\
            #for proba in [round(random.uniform(0.0,1.0),2)] \
            #for most_specific_matching_rule in [Rule(var_id,val_id,len(test_dataset.features),[(cond_var,cond_val) for cond_var,cond_val in enumerate(GULA.encode_state(s1,test_dataset.features))])]}\
            #for var_id, (variable, values) in enumerate(test_dataset.targets)}\
            #for s1 in test_feature_states}

            #score = explanation_score_from_predictions(predictions=predictions, expected_model=expected_model, dataset=test_dataset)
            #print("baseline_random_predictions_most_specific_rules," + common_settings + "," + str(score))
            #eprint(">>>>> explanation score: " + str(round(score * 100,2)) + "% (baseline_random_predictions_most_specific_rules)")

    # 4) Average scores
    #-------------------
    if algorithm in ["gula", "pride"]:
        score = sum(results_score) / run_tests
        #run_time = sum(results_time) / run_tests
        eprint(">>> AVG explanation score: " + str(round(score * 100, 2)) +
               "%")
Beispiel #19
0
    def test_set_upper_bound(self):
        eprint(">> Continuum.set_upper_bound(self, value, included)")

        for i in range(self.__nb_unit_test):
            # Empty set
            c = Continuum()

            self.assertRaises(TypeError, c.set_upper_bound, "string", True)
            self.assertRaises(TypeError, c.set_upper_bound, "string", False)
            self.assertRaises(TypeError, c.set_upper_bound, 0.5, 10)

            value = random.uniform(self.__min_value, self.__max_value)

            # Extending the empty set with an excluded bound would still give an
            # empty set: treated as a user mistake. Changing both bounds at once
            # requires the constructor.
            self.assertRaises(ValueError, c.set_upper_bound, value, False)

            c.set_upper_bound(value, True)

            # Empty set to one value interval
            self.assertEqual(c, Continuum(value, value, True, True))

            # Regular continuum

            # below min value
            c = Continuum.random(self.__min_value, self.__max_value)
            value = random.uniform(self.__min_value, c.get_min_value())
            while value == c.get_min_value():
                value = random.uniform(self.__min_value, c.get_min_value())

            self.assertRaises(ValueError, c.set_upper_bound, value, True)
            self.assertRaises(ValueError, c.set_upper_bound, value, False)

            # on min value
            c = Continuum.random(self.__min_value, self.__max_value)
            c_old = c.copy()
            value = c.get_min_value()
            if not c.max_included() or not c.min_included():
                c.set_upper_bound(value, False)
                self.assertEqual(c,
                                 Continuum())  # continuum reduced to empty set
            else:
                c.set_upper_bound(value, True)
                self.assertEqual(c.get_max_value(), value)
                self.assertEqual(c.max_included(), True)
                self.assertEqual(c.get_max_value(), c.get_min_value())

                self.assertEqual(c.get_min_value(), c_old.get_min_value())
                self.assertEqual(c.min_included(), c_old.min_included())

            # other valid value
            c = Continuum.random(self.__min_value, self.__max_value)
            c_old = c.copy()
            value = random.uniform(c.get_min_value(), self.__max_value)
            while value == c.get_min_value():
                value = random.uniform(c.get_min_value(), self.__max_value)

            c.set_upper_bound(value, True)

            self.assertEqual(c.get_max_value(), value)
            self.assertEqual(c.max_included(), True)

            c = Continuum.random(self.__min_value, self.__max_value)
            c_old = c.copy()
            value = random.uniform(c.get_min_value(), self.__max_value)
            while value == c.get_min_value():
                value = random.uniform(c.get_min_value(), self.__max_value)

            c.set_upper_bound(value, False)

            self.assertEqual(c.get_max_value(), value)
            self.assertEqual(c.max_included(), False)
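
            # Minimal usage sketch of the setter exercised above (values are
            # illustrative, not part of the test):
            # c = Continuum(0.0, 1.0, True, True)   # [0.0,1.0]
            # c.set_upper_bound(0.5, False)         # shrinks to [0.0,0.5[
            # c.set_upper_bound(-1.0, True)         # ValueError: below lower bound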
Beispiel #20
0
    def test_fit(self):
        print(">> GULA.fit(dataset, targets_to_learn, verbose):")

        for test_id in range(self._nb_tests):

            # 0) exceptions
            #---------------

            # Dataset type
            dataset = "" # not a StateTransitionsDataset
            self.assertRaises(ValueError, GULA.fit, dataset)

            # 1) No transitions
            #--------------------
            dataset = random_StateTransitionsDataset( \
            nb_transitions=0, \
            nb_features=random.randint(1,self._nb_features), \
            nb_targets=random.randint(1,self._nb_targets), \
            max_feature_values=self._nb_feature_values, max_target_values=self._nb_target_values)

            output = GULA.fit(dataset=dataset)

            # Output must be one empty rule for each target value
            self.assertEqual(len(output), len([val for (var,vals) in dataset.targets for val in vals]))

            expected = [Rule(var_id,val_id,len(dataset.features)) for var_id, (var,vals) in enumerate(dataset.targets) for val_id, val in enumerate(vals)]
            #eprint(expected)
            #eprint(output)

            for r in expected:
                self.assertTrue(r in output)

            # 2) Random observations
            # ------------------------

            for impossibility_mode in [False,True]:
                for verbose in [0,1]:

                    # Generate transitions
                    dataset = random_StateTransitionsDataset( \
                    nb_transitions=random.randint(1, self._nb_transitions), \
                    nb_features=random.randint(1,self._nb_features), \
                    nb_targets=random.randint(1,self._nb_targets), \
                    max_feature_values=self._nb_feature_values, \
                    max_target_values=self._nb_target_values)

                    # Empty target list
                    self.assertEqual(GULA.fit(dataset=dataset, targets_to_learn=dict()), [])

                    #dataset.summary()

                    f = io.StringIO()
                    with contextlib.redirect_stderr(f):
                        output = GULA.fit(dataset=dataset, impossibility_mode=impossibility_mode, verbose=verbose)

                    # Encode data to check GULA output rules
                    data_encoded = []
                    for (s1,s2) in dataset.data:
                        s1_encoded = [domain.index(s1[var_id]) for var_id, (var,domain) in enumerate(dataset.features)]
                        s2_encoded = [domain.index(s2[var_id]) for var_id, (var,domain) in enumerate(dataset.targets)]
                        data_encoded.append((s1_encoded,s2_encoded))

                    # 2.1.1) Correctness (explain all)
                    # -----------------
                    # all transitions are fully explained, i.e. each target value is explained by at least one rule
                    if impossibility_mode == False:
                        for (s1,s2) in data_encoded:
                            for target_id in range(len(dataset.targets)):
                                expected_value = s2[target_id]
                                realizes_target = False

                                for r in output:
                                    if r.head_variable == target_id and r.head_value == expected_value and r.matches(s1):
                                        realizes_target = True
                                        #eprint(s1, " => ", target_id,"=",expected_value, " by ", r)
                                        break
                                self.assertTrue(realizes_target)

                    #eprint("-------------------")
                    #eprint(data_encoded)

                    # 2.1.2) Correctness (no spurious observation)
                    # -----------------
                    # No rule generates an unobserved target value from an observed state
                    for r in output:
                        for (s1,s2) in data_encoded:
                            if r.matches(s1):
                                observed = False
                                for (s1_,s2_) in data_encoded: # Must be in a target state after s1
                                    if s1_ == s1 and s2_[r.head_variable] == r.head_value:
                                        observed = True
                                        #eprint(r, " => ", s1_, s2_)
                                        break
                                if impossibility_mode:
                                    self.assertFalse(observed)
                                else:
                                    self.assertTrue(observed)

                    # 2.2) Completeness
                    # -----------------
                    # every possible initial state is matched by a rule of each target

                    # generate all combination of domains
                    if impossibility_mode == False:
                        encoded_domains = [set([i for i in range(len(domain))]) for (var, domain) in dataset.features]
                        init_states_encoded = set([i for i in list(itertools.product(*encoded_domains))])

                        for s in init_states_encoded:
                            for target_id in range(len(dataset.targets)):
                                realizes_target = False
                                for r in output:
                                    if r.head_variable == target_id and r.matches(s):
                                        realizes_target = True
                                        #eprint(s, " => ", target_id, " by ", r)
                                        break

                                self.assertTrue(realizes_target)

                    # 2.3) minimality
                    # -----------------
                    # All rule conditions are necessary, i.e. removing a condition makes the rule realize an unobserved target value from some observation
                    data_encoded = []
                    for (s1,s2) in dataset.data:
                        s1_encoded = [domain.index(s1[var_id]) for var_id, (var,domain) in enumerate(dataset.features)]
                        s2_encoded = [domain.index(s2[var_id]) for var_id, (var,domain) in enumerate(dataset.targets)]
                        data_encoded.append((s1_encoded,s2_encoded))

                    #dataset.summary()

                    # Group transitions by initial state
                    data_grouped_by_init_state = []
                    for (s1,s2) in data_encoded:
                        added = False
                        for (s1_,S) in data_grouped_by_init_state:
                            if s1_ == s1:
                                if s2 not in S:
                                    S.append(s2)
                                added = True
                                break

                        if not added:
                            data_grouped_by_init_state.append((s1,[s2])) # new init state

                    for r in output:
                        neg, pos = GULA.interprete(data_grouped_by_init_state, r.head_variable, r.head_value, True)
                        if impossibility_mode:
                            pos_ = pos
                            pos = neg
                            neg = pos_
                        for (var_id, val_id) in r.body:
                                r.remove_condition(var_id) # Try remove condition

                                conflict = False
                                for s in neg:
                                    if r.matches(s):
                                        conflict = True
                                        break

                                r.add_condition(var_id,val_id) # Cancel removal

                                # DEBUG:
                                if not conflict:
                                    eprint("not minimal "+r.to_string())

                                self.assertTrue(conflict)
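
        # End-to-end usage sketch of the API exercised by this test
        # (parameter values are illustrative assumptions):
        # dataset = random_StateTransitionsDataset(
        #     nb_transitions=10, nb_features=3, nb_targets=2,
        #     max_feature_values=2, max_target_values=2)
        # rules = GULA.fit(dataset=dataset)
        # for r in rules:
        #     print(r.to_string())  # minimal rules, covering each target value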
Beispiel #21
0
    def test_fit__targets_to_learn(self):
        print(">> PRIDE.fit(dataset, targets_to_learn):")

        for test_id in range(self._nb_tests):

            # 0) exceptions
            #---------------

            # Dataset type
            dataset = "" # not a StateTransitionsDataset
            self.assertRaises(ValueError, PRIDE.fit, dataset, dict())

            # targets_to_learn type
            dataset = random_StateTransitionsDataset( \
            nb_transitions=0, \
            nb_features=random.randint(1,self._nb_features), \
            nb_targets=random.randint(1,self._nb_targets), \
            max_feature_values=self._nb_feature_values, max_target_values=self._nb_target_values)

            targets_to_learn = "" # not a dict
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            targets_to_learn = {"1":["1","2"], 2:["1","2"]} # bad key
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            targets_to_learn = {"1":"1,2", "2":["1","2"]} # bad values (not list)
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            targets_to_learn = {"1":["1",2], "2":[1,"2"]} # bad values (not string)
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            targets_to_learn = {"y0":["val_0","val_2"], "lool":["val_0","val_1"]} # bad values (not in targets)
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            targets_to_learn = {"y0":["lool","val_2"]} # bad values (not domain)
            self.assertRaises(ValueError, PRIDE.fit, dataset, targets_to_learn)

            # 1) No transitions
            #--------------------
            dataset = random_StateTransitionsDataset( \
            nb_transitions=0, \
            nb_features=random.randint(1,self._nb_features), \
            nb_targets=random.randint(1,self._nb_targets), \
            max_feature_values=self._nb_feature_values, max_target_values=self._nb_target_values)

            f = io.StringIO()
            with contextlib.redirect_stderr(f):
                output = PRIDE.fit(dataset=dataset)

            # Output must be empty
            self.assertEqual(output, [])

            # 2) Random observations
            # ------------------------

            # Generate transitions
            dataset = random_StateTransitionsDataset( \
            nb_transitions=random.randint(1, self._nb_transitions), \
            nb_features=random.randint(1,self._nb_features), \
            nb_targets=random.randint(1,self._nb_targets), \
            max_feature_values=self._nb_feature_values, \
            max_target_values=self._nb_target_values)

            # Empty target list
            self.assertEqual(PRIDE.fit(dataset=dataset, targets_to_learn=dict()), [])

            #dataset.summary()

            targets_to_learn = dict()
            for a, b in dataset.targets:
                if random.choice([True,False]):
                    b_ = random.sample(b, random.randint(0,len(b)))
                    targets_to_learn[a] = b_

            #eprint(targets_to_learn)

            f = io.StringIO()
            with contextlib.redirect_stderr(f):
                output = PRIDE.fit(dataset=dataset, targets_to_learn=targets_to_learn)

            # Encode data to check PRIDE output rules
            data_encoded = []
            for (s1,s2) in dataset.data:
                s1_encoded = [domain.index(s1[var_id]) for var_id, (var,domain) in enumerate(dataset.features)]
                s2_encoded = [domain.index(s2[var_id]) for var_id, (var,domain) in enumerate(dataset.targets)]
                data_encoded.append((s1_encoded,s2_encoded))

            # 2.1.1) Correctness (explain all)
            # -----------------
            # all transitions are fully explained, i.e. each target value is explained by at least one rule
            for (s1,s2) in data_encoded:
                for target_id in range(len(dataset.targets)):
                    expected_value = s2[target_id]
                    realizes_target = False

                    # In partial mode only requested target values are expected
                    target_name = dataset.targets[target_id][0]
                    target_value_name = dataset.targets[target_id][1][expected_value]
                    if target_name not in targets_to_learn:
                        continue
                    if target_value_name not in targets_to_learn[target_name]:
                        continue

                    for r in output:
                        if r.head_variable == target_id and r.head_value == expected_value and r.matches(s1):
                            realizes_target = True
                            #eprint(s1, " => ", target_id,"=",expected_value, " by ", r)
                            break
                    self.assertTrue(realizes_target)

            #eprint("-------------------")
            #eprint(data_encoded)

            # 2.1.2) Correctness (no spurious observation)
            # -----------------
            # No rule generates an unobserved target value from an observed state
            for r in output:
                for (s1,s2) in data_encoded:
                    if r.matches(s1):
                        observed = False
                        for (s1_,s2_) in data_encoded: # Must be in a target state after s1
                            if s1_ == s1 and s2_[r.head_variable] == r.head_value:
                                observed = True
                                #eprint(r, " => ", s1_, s2_)
                                break
                        self.assertTrue(observed)

            # 2.2) minimality
            # -----------------
            # All rule conditions are necessary, i.e. removing a condition makes the rule realize an unobserved target value from some observation
            for r in output:
                for (var_id, val_id) in r.body:
                        r.remove_condition(var_id) # Try remove condition

                        conflict = False
                        for (s1,s2) in data_encoded:
                            if r.matches(s1):
                                observed = False
                                for (s1_,s2_) in data_encoded: # Must be in a target state after s1
                                    if s1_ == s1 and s2_[r.head_variable] == r.head_value:
                                        observed = True
                                        #eprint(r, " => ", s1_, s2_)
                                        break
                                if not observed:
                                    conflict = True
                                    break

                        r.add_condition(var_id,val_id) # Cancel removal

                        # DEBUG:
                        if not conflict:
                            eprint("not minimal " + r.to_string())

                        self.assertTrue(conflict)

            # 2.3) only requested targets value appear in rule head
            # ------------

            for r in output:
                target_name = dataset.targets[r.head_variable][0]
                target_value = dataset.targets[r.head_variable][1][r.head_value]

                self.assertTrue(target_name in targets_to_learn)
                self.assertTrue(target_value in targets_to_learn[target_name])
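
        # Partial-learning sketch as exercised above (variable and value
        # names are illustrative): keys of targets_to_learn are target
        # variable names, values are lists of value names; only matching
        # heads are learned.
        # targets_to_learn = {"y0": ["val_0"], "y1": ["val_0", "val_1"]}
        # rules = PRIDE.fit(dataset=dataset, targets_to_learn=targets_to_learn)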
Beispiel #22
0
    def test__eq__(self):
        eprint(">> Continuum.__eq__(self, continuum)")

        for i in range(self.__nb_unit_test):

            # emptyset
            c = Continuum()

            self.assertTrue(Continuum() == Continuum())
            self.assertTrue(c == Continuum())
            self.assertTrue(c == c)

            self.assertFalse(Continuum() != Continuum())
            self.assertFalse(c != Continuum())
            self.assertFalse(c != c)

            c = Continuum.random(self.__min_value, self.__max_value)

            self.assertTrue(c == c)
            self.assertFalse(c != c)
            self.assertEqual(c == Continuum(), c.is_empty())

            c_ = Continuum.random(self.__min_value, self.__max_value)

            if c.is_empty() and c_.is_empty():
                self.assertTrue(c == c_)
                self.assertFalse(c != c_)

            if c.is_empty() != c_.is_empty():
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            if c.get_min_value() != c_.get_min_value():
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            if c.get_max_value() != c_.get_max_value():
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            if c.min_included() != c_.min_included():
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            if c.max_included() != c_.max_included():
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            # exhaustive modifications
            if not c.is_empty():
                c_ = c.copy()
                value = random.uniform(1, 100)
                c_.set_lower_bound(c.get_min_value() - value, True)
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)
                c_.set_lower_bound(c.get_min_value() - value, False)
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

                c_ = c.copy()
                c_.set_lower_bound(c.get_min_value(), not c.min_included())
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

                c_ = c.copy()
                value = random.uniform(1, 100)
                c_.set_upper_bound(c.get_min_value() + value, True)
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)
                c_.set_upper_bound(c.get_min_value() + value, False)
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

                c_ = c.copy()
                c_.set_upper_bound(c.get_max_value(), not c.max_included())
                self.assertFalse(c == c_)
                self.assertTrue(c != c_)

            # different type
            self.assertFalse(c == "test")
            self.assertFalse(c == 0)
            self.assertFalse(c == True)
            self.assertFalse(c == [])
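
            # Restatement of the equality semantics checked above, as a
            # sketch (not the library implementation): two continuums are
            # equal iff both are empty, or all four bound attributes coincide.
            # def continuums_equal(a, b):
            #     if a.is_empty() or b.is_empty():
            #         return a.is_empty() and b.is_empty()
            #     return (a.get_min_value() == b.get_min_value()
            #             and a.get_max_value() == b.get_max_value()
            #             and a.min_included() == b.min_included()
            #             and a.max_included() == b.max_included())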
Beispiel #23
0
    def test_find_one_optimal_rule_of(self):
        print(">> PRIDE.find_one_optimal_rule_of(variable, value, nb_features, positives, negatives, feature_state_to_match, verbose=0)")

        for i in range(self._nb_tests):

            for verbose in [0,1]:
                # Generate transitions
                dataset = random_StateTransitionsDataset( \
                nb_transitions=random.randint(1, self._nb_transitions), \
                nb_features=random.randint(1,self._nb_features), \
                nb_targets=random.randint(1,self._nb_targets), \
                max_feature_values=self._nb_feature_values, \
                max_target_values=self._nb_target_values)

                #dataset.summary()

                # Encode data with StateTransitionsDataset
                data_encoded = []
                for (s1,s2) in dataset.data:
                    s1_encoded = [domain.index(s1[var_id]) for var_id, (var,domain) in enumerate(dataset.features)]
                    s2_encoded = [domain.index(s2[var_id]) for var_id, (var,domain) in enumerate(dataset.targets)]
                    data_encoded.append((s1_encoded,s2_encoded))

                values_ids = [[j for j in range(0,len(dataset.features[i][1]))] for i in range(0,len(dataset.features))]
                feature_states = [list(i) for i in list(itertools.product(*values_ids))]

                # each target value
                for var_id, (var,vals) in enumerate(dataset.targets):
                    for val_id, val in enumerate(vals):
                        #eprint("var: ", var_id)
                        #eprint("val: ", val_id)
                        pos, neg = PRIDE.interprete(data_encoded, var_id, val_id)

                        if len(pos) == 0:
                            continue

                        feature_state_to_match = random.choice([s for s in feature_states])
                        #eprint("neg: ", neg)
                        f = io.StringIO()
                        with contextlib.redirect_stderr(f):
                            output = PRIDE.find_one_optimal_rule_of(var_id, val_id, len(dataset.features), pos, neg, feature_state_to_match, verbose)
                        #eprint()
                        #eprint("rules: ", output)

                        # Check no consistent rule exists
                        if output is None:
                            for s in pos:
                                # Most specific rule that matches both the positive and the requested feature state
                                r = Rule(var_id, val_id, len(dataset.features))
                                for cond_var in range(len(dataset.features)):
                                    if feature_state_to_match[cond_var] == s[cond_var]:
                                        r.add_condition(cond_var, s[cond_var])
                                # Must match at least one negative
                                if len(neg) > 0:
                                    cover = False
                                    for s_neg in neg:
                                        if r.matches(s_neg):
                                            cover = True
                                            break

                                    if not cover:
                                        eprint(feature_state_to_match)
                                        eprint(s)
                                        eprint(r.to_string())

                                    self.assertTrue(cover)
                            continue

                        # Check head
                        self.assertEqual(output.head_variable, var_id)
                        self.assertEqual(output.head_value, val_id)

                        # Cover at least a positive
                        cover = False
                        for s in pos:
                            if output.matches(s):
                                cover = True
                                break

                        self.assertTrue(cover)

                        # No negative is covered
                        cover = False
                        for s in neg:
                            if output.matches(s):
                                cover = True
                                break
                        self.assertFalse(cover)

                        # Rule is minimal
                        for (var_id_, val_id_) in output.body:
                            output.remove_condition(var_id_) # Try remove condition

                            conflict = False
                            for s in neg:
                                if output.matches(s): # Cover a negative example
                                    conflict = True
                                    break
                            self.assertTrue(conflict)
                            output.add_condition(var_id_,val_id_) # Cancel removal
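
        # Usage sketch of the call under test, with the arguments used above:
        # pos/neg come from PRIDE.interprete; the returned rule, when not
        # None, matches the requested feature state, covers at least one
        # positive, covers no negative, and is minimal.
        # pos, neg = PRIDE.interprete(data_encoded, var_id, val_id)
        # rule = PRIDE.find_one_optimal_rule_of(
        #     var_id, val_id, len(dataset.features), pos, neg, feature_state, 0)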
Beispiel #24
0
    def test_least_revision(self):
        eprint(">> ACEDIA.least_revision(rule, state_1, state_2)")

        for i in range(self.__nb_unit_test):
            variables, domains = self.random_system()

            state_1 = self.random_state(variables, domains)
            state_2 = self.random_state(variables, domains)

            # not matching
            #--------------
            rule = self.random_rule(variables, domains)
            while rule.matches(state_1):
                rule = self.random_rule(variables, domains)

            self.assertRaises(ValueError, ACEDIA.least_revision, rule, state_1,
                              state_2)

            # matching
            #--------------

            rule = self.random_rule(variables, domains)
            while not rule.matches(state_1):
                rule = self.random_rule(variables, domains)

            head_var = rule.get_head_variable()
            target_val = state_2[rule.get_head_variable()]

            # Consistent
            head_value = Continuum()
            while not head_value.includes(target_val):
                head_value = Continuum.random(
                    domains[head_var].get_min_value(),
                    domains[head_var].get_max_value())
            rule.set_head_value(head_value)
            self.assertRaises(ValueError, ACEDIA.least_revision, rule, state_1,
                              state_2)

            # Empty set head
            rule.set_head_value(Continuum())

            LR = ACEDIA.least_revision(rule, state_1, state_2)
            lg = rule.copy()
            lg.set_head_value(Continuum(target_val, target_val, True, True))
            self.assertTrue(lg in LR)

            nb_valid_revision = 1

            for var, val in rule.get_body():
                state_value = state_1[var]

                # min rev
                ls = rule.copy()
                new_val = val.copy()
                new_val.set_lower_bound(state_value, False)
                if not new_val.is_empty():
                    ls.set_condition(var, new_val)
                    self.assertTrue(ls in LR)
                    nb_valid_revision += 1

                # max rev
                ls = rule.copy()
                new_val = val.copy()
                new_val.set_upper_bound(state_value, False)
                if not new_val.is_empty():
                    ls.set_condition(var, new_val)
                    self.assertTrue(ls in LR)
                    nb_valid_revision += 1

            self.assertEqual(len(LR), nb_valid_revision)

            #eprint(nb_valid_revision)

            # usual head
            head_value = Continuum.random(domains[head_var].get_min_value(),
                                          domains[head_var].get_max_value())
            while head_value.includes(target_val):
                head_value = Continuum.random(
                    domains[head_var].get_min_value(),
                    domains[head_var].get_max_value())
            rule.set_head_value(head_value)

            LR = ACEDIA.least_revision(rule, state_1, state_2)

            lg = rule.copy()
            head_value = lg.get_head_value()
            if target_val <= head_value.get_min_value():
                head_value.set_lower_bound(target_val, True)
            else:
                head_value.set_upper_bound(target_val, True)
            lg.set_head_value(head_value)
            self.assertTrue(lg in LR)

            nb_valid_revision = 1

            for var, val in rule.get_body():
                state_value = state_1[var]

                # min rev
                ls = rule.copy()
                new_val = val.copy()
                new_val.set_lower_bound(state_value, False)
                if not new_val.is_empty():
                    ls.set_condition(var, new_val)
                    self.assertTrue(ls in LR)
                    nb_valid_revision += 1

                # max rev
                ls = rule.copy()
                new_val = val.copy()
                new_val.set_upper_bound(state_value, False)
                if not new_val.is_empty():
                    ls.set_condition(var, new_val)
                    self.assertTrue(ls in LR)
                    nb_valid_revision += 1

            self.assertEqual(len(LR), nb_valid_revision)
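
            # Recap sketch of the cardinality property verified above: one
            # head revision plus, per body condition, up to two bound
            # revisions, skipping those that empty the condition.
            # expected = 1  # the head revision
            # for var, val in rule.get_body():
            #     for setter in ("set_lower_bound", "set_upper_bound"):
            #         v = val.copy()
            #         getattr(v, setter)(state_1[var], False)
            #         if not v.is_empty():
            #             expected += 1
            # assert expected == len(ACEDIA.least_revision(rule, state_1, state_2))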
Beispiel #25
0
    def test_fit(self):
        print(">> PRIDE.fit(dataset, targets_to_learn, verbose):")

        for i in range(self._nb_tests):

            # Dataset type
            dataset = "" # not a StateTransitionsDataset
            self.assertRaises(ValueError, PRIDE.fit, dataset)

            # 1) No transitions
            #--------------------
            dataset = random_StateTransitionsDataset( \
            nb_transitions=0, \
            nb_features=random.randint(1,self._nb_features), \
            nb_targets=random.randint(1,self._nb_targets), \
            max_feature_values=self._nb_feature_values, max_target_values=self._nb_target_values)

            f = io.StringIO()
            with contextlib.redirect_stderr(f):
                output = PRIDE.fit(dataset=dataset)

            # Output must be empty
            self.assertTrue(output == [])

            # 2) Random observations
            # ------------------------

            for impossibility_mode in [False,True]:
                for verbose in [0,1]:

                    # Generate transitions
                    dataset = random_StateTransitionsDataset( \
                    nb_transitions=random.randint(1, self._nb_transitions), \
                    nb_features=random.randint(1,self._nb_features), \
                    nb_targets=random.randint(1,self._nb_targets), \
                    max_feature_values=self._nb_feature_values, \
                    max_target_values=self._nb_target_values)

                    #dataset.summary()

                    f = io.StringIO()
                    with contextlib.redirect_stderr(f):
                        output = PRIDE.fit(dataset=dataset, impossibility_mode=impossibility_mode, verbose=verbose)

                    # Encode data to check PRIDE output rules
                    data_encoded = []
                    for (s1,s2) in dataset.data:
                        s1_encoded = [domain.index(s1[var_id]) for var_id, (var,domain) in enumerate(dataset.features)]
                        s2_encoded = [domain.index(s2[var_id]) for var_id, (var,domain) in enumerate(dataset.targets)]
                        data_encoded.append((s1_encoded,s2_encoded))

                    # 2.1.1) Correctness (explain all)
                    # -----------------
                    # all transitions are fully explained, i.e. each target value is explained by at least one rule
                    for (s1,s2) in data_encoded:
                        for target_id in range(len(dataset.targets)):
                            expected_value = s2[target_id]
                            realizes_target = False
                            for r in output:
                                if r.head_variable == target_id and r.head_value == expected_value and r.matches(s1):
                                    realizes_target = True
                                    #eprint(s1, " => ", target_id,"=",expected_value, " by ", r)
                                    break
                            self.assertTrue(realizes_target)

                    #eprint("-------------------")
                    #eprint(data_encoded)

                    # 2.1.2) Correctness (no spurious observation)
                    # -----------------
                    # No rule generates an unobserved target value from an observed state
                    for r in output:
                        for (s1,s2) in data_encoded:
                            if r.matches(s1):
                                observed = False
                                for (s1_,s2_) in data_encoded: # Must be in a target state after s1
                                    if s1_ == s1 and s2_[r.head_variable] == r.head_value:
                                        observed = True
                                        #eprint(r, " => ", s1_, s2_)
                                        break
                                if impossibility_mode:
                                    self.assertFalse(observed)
                                else:
                                    self.assertTrue(observed)

                    # 2.2) minimality
                    # -----------------
                    # All rule conditions are necessary, i.e. removing a condition makes the rule realize an unobserved target value from some observation

                    for r in output:
                        pos, neg = PRIDE.interprete(data_encoded, r.head_variable, r.head_value)
                        if impossibility_mode:
                            pos_ = pos
                            pos = neg
                            neg = pos_
                        for (var_id, val_id) in r.body:
                                r.remove_condition(var_id) # Try remove condition

                                conflict = False
                                for s in neg:
                                    if r.matches(s):
                                        conflict = True
                                        break

                                r.add_condition(var_id,val_id) # Cancel removal

                                # DEBUG:
                                if not conflict:
                                    eprint("not minimal "+r.to_string())

                                self.assertTrue(conflict)
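
        # Helper sketch of the minimality criterion used above (illustrative,
        # not part of the suite): a rule is minimal iff dropping any single
        # condition makes it cover some negative example.
        # def is_minimal(rule, neg):
        #     for (var_id, val_id) in list(rule.body):
        #         rule.remove_condition(var_id)
        #         conflict = any(rule.matches(s) for s in neg)
        #         rule.add_condition(var_id, val_id)
        #         if not conflict:
        #             return False
        #     return True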
Beispiel #26
0
    def test_fit(self):
        print(">> Synchronizer.fit(dataset, complete, verbose)")

        for test_id in range(self._nb_tests):
            for complete in [True, False]:
                for verbose in [0, 1]:

                    # 0) exceptions
                    #---------------

                    # Dataset type
                    dataset = ""  # not a StateTransitionsDataset
                    self.assertRaises(ValueError, Synchronizer.fit, dataset)

                    # 1) No transitions
                    #--------------------
                    dataset = random_StateTransitionsDataset( \
                    nb_transitions=0, \
                    nb_features=random.randint(1,self._nb_features), \
                    nb_targets=random.randint(1,self._nb_targets), \
                    max_feature_values=self._nb_feature_values, max_target_values=self._nb_target_values)

                    f = io.StringIO()
                    with contextlib.redirect_stderr(f):
                        rules, constraints = Synchronizer.fit(dataset=dataset,
                                                              complete=True,
                                                              verbose=verbose)

                    # Output must be one empty rule for each target value and the empty constraint
                    self.assertEqual(
                        len(rules),
                        len([
                            val for (var, vals) in dataset.targets
                            for val in vals
                        ]))
                    self.assertEqual(len(constraints), 1)

                    expected = [
                        Rule(var_id, val_id, len(dataset.features))
                        for var_id, (var, vals) in enumerate(dataset.targets)
                        for val_id, val in enumerate(vals)
                    ]
                    #eprint(expected)
                    #eprint(output)

                    for r in expected:
                        self.assertTrue(r in rules)

                    # 2) Random observations
                    # ------------------------

                    for heuristic_partial in [True, False]:
                        Synchronizer.HEURISTIC_PARTIAL_IMPOSSIBLE_STATE = heuristic_partial

                        # Generate transitions
                        dataset = random_StateTransitionsDataset( \
                        nb_transitions=random.randint(1, self._nb_transitions), \
                        nb_features=random.randint(1,self._nb_features), \
                        nb_targets=random.randint(1,self._nb_targets), \
                        max_feature_values=self._nb_feature_values, \
                        max_target_values=self._nb_target_values)

                        #dataset.summary()

                        f = io.StringIO()
                        with contextlib.redirect_stderr(f):
                            rules, constraints = Synchronizer.fit(
                                dataset=dataset,
                                complete=complete,
                                verbose=verbose)

                        # Encode data to check Synchronizer output rules
                        data_encoded = []
                        for (s1, s2) in dataset.data:
                            s1_encoded = [
                                domain.index(s1[var_id])
                                for var_id, (
                                    var, domain) in enumerate(dataset.features)
                            ]
                            s2_encoded = [
                                domain.index(s2[var_id])
                                for var_id, (
                                    var, domain) in enumerate(dataset.targets)
                            ]
                            data_encoded.append((s1_encoded, s2_encoded))

                        # 2.1) Correctness (explain all and no spurious observation)
                        # -----------------
                        # all transitions are fully explained, i.e. each target state is reproduced
                        for (s1, s2) in data_encoded:
                            next_states = SynchronousConstrained.next(
                                s1, dataset.targets, rules, constraints)
                            #eprint("rules: ", rules)
                            #eprint("constraints: ", constraints)
                            #eprint("s1: ", s1)
                            #eprint("s2: ", s2)
                            #eprint("next: ", next_states)
                            self.assertTrue(tuple(s2) in next_states)
                            for s3 in next_states:
                                self.assertTrue((s1, list(s3)) in data_encoded)

                        #eprint("-------------------")
                        #eprint(data_encoded)

                        # 2.2) Completeness
                        # -----------------
                        # every non-observed initial state has no next state under the synchronous constrained semantics

                        # generate all combination of domains
                        encoded_domains = [
                            set([i for i in range(len(domain))])
                            for (var, domain) in dataset.features
                        ]
                        init_states_encoded = [
                            list(i)
                            for i in list(itertools.product(*encoded_domains))
                        ]
                        observed_init_states = [
                            s1 for (s1, s2) in data_encoded
                        ]

                        for s in init_states_encoded:
                            next_states = SynchronousConstrained.next(
                                s, dataset.targets, rules, constraints)
                            if s not in observed_init_states:
                                #eprint(s)
                                if complete == True:
                                    self.assertEqual(len(next_states), 0)

                        # 2.3) minimality
                        # -----------------
                        # All rule conditions are necessary, i.e. removing a condition makes the rule realize an unobserved target value from some observation
                        for r in rules:
                            for (var_id, val_id) in r.body:
                                r.remove_condition(var_id)  # Try remove condition

                                conflict = False
                                for (s1, s2) in data_encoded:
                                    if r.matches(s1):
                                        observed = False
                                        # Must be in a target state after s1
                                        for (s1_, s2_) in data_encoded:
                                            if s1_ == s1 and s2_[r.head_variable] == r.head_value:
                                                observed = True
                                                #eprint(r, " => ", s1_, s2_)
                                                break
                                        if not observed:
                                            conflict = True
                                            break

                                r.add_condition(var_id, val_id)  # Cancel removal

                                # DEBUG:
                                if not conflict:
                                    eprint("not minimal " + r.to_string())

                                self.assertTrue(conflict)

                        # 2.4) Constraints are minimals
                        #--------------------------------
                        # All constraint conditions are necessary, i.e. removing a condition makes some observed transitions impossible
                        for r in constraints:
                            for (var_id, val_id) in r.body:
                                r.remove_condition(var_id)  # Try remove condition

                                conflict = False
                                for (s1, s2) in data_encoded:
                                    if r.matches(s1 + s2):
                                        conflict = True
                                        break

                                r.add_condition(var_id, val_id)  # Cancel removal

                                # DEBUG:
                                if not conflict:
                                    eprint("not minimal " + r.to_string())

                                self.assertTrue(conflict)

                        # 2.5) Constraints are all applicable
                        #-------------------------------------
                        for constraint in constraints:
                            applicable = True
                            for (var, val) in constraint.body:
                                # Each condition on targets must be achievable by a rule head
                                if var >= len(dataset.features):
                                    head_var = var - len(dataset.features)
                                    matching_rule = False
                                    # The conditions of the rule must be in the constraint
                                    for rule in rules:
                                        #eprint(rule)
                                        if rule.head_variable == head_var and rule.head_value == val:
                                            matching_conditions = True
                                            for (cond_var, cond_val) in rule.body:
                                                if constraint.has_condition(cond_var) and constraint.get_condition(cond_var) != cond_val:
                                                    matching_conditions = False
                                                    break
                                            if matching_conditions:
                                                matching_rule = True
                                                break
                                    if not matching_rule:
                                        applicable = False
                                        break
                            self.assertTrue(applicable)

                            # Get applicables rules
                            compatible_rules = []
                            for (var, val) in constraint.body:
                                #eprint(var)
                                # Each condition on targets must be achievable by a rule head
                                if var >= len(dataset.features):
                                    compatible_rules.append([])
                                    head_var = var - len(dataset.features)
                                    #eprint(var," ",val)
                                    # The conditions of the rule must be in the constraint
                                    for rule in rules:
                                        #eprint(rule)
                                        if rule.head_variable == head_var and rule.head_value == val:
                                            matching_conditions = True
                                            for (cond_var, cond_val) in rule.body:
                                                if constraint.has_condition(cond_var) and constraint.get_condition(cond_var) != cond_val:
                                                    matching_conditions = False
                                                    #eprint("conflict on: ", cond_var, "=", cond_val)
                                                    break
                                            if matching_conditions:
                                                compatible_rules[-1].append(rule)

                            nb_combinations = np.prod([len(l) for l in compatible_rules])
                            done = 0

                            applicable = False
                            for combination in itertools.product(
                                    *compatible_rules):
                                done += 1
                                #eprint(done,"/",nb_combinations)

                                condition_variables = set()
                                conditions = set()
                                valid_combo = True
                                for r in combination:
                                    for var, val in r.body:
                                        if var not in condition_variables:
                                            condition_variables.add(var)
                                            conditions.add((var, val))
                                        elif (var, val) not in conditions:
                                            valid_combo = False
                                            break
                                    if not valid_combo:
                                        break

                                if valid_combo:
                                    #eprint("valid combo: ", combination)
                                    applicable = True
                                    break

                            self.assertTrue(applicable)
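
        # Recap of the applicability check above, stated as a sketch: a
        # constraint is applicable iff each of its target conditions is
        # achievable by some rule whose body does not contradict the
        # constraint, and one such rule can be chosen per target condition so
        # that the chosen bodies are mutually consistent (the
        # itertools.product search above).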
    def _check_rules_and_predictions(self, dataset, expected_string_rules,
                                     expected_string_constraints):
        expected_string_rules = [
            s.strip() for s in expected_string_rules.strip().split("\n")
            if len(s) > 0
        ]
        expected_string_constraints = [
            s.strip() for s in expected_string_constraints.strip().split("\n")
            if len(s) > 0
        ]

        expected_rules = []
        for string_rule in expected_string_rules:
            expected_rules.append(
                Rule.from_string(string_rule, dataset.features,
                                 dataset.targets))

        expected_constraints = []
        for string_constraint in expected_string_constraints:
            expected_constraints.append(
                Rule.from_string(string_constraint, dataset.features,
                                 dataset.targets))

        #eprint(expected_rules)

        rules, constraints = Synchronizer.fit(dataset)

        #eprint(output)

        for r in expected_rules:
            if r not in rules:
                eprint("Missing rule: ", r)
            self.assertTrue(r in rules)

        for r in rules:
            if r not in expected_rules:
                eprint("Additional rule: ", r)
            self.assertTrue(r in expected_rules)

        for r in expected_constraints:
            if r not in constraints:
                eprint("Missing constraint: ", r)
            self.assertTrue(r in constraints)

        for r in constraints:
            if r not in expected_constraints:
                eprint("Additional constraint: ", r)
            self.assertTrue(r in expected_constraints)

        model = CDMVLP(dataset.features, dataset.targets, rules, constraints)

        #model.compile("synchronizer")
        #model.summary()

        expected = set((tuple(s1), tuple(s2)) for s1, s2 in dataset.data)

        predicted = model.predict(model.feature_states())
        predicted = set(
            (tuple(s1), tuple(s2)) for (s1, S2) in predicted for s2 in S2)

        eprint()
        done = 0
        for s1, s2 in expected:
            done += 1
            eprint("\rChecking transitions ", done, "/", len(expected), end='')
            self.assertTrue((s1, s2) in predicted)

        done = 0
        for s1, s2 in predicted:
            done += 1
            eprint("\rChecking transitions ",
                   done,
                   "/",
                   len(predicted),
                   end='')
            self.assertTrue((s1, s2) in expected)
Beispiel #28
0
    def test_intersects(self):
        eprint(">> Continuum.intersects(self, continuum)")

        for i in range(self.__nb_unit_test):
            c = Continuum.random(self.__min_value, self.__max_value)
            c_ = Continuum()

            # emptyset
            self.assertFalse(c.intersects(c_))
            self.assertFalse(c_.intersects(c))
            self.assertFalse(c_.intersects(c_))

            # strictly before
            c = Continuum.random(self.__min_value, self.__max_value)
            c_ = Continuum.random(c.get_min_value() - 100, c.get_min_value())
            self.assertFalse(c.intersects(c_))
            self.assertFalse(c_.intersects(c))

            # touching on lower bound
            c = Continuum.random(self.__min_value, self.__max_value)
            c_ = Continuum.random(c.get_min_value() - 100, c.get_min_value())
            c_.set_upper_bound(c.get_min_value(), True)

            self.assertEqual(c.intersects(c_), c.min_included())
            self.assertEqual(c_.intersects(c), c.min_included())

            c_.set_upper_bound(c.get_min_value(), False)

            self.assertFalse(c.intersects(c_))
            self.assertFalse(c_.intersects(c))

            # strictly after
            c = Continuum.random(self.__min_value, self.__max_value)
            c_ = Continuum.random(c.get_max_value(), c.get_max_value() + 100)
            self.assertFalse(c.intersects(c_))
            self.assertFalse(c_.intersects(c))

            # touching on upper bound
            c = Continuum.random(self.__min_value, self.__max_value)
            c_ = Continuum.random(c.get_max_value(), c.get_max_value() + 100)
            c_.set_lower_bound(c.get_max_value(), True)

            self.assertEqual(c.intersects(c_), c.max_included())
            self.assertEqual(c_.intersects(c), c.max_included())

            c_.set_lower_bound(c.get_max_value(), False)

            self.assertFalse(c.intersects(c_))
            self.assertFalse(c_.intersects(c))

            # same (not empty)
            c = Continuum.random(self.__min_value, self.__max_value)
            while c.is_empty():
                c = Continuum.random(self.__min_value, self.__max_value)
            self.assertTrue(c.intersects(c))

            # smaller
            c_ = Continuum.random(c.get_min_value(), c.get_max_value())
            while (c_.get_min_value() == c.get_min_value()
                   and c_.get_max_value() == c.get_max_value()):
                c_ = Continuum.random(c.get_min_value(), c.get_max_value())

            self.assertTrue(c.intersects(c_))
            self.assertTrue(c_.intersects(c))

            # bigger
            c_ = Continuum.random(c.get_min_value() - 100,
                                  c.get_max_value() + 100)
            while (c_.get_min_value() >= c.get_min_value()
                   or c_.get_max_value() <= c.get_max_value()):
                c_ = Continuum.random(c.get_min_value() - 100,
                                      c.get_max_value() + 100)

            #eprint(c.to_string())
            #eprint(c_.to_string())
            self.assertTrue(c.intersects(c_))
            self.assertTrue(c_.intersects(c))
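
The interval semantics exercised by this test can be summarized in a few hand-checkable lines. A minimal sketch, assuming only the Continuum constructor order (min, max, min_included, max_included) and the methods already used in this file; the concrete bounds are illustrative:

    # Minimal sketch; concrete bounds are illustrative.
    a = Continuum(0.0, 1.0, True, False)   # [0.0, 1.0[
    b = Continuum(1.0, 2.0, True, True)    # [1.0, 2.0]
    d = Continuum(0.0, 1.0, True, True)    # [0.0, 1.0]

    assert not a.intersects(b)             # touch at 1.0, but a excludes it
    assert d.intersects(b)                 # both include the shared point 1.0
    assert not d.intersects(Continuum())   # the empty set intersects nothing
    assert d.includes(a)                   # [0.0, 1.0] contains [0.0, 1.0[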
Example #29
    def test_fit(self):
        eprint(">> ACEDIA.fit(variables, values, transitions)")

        for i in range(self.__nb_unit_test):

            eprint("\rTest ", i + 1, "/", self.__nb_unit_test, end='')

            # Generate transitions
            epsilon = random.choice([0.1, 0.25, 0.3, 0.5])
            variables, domains = self.random_system()
            p = ContinuumLogicProgram.random(variables, domains, 1,
                                             len(variables), epsilon)

            #eprint("Progam: ", p)

            # Valid and realistic epsilon
            #epsilon = round(random.uniform(0.1,1.0), 2)
            #while epsilon == 1.0:
            #    epsilon = round(random.uniform(0.1,1.0), 2)

            t = p.generate_all_transitions(epsilon)

            #sys.exit()

            #eprint("Transitions: ")
            #for s1, s2 in t:
            #    eprint(s1, s2)
            #eprint("Transitions: ", t)

            p_ = ACEDIA.fit(p.get_variables(), p.get_domains(), t)
            rules = p_.get_rules()

            #eprint("learned: ", p_)

            # All transitions are realized
            #------------------------------

            for s1, s2 in t:
                for idx, val in enumerate(s2):
                    realized = 0
                    for r in rules:
                        if (r.get_head_variable() == idx
                                and r.get_head_value().includes(val)
                                and r.matches(s1)):
                            realized += 1
                            break
                    if realized <= 0:
                        eprint("s1: ", s1)
                        eprint("s2: ", s2)
                        eprint("learned: ", p_)
                    # At least one rule must realize the example
                    self.assertTrue(realized >= 1)

            # All rules are minimals
            #------------------------
            for r in rules:

                #eprint("r: ", r)

                # Try raising the head's lower bound (specialization)
                #-----------------------------------------------------
                r_ = r.copy()
                h = r_.get_head_value()
                if h.get_min_value() + epsilon <= h.get_max_value():
                    r_.set_head_value(
                        Continuum(h.get_min_value() + epsilon,
                                  h.get_max_value(), h.min_included(),
                                  h.max_included()))

                    #eprint("spec: ", r_)

                    conflict = False
                    for s1, s2 in t:
                        # Covers a negative example
                        if (not r_.get_head_value().includes(s2[r_.get_head_variable()])
                                and r_.matches(s1)):
                            conflict = True
                            #eprint("conflict")
                            break

                    if not conflict:
                        eprint("Non minimal rule: ", r)
                        eprint("head can be specialized into: ",
                               r_.get_head_variable(), "=",
                               r_.get_head_value())

                    self.assertTrue(conflict)

                # Try lowering the head's upper bound (specialization)
                #------------------------------------------------------
                r_ = r.copy()
                h = r_.get_head_value()
                if h.get_max_value() - epsilon >= h.get_min_value():
                    r_.set_head_value(
                        Continuum(h.get_min_value(),
                                  h.get_max_value() - epsilon,
                                  h.min_included(), h.max_included()))

                    #eprint("spec: ", r_)

                    conflict = False
                    for s1, s2 in t:
                        # Covers a negative example
                        if (not r_.get_head_value().includes(s2[r_.get_head_variable()])
                                and r_.matches(s1)):
                            conflict = True
                            #eprint("conflict")
                            break

                    if not conflict:
                        eprint("Non minimal rule: ", r)
                        eprint("head can be specialized into: ",
                               r_.get_head_variable(), "=",
                               r_.get_head_value())

                    self.assertTrue(conflict)

                # Try extending condition
                #-------------------------
                for (var, val) in r.get_body():

                    # Try extending min
                    r_ = r.copy()
                    if val.get_min_value() - epsilon >= domains[var].get_min_value():
                        val_ = val.copy()
                        if not val_.min_included():
                            val_.set_lower_bound(val_.get_min_value(), True)
                        else:
                            val_.set_lower_bound(
                                val_.get_min_value() - epsilon, False)
                        r_.set_condition(var, val_)

                        #eprint("gen: ", r_)

                        conflict = False
                        for s1, s2 in t:
                            # Covers a negative example
                            if (not r_.get_head_value().includes(s2[r_.get_head_variable()])
                                    and r_.matches(s1)):
                                conflict = True
                                #eprint("conflict")
                                break

                        if not conflict:
                            eprint("Non minimal rule: ", r)
                            eprint("condition can be generalized: ", var, "=",
                                   val_)

                        self.assertTrue(conflict)

                    # Try extending max
                    r_ = r.copy()
                    if val.get_max_value() + epsilon <= domains[var].get_max_value():
                        val_ = val.copy()
                        if not val_.max_included():
                            val_.set_upper_bound(val_.get_max_value(), True)
                        else:
                            val_.set_upper_bound(
                                val_.get_max_value() + epsilon, False)
                        r_.set_condition(var, val_)

                        #eprint("gen: ", r_)

                        conflict = False
                        for s1, s2 in t:
                            # Covers a negative example
                            if (not r_.get_head_value().includes(s2[r_.get_head_variable()])
                                    and r_.matches(s1)):
                                conflict = True
                                #eprint("conflict")
                                break

                        if not conflict:
                            eprint("Non minimal rule: ", r)
                            eprint("condition can be generalized: ", var, "=",
                                   val_)

                        self.assertTrue(conflict)
        eprint()
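
Stripped of the minimality checks, the fit-and-verify loop of this test condenses to a short sketch; every call below appears in the test itself, and only the concrete epsilon value is illustrative:

    # Condensed sketch of the flow tested above (epsilon is illustrative).
    epsilon = 0.25
    variables, domains = self.random_system()
    p = ContinuumLogicProgram.random(variables, domains, 1,
                                     len(variables), epsilon)
    transitions = p.generate_all_transitions(epsilon)
    learned = ACEDIA.fit(p.get_variables(), p.get_domains(), transitions)

    # Each observed successor value must be realized by some learned rule.
    for s1, s2 in transitions:
        for idx, val in enumerate(s2):
            assert any(r.get_head_variable() == idx
                       and r.get_head_value().includes(val)
                       and r.matches(s1)
                       for r in learned.get_rules())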
Example #30
    start_command = "nohup "
    end_command = " &"

if debug:
    redirect_error = ""

# 1: Main
#------------
if __name__ == '__main__':

    current_directory = os.getcwd()
    tmp_directory = os.path.join(current_directory, r'tmp')
    if not os.path.exists(tmp_directory):
        os.makedirs(tmp_directory)

    eprint("Starting all experiement of MLJ 2020 paper")
    if DEBUG_runs:
        eprint("Debug runs")
        os.system(
            start_command +
            "python3 -u evaluations/mlj2020/mlj2020_bn_benchmarks.py gula 0 10 10 "
            + str(run_tests) + " scalability random_transitions " + str(1) +
            " > tmp/debug_bn_benchmarks_scalability_gula.csv" +
            redirect_error + end_command)
        os.system(
            start_command +
            "python3 -u evaluations/mlj2020/mlj2020_bn_benchmarks.py brute-force 0 10 10 "
            + str(run_tests) + " scalability random_transitions " + str(1) +
            " > tmp/debug_bn_benchmarks_scalability_brute_force.csv" +
            redirect_error + end_command)
        os.system(