Example #1
    def test_should_find_more_general(self, _c1, _c2, _result, cfg):
        # given
        cl1 = Classifier(condition=Condition(_c1, cfg), cfg=cfg)
        cl2 = Classifier(condition=Condition(_c2, cfg), cfg=cfg)

        # then
        assert cl1.is_more_general(cl2) is _result
Example #2
    def test_should_create_copy(self, cfg):
        # given
        operation_time = random.randint(0, 100)
        condition = Condition(
            [self._random_ubr(), self._random_ubr()], cfg=cfg)
        action = random.randint(0, 2)
        effect = Effect([self._random_ubr(), self._random_ubr()], cfg=cfg)

        cl = Classifier(condition,
                        action,
                        effect,
                        quality=random.random(),
                        reward=random.random(),
                        intermediate_reward=random.random(),
                        cfg=cfg)
        # when
        copied_cl = Classifier.copy_from(cl, operation_time)

        # then
        assert cl is not copied_cl
        assert cl.condition == copied_cl.condition
        assert cl.condition is not copied_cl.condition
        assert cl.action == copied_cl.action
        assert cl.effect == copied_cl.effect
        assert cl.effect is not copied_cl.effect
        assert copied_cl.is_marked() is False
        assert cl.r == copied_cl.r
        assert cl.q == copied_cl.q
        assert operation_time == copied_cl.tga
        assert operation_time == copied_cl.talp
Example #3
    def test_should_get_maximum_fitness(self, cfg):
        # given
        # anticipate change - low fitness
        cl1 = Classifier(effect=Effect([UBR(0, 1), UBR(0, 3)], cfg),
                         quality=0.3,
                         reward=1,
                         cfg=cfg)

        # do not anticipate change - high fitness
        cl2 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.5,
                         reward=1,
                         cfg=cfg)

        # anticipate change - medium fitness
        cl3 = Classifier(effect=Effect([UBR(0, 14), UBR(0, 15)], cfg),
                         quality=0.4,
                         reward=1,
                         cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        mf = population.get_maximum_fitness()

        # then
        assert mf == cl3.fitness
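
Note: assuming, as in ACS2, that fitness is quality * reward and that only classifiers anticipating a change are counted (compare Example #22, where a purely non-anticipating population yields 0.0), cl2 is skipped despite its higher quality, and the maximum is cl3's fitness of 0.4 * 1 = 0.4, ahead of cl1's 0.3 * 1 = 0.3.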
Example #4
    def test_crossover(self, cfg):
        # given
        parent = Classifier(
            condition=Condition(
                [UBR(1, 1), UBR(1, 1), UBR(1, 1)], cfg),
            effect=Effect(
                [UBR(1, 1), UBR(1, 1), UBR(1, 1)], cfg),
            cfg=cfg)
        donor = Classifier(
            condition=Condition(
                [UBR(2, 2), UBR(2, 2), UBR(2, 2)], cfg),
            effect=Effect(
                [UBR(2, 2), UBR(2, 2), UBR(2, 2)], cfg),
            cfg=cfg)

        # when
        np.random.seed(12345)  # left: 3, right: 6
        crossover(parent, donor)

        # then
        assert parent.condition == \
            Condition([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
        assert parent.effect == \
            Effect([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
        assert donor.condition == \
            Condition([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
        assert donor.effect == \
            Effect([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
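
Note: assuming each UBR is flattened to its two bounds in order (see the sketch after Example #5), the three attributes form a flat list of six values, so the crossing points 3 and 6 exchange the upper bound of the second attribute and both bounds of the third. That is where the mixed UBR(1, 2) and UBR(2, 1) genes in the expected conditions and effects come from.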
Example #5
def crossover(parent: Classifier, donor: Classifier):
    assert parent.cfg.classifier_length == donor.cfg.classifier_length

    # flatten parent and donor perception strings
    p_cond_flat = _flatten(parent.condition)
    d_cond_flat = _flatten(donor.condition)
    p_effect_flat = _flatten(parent.effect)
    d_effect_flat = _flatten(donor.effect)

    # select crossing points
    left, right = sorted(np.random.choice(
        range(len(p_cond_flat) + 1), 2, replace=False))

    assert left < right

    # extract chromosomes
    p_cond_chromosome = p_cond_flat[left:right]
    d_cond_chromosome = d_cond_flat[left:right]
    p_effect_chromosome = p_effect_flat[left:right]
    d_effect_chromosome = d_effect_flat[left:right]

    # Flip everything
    p_cond_flat[left:right] = d_cond_chromosome
    d_cond_flat[left:right] = p_cond_chromosome
    p_effect_flat[left:right] = d_effect_chromosome
    d_effect_flat[left:right] = p_effect_chromosome

    # Rebuild proper perception strings
    parent.condition = Condition(_unflatten(p_cond_flat), cfg=parent.cfg)
    donor.condition = Condition(_unflatten(d_cond_flat), cfg=donor.cfg)
    parent.effect = Effect(_unflatten(p_effect_flat), cfg=parent.cfg)
    donor.effect = Effect(_unflatten(d_effect_flat), cfg=donor.cfg)
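
Note: the helpers _flatten and _unflatten are not included in this snippet. Below is a minimal sketch of what they could look like, consistent with the crossing points and expected results of Example #4 (each UBR contributes its two bounds in order); the library's actual implementation may differ.

def _flatten(ps) -> list:
    # a Condition/Effect is a sequence of UBRs; emit their bounds in order
    return [bound for ubr in ps
            for bound in (ubr.lower_bound, ubr.upper_bound)]


def _unflatten(flat: list) -> list:
    # rebuild UBRs from consecutive (lower, upper) pairs
    return [UBR(flat[i], flat[i + 1]) for i in range(0, len(flat), 2)]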
Example #6
    def test_should_anticipate_change(self, _effect, _p0, _p1, _result, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        p1 = Perception(_p1, oktypes=(float, ))

        c = Classifier(effect=_effect, cfg=cfg)

        # then
        assert c.does_anticipate_correctly(p0, p1) is _result
Example #7
    def test_should_update_intermediate_reward(self, _ir0, _ir1, _p, cfg):
        # given
        cls = Classifier(intermediate_reward=_ir0, cfg=cfg)

        # when
        cls.update_intermediate_reward(_p)

        # then
        assert cls.ir == _ir1
Example #8
    def test_should_update_reward(self, _r0, _r1, _p, cfg):
        # given
        cls = Classifier(reward=_r0, cfg=cfg)

        # when
        cls.update_reward(_p)

        # then
        assert cls.r == _r1
Example #9
    def test_should_increase_quality(self, cfg):
        # given
        cl = Classifier(cfg=cfg)
        assert cl.q == 0.5

        # when
        cl.increase_quality()

        # then
        assert cl.q == 0.525
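
Note: the jump from 0.5 to 0.525 matches the usual ACS2 quality update q <- q + beta * (1 - q) with beta = 0.05, since 0.5 + 0.05 * (1 - 0.5) = 0.525; the default learning rate is not shown in these excerpts, so treat beta = 0.05 as an assumption.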
Example #10
    def test_should_detect_identical_classifier(self, cfg):
        cl_1 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        cl_2 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        assert cl_1 == cl_2
Example #11
    def test_should_specialize(self, cfg):
        # given
        p0 = Perception([random.random()] * 2, oktypes=(float, ))
        p1 = Perception([random.random()] * 2, oktypes=(float, ))
        cl = Classifier(cfg=cfg)

        # when
        cl.specialize(p0, p1)

        # then
        for condition_ubr, effect_ubr in zip(cl.condition, cl.effect):
            assert condition_ubr.lower_bound == condition_ubr.upper_bound
            assert effect_ubr.lower_bound == effect_ubr.upper_bound
Example #12
    def test_should_generalize_randomly_unchanging_condition_attribute(
            self, _condition, _effect, _soa_before, _soa_after, cfg):

        # given
        condition = Condition(_condition, cfg)
        effect = Effect(_effect, cfg)
        cl = Classifier(condition=condition, effect=effect, cfg=cfg)
        assert len(cl.specified_unchanging_attributes) == _soa_before

        # when
        cl.generalize_unchanging_condition_attribute()

        # then
        assert len(cl.specified_unchanging_attributes) == _soa_after
Example #13
    def test_should_initialize_classifier_list(self, cfg):
        # given
        cl1 = Classifier(cfg=cfg)
        cl2 = Classifier(cfg=cfg)
        cl3 = Classifier(cfg=cfg)

        # when
        cll = ClassifierList(*[cl1, cl2])

        # then
        assert len(cll) == 2
        assert cl1 in cll
        assert cl2 in cll
        assert cl3 not in cll
Example #14
    def test_should_specialize(self, cfg):
        # given
        p0 = Perception(np.random.random(2), oktypes=(float, ))
        p1 = Perception(np.random.random(2), oktypes=(float, ))
        cl = Classifier(cfg=cfg)

        # when
        cl.specialize(p0, p1)

        # then
        enc_p0 = list(map(cfg.encoder.encode, p0))
        enc_p1 = list(map(cfg.encoder.encode, p1))

        for i, (c_ubr, e_ubr) in enumerate(zip(cl.condition, cl.effect)):
            assert c_ubr.lower_bound <= enc_p0[i] <= c_ubr.upper_bound
            assert e_ubr.lower_bound <= enc_p1[i] <= e_ubr.upper_bound
Example #15
    def test_should_expand(self, cfg):
        # given
        cl1 = Classifier(numerosity=1, cfg=cfg)
        cl2 = Classifier(numerosity=2, cfg=cfg)
        cl3 = Classifier(numerosity=3, cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        expanded = population.expand()

        # then
        assert len(expanded) == 6
        assert cl1 in expanded
        assert cl2 in expanded
        assert cl3 in expanded
Example #16
    def test_aggressive_mutation(self, _cond, _effect, cfg):
        # given
        condition = Condition(_cond, cfg)
        effect = Effect(_effect, cfg)

        cfg.encoder = RealValueEncoder(16)  # more precise encoder
        cfg.mutation_noise = 0.5  # strong noise mutation range
        mu = 1.0  # mutate every attribute

        cl = Classifier(condition=deepcopy(condition),
                        effect=deepcopy(effect),
                        cfg=cfg)

        # when
        mutate(cl, mu)

        # then
        range_min, range_max = cfg.encoder.range
        for idx, (c, e) in enumerate(zip(cl.condition, cl.effect)):
            # assert that we have a new locus
            if condition[idx] != cfg.classifier_wildcard:
                assert condition[idx] != c

            if effect[idx] != cfg.classifier_wildcard:
                assert effect[idx] != e

            # assert that condition values are within the encoder range
            assert c.lower_bound >= range_min
            assert c.upper_bound <= range_max

            # assert that effect values are within the encoder range
            assert e.lower_bound >= range_min
            assert e.upper_bound <= range_max
Example #17
    def test_should_form_action_set(self, cfg):
        # given
        cl1 = Classifier(action=0, cfg=cfg)
        cl2 = Classifier(action=0, cfg=cfg)
        cl3 = Classifier(action=1, cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        action_set = population.form_action_set(0)

        # then
        assert len(action_set) == 2
        assert cl1 in action_set
        assert cl2 in action_set
        assert cl3 not in action_set
Example #18
    def test_should_distinguish_classifier_as_subsumer(self, _exp, _q,
                                                       _is_subsumer, cfg):
        # given
        cl = Classifier(experience=_exp, quality=_q, cfg=cfg)

        # when & then
        # a general classifier should not be considered as a subsumer
        assert cl.is_subsumer is _is_subsumer
Example #19
    def test_should_handle_unexpected_case_2(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        p1 = Perception([.5, .5], oktypes=(float, ))
        # Effect is not specializable
        effect = Effect([UBR(0, 15), UBR(2, 4)], cfg=cfg)
        quality = random.random()
        time = random.randint(0, 1000)
        cl = Classifier(effect=effect, quality=quality, cfg=cfg)

        # when
        child = unexpected_case(cl, p0, p1, time)

        # then
        assert cl.q < quality
        assert cl.is_marked() is True
        # We cannot generate a child from a non-specializable parent
        assert child is None
Example #20
    def test_should_find_similar(self):
        # given
        cfg = Configuration(3, 2, encoder=RealValueEncoder(2))
        cl1 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg
        )
        cl2 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg
        )

        # then
        assert cl1 == cl2
Example #21
    def test_should_not_distinguish_marked_classifier_as_subsumer(self, cfg):
        # given
        # Check whether the fact that the classifier is marked blocks it
        # from being considered as a subsumer
        cl = Classifier(experience=30, quality=0.92, cfg=cfg)
        cl.mark[0].add(4)

        # when & then
        assert cl.is_subsumer is False
Example #22
    def test_should_return_zero_max_fitness(self, cfg):
        # given classifiers that do not anticipate change
        cl1 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.5,
                         reward=1,
                         cfg=cfg)

        cl2 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.7,
                         reward=1,
                         cfg=cfg)

        population = ClassifierList(*[cl1, cl2])

        # when
        mf = population.get_maximum_fitness()

        # then
        assert mf == 0.0
Example #23
    def test_regions_averaging(self, cfg):
        # given
        cl1 = Classifier(condition=Condition([UBR(2, 3), UBR(4, 5)], cfg),
                         cfg=cfg)
        cl2 = Classifier(condition=Condition([UBR(0, 3), UBR(4, 9)], cfg),
                         cfg=cfg)
        cl3 = Classifier(condition=Condition([UBR(1, 3), UBR(4, 15)], cfg),
                         cfg=cfg)
        cl4 = Classifier(condition=Condition([UBR(0, 13), UBR(0, 15)], cfg),
                         cfg=cfg)
        population = ClassifierList(*[cl1, cl2, cl3, cl4])

        # when
        result = count_averaged_regions(population)

        # then
        assert type(result) is dict
        assert result == {1: 0.5, 2: 0.25, 3: 0.125, 4: 0.125}
Example #24
    def test_should_count_specified_unchanging_attributes(
            self, _condition, _effect, _sua, cfg):

        # given
        cl = Classifier(condition=Condition(_condition, cfg),
                        effect=Effect(_effect, cfg),
                        cfg=cfg)

        # then
        assert len(cl.specified_unchanging_attributes) == _sua
Example #25
def unexpected_case(cl: Classifier,
                    p0: Perception,
                    p1: Perception,
                    time: int) -> Optional[Classifier]:
    """
    The classifier does not anticipate the resulting state correctly.
    In this case the classifier is marked by the `previous_perception`
    and its quality is decreased.

    If it is possible to specialize an offspring (change pass-through
    symbols to correct values), then a new classifier is returned.

    Parameters
    ----------
    cl: Classifier
        Classifier object
    p0: Perception
        previous situation
    p1: Perception
        current situation
    time: int
        current epoch

    Returns
    -------
    Optional[Classifier]
        A specialized child classifier if one could be created, None otherwise
    """
    cl.decrease_quality()
    cl.set_mark(p0)

    # TODO: think
    if not cl.effect.is_specializable(p0, p1):
        return None

    child = cl.copy_from(cl, time)
    child.specialize(p0, p1, leave_specialized=True)

    if child.q < .5:
        child.q = .5

    return child
Example #26
    def test_should_apply_reinforcement_learning(self, cfg):
        # given
        cl = Classifier(reward=34.29, intermediate_reward=11.29, cfg=cfg)
        population = ClassifierList(*[cl])

        # when
        population.apply_reinforcement_learning(0, 28.79)

        # then
        assert abs(33.94 - population[0].r) < 0.1
        assert abs(10.74 - population[0].ir) < 0.1
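
Note: the expected values are consistent with the standard ACS2 reinforcement learning updates r <- r + beta * (rho + gamma * max_fitness - r) and ir <- ir + beta * (rho - ir). A hedged re-computation of the numbers above, assuming beta = 0.05 and gamma = 0.95 (the defaults are not shown in these excerpts):

beta, gamma = 0.05, 0.95
rho, max_fitness = 0, 28.79
r = 34.29 + beta * (rho + gamma * max_fitness - 34.29)   # ~33.94
ir = 11.29 + beta * (rho - 11.29)                        # ~10.73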
Example #27
    def test_should_initialize_without_arguments(self, cfg):
        # when
        c = Classifier(cfg=cfg)

        # then
        assert c.condition == Condition.generic(cfg=cfg)
        assert c.action is None
        assert c.effect == Effect.pass_through(cfg=cfg)
        assert c.exp == 1
        assert c.talp is None
        assert c.tav == 0.0
Example #28
    def test_should_detect_subsumption(self, _e1, _e2, _exp1, _marked,
                                       _reliable, _more_general,
                                       _condition_matching, _result, mocker,
                                       cfg):
        # given
        cl1 = Classifier(effect=Effect(_e1, cfg), experience=_exp1, cfg=cfg)
        cl2 = Classifier(effect=Effect(_e2, cfg), cfg=cfg)

        # when
        mocker.patch.object(cl1, "is_reliable")
        mocker.patch.object(cl1, "is_marked")
        mocker.patch.object(cl1, "is_more_general")
        mocker.patch.object(cl1.condition, "does_match_condition")

        cl1.is_reliable.return_value = _reliable
        cl1.is_marked.return_value = _marked
        cl1.is_more_general.return_value = _more_general
        cl1.condition.does_match_condition.return_value = _condition_matching

        # then
        assert cl1.does_subsume(cl2) == _result
Example #29
    def test_should_form_match_set(self, cfg):
        # given
        # 4-bit encoding: 0.2 => 3, 0.6 => 9
        observation = Perception([0.2, 0.6], oktypes=(float, ))

        cl1 = Classifier(condition=Condition([UBR(2, 5), UBR(8, 11)], cfg=cfg),
                         cfg=cfg)
        cl2 = Classifier(condition=Condition([UBR(5, 7), UBR(5, 12)], cfg=cfg),
                         cfg=cfg)
        cl3 = Classifier(cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        match_set = population.form_match_set(observation)

        # then
        assert len(match_set) == 2
        assert cl1 in match_set
        assert cl2 not in match_set
        assert cl3 in match_set
Example #30
def cover(p0: Perception, action: int, p1: Perception, time: int,
          cfg: Configuration) -> Classifier:
    """
    Covering - creates a classifier that anticipates a change correctly.
    The reward of the new classifier is set to 0 to prevent *reward bubbles*
    in the environmental model.

    Parameters
    ----------
    p0: Perception
        previous perception
    action: int
        chosen action
    p1: Perception
        current perception
    time: int
        current epoch
    cfg: Configuration
        algorithm configuration class

    Returns
    -------
    Classifier
        new classifier
    """
    # In the paper it is advised to set the experience and reward of a newly
    # generated classifier to 0. However, in the original code these values
    # are initialized with the defaults 1 and 0.5 respectively.
    new_cl = Classifier(action=action, experience=0, reward=0, cfg=cfg)
    new_cl.tga = time
    new_cl.talp = time

    new_cl.specialize(p0, p1)

    return new_cl
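
A hypothetical usage sketch of cover(), reusing the constructor conventions from the tests above (the positional Configuration call from Example #20 and the Perception style from Example #29); imports are omitted because the excerpts do not show the module paths, and the attribute checks assume the same defaults as in the tests.

cfg = Configuration(2, 2, encoder=RealValueEncoder(4))
p0 = Perception([0.2, 0.6], oktypes=(float, ))
p1 = Perception([0.3, 0.5], oktypes=(float, ))

new_cl = cover(p0, action=1, p1=p1, time=5, cfg=cfg)

assert new_cl.action == 1
assert new_cl.r == 0                   # reward starts at 0, as described above
assert new_cl.tga == new_cl.talp == 5  # both timestamps set to the covering time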