Example #1
    def test_should_compare_without_ordering(self):
        # given
        o1 = UBR(0, 2)
        o2 = UBR(2, 0)

        # then
        assert o1 == o2
        assert (o1 != o2) is False
Example #2
    def get_differences(self, p0: Perception) -> Condition:
        """
        Difference determination is run when the classifier anticipated the
        change correctly.

        If the classifier is marked, we check whether differences can be
        proposed and applied to the new condition part (specialization).

        There can be two types of differences:
        1) unique - one or more attributes in the mark do not contain the
        given perception attribute,
        2) fuzzy - there is no unique difference, but one or more attributes
        in the mark specify more than one value for a perception attribute.

        If only unique differences are present, one of them is specified at
        random. If there are fuzzy differences, all of them are specified.

        Parameters
        ----------
        p0: Perception
            perception used for computing the differences

        Returns
        -------
        Condition
            differences between mark and perception that can form
            a new classifier

        """
        diff = Condition.generic(self.cfg)

        if self.is_marked():
            enc_p0 = list(map(self.cfg.encoder.encode, p0))

            # Unique and fuzzy difference counts
            nr1, nr2 = 0, 0

            # Count difference types
            for idx, item in enumerate(self):
                if len(item) > 0 and enc_p0[idx] not in item:
                    nr1 += 1
                elif len(item) > 1:
                    nr2 += 1

            if nr1 > 0:
                possible_idx = [
                    pi for pi, p in enumerate(enc_p0)
                    if p not in self[pi] and len(self[pi]) > 0
                ]
                rand_idx = random.choice(possible_idx)
                p = enc_p0[rand_idx]
                diff[rand_idx] = UBR(p, p)
            elif nr2 > 0:
                for pi, p in enumerate(enc_p0):
                    if len(self[pi]) > 1:
                        diff[pi] = UBR(p, p)

        return diff
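
To make the unique/fuzzy distinction concrete, here is a standalone sketch
(plain lists of sets rather than the library's Mark API) applying the same
counting rule as the loop above:

from typing import List, Set, Tuple

def classify_differences(mark: List[Set[int]],
                         enc_p0: List[int]) -> Tuple[List[int], List[int]]:
    unique, fuzzy = [], []
    for idx, marked in enumerate(mark):
        if len(marked) > 0 and enc_p0[idx] not in marked:
            unique.append(idx)   # mark never saw this perception value
        elif len(marked) > 1:
            fuzzy.append(idx)    # ambiguous mark containing the value
    return unique, fuzzy

# Attribute 0 is a unique difference; attribute 1 is a fuzzy one.
print(classify_differences([{2}, {6, 7}], [8, 7]))  # ([0], [1])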
Example #3
def _mutate_attribute(ubr: UBR, encoder: RealValueEncoder, noise_max: float,
                      mu: float):

    # With probability mu, perturb each bound independently: decode it to
    # phenotype space, add uniform noise from U(-noise_max, noise_max),
    # and re-encode.
    if np.random.random() < mu:
        noise = np.random.uniform(-noise_max, noise_max)
        x1p = encoder.decode(ubr.x1)
        ubr.x1 = encoder.encode(x1p, noise)

    if np.random.random() < mu:
        noise = np.random.uniform(-noise_max, noise_max)
        x2p = encoder.decode(ubr.x2)
        ubr.x2 = encoder.encode(x2p, noise)
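
The mutation relies only on encode/decode round trips, so a minimal stand-in
encoder suffices to illustrate one perturbation. The class below is an
assumption (not the library's RealValueEncoder), though its rounding matches
the "0.2 => 3, 0.6 => 9" comments appearing elsewhere in the tests:

import numpy as np

class SketchEncoder:
    """Assumed stand-in mapping [0, 1] onto {0, ..., 2**bits - 1}."""
    def __init__(self, bits: int):
        self.max_val = 2 ** bits - 1

    def encode(self, value: float, noise: float = 0.0) -> int:
        clipped = min(max(value + noise, 0.0), 1.0)
        return int(round(clipped * self.max_val))

    def decode(self, encoded: int) -> float:
        return encoded / self.max_val

encoder = SketchEncoder(4)
x1 = encoder.encode(0.6)                                # 9
noise = np.random.uniform(-0.1, 0.1)                    # noise_max = 0.1
x1_mutated = encoder.encode(encoder.decode(x1), noise)  # 9 shifted by ~1
print(x1, x1_mutated)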
Example #4
def _widen_attribute(ubr: UBR, encoder: RealValueEncoder, noise_max: float,
                     mu: float):

    # TODO: we should modify both condition and effect parts with the
    # same noise.
    if np.random.random() < mu:
        noise = np.random.uniform(-noise_max, noise_max)
        x1p = encoder.decode(ubr.x1)
        ubr.x1 = encoder.encode(x1p, noise)

    if np.random.random() < mu:
        noise = np.random.uniform(-noise_max, noise_max)
        x2p = encoder.decode(ubr.x2)
        ubr.x2 = encoder.encode(x2p, noise)
Example #5
    def test_should_handle_expected_case_3(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        q = 0.4
        cl = Classifier(quality=q, cfg=cfg)
        cl.mark[0].add(2)
        time = random.randint(0, 1000)

        # when
        child = expected_case(cl, p0, time)

        # then
        assert child is not None
        assert child.condition == Condition([UBR(8, 8), UBR(0, 15)], cfg)
        assert child.q == 0.5
Example #6
    def test_should_handle_unexpected_case_2(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        p1 = Perception([.5, .5], oktypes=(float, ))
        # Effect is not specializable
        effect = Effect([UBR(0, 15), UBR(2, 4)], cfg=cfg)
        quality = random.random()
        time = random.randint(0, 1000)
        cl = Classifier(effect=effect, quality=quality, cfg=cfg)

        # when
        child = unexpected_case(cl, p0, p1, time)

        # then
        assert cl.q < quality
        assert cl.is_marked() is True
        # We cannot generate child from non specializable parent
        assert child is None
Example #7
    def test_should_detect_if_marked(self, cfg):
        # given
        mark = Mark(cfg)

        # when
        mark[0].add(UBR(2, 5))

        # then
        assert mark.is_marked() is True
Example #8
    def test_should_return_zero_max_fitness(self, cfg):
        # given classifiers that do not anticipate change
        cl1 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.5,
                         reward=1,
                         cfg=cfg)

        cl2 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.7,
                         reward=1,
                         cfg=cfg)

        population = ClassifierList(*[cl1, cl2])

        # when
        mf = population.get_maximum_fitness()

        # then
        assert mf == 0.0
Example #9
    def test_should_detect_identical_classifier(self, cfg):
        cl_1 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        cl_2 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        assert cl_1 == cl_2
Example #10
class TestGeneticAlgorithm:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder_bits=4)

    @pytest.mark.parametrize("_cond", [
        ([UBR(2, 5), UBR(5, 10)]),
        ([UBR(5, 2), UBR(10, 5)]),
        ([UBR(2, 2), UBR(5, 5)]),
        ([UBR(0, 15), UBR(0, 15)]),
    ])
    def test_aggressive_mutation(self, _cond, cfg):
        # given
        condition = Condition(_cond, cfg)
        cl = Classifier(condition=condition, cfg=cfg)
        mu = 1.0

        # when
        mutate(cl, cfg.encoder.range, mu)

        # then
        for idx, ubr in enumerate(cl.condition):
            assert ubr.lower_bound <= condition[idx].lower_bound
            assert ubr.upper_bound >= condition[idx].upper_bound

    @pytest.mark.parametrize("_cond", [
        ([UBR(2, 5), UBR(5, 10)]),
    ])
    def test_disabled_mutation(self, _cond, cfg):
        # given
        condition = Condition(_cond, cfg)
        cl = Classifier(condition=condition, cfg=cfg)
        mu = 0.0

        # when
        mutate(cl, cfg.encoder.range, mu)

        # then
        for idx, ubr in enumerate(cl.condition):
            assert ubr.lower_bound == condition[idx].lower_bound
            assert ubr.upper_bound == condition[idx].upper_bound
Example #11
    def specialize(self,
                   p0: Perception,
                   p1: Perception,
                   leave_specialized: bool = False) -> None:
        """
        Specializes the effect part where necessary to correctly anticipate
        the change from p0 to p1 and, in place, specializes the condition
        attributes that must be specified accordingly. The specialized
        attributes are set to the necessary values.

        For the real-valued representation, random noise may be added to
        both `p0` and `p1` (see the `cover_noise` parameter in
        `Configuration`).

        Parameters
        ----------
        p0: Perception
            previous raw perception obtained from environment
        p1: Perception
            current raw perception obtained from environment
        leave_specialized: bool
            if True, specialize only wildcard effect attributes and leave
            already specialized ones unchanged; False by default
        """
        p0_enc = list(map(self.cfg.encoder.encode, p0))
        p1_enc = list(map(self.cfg.encoder.encode, p1))

        for idx, _ in enumerate(p1):
            if leave_specialized:
                if self.effect[idx] != self.cfg.classifier_wildcard:
                    # If we have a specialized attribute don't change it.
                    continue

            if p0_enc[idx] != p1_enc[idx]:
                noise = np.random.uniform(0, self.cfg.cover_noise)
                self.condition[idx] = UBR(
                    self.cfg.encoder.encode(p0[idx], -noise),
                    self.cfg.encoder.encode(p0[idx], noise))
                self.effect[idx] = UBR(
                    self.cfg.encoder.encode(p1[idx], -noise),
                    self.cfg.encoder.encode(p1[idx], noise))
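
A small worked example of this noisy covering step, assuming a 4-bit encoder
that maps [0, 1] to {0, ..., 15} by rounding (consistent with the
"0.2 => 3, 0.6 => 9" comments in the tests, but still an assumption):

def encode(value: float, noise: float = 0.0) -> int:
    # Assumed encoder: shift by noise, clip to [0, 1], scale, round.
    return int(round(min(max(value + noise, 0.0), 1.0) * 15))

p0_attr = 0.5
noise = 0.05  # drawn from U(0, cover_noise) in the method above
condition_bounds = (encode(p0_attr, -noise), encode(p0_attr, noise))
print(condition_bounds)  # (7, 8): a narrow interval around the perception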
Example #12
    def test_should_form_match_set(self, cfg):
        # given
        # 4bit encoding 0.2 => 3, 0.6 => 9
        observation = Perception([0.2, 0.6], oktypes=(float, ))

        cl1 = Classifier(condition=Condition([UBR(2, 5), UBR(8, 11)], cfg=cfg),
                         cfg=cfg)
        cl2 = Classifier(condition=Condition([UBR(5, 7), UBR(5, 12)], cfg=cfg),
                         cfg=cfg)
        cl3 = Classifier(cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        match_set = population.form_match_set(observation)

        # then
        assert len(match_set) == 2
        assert cl1 in match_set
        assert cl2 not in match_set
        assert cl3 in match_set
Example #13
    def test_should_get_maximum_fitness(self, cfg):
        # given
        # anticipate change - low fitness
        cl1 = Classifier(effect=Effect([UBR(0, 1), UBR(0, 3)], cfg),
                         quality=0.3,
                         reward=1,
                         cfg=cfg)

        # do not anticipate change - high fitness
        cl2 = Classifier(effect=Effect([UBR(0, 15), UBR(0, 15)], cfg),
                         quality=0.5,
                         reward=1,
                         cfg=cfg)

        # anticipate change - medium fitness
        cl3 = Classifier(effect=Effect([UBR(0, 14), UBR(0, 15)], cfg),
                         quality=0.4,
                         reward=1,
                         cfg=cfg)

        population = ClassifierList(*[cl1, cl2, cl3])

        # when
        mf = population.get_maximum_fitness()

        # then
        assert mf == cl3.fitness
Example #14
    def specialize(self,
                   p0: Perception,
                   p1: Perception,
                   leave_specialized=False) -> None:
        """
        Specializes the effect part where necessary to correctly anticipate
        the change from p0 to p1 and, in place, specializes the condition
        attributes that must be specified accordingly. The specialized
        attributes are set to the necessary values.

        For the real-valued representation, a narrow, fixed-point UBR is
        created for the condition and effect parts using the encoded
        perceptions.

        Parameters
        ----------
        p0: Perception
            previous raw perception obtained from environment
        p1: Perception
            current raw perception obtained from environment
        leave_specialized: bool
            if True, specialize only wildcard effect attributes and leave
            already specialized ones unchanged; False by default
        """
        p0_enc = list(map(self.cfg.encoder.encode, p0))
        p1_enc = list(map(self.cfg.encoder.encode, p1))

        for idx, _ in enumerate(p1_enc):
            if leave_specialized:
                if self.effect[idx] != self.cfg.classifier_wildcard:
                    # If we have a specialized attribute don't change it.
                    continue

            if p0_enc[idx] != p1_enc[idx]:
                self.effect[idx] = UBR(p1_enc[idx], p1_enc[idx])
                self.condition[idx] = UBR(p0_enc[idx], p0_enc[idx])
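
In this noise-free variant both bounds collapse to the encoded perception
value, yielding fixed-point intervals. Under the same assumed 4-bit rounding
encoder used in the tests' comments:

def encode(value: float) -> int:
    return int(round(value * 15))  # assumed 4-bit encoding

p0_enc, p1_enc = encode(0.2), encode(0.6)  # 3 and 9
print((p0_enc, p0_enc))  # condition attribute becomes UBR(3, 3)
print((p1_enc, p1_enc))  # effect attribute becomes UBR(9, 9)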
Example #15
def _unflatten(flatten: List[int]) -> List[UBR]:
    """
    Unflattens a list by creating UBR pairs from consecutive list items

    Parameters
    ----------
    flatten: List[int]
        Flat list of encoded perceptions

    Returns
    -------
    List[UBR]
        List of created UBRs
    """
    # Make sure there is no unpaired leftover element
    assert len(flatten) % 2 == 0
    return [UBR(flatten[i], flatten[i + 1]) for i in range(0, len(flatten), 2)]
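
A quick usage sketch with a minimal UBR stand-in (a dataclass; the real UBR
also exposes lower_bound/upper_bound, which is omitted here):

from dataclasses import dataclass
from typing import List

@dataclass
class UBR:
    x1: int
    x2: int

def _unflatten(flatten: List[int]) -> List[UBR]:
    assert len(flatten) % 2 == 0
    return [UBR(flatten[i], flatten[i + 1]) for i in range(0, len(flatten), 2)]

print(_unflatten([1, 1, 1, 2, 2, 2]))
# [UBR(x1=1, x2=1), UBR(x1=1, x2=2), UBR(x1=2, x2=2)]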
Example #16
    def test_regions_averaging(self, cfg):
        # given
        cl1 = Classifier(condition=Condition([UBR(2, 3), UBR(4, 5)], cfg),
                         cfg=cfg)
        cl2 = Classifier(condition=Condition([UBR(0, 3), UBR(4, 9)], cfg),
                         cfg=cfg)
        cl3 = Classifier(condition=Condition([UBR(1, 3), UBR(4, 15)], cfg),
                         cfg=cfg)
        cl4 = Classifier(condition=Condition([UBR(0, 13), UBR(0, 15)], cfg),
                         cfg=cfg)
        population = ClassifierList(*[cl1, cl2, cl3, cl4])

        # when
        result = count_averaged_regions(population)

        # then
        assert type(result) is dict
        assert result == {1: 0.5, 2: 0.25, 3: 0.125, 4: 0.125}
Example #17
class TestEffect:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder_bits=4)

    def test_should_create_pass_through_effect(self, cfg):
        # when
        effect = Effect.pass_through(cfg)

        # then
        assert len(effect) == cfg.classifier_length
        for allele in effect:
            assert allele == cfg.classifier_wildcard

    @pytest.mark.parametrize(
        "_p0, _p1, _effect, is_specializable",
        [
            # Effect is all pass-through. Can be specialized.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(0, 15)], True),
            # The first pass-through effect attribute gets skipped; the second
            # gets examined. The p1 value is not in its range, so it's invalid.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(2, 4)], False),
            # Here the range is proper, but no change is anticipated,
            # so this attribute should remain a pass-through symbol.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(2, 12)], False),
            # Here the second perception attribute changes: 0.8 => 12
            ([0.5, 0.5], [0.5, 0.8], [UBR(0, 15), UBR(10, 14)], True)
        ])
    def test_should_specialize(self, _p0, _p1, _effect, is_specializable, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        p1 = Perception(_p1, oktypes=(float, ))
        effect = Effect(_effect, cfg=cfg)

        # then
        assert effect.is_specializable(p0, p1) is is_specializable
Example #18
    def __init__(self,
                 classifier_length: int,
                 number_of_possible_actions: int,
                 encoder_bits: int,
                 beta=0.05,
                 theta_i=0.1,
                 theta_r=0.9,
                 u_max=100000,
                 theta_exp=20,) -> None:

        self.oktypes = (UBR,)
        self.encoder = RealValueEncoder(encoder_bits)

        self.classifier_length = classifier_length
        self.number_of_possible_actions = number_of_possible_actions
        self.classifier_wildcard = UBR(*self.encoder.range)

        self.beta = beta
        self.theta_i = theta_i
        self.theta_r = theta_r
        self.u_max = u_max

        self.theta_exp = theta_exp
Example #19
def _mutate_attribute(ubr: UBR, bounds: Tuple[int, int], mu: float) -> UBR:
    rmin, rmax = bounds[0], bounds[1]

    # Calculate global spread
    spread = _calculate_spread(rmax)

    lb, ub = ubr.lower_bound, ubr.upper_bound
    nlb, nub = lb, ub

    # Generate new lower bound
    if random.random() < mu:
        while True:
            nlb = _draw(lb, spread)
            if rmin <= nlb <= lb:
                break

    # Generate new upper bound
    if random.random() < mu:
        while True:
            nub = _draw(ub, spread)
            if ub <= nub <= rmax:
                break

    return UBR(nlb, nub)
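
The helpers _calculate_spread and _draw are not shown in this listing, so the
sketch below substitutes assumed versions (spread as a tenth of the range, a
uniform integer draw around the bound) purely to illustrate the
rejection-sampling loop that only lets a bound move outward:

import random

def _calculate_spread(rmax: int) -> float:
    return rmax / 10.0  # assumption: spread is 10% of the encoded range

def _draw(center: int, spread: float) -> int:
    return round(random.uniform(center - spread, center + spread))

def widen_lower_bound(lb: int, rmin: int, rmax: int) -> int:
    spread = _calculate_spread(rmax)
    while True:  # resample until the new bound stays in [rmin, lb]
        nlb = _draw(lb, spread)
        if rmin <= nlb <= lb:
            return nlb

print(widen_lower_bound(lb=5, rmin=0, rmax=15))  # prints 4 or 5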
Example #20
    def test_should_handle_unexpected_case_3(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        p1 = Perception([.5, .8], oktypes=(float, ))
        # Second effect attribute is specializable
        effect = Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
        quality = 0.4
        time = random.randint(0, 1000)
        cl = Classifier(effect=effect, quality=quality, cfg=cfg)

        # when
        child = unexpected_case(cl, p0, p1, time)

        # then
        assert cl.q < quality
        assert cl.is_marked() is True

        assert child is not None
        assert child.is_marked() is False
        assert child.q == .5
        assert child.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
        assert child.effect == Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
Example #21
    def test_should_handle_unexpected_case_1(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        p1 = Perception([.5, .5], oktypes=(float, ))
        # Effect is all pass-through. Can be specialized.
        effect = Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)
        quality = .4
        time = random.randint(0, 1000)
        cl = Classifier(effect=effect, quality=quality, cfg=cfg)

        # when
        child = unexpected_case(cl, p0, p1, time)

        # then
        assert cl.q < quality
        assert cl.is_marked() is True
        assert child
        assert child.q == .5
        assert child.talp == time
        # There is no change in perception so the child condition
        # and effect should stay the same.
        assert child.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
        assert child.effect == Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)
Example #22
    def test_should_find_similar(self):
        # given
        cfg = Configuration(3, 2, encoder=RealValueEncoder(2))
        cl1 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg)
        cl2 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg)

        # then
        assert cl1 == cl2
Example #23
class TestClassifier:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder=RealValueEncoder(4))

    def test_should_initialize_without_arguments(self, cfg):
        # when
        c = Classifier(cfg=cfg)

        # then
        assert c.condition == Condition.generic(cfg=cfg)
        assert c.action is None
        assert c.effect == Effect.pass_through(cfg=cfg)
        assert c.exp == 1
        assert c.talp is None
        assert c.tav == 0.0

    def test_should_detect_identical_classifier(self, cfg):
        cl_1 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        cl_2 = Classifier(condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
                          action=1,
                          effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
                          cfg=cfg)

        assert cl_1 == cl_2

    def test_should_find_similar(self):
        # given
        cfg = Configuration(3, 2, encoder=RealValueEncoder(2))
        cl1 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg)
        cl2 = Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg)

        # then
        assert cl1 == cl2

    @pytest.mark.parametrize("_q, _r, _fitness", [
        (0.0, 0.0, 0.0),
        (0.3, 0.5, 0.15),
        (1.0, 1.0, 1.0),
    ])
    def test_should_calculate_fitness(self, _q, _r, _fitness, cfg):
        assert Classifier(quality=_q, reward=_r, cfg=cfg).fitness == _fitness

    @pytest.mark.parametrize(
        "_effect, _p0, _p1, _result",
        [
            # Classifier with default pass-through effect
            (None, [0.5, 0.5], [0.5, 0.5], True),
            ([UBR(0, 15), UBR(10, 12)], [0.5, 0.5], [0.5, 0.5], False),
            ([UBR(0, 4), UBR(10, 12)], [0.8, 0.8], [0.2, 0.7], True),
            # second perception attribute is unchanged - should be a wildcard
            ([UBR(0, 4), UBR(10, 12)], [0.8, 0.8], [0.2, 0.8], False),
        ])
    def test_should_anticipate_change(self, _effect, _p0, _p1, _result, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        p1 = Perception(_p1, oktypes=(float, ))

        c = Classifier(effect=_effect, cfg=cfg)

        # then
        assert c.does_anticipate_correctly(p0, p1) is _result

    @pytest.mark.parametrize("_q, _reliable", [
        (.5, False),
        (.1, False),
        (.9, False),
        (.91, True),
    ])
    def test_should_detect_reliable(self, _q, _reliable, cfg):
        # given
        cl = Classifier(quality=_q, cfg=cfg)

        # then
        assert cl.is_reliable() is _reliable

    @pytest.mark.parametrize("_q, _inadequate", [
        (.5, False),
        (.1, False),
        (.09, True),
    ])
    def test_should_detect_inadequate(self, _q, _inadequate, cfg):
        # given
        cl = Classifier(quality=_q, cfg=cfg)

        # then
        assert cl.is_inadequate() is _inadequate

    def test_should_increase_quality(self, cfg):
        # given
        cl = Classifier(cfg=cfg)
        assert cl.q == 0.5

        # when
        cl.increase_quality()

        # then
        assert cl.q == 0.525

    def test_should_decrease_quality(self, cfg):
        # given
        cl = Classifier(cfg=cfg)
        assert cl.q == 0.5

        # when
        cl.decrease_quality()

        # then
        assert cl.q == 0.475

    @pytest.mark.parametrize("_condition, _effect, _sua", [
        ([UBR(4, 15), UBR(2, 15)], [UBR(0, 15), UBR(0, 15)], 2),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 1),
        ([UBR(0, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 0),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(5, 15)], 1),
        ([UBR(4, 15), UBR(6, 15)], [UBR(4, 15), UBR(6, 15)], 0),
    ])
    def test_should_count_specified_unchanging_attributes(
            self, _condition, _effect, _sua, cfg):

        # given
        cl = Classifier(condition=Condition(_condition, cfg),
                        effect=Effect(_effect, cfg),
                        cfg=cfg)

        # then
        assert len(cl.specified_unchanging_attributes) == _sua

    def test_should_create_copy(self, cfg):
        # given
        operation_time = random.randint(0, 100)
        condition = Condition(
            [self._random_ubr(), self._random_ubr()], cfg=cfg)
        action = random.randint(0, 2)
        effect = Effect([self._random_ubr(), self._random_ubr()], cfg=cfg)

        cl = Classifier(condition,
                        action,
                        effect,
                        quality=random.random(),
                        reward=random.random(),
                        immediate_reward=random.random(),
                        cfg=cfg)
        # when
        copied_cl = Classifier.copy_from(cl, operation_time)

        # then
        assert cl is not copied_cl
        assert cl.condition == copied_cl.condition
        assert cl.condition is not copied_cl.condition
        assert cl.action == copied_cl.action
        assert cl.effect == copied_cl.effect
        assert cl.effect is not copied_cl.effect
        assert copied_cl.is_marked() is False
        assert cl.r == copied_cl.r
        assert cl.q == copied_cl.q
        assert operation_time == copied_cl.tga
        assert operation_time == copied_cl.talp

    def test_should_specialize(self, cfg):
        # given
        p0 = Perception(np.random.random(2), oktypes=(float, ))
        p1 = Perception(np.random.random(2), oktypes=(float, ))
        cl = Classifier(cfg=cfg)

        # when
        cl.specialize(p0, p1)

        # then
        enc_p0 = list(map(cfg.encoder.encode, p0))
        enc_p1 = list(map(cfg.encoder.encode, p1))

        for i, (c_ubr, e_ubr) in enumerate(zip(cl.condition, cl.effect)):
            assert c_ubr.lower_bound <= enc_p0[i] <= c_ubr.upper_bound
            assert e_ubr.lower_bound <= enc_p1[i] <= e_ubr.upper_bound

    @pytest.mark.parametrize("_condition, _effect, _soa_before, _soa_after", [
        ([UBR(4, 15), UBR(2, 15)], [UBR(0, 15), UBR(0, 15)], 2, 1),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 1, 0),
        ([UBR(0, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 0, 0),
    ])
    def test_should_generalize_randomly_unchanging_condition_attribute(
            self, _condition, _effect, _soa_before, _soa_after, cfg):

        # given
        condition = Condition(_condition, cfg)
        effect = Effect(_effect, cfg)
        cl = Classifier(condition=condition, effect=effect, cfg=cfg)
        assert len(cl.specified_unchanging_attributes) == _soa_before

        # when
        cl.generalize_unchanging_condition_attribute()

        # then
        assert len(cl.specified_unchanging_attributes) == _soa_after

    @pytest.mark.parametrize(
        "_c1, _c2, _result",
        [
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 4)], True),
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 6)], False),
            # The same classifiers
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 5)], False)
        ])
    def test_should_find_more_general(self, _c1, _c2, _result, cfg):
        # given
        cl1 = Classifier(condition=Condition(_c1, cfg), cfg=cfg)
        cl2 = Classifier(condition=Condition(_c2, cfg), cfg=cfg)

        # then
        assert cl1.is_more_general(cl2) is _result

    @pytest.mark.parametrize("_cond, _res", [
        ([UBR(4, 6), UBR(1, 5)], {
            1: 2,
            2: 0,
            3: 0,
            4: 0
        }),
        ([UBR(0, 6), UBR(1, 5)], {
            1: 1,
            2: 1,
            3: 0,
            4: 0
        }),
        ([UBR(0, 4), UBR(6, 15)], {
            1: 0,
            2: 1,
            3: 1,
            4: 0
        }),
        ([UBR(0, 15), UBR(6, 14)], {
            1: 1,
            2: 0,
            3: 0,
            4: 1
        }),
        ([UBR(0, 15), UBR(0, 15)], {
            1: 0,
            2: 0,
            3: 0,
            4: 2
        }),
    ])
    def test_count_regions(self, _cond, _res, cfg):
        # given
        cl = Classifier(condition=Condition(_cond, cfg), cfg=cfg)

        # then
        assert cl.get_interval_proportions() == _res

    @staticmethod
    def _random_ubr(lower=0, upper=15):
        return UBR(random.randint(lower, upper), random.randint(lower, upper))
Example #24
    def test_crossover(self, cfg):
        # given
        parent = Classifier(
            condition=Condition(
                [UBR(1, 1), UBR(1, 1), UBR(1, 1)], cfg),
            effect=Effect(
                [UBR(1, 1), UBR(1, 1), UBR(1, 1)], cfg),
            cfg=cfg)
        donor = Classifier(
            condition=Condition(
                [UBR(2, 2), UBR(2, 2), UBR(2, 2)], cfg),
            effect=Effect(
                [UBR(2, 2), UBR(2, 2), UBR(2, 2)], cfg),
            cfg=cfg)

        # when
        np.random.seed(12345)  # left: 3, right: 6
        crossover(parent, donor)

        # then
        assert parent.condition == \
            Condition([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
        assert parent.effect == \
            Effect([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
        assert donor.condition == \
            Condition([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
        assert donor.effect == \
            Effect([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
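
The seed comment above ("left: 3, right: 6") suggests two cut points drawn
over the flattened bounds, with the middle segment swapped between the
chromosomes; the expected values imply the same cuts are applied to condition
and effect alike. A self-contained sketch of that two-point swap (cut points
hard-coded rather than drawn from the seeded RNG):

from typing import List

def two_point_swap(a: List[int], b: List[int], left: int, right: int) -> None:
    # Swap the middle segment [left:right) between two flat genomes.
    a[left:right], b[left:right] = b[left:right], a[left:right]

parent = [1, 1, 1, 1, 1, 1]  # flattened condition, UBR(1, 1) x 3
donor = [2, 2, 2, 2, 2, 2]   # flattened condition, UBR(2, 2) x 3
two_point_swap(parent, donor, 3, 6)
print(parent)  # [1, 1, 1, 2, 2, 2] -> UBR(1, 1), UBR(1, 2), UBR(2, 2)
print(donor)   # [2, 2, 2, 1, 1, 1] -> UBR(2, 2), UBR(2, 1), UBR(1, 1)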
Example #25
class TestClassifier:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder_bits=4)

    def test_should_initialize_without_arguments(self, cfg):
        # when
        c = Classifier(cfg=cfg)

        # then
        assert c.condition == Condition.generic(cfg=cfg)
        assert c.action is None
        assert c.effect == Effect.pass_through(cfg=cfg)
        assert c.exp == 1
        assert c.talp is None
        assert c.tav == 0.0

    @pytest.mark.parametrize(
        "_effect, _p0, _p1, _result",
        [
            # Classifier with default pass-through effect
            (None, [0.5, 0.5], [0.5, 0.5], True),
            ([UBR(0, 15), UBR(10, 12)], [0.5, 0.5], [0.5, 0.5], False),
            ([UBR(0, 4), UBR(10, 12)], [0.8, 0.8], [0.2, 0.7], True),
            # second perception attribute is unchanged - should be a wildcard
            ([UBR(0, 4), UBR(10, 12)], [0.8, 0.8], [0.2, 0.8], False),
        ])
    def test_should_anticipate_change(self, _effect, _p0, _p1, _result, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        p1 = Perception(_p1, oktypes=(float, ))

        c = Classifier(effect=_effect, cfg=cfg)

        # then
        assert c.does_anticipate_correctly(p0, p1) is _result

    @pytest.mark.parametrize(
        "_exp, _q, _is_subsumer",
        [
            (1, .5, False),  # too young classifier
            (30, .92, True),  # enough experience and quality
            (15, .92, False),  # not experienced enough
        ])
    def test_should_distinguish_classifier_as_subsumer(self, _exp, _q,
                                                       _is_subsumer, cfg):
        # given
        cl = Classifier(experience=_exp, quality=_q, cfg=cfg)

        # when & then
        # general classifier should not be considered as subsumer
        assert cl.is_subsumer is _is_subsumer

    def test_should_not_distinguish_marked_classifier_as_subsumer(self, cfg):
        # given
        # Now check if the fact that classifier is marked will block
        # it from being considered as a subsumer
        cl = Classifier(experience=30, quality=0.92, cfg=cfg)
        cl.mark[0].add(4)

        # when & then
        assert cl.is_subsumer is False

    @pytest.mark.parametrize("_q, _reliable", [
        (.5, False),
        (.1, False),
        (.9, False),
        (.91, True),
    ])
    def test_should_detect_reliable(self, _q, _reliable, cfg):
        # given
        cl = Classifier(quality=_q, cfg=cfg)

        # then
        assert cl.is_reliable() is _reliable

    @pytest.mark.parametrize("_q, _inadequate", [
        (.5, False),
        (.1, False),
        (.09, True),
    ])
    def test_should_detect_inadequate(self, _q, _inadequate, cfg):
        # given
        cl = Classifier(quality=_q, cfg=cfg)

        # then
        assert cl.is_inadequate() is _inadequate

    def test_should_increase_quality(self, cfg):
        # given
        cl = Classifier(cfg=cfg)
        assert cl.q == 0.5

        # when
        cl.increase_quality()

        # then
        assert cl.q == 0.525

    def test_should_decrease_quality(self, cfg):
        # given
        cl = Classifier(cfg=cfg)
        assert cl.q == 0.5

        # when
        cl.decrease_quality()

        # then
        assert cl.q == 0.475

    @pytest.mark.parametrize("_condition, _effect, _sua", [
        ([UBR(4, 15), UBR(2, 15)], [UBR(0, 15), UBR(0, 15)], 2),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 1),
        ([UBR(0, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 0),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(5, 15)], 1),
        ([UBR(4, 15), UBR(6, 15)], [UBR(4, 15), UBR(6, 15)], 0),
    ])
    def test_should_count_specified_unchanging_attributes(
            self, _condition, _effect, _sua, cfg):

        # given
        cl = Classifier(condition=Condition(_condition, cfg),
                        effect=Effect(_effect, cfg),
                        cfg=cfg)

        # then
        assert len(cl.specified_unchanging_attributes) == _sua

    def test_should_create_copy(self, cfg):
        # given
        operation_time = random.randint(0, 100)
        condition = Condition(
            [self._random_ubr(), self._random_ubr()], cfg=cfg)
        action = random.randint(0, 2)
        effect = Effect([self._random_ubr(), self._random_ubr()], cfg=cfg)

        cl = Classifier(condition,
                        action,
                        effect,
                        quality=random.random(),
                        reward=random.random(),
                        intermediate_reward=random.random(),
                        cfg=cfg)
        # when
        copied_cl = Classifier.copy_from(cl, operation_time)

        # then
        assert cl is not copied_cl
        assert cl.condition == copied_cl.condition
        assert cl.condition is not copied_cl.condition
        assert cl.action == copied_cl.action
        assert cl.effect == copied_cl.effect
        assert cl.effect is not copied_cl.effect
        assert copied_cl.is_marked() is False
        assert cl.r == copied_cl.r
        assert cl.q == copied_cl.q
        assert operation_time == copied_cl.tga
        assert operation_time == copied_cl.talp

    def test_should_specialize(self, cfg):
        # given
        p0 = Perception([random.random()] * 2, oktypes=(float, ))
        p1 = Perception([random.random()] * 2, oktypes=(float, ))
        cl = Classifier(cfg=cfg)

        # when
        cl.specialize(p0, p1)

        # then
        for condition_ubr, effect_ubr in zip(cl.condition, cl.effect):
            assert condition_ubr.lower_bound == condition_ubr.upper_bound
            assert effect_ubr.lower_bound == effect_ubr.upper_bound

    @pytest.mark.parametrize("_condition, _effect, _soa_before, _soa_after", [
        ([UBR(4, 15), UBR(2, 15)], [UBR(0, 15), UBR(0, 15)], 2, 1),
        ([UBR(4, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 1, 0),
        ([UBR(0, 15), UBR(0, 15)], [UBR(0, 15), UBR(0, 15)], 0, 0),
    ])
    def test_should_generalize_randomly_unchanging_condition_attribute(
            self, _condition, _effect, _soa_before, _soa_after, cfg):

        # given
        condition = Condition(_condition, cfg)
        effect = Effect(_effect, cfg)
        cl = Classifier(condition=condition, effect=effect, cfg=cfg)
        assert len(cl.specified_unchanging_attributes) == _soa_before

        # when
        cl.generalize_unchanging_condition_attribute()

        # then
        assert len(cl.specified_unchanging_attributes) == _soa_after

    @pytest.mark.parametrize(
        "_c1, _c2, _result",
        [
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 4)], True),
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 6)], False),
            # The same classifiers
            ([UBR(4, 6), UBR(1, 5)], [UBR(4, 6), UBR(1, 5)], False)
        ])
    def test_should_find_more_general(self, _c1, _c2, _result, cfg):
        # given
        cl1 = Classifier(condition=Condition(_c1, cfg), cfg=cfg)
        cl2 = Classifier(condition=Condition(_c2, cfg), cfg=cfg)

        # then
        assert cl1.is_more_general(cl2) is _result

    @pytest.mark.parametrize(
        "_e1, _e2, _exp1, _marked, _reliable, "
        "_more_general, _condition_matching, _result",
        [
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 6)],
             30, False, True, True, True, True),  # all good
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 6)],
             30, False, False, True, True, False),  # not reliable
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 6)],
             30, True, True, True, True, False),  # marked
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 6)],
             30, False, True, False, True, False),  # less general
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 6)],
             30, False, True, True, False, False),  # condition not matching
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 7)],
             30, False, True, True, True, False),  # different effects
            ([UBR(2, 4), UBR(5, 6)], [UBR(2, 4), UBR(5, 7)],
             10, False, True, True, True, False),  # not experienced
        ])
    def test_should_detect_subsumption(self, _e1, _e2, _exp1, _marked,
                                       _reliable, _more_general,
                                       _condition_matching, _result, mocker,
                                       cfg):
        # given
        cl1 = Classifier(effect=Effect(_e1, cfg), experience=_exp1, cfg=cfg)
        cl2 = Classifier(effect=Effect(_e2, cfg), cfg=cfg)

        # when
        mocker.patch.object(cl1, "is_reliable")
        mocker.patch.object(cl1, "is_marked")
        mocker.patch.object(cl1, "is_more_general")
        mocker.patch.object(cl1.condition, "does_match_condition")

        cl1.is_reliable.return_value = _reliable
        cl1.is_marked.return_value = _marked
        cl1.is_more_general.return_value = _more_general
        cl1.condition.does_match_condition.return_value = _condition_matching

        # then
        assert cl1.does_subsume(cl2) == _result

    @staticmethod
    def _random_ubr(lower=0, upper=15):
        return UBR(random.randint(lower, upper), random.randint(lower, upper))
Example #26
class TestMark:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder_bits=4)

    def test_should_initialize_empty_mark(self, cfg):
        # when
        mark = Mark(cfg)

        # then
        assert len(mark) == 2
        for m in mark:
            assert type(m) is set
            assert len(m) == 0

    def test_should_detect_if_not_marked(self, cfg):
        mark = Mark(cfg)
        assert mark.is_marked() is False

    def test_should_detect_if_marked(self, cfg):
        # given
        mark = Mark(cfg)

        # when
        mark[0].add(UBR(2, 5))

        # then
        assert mark.is_marked() is True

    @pytest.mark.parametrize(
        "initmark, perception, changed",
        [
            ([[], []], [0.5, 0.5], False),  # shouldn't set mark if empty
            ([[7], []], [0.5, 0.5], False),  # encoded value already marked
            ([[5], []], [0.5, 0.5], True)
        ])
    def test_should_complement_mark(self, initmark, perception, changed, cfg):
        # given
        p0 = Perception(perception, oktypes=(float, ))
        mark = self._init_mark(initmark, cfg)

        # when
        change_detected = mark.complement_marks(p0)

        # then
        assert change_detected is changed

    @pytest.mark.parametrize(
        "initmark, _p0, initcond, marked_count",
        [
            # not marked, all generic classifier, should mark two positions
            ([[], []], [0.5, 0.5], [], 2),
            # not marked, specified condition, shouldn't get marked
            ([[], []], [0.5, 0.5], [UBR(1, 3), UBR(2, 3)], 0),
            # not marked, one don't care, should mark one
            ([[], []], [0.5, 0.5], [UBR(1, 3), UBR(0, 15)], 1),
            # already marked, should use perception, one mark
            ([[4], []], [0.5, 0.5], [], 1),
        ])
    def test_should_set_mark_using_condition(self, initmark, _p0, initcond,
                                             marked_count, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        mark = self._init_mark(initmark, cfg)
        condition = self._init_condition(initcond, cfg)

        # when
        mark.set_mark_using_condition(condition, p0)

        # then
        assert self._count_marked_attributes(mark) == marked_count

    def test_should_get_no_differences(self, cfg):
        # given
        p0 = Perception([.5, .5], oktypes=(float, ))
        mark = self._init_mark([], cfg)

        # when
        diff = mark.get_differences(p0)

        # then
        assert diff == Condition.generic(cfg)

    @pytest.mark.parametrize(
        "_m, _p0, _specif",
        [
            # The perception value is absent from both marked attributes -
            # one of them should be randomly specified
            ([[2], [4]], [.5, .5], 1),
            # One attribute already contains the perception value -
            # the other should be specified
            ([[8], [4]], [.5, .5], 1),
            # Both attributes contain the perception value - no differences
            ([[7], [7]], [.5, .5], 0)
        ])
    def test_should_handle_unique_differences(self, _m, _p0, _specif, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        mark = self._init_mark(_m, cfg)

        # when
        diff = mark.get_differences(p0)

        # then
        assert diff.specificity == _specif

    @pytest.mark.parametrize(
        "_m, _p0, _specificity",
        [
            # There are two marks in one attribute - it should be specified.
            ([[1, 2], [4]], [.5, .5], 1),
            # Here we have clear unique difference - specify it first
            ([[1, 2], [8]], [.5, .5], 1),
            # Two fuzzy attributes (containing perception value) - both
            # should be specified
            ([[6, 7], [5, 7]], [.5, .5], 2),
            # Two fuzzy attributes - but one is unique (does not contain
            # perception)
            ([[6, 8], [7, 9]], [.5, .5], 1),
        ])
    def test_should_handle_fuzzy_differences(self, _m, _p0, _specificity, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        mark = self._init_mark(_m, cfg)

        # when
        diff = mark.get_differences(p0)

        # then
        assert diff.specificity == _specificity

    @staticmethod
    def _init_mark(vals, cfg):
        mark = Mark(cfg)
        for idx, attribs in enumerate(vals):
            for attrib in attribs:
                mark[idx].add(attrib)

        return mark

    @staticmethod
    def _init_condition(vals, cfg):
        if len(vals) == 0:
            return Condition.generic(cfg)

        return Condition(vals, cfg)

    @staticmethod
    def _count_marked_attributes(mark) -> int:
        return sum(1 for m in mark if len(m) > 0)
Example #27
def _random_ubr(lower=0, upper=15):
    return UBR(random.randint(lower, upper), random.randint(lower, upper))
Example #28
class TestCondition:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder=RealValueEncoder(4))

    def test_should_create_generic_condition(self, cfg):
        # when
        cond = Condition.generic(cfg)

        # then
        assert len(cond) == cfg.classifier_length
        for allele in cond:
            assert allele == cfg.classifier_wildcard

    @pytest.mark.parametrize(
        "_init_cond, _other_cond, _result_cond",
        [
            ([UBR(0, 10), UBR(5, 2)], [UBR(0, 15), UBR(0, 15)],
             [UBR(0, 10), UBR(2, 5)]),
            ([UBR(0, 10), UBR(5, 2)], [UBR(3, 12), UBR(0, 15)],
             [UBR(3, 12), UBR(2, 5)]),
        ])
    def test_should_specialize_with_condition(self, _init_cond, _other_cond,
                                              _result_cond, cfg):

        # given
        cond = Condition(_init_cond, cfg)
        other = Condition(_other_cond, cfg)

        # when
        cond.specialize_with_condition(other)

        # then
        assert cond == Condition(_result_cond, cfg)

    @pytest.mark.parametrize(
        "_condition, _idx, _generalized",
        [([UBR(1, 4), UBR(5, 7)], 0, [UBR(0, 15), UBR(5, 7)]),
         ([UBR(1, 4), UBR(5, 7)], 1, [UBR(1, 4), UBR(0, 15)])])
    def test_generalize(self, _condition, _idx, _generalized, cfg):
        # given
        cond = Condition(_condition, cfg)

        # when
        cond.generalize(_idx)

        # then
        assert cond == Condition(_generalized, cfg)

    @pytest.mark.parametrize("_condition, _spec_before, _spec_after", [
        ([UBR(2, 6), UBR(7, 2)], 2, 1),
        ([UBR(2, 6), UBR(0, 15)], 1, 0),
        ([UBR(0, 15), UBR(0, 15)], 0, 0),
    ])
    def test_should_generalize_specific_attributes_randomly(
            self, _condition, _spec_before, _spec_after, cfg):

        # given
        condition = Condition(_condition, cfg)
        assert condition.specificity == _spec_before

        # when
        condition.generalize_specific_attribute_randomly()

        # then
        assert condition.specificity == _spec_after

    @pytest.mark.parametrize("_condition, _specificity",
                             [([UBR(0, 15), UBR(0, 15)], 0),
                              ([UBR(0, 15), UBR(2, 15)], 1),
                              ([UBR(5, 15), UBR(2, 12)], 2)])
    def test_should_count_specificity(self, _condition, _specificity, cfg):
        cond = Condition(_condition, cfg=cfg)
        assert cond.specificity == _specificity

    @pytest.mark.parametrize("_condition, _covered_pct", [
        ([UBR(0, 15), UBR(0, 15)], 1.0),
        ([UBR(7, 7), UBR(4, 4)], 0.0625),
        ([UBR(7, 8), UBR(4, 5)], 0.125),
        ([UBR(2, 8), UBR(4, 10)], 0.4375),
    ])
    def test_should_calculate_cover_ratio(self, _condition, _covered_pct, cfg):
        cond = Condition(_condition, cfg=cfg)
        assert cond.cover_ratio == _covered_pct

    @pytest.mark.parametrize("_condition, _perception, _result",
                             [([UBR(0, 15), UBR(0, 15)], [0.2, 0.4], True),
                              ([UBR(0, 15), UBR(0, 2)], [0.5, 0.5], False),
                              ([UBR(8, 8), UBR(10, 10)], [0.5, 0.65], True)])
    def test_should_match_perception(self, _condition, _perception, _result,
                                     cfg):

        # given
        cond = Condition(_condition, cfg=cfg)
        p0 = Perception(_perception, oktypes=(float, ))

        # then
        assert cond.does_match(p0) == _result

    @pytest.mark.parametrize("_cond1, _cond2, _result", [
        ([UBR(0, 15), UBR(0, 15)], [UBR(2, 4), UBR(5, 10)], True),
        ([UBR(6, 10), UBR(0, 15)], [UBR(2, 4), UBR(5, 10)], False),
        ([UBR(0, 15), UBR(4, 10)], [UBR(2, 4), UBR(6, 12)], False),
        ([UBR(2, 4), UBR(5, 5)], [UBR(2, 4), UBR(5, 5)], True),
    ])
    def test_should_subsume_condition(self, _cond1, _cond2, _result, cfg):
        # given
        cond1 = Condition(_cond1, cfg=cfg)
        cond2 = Condition(_cond2, cfg=cfg)

        # then
        assert cond1.subsumes(cond2) == _result

    @pytest.mark.parametrize(
        "_cond, _result", [([UBR(0, 15), UBR(0, 7)], 'OOOOOOOOOo|OOOOo.....')])
    def test_should_visualize(self, _cond, _result, cfg):
        assert repr(Condition(_cond, cfg=cfg)) == _result
Example #29
    def __init__(self,
                 classifier_length: int,
                 number_of_possible_actions: int,
                 encoder=None,
                 environment_adapter=EnvironmentAdapter,
                 user_metrics_collector_fcn: Callable = None,
                 metrics_trial_frequency: int = 5,
                 do_ga: bool = False,
                 do_subsumption: bool = True,
                 beta: float = 0.05,
                 gamma: float = 0.95,
                 theta_i: float = 0.1,
                 theta_r: float = 0.9,
                 epsilon: float = 0.5,
                 cover_noise: float = 0.1,
                 mutation_noise: float = 0.1,
                 u_max: int = 100000,
                 theta_exp: int = 20,
                 theta_ga: int = 100,
                 theta_as: int = 20,
                 mu: float = 0.3,
                 chi: float = 0.8) -> None:

        if encoder is None:
            raise TypeError('Real number encoder should be passed')

        self.oktypes = (UBR, )
        self.encoder = encoder

        self.classifier_length = classifier_length
        self.number_of_possible_actions = number_of_possible_actions
        self.classifier_wildcard = UBR(*self.encoder.range)

        self.environment_adapter = environment_adapter

        self.metrics_trial_frequency = metrics_trial_frequency
        self.user_metrics_collector_fcn = user_metrics_collector_fcn

        self.do_ga = do_ga
        self.do_subsumption = do_subsumption

        self.beta = beta
        self.gamma = gamma
        self.theta_i = theta_i
        self.theta_r = theta_r
        self.epsilon = epsilon
        # Maximum of the uniform noise distribution U[0, cover_noise]
        # that can alter the perception during covering
        self.cover_noise = cover_noise

        # Maximum of the uniform noise distribution that can broaden
        # the phenotype interval range
        self.mutation_noise = mutation_noise
        self.u_max = u_max

        self.theta_exp = theta_exp
        self.theta_ga = theta_ga
        self.theta_as = theta_as

        self.mu = mu
        self.chi = chi
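
The wildcard is derived directly from the encoder's range, as the fixtures in
the surrounding tests show (e.g. RealValueEncoder(4) yields the 0..15 range).
A minimal illustration with a stand-in encoder exposing only the `range`
attribute that __init__ uses:

class SketchEncoder:
    def __init__(self, bits: int):
        self.range = (0, 2 ** bits - 1)

encoder = SketchEncoder(4)
wildcard_bounds = encoder.range  # UBR(*encoder.range) -> UBR(0, 15)
print(wildcard_bounds)           # (0, 15)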
Example #30
class TestEffect:
    @pytest.fixture
    def cfg(self):
        return Configuration(classifier_length=2,
                             number_of_possible_actions=2,
                             encoder=RealValueEncoder(4))

    @pytest.mark.parametrize("_e, _result", [
        ([UBR(0, 15), UBR(0, 15)], False),
        ([UBR(0, 15), UBR(2, 14)], True),
        ([UBR(4, 10), UBR(2, 14)], True),
    ])
    def test_should_detect_change(self, _e, _result, cfg):
        assert Effect(_e, cfg).specify_change == _result

    def test_should_create_pass_through_effect(self, cfg):
        # when
        effect = Effect.pass_through(cfg)

        # then
        assert len(effect) == cfg.classifier_length
        for allele in effect:
            assert allele == cfg.classifier_wildcard

    @pytest.mark.parametrize(
        "_p0, _p1, _effect, is_specializable",
        [
            # Effect is all pass-through. Can be specialized.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(0, 15)], True),
            # The first pass-through effect attribute gets skipped; the second
            # gets examined. The p1 value is not in its range, so it's invalid.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(2, 4)], False),
            # Here the range is proper, but no change is anticipated,
            # so this attribute should remain a pass-through symbol.
            ([0.5, 0.5], [0.5, 0.5], [UBR(0, 15), UBR(2, 12)], False),
            # Here the second perception attribute changes: 0.8 => 12
            ([0.5, 0.5], [0.5, 0.8], [UBR(0, 15), UBR(10, 14)], True)
        ])
    def test_should_specialize(self, _p0, _p1, _effect, is_specializable, cfg):
        # given
        p0 = Perception(_p0, oktypes=(float, ))
        p1 = Perception(_p1, oktypes=(float, ))
        effect = Effect(_effect, cfg=cfg)

        # then
        assert effect.is_specializable(p0, p1) is is_specializable

    @pytest.mark.parametrize("_effect1, _effect2, _result", [
        ([UBR(0, 15), UBR(0, 15)], [UBR(2, 4), UBR(5, 10)], True),
        ([UBR(6, 10), UBR(0, 15)], [UBR(2, 4), UBR(5, 10)], False),
        ([UBR(0, 15), UBR(4, 10)], [UBR(2, 4), UBR(6, 12)], False),
        ([UBR(2, 4), UBR(5, 5)], [UBR(2, 4), UBR(5, 5)], True),
    ])
    def test_should_subsume_effect(self, _effect1, _effect2, _result, cfg):
        # given
        effect1 = Effect(_effect1, cfg=cfg)
        effect2 = Effect(_effect2, cfg=cfg)

        # then
        assert effect1.subsumes(effect2) == _result

    @pytest.mark.parametrize(
        "_effect, _result",
        [([UBR(0, 15), UBR(0, 7)], 'OOOOOOOOOo|OOOOo.....')])
    def test_should_visualize(self, _effect, _result, cfg):
        assert repr(Effect(_effect, cfg=cfg)) == _result