def test_should_subsume_condition(self, _cond1, _cond2, _result, cfg):
    """Subsumption between two parametrized conditions matches expectation."""
    subsumer = Condition(_cond1, cfg=cfg)
    candidate = Condition(_cond2, cfg=cfg)

    assert subsumer.subsumes(candidate) == _result
def crossover(parent: Classifier, donor: Classifier):
    """Perform two-point crossover between two classifiers in place.

    Both condition and effect allele lists are flattened, a single pair of
    crossing points is drawn, and the segment between them is exchanged
    between ``parent`` and ``donor``. Condition and effect use the SAME
    crossing points so corresponding loci stay aligned.

    Parameters
    ----------
    parent: Classifier
        First classifier; its condition/effect are replaced with crossed-over
        versions.
    donor: Classifier
        Second classifier; modified the same way.
    """
    assert parent.cfg.classifier_length == donor.cfg.classifier_length

    # flatten parent and donor perception strings
    p_cond_flat = _flatten(parent.condition)
    d_cond_flat = _flatten(donor.condition)
    p_effect_flat = _flatten(parent.effect)
    d_effect_flat = _flatten(donor.effect)

    # select two distinct crossing points (left < right guaranteed by sort)
    left, right = sorted(
        np.random.choice(range(0, len(p_cond_flat) + 1), 2, replace=False))
    assert left < right

    # swap chromosome segments (RHS slices are copied before assignment,
    # so the tuple swap is safe)
    p_cond_flat[left:right], d_cond_flat[left:right] = \
        d_cond_flat[left:right], p_cond_flat[left:right]
    p_effect_flat[left:right], d_effect_flat[left:right] = \
        d_effect_flat[left:right], p_effect_flat[left:right]

    # Rebuild proper perception strings
    parent.condition = Condition(_unflatten(p_cond_flat), cfg=parent.cfg)
    donor.condition = Condition(_unflatten(d_cond_flat), cfg=donor.cfg)
    parent.effect = Effect(_unflatten(p_effect_flat), cfg=parent.cfg)
    # bug fix: donor's effect was previously rebuilt with parent.cfg
    donor.effect = Effect(_unflatten(d_effect_flat), cfg=donor.cfg)
def test_crossover(self, cfg):
    """Two-point crossover with seed 12345 swaps the allele slice [3, 6)."""
    def triple(v):
        return [UBR(v, v), UBR(v, v), UBR(v, v)]

    parent = Classifier(condition=Condition(triple(1), cfg),
                        effect=Effect(triple(1), cfg),
                        cfg=cfg)
    donor = Classifier(condition=Condition(triple(2), cfg),
                       effect=Effect(triple(2), cfg),
                       cfg=cfg)

    np.random.seed(12345)  # deterministic choice: left=3, right=6
    crossover(parent, donor)

    assert parent.condition == \
        Condition([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
    assert parent.effect == \
        Effect([UBR(1, 1), UBR(1, 2), UBR(2, 2)], cfg)
    assert donor.condition == \
        Condition([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
    assert donor.effect == \
        Effect([UBR(2, 2), UBR(2, 1), UBR(1, 1)], cfg)
def test_should_match_condition(self, _cond1, _cond2, _result, cfg):
    """Condition-vs-condition matching yields the expected boolean."""
    first = Condition(_cond1, cfg=cfg)
    second = Condition(_cond2, cfg=cfg)

    assert first.does_match_condition(second) == _result
def test_should_find_more_general(self, _c1, _c2, _result, cfg):
    """is_more_general between two classifiers matches the expectation."""
    cl_a = Classifier(condition=Condition(_c1, cfg), cfg=cfg)
    cl_b = Classifier(condition=Condition(_c2, cfg), cfg=cfg)

    assert cl_a.is_more_general(cl_b) is _result
def test_generalize(self, _condition, _idx, _generalized, cfg):
    """Generalizing one attribute produces the expected condition."""
    subject = Condition(_condition, cfg)

    subject.generalize(_idx)

    assert subject == Condition(_generalized, cfg)
def test_should_match_perception(self, _condition, _perception, _result, cfg):
    """Condition matching against a perception yields the expected result."""
    subject = Condition(_condition, cfg=cfg)
    perception = Perception(_perception, oktypes=(float, ))

    assert subject.does_match(perception) == _result
def test_should_generalize_specific_attributes_randomly(
        self, _condition, _spec_before, _spec_after, cfg):
    """Random generalization changes specificity as parametrized."""
    subject = Condition(_condition, cfg)
    assert subject.specificity == _spec_before

    subject.generalize_specific_attribute_randomly()

    assert subject.specificity == _spec_after
def test_should_detect_identical_classifier(self, cfg):
    """Two classifiers built from equal parts compare as equal."""
    def build():
        return Classifier(
            condition=Condition([UBR(0, 1), UBR(0, 2)], cfg=cfg),
            action=1,
            effect=Effect([UBR(2, 3), UBR(4, 5)], cfg=cfg),
            cfg=cfg)

    assert build() == build()
def test_aggressive_mutation(self, _cond, _effect, cfg):
    """With mu=1.0 and strong noise every specified locus is altered,
    and all mutated bounds stay inside the encoder range."""
    cond_before = Condition(_cond, cfg)
    effect_before = Effect(_effect, cfg)

    cfg.encoder = RealValueEncoder(16)  # more precise encoder
    cfg.mutation_noise = 0.5  # strong noise mutation range
    mu = 1.0  # mutate every attribute

    cl = Classifier(condition=deepcopy(cond_before),
                    effect=deepcopy(effect_before),
                    cfg=cfg)

    mutate(cl, mu)

    lo, hi = cfg.encoder.range
    wildcard = cfg.classifier_wildcard
    for idx, (c, e) in enumerate(zip(cl.condition, cl.effect)):
        # every non-wildcard locus must have received a new value
        if cond_before[idx] != wildcard:
            assert cond_before[idx] != c
        if effect_before[idx] != wildcard:
            assert effect_before[idx] != e
        # condition bounds must remain within the encoder range
        assert c.lower_bound >= lo
        assert c.upper_bound <= hi
        # effect bounds must remain within the encoder range
        assert e.lower_bound >= lo
        assert e.upper_bound <= hi
def test_should_create_copy(self, cfg):
    """copy_from yields a distinct, unmarked copy with fresh timestamps."""
    t = random.randint(0, 100)
    src = Classifier(
        Condition([self._random_ubr(), self._random_ubr()], cfg=cfg),
        random.randint(0, 2),
        Effect([self._random_ubr(), self._random_ubr()], cfg=cfg),
        quality=random.random(),
        reward=random.random(),
        intermediate_reward=random.random(),
        cfg=cfg)

    copied = Classifier.copy_from(src, t)

    # distinct objects holding equal state
    assert copied is not src
    assert copied.condition == src.condition
    assert copied.condition is not src.condition
    assert copied.action == src.action
    assert copied.effect == src.effect
    assert copied.effect is not src.effect
    assert copied.is_marked() is False
    assert copied.r == src.r
    assert copied.q == src.q
    # both GA and ALP timestamps are set to the copy time
    assert copied.tga == t
    assert copied.talp == t
def test_should_create_new_classifier_with_covering(
        self, _p0, _p1, _child_cond, _child_effect, cfg):
    """Covering creates a classifier with expected parts and defaults."""
    cfg.cover_noise = 0.0

    before = Perception(_p0, oktypes=(float, ))
    after = Perception(_p1, oktypes=(float, ))
    action = random.randint(0, cfg.number_of_possible_actions)
    time = random.randint(0, 100)

    new_cl = cover(before, action, after, time, cfg)

    assert new_cl is not None
    assert new_cl.condition == Condition(_child_cond, cfg)
    assert new_cl.action == action
    assert new_cl.effect == Effect(_child_effect, cfg)
    # a freshly covered classifier starts with default bookkeeping values
    assert new_cl.q == .5
    assert new_cl.r == 0
    assert new_cl.ir == 0
    assert new_cl.tav == 0
    assert new_cl.tga == time
    assert new_cl.talp == time
    # assert new_cl.num == 1
    assert new_cl.exp == 0
def test_should_create_generic_condition(self, cfg):
    """A generic condition has full length and only wildcard alleles."""
    cond = Condition.generic(cfg)

    assert len(cond) == cfg.classifier_length
    assert all(allele == cfg.classifier_wildcard for allele in cond)
def get_differences(self, p0: Perception) -> Condition:
    """
    Difference determination is run when the classifier anticipated the
    change correctly.

    If it's marked we want to find if we can propose differences that
    will be applied to new condition part (specialization).

    There can be two types of differences:
    1) unique - one or more attributes in mark does not contain given
    perception attribute
    2) fuzzy - there is no unique difference - one or more attributes in
    the mark specify more than one value in perception attribute.

    If only unique differences are present - one random one get specified.
    If there are fuzzy differences everyone is specified.

    Parameters
    ----------
    p0: Perception
        previous perception; each attribute is encoded before comparison
        against the mark

    Returns
    -------
    Condition
        differences between mark and perception that can form
        a new classifier
    """
    # start fully generic; only differing attributes get specialized below
    diff = Condition.generic(self.cfg)

    if self.is_marked():
        # encode raw perception values into the mark's value space
        enc_p0 = list(map(self.cfg.encoder.encode, p0))

        # Unique and fuzzy difference counts
        nr1, nr2 = 0, 0

        # Count difference types
        for idx, item in enumerate(self):
            if len(item) > 0 and enc_p0[idx] not in item:
                # unique: marked attribute set lacks the perceived value
                nr1 += 1
            elif len(item) > 1:
                # fuzzy: mark holds several values for this attribute
                nr2 += 1

        if nr1 > 0:
            # unique differences exist - specialize exactly one, at random
            possible_idx = [pi for pi, p in enumerate(enc_p0) if
                            p not in self[pi] and len(self[pi]) > 0]
            rand_idx = random.choice(possible_idx)
            p = enc_p0[rand_idx]
            # degenerate interval [p, p] pins the attribute to one value
            diff[rand_idx] = UBR(p, p)
        elif nr2 > 0:
            # only fuzzy differences - specialize every ambiguous attribute
            for pi, p in enumerate(enc_p0):
                if len(self[pi]) > 1:
                    diff[pi] = UBR(p, p)

    return diff
def test_should_find_similar(self):
    """Classifiers built from identical parts are considered equal."""
    cfg = Configuration(3, 2, encoder=RealValueEncoder(2))

    def build():
        return Classifier(
            condition=Condition([UBR(0, 0), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            action=0,
            effect=Effect([UBR(0, 3), UBR(0, 3), UBR(0, 3)], cfg=cfg),
            cfg=cfg)

    assert build() == build()
def test_should_get_no_differences(self, cfg):
    """An empty mark produces a fully generic difference condition."""
    perception = Perception([.5, .5], oktypes=(float, ))
    mark = self._init_mark([], cfg)

    differences = mark.get_differences(perception)

    assert differences == Condition.generic(cfg)
def test_should_count_specified_unchanging_attributes(
        self, _condition, _effect, _sua, cfg):
    """The count of specified-unchanging attributes matches expectation."""
    cl = Classifier(condition=Condition(_condition, cfg),
                    effect=Effect(_effect, cfg),
                    cfg=cfg)

    assert len(cl.specified_unchanging_attributes) == _sua
def test_regions_averaging(self, cfg):
    """Region proportions averaged over a four-classifier population."""
    conditions = [
        [UBR(2, 3), UBR(4, 5)],
        [UBR(0, 3), UBR(4, 9)],
        [UBR(1, 3), UBR(4, 15)],
        [UBR(0, 13), UBR(0, 15)],
    ]
    population = ClassifierList(
        *[Classifier(condition=Condition(c, cfg), cfg=cfg)
          for c in conditions])

    result = count_averaged_regions(population)

    assert type(result) is dict
    assert result == {1: 0.5, 2: 0.25, 3: 0.125, 4: 0.125}
def test_should_initialize_without_arguments(self, cfg):
    """A default-constructed classifier gets generic/pass-through parts."""
    cl = Classifier(cfg=cfg)

    assert cl.condition == Condition.generic(cfg=cfg)
    assert cl.action is None
    assert cl.effect == Effect.pass_through(cfg=cfg)
    assert cl.exp == 1
    assert cl.talp is None
    assert cl.tav == 0.0
def test_should_form_match_set(self, cfg):
    """Match set keeps only classifiers whose condition covers observation."""
    # 4-bit encoding: 0.2 -> 3, 0.6 -> 9
    observation = Perception([0.2, 0.6], oktypes=(float, ))

    covering = Classifier(
        condition=Condition([UBR(2, 5), UBR(8, 11)], cfg=cfg), cfg=cfg)
    non_covering = Classifier(
        condition=Condition([UBR(5, 7), UBR(5, 12)], cfg=cfg), cfg=cfg)
    generic = Classifier(cfg=cfg)
    population = ClassifierList(*[covering, non_covering, generic])

    match_set = population.form_match_set(observation)

    assert len(match_set) == 2
    assert covering in match_set
    assert non_covering not in match_set
    assert generic in match_set
def test_disabled_mutation(self, _cond, cfg):
    """With mu=0.0 mutation must leave every condition bound untouched."""
    original = Condition(_cond, cfg)
    cl = Classifier(condition=original, cfg=cfg)

    # NOTE(review): this `mutate` takes an explicit range argument, unlike
    # the two-argument variant used elsewhere — presumably a different
    # module's implementation; verify against imports
    mutate(cl, cfg.encoder.range, 0.0)

    for idx, ubr in enumerate(cl.condition):
        assert ubr.lower_bound == original[idx].lower_bound
        assert ubr.upper_bound == original[idx].upper_bound
def test_should_generalize_randomly_unchanging_condition_attribute(
        self, _condition, _effect, _soa_before, _soa_after, cfg):
    """Generalizing an unchanging attribute reduces the SUA count."""
    cl = Classifier(condition=Condition(_condition, cfg),
                    effect=Effect(_effect, cfg),
                    cfg=cfg)
    assert len(cl.specified_unchanging_attributes) == _soa_before

    cl.generalize_unchanging_condition_attribute()

    assert len(cl.specified_unchanging_attributes) == _soa_after
def test_should_handle_expected_case_3(self, cfg):
    """A marked, low-quality classifier spawns a specialized child."""
    perception = Perception([.5, .5], oktypes=(float, ))
    cl = Classifier(quality=0.4, cfg=cfg)
    cl.mark[0].add(2)
    t = random.randint(0, 1000)

    child = expected_case(cl, perception, t)

    assert child is not None
    assert child.condition == Condition([UBR(8, 8), UBR(0, 15)], cfg)
    assert child.q == 0.5
def test_should_specialize_with_condition(self, _init_cond, _other_cond,
                                          _result_cond, cfg):
    """Specializing one condition with another yields the expected result."""
    subject = Condition(_init_cond, cfg)

    subject.specialize_with_condition(Condition(_other_cond, cfg))

    assert subject == Condition(_result_cond, cfg)
def test_disabled_mutation(self, _cond, _effect, cfg):
    """With mu=0.0 neither condition nor effect bounds may change."""
    cond_before = Condition(_cond, cfg)
    effect_before = Effect(_effect, cfg)
    cl = Classifier(condition=deepcopy(cond_before),
                    effect=deepcopy(effect_before),
                    cfg=cfg)

    mutate(cl, 0.0)

    for idx, (c, e) in enumerate(zip(cl.condition, cl.effect)):
        assert c.lower_bound == cond_before[idx].lower_bound
        assert c.upper_bound == cond_before[idx].upper_bound
        assert e.lower_bound == effect_before[idx].lower_bound
        assert e.upper_bound == effect_before[idx].upper_bound
def test_should_handle_unexpected_case_3(self, cfg):
    """Unexpected case with a specializable effect creates an unmarked child."""
    p0 = Perception([.5, .5], oktypes=(float, ))
    p1 = Perception([.5, .8], oktypes=(float, ))
    # second effect attribute is specializable
    effect = Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
    q_before = 0.4
    cl = Classifier(effect=effect, quality=q_before, cfg=cfg)
    t = random.randint(0, 1000)

    child = unexpected_case(cl, p0, p1, t)

    # parent is punished and marked
    assert cl.q < q_before
    assert cl.is_marked() is True
    # child starts fresh and unmarked
    assert child is not None
    assert child.is_marked() is False
    assert child.q == .5
    assert child.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    assert child.effect == Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
def test_should_handle_unexpected_case_1(self, cfg):
    """With no perceptual change the child keeps parent's condition/effect."""
    p0 = Perception([.5, .5], oktypes=(float, ))
    p1 = Perception([.5, .5], oktypes=(float, ))
    # effect is all pass-through, so it can be specialized
    effect = Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    q_before = .4
    cl = Classifier(effect=effect, quality=q_before, cfg=cfg)
    t = random.randint(0, 1000)

    child = unexpected_case(cl, p0, p1, t)

    assert cl.q < q_before
    assert cl.is_marked() is True
    assert child
    assert child.q == .5
    assert child.talp == t
    # no change in perception, so the child's condition
    # and effect stay identical to the parent's
    assert child.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    assert child.effect == Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)
def test_count_regions(self, _cond, _res, cfg):
    """Interval-proportion counts for a single classifier condition."""
    cl = Classifier(condition=Condition(_cond, cfg), cfg=cfg)

    assert cl.get_interval_proportions() == _res
def _init_condition(vals, cfg):
    """Build a Condition from vals; empty input yields a generic condition."""
    return Condition.generic(cfg) if len(vals) == 0 else Condition(vals, cfg)
def test_should_calculate_cover_ratio(self, _condition, _covered_pct, cfg):
    """cover_ratio of a condition equals the expected covered fraction."""
    subject = Condition(_condition, cfg=cfg)

    assert subject.cover_ratio == _covered_pct