def apply_alp(population: ClassifierList,
              match_set: ClassifierList,
              action_set: ClassifierList,
              p0: Perception,
              action: int,
              p1: Perception,
              time: int,
              theta_exp: int,
              cfg: Configuration) -> None:
    """Apply the Anticipatory Learning Process (ALP) to the action set.

    For every classifier in ``action_set``: bump its experience and ALP
    timestamp, then run the expected-case routine when it anticipated the
    ``p0`` -> ``p1`` transition correctly, or the unexpected-case routine
    otherwise. Inadequate classifiers are removed from every provided list.
    Newly generated classifiers are collected and merged back into
    ``action_set``, ``population`` and (when matching ``p1``) ``match_set``.
    If no classifier anticipated correctly, a covering classifier is created.

    :param population: whole classifier population (mutated in place)
    :param match_set: current match set, may be None (mutated in place)
    :param action_set: classifiers that proposed ``action`` (mutated in place)
    :param p0: perception before executing the action
    :param action: executed action id (used only for covering)
    :param p1: perception after executing the action
    :param time: current trial time step
    :param theta_exp: experience threshold passed to ``alp.add_classifier``
    :param cfg: algorithm configuration (used only for covering)
    """
    new_list = ClassifierList()
    new_cl: Optional[Classifier] = None
    was_expected_case = False

    for cl in action_set:
        cl.increase_experience()
        cl.set_alp_timestamp(time)

        if cl.does_anticipate_correctly(p0, p1):
            new_cl = alp_racs.expected_case(cl, p0, time)
            was_expected_case = True
        else:
            new_cl = alp_racs.unexpected_case(cl, p0, p1, time)

            if cl.is_inadequate():
                # NOTE(review): this removes `cl` from `action_set` while
                # iterating over it — presumably ClassifierList.safe_remove
                # tolerates that; confirm against its implementation.
                lists = [x for x in [population, match_set, action_set] if x]
                for lst in lists:
                    lst.safe_remove(cl)

        if new_cl is not None:
            new_cl.tga = time
            alp.add_classifier(new_cl, action_set, new_list, theta_exp)

    # No classifier anticipated correctly - generate new one through covering
    if not was_expected_case:
        new_cl = alp_racs.cover(p0, action, p1, time, cfg)
        alp.add_classifier(new_cl, action_set, new_list, theta_exp)

    # Merge freshly created classifiers into the action set and population
    action_set.extend(new_list)
    population.extend(new_list)

    if match_set is not None:
        new_matching = [cl for cl in new_list
                        if cl.condition.does_match(p1)]
        match_set.extend(new_matching)
def test_should_handle_unexpected_case_2(self, cfg):
    """Unexpected case with a non-specializable effect yields no child."""
    # given: identical perceptions and an effect that cannot be specialized
    before = Perception([.5, .5], oktypes=(float, ))
    after = Perception([.5, .5], oktypes=(float, ))
    effect = Effect([UBR(0, 15), UBR(2, 4)], cfg=cfg)
    initial_q = random.random()
    timestamp = random.randint(0, 1000)
    parent = Classifier(effect=effect, quality=initial_q, cfg=cfg)

    # when
    offspring = unexpected_case(parent, before, after, timestamp)

    # then: parent is punished and marked, but no child can be produced
    # from a non-specializable parent
    assert parent.q < initial_q
    assert parent.is_marked() is True
    assert offspring is None
def test_should_handle_unexpected_case_3(self, cfg):
    """Unexpected case with one specializable effect attribute produces a child."""
    # given: second perception attribute changes; second effect attribute
    # is specializable
    before = Perception([.5, .5], oktypes=(float, ))
    after = Perception([.5, .8], oktypes=(float, ))
    effect = Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
    initial_q = 0.4
    timestamp = random.randint(0, 1000)
    parent = Classifier(effect=effect, quality=initial_q, cfg=cfg)

    # when
    offspring = unexpected_case(parent, before, after, timestamp)

    # then: parent is punished and marked
    assert parent.q < initial_q
    assert parent.is_marked() is True

    # and: a fresh, unmarked child with default quality is generated
    assert offspring is not None
    assert offspring.is_marked() is False
    assert offspring.q == .5
    assert offspring.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    assert offspring.effect == Effect([UBR(0, 15), UBR(10, 14)], cfg=cfg)
def test_should_handle_unexpected_case_1(self, cfg):
    """Unexpected case with an all-pass-through effect produces an identical child."""
    # given: perception does not change; effect is fully pass-through,
    # so it can be specialized
    before = Perception([.5, .5], oktypes=(float, ))
    after = Perception([.5, .5], oktypes=(float, ))
    effect = Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    initial_q = .4
    timestamp = random.randint(0, 1000)
    parent = Classifier(effect=effect, quality=initial_q, cfg=cfg)

    # when
    offspring = unexpected_case(parent, before, after, timestamp)

    # then: parent is punished and marked
    assert parent.q < initial_q
    assert parent.is_marked() is True

    # and: a child with default quality and the current ALP timestamp exists
    assert offspring
    assert offspring.q == .5
    assert offspring.talp == timestamp

    # Perception did not change, so the child's condition and effect
    # stay the same as the parent's
    assert offspring.condition == Condition([UBR(0, 15), UBR(0, 15)], cfg=cfg)
    assert offspring.effect == Effect([UBR(0, 15), UBR(0, 15)], cfg=cfg)