def add(self, data):
    """Update the pool to account for this test run, maybe discarding
    something else to stay within the size bound."""
    if data.status == Status.INTERESTING:
        return
    if data.status < self.best_status:
        return
    if data.status > self.best_status:
        # A strictly better status invalidates everything collected so far.
        self.best_status = data.status
        self.reset()
    self.fresh_examples.append(data)
    if len(self) > self.pool_size:
        # Prefer discarding an already-used example over a fresh one.
        pop_random(self.random, self.used_examples or self.fresh_examples)
        assert self.pool_size == len(self)
def maybe_discard_one(self):
    if len(self) > self.pool_size:
        if self.used_examples or self.fresh_examples:
            # Prefer discarding an already-used example over a fresh one.
            pop_random(self.random, self.used_examples or self.fresh_examples)
        else:
            # Get an arbitrary label from those with the longest pools,
            # discard the lowest-scored example from that pool, and
            # discard the label if it has no other examples.
            label, pool = max(
                self.scored_examples.items(), key=lambda kv: len(kv[1])
            )
            pool.pop(0)  # pools are kept sorted ascending by score
            if not pool:
                self.scored_examples.pop(label)
        assert len(self) == self.pool_size
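
# None of the state these methods rely on (fresh_examples, used_examples,
# scored_examples, pool_size, best_status, __len__) is defined in this
# excerpt.  A minimal sketch of plausible scaffolding follows: the class
# name, default pool size, and starting status here are assumptions, not
# the real implementation.

from collections import defaultdict


class PoolSketch:  # hypothetical name for the class these methods live on
    def __init__(self, random, pool_size=256):  # default size is assumed
        self.random = random
        self.pool_size = pool_size
        self.best_status = Status.VALID  # assumed starting status
        self.reset()

    def reset(self):
        # Called whenever a strictly better status is seen: every example
        # gathered at the old status is discarded.
        self.fresh_examples = []
        self.used_examples = []
        self.scored_examples = defaultdict(list)  # label -> sorted examples

    def __len__(self):
        # The size bound in add()/maybe_discard_one() counts all three pools.
        return (
            len(self.fresh_examples)
            + len(self.used_examples)
            + sum(len(pool) for pool in self.scored_examples.values())
        )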
def select(self):
    # If we have feedback from targeted PBT, choose a label and then an
    # example from that pool.  Each example is twice as likely as the
    # next-highest-ranked.
    if self.scored_examples:  # pragma: no cover
        # For some reason this clause is often showing up as uncovered,
        # though tests/cover/test_targeting.py definitely executes it :-/
        pool = self.random.choice(list(self.scored_examples.values()))
        stop_at = self.random.random() * self.weights[len(pool) - 1]
        return pool[bisect(self.weights, stop_at)]
    # Otherwise, prefer first-time mutations to previously-mutated examples.
    if self.fresh_examples:
        result = pop_random(self.random, self.fresh_examples)
        self.used_examples.append(result)
        return result
    else:
        return self.random.choice(self.used_examples)
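
# select() above relies on self.weights being a precomputed cumulative table
# whose i-th entry sums the weights 2**0 .. 2**i.  That table's construction
# is not shown in this excerpt, so the standalone sketch below (hypothetical
# names throughout) is an assumption about how it could work, kept only to
# show why index i wins with probability proportional to 2**i.

from bisect import bisect


def weighted_pick_sketch(random, pool):
    # Cumulative weights [1, 3, 7, 15, ...]: entry i is 2 ** (i + 1) - 1.
    weights = [2 ** (i + 1) - 1 for i in range(len(pool))]
    # stop_at is uniform over [0, total); bisect finds the first cumulative
    # entry above it, so index i is picked with probability 2**i / total,
    # i.e. each example is twice as likely as the one ranked just below it.
    stop_at = random.random() * weights[-1]
    return pool[bisect(weights, stop_at)]


# For a four-element pool the pick probabilities are 1/15, 2/15, 4/15, 8/15.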
def selection_order(depth, n):
    """Yield the indices 0 .. n-1 in a uniformly random order."""
    # `random` is not a parameter, so it must be supplied by an enclosing
    # scope; LazySequenceCopy presumably lets us pop elements without
    # materialising all of range(n) up front.
    pending = LazySequenceCopy(range(n))
    while pending:
        yield pop_random(random, pending)
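
# pop_random and LazySequenceCopy are helpers that this excerpt never
# defines.  A sketch of pop_random under the usual swap-and-pop scheme (an
# assumption about the real helper, not a copy of it):


def pop_random_sketch(random, seq):
    """Remove and return a uniformly random element of seq in O(1)."""
    i = random.randrange(0, len(seq))
    # Swap the chosen element to the end so the list.pop() itself is O(1).
    seq[i], seq[-1] = seq[-1], seq[i]
    return seq.pop()


# With such a helper, selection_order(depth, 5) lazily yields a uniformly
# random permutation of range(5), e.g. 3, 0, 4, 1, 2.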