def choose_actions(self, actions, k=None):
    '''Randomly chooses up to k Actions, weighted by .urgency.
    Returns a collection. k defaults to self.max_actions.'''
    k = self.max_actions if k is None else k
    urgencies = [self.urgency(a) for a in actions]
    return list(sample_without_replacement(actions, k=k, weights=urgencies))
def choose_active_nodes(self, active_nodes, k=None):
    '''Randomly chooses up to k Actions, weighted by activation.
    Returns a collection. k defaults to self.max_active_nodes.'''
    k = self.max_active_nodes if k is None else k
    weights = [self.activation(node) for node in active_nodes]
    return list(sample_without_replacement(active_nodes, k=k, weights=weights))
def run(  # type: ignore[override]
    self,
    fm: FARGModel,
    noderef: NodeRef,
) -> ProgramResult:
    '''Returns a Paint command that stores two values, sampled at random
    from the avails at noderef, as the 'operands' content for noderef.'''
    chosen = sample_without_replacement(fm.avails_at(noderef), k=2)
    return Paint(
        noderef=noderef,
        content=dict(operands=as_tuple(chosen))
    )
def __call__(self, full_image: CanvasAble) -> CanvasAble:
    '''Returns full_image as a tuple with all but self.npartial randomly
    chosen cells blanked to None. npartial=None means return the image
    unchanged; a negative npartial counts back from the image's length.'''
    full_image: ValueTup = as_tuple(full_image)
    if self.npartial is None:
        return full_image
    length = len(full_image)
    # A negative npartial means "all but that many" cells survive.
    keep = self.npartial if self.npartial >= 0 else length + self.npartial
    if keep <= 0:
        return (None,) * length
    if keep >= length:
        return full_image
    kept = set(sample_without_replacement(range(length), k=keep))
    return tuple(v if i in kept else None for i, v in enumerate(full_image))
def run(self, vv: Optional[int] = None) -> FidelityTestResult:
    '''Runs the fidelity test: builds the test setup, samples up to
    self.nsamples initial canvases, regenerates each one from cues
    self.n_per_sample times, and counts how often the regenerated tail
    exactly matches the original canvas.

    vv is the verbosity level (defaults to self.vv):
    1 prints a header and one '+'/'.' per test; 2 also prints each
    canvas; 3 also prints each cue and regeneration result.'''
    vv: int = self.vv if vv is None else vv
    seed = reseed(self.seed)  # returned so the run is reproducible
    num_tests = 0  # number of tests actually run
    # Maps each canvas to how many times it was regenerated exactly.
    results: Dict[Tuple[BaseValue, ...], int] = defaultdict(int)
    rmem, initial_canvases_f, initial_canvases, cue_maker = \
        self.make_setup()
    if vv >= 1:
        print()
        print(
            f'{short(rmem):40} niters={rmem.niters} {short(initial_canvases_f)} {short(cue_maker)}'
        )
    #initial_canvases = set(initial_canvases)
    num_initial_canvases = len(initial_canvases)

    # Run the tests
    start_time = perf_counter()
    for canvas in sample_without_replacement(initial_canvases, k=self.nsamples):
        if vv >= 2:
            lo(canvas)
        for _ in range(self.n_per_sample):
            num_tests += 1
            cue = cue_maker(canvas)
            if vv >= 3:
                lo(' CUE', cue)
            got = as_tuple(self.run1(cue, rmem, vv=vv))
            if vv >= 3:
                lo(' GOT', got)
            # Success = the last len(canvas) cells reproduce the canvas.
            yes = got[-len(canvas):] == canvas
            if yes:
                results[canvas] += 1
            if vv == 1:
                print('+' if yes else '.', end='', flush=True)
    duration = perf_counter() - start_time
    if vv == 1:
        print(flush=True)
    return FidelityTestResult(
        tspec=self,
        rmem=rmem,
        initial_canvases_f=initial_canvases_f,
        cue_maker=cue_maker,
        results=results,  # type: ignore[arg-type]
        duration=duration,
        num_tests=num_tests,
        num_initial_canvases=num_initial_canvases,
        seed=seed)
def pulse_slipnet(
    self,
    activations_in: Dict[Hashable, float],
    type: Union[Type, None]=None,
    k: int=20,
    num_get: int=1,  # number of slipnodes to return
    filter: Union[Callable, None]=lambda x: True
) -> List[Hashable]:
    '''Queries the slipnet with activations_in and randomly chooses
    num_get of the resulting nodes, weighted by their activations.
    type, k, and filter are passed through to the slipnet query.'''
    query_result = self.slipnet.query(
        activations_in=activations_in, type=type, k=k, filter=filter
    )
    nodes = [nas.node for nas in query_result]
    activations = [nas.a for nas in query_result]
    return list(sample_without_replacement(
        nodes, k=num_get, weights=activations
    ))
def search_ws(
    self,
    pred: FMPred,
    min_a: Union[float, None]=None,
    max_n: int=1
) -> Iterable[Elem]:
    '''Returns generator of up to max_n nodes that match pred, chosen
    randomly, weighted by activation. If min_a is given, nodes whose
    activation is below it are excluded.'''
    if min_a is None:
        candidates = list(self.elems(pred))
    else:
        candidates = [e for e in self.elems(pred) if self.a(e) >= min_a]
    weights = [self.a(c) for c in candidates]
    yield from sample_without_replacement(candidates, weights=weights, k=max_n)
def eqn_test(  # TODO rename eqns_test
    show: bool = False,
    n_per_eqn: int = 3,
    n_eqns: int = 20,
    niters: int = 50,
    seed: int = None,  # NOTE(review): annotation should be Optional[int]
    operands=range(1, 11),
    operators=('+', '-', 'x', '/'),
    rm: Union[RMem, Type[RMem]] = RMemAbs,
    npartial: int = 3,
) -> EqnCounter:
    '''Absorbs a full table of equations into an RMem, then tests
    regeneration: for n_eqns randomly chosen equations, blanks all but
    npartial cells and regenerates niters times, n_per_eqn attempts each.
    Returns a counter of how many attempts per equation succeeded.
    rm may be an RMem class (a fresh instance is made) or an instance.
    show=True prints each cue/result; otherwise one '.' per equation.'''
    reseed(seed)
    full_table = tuple(make_eqns(operands=operands, operators=operators))
    l = len(full_table[0])  # NOTE(review): appears unused in this function
    rmem: RMem
    if isclass(rm):
        #rmem = rm.make_from(full_table)
        rmem = rm().absorb_canvases(full_table)
    else:
        rmem = rm  # type: ignore[assignment]
        rmem.absorb_canvases(full_table)
    counter: EqnCounter = defaultdict(int)
    for eqn in sample_without_replacement(full_table, k=n_eqns):
        if show:
            print(eqn)
        for i in range(n_per_eqn):
            startc = partial_eqn(eqn, k=npartial)
            if show:
                lo('CUE', startc)
            got = rmem.regenerate(canvas=startc, niters=niters).as_tuple()
            if show:
                lo('GOT', got)
            # Success = the regenerated tail reproduces the equation.
            if got[-len(eqn):] == eqn:
                counter[eqn] += 1
        if show:
            print()
        else:
            print('.', end='')
    if not show:
        print()
    return counter
def detect_three_tens(avails: Collection[int]) -> Sequence[int]:
    '''Returns three randomly chosen values from avails in the range
    10..19, or an empty tuple if fewer than three such values exist.'''
    teens = [a for a in avails if 10 <= a <= 19]
    if len(teens) >= 3:
        return list(sample_without_replacement(teens, k=3))
    return ()
def blank_random(self, num=4) -> None:
    '''Blanks num randomly chosen 2x2 regions of the canvas.

    Bug fix: the sampling call previously hard-coded k=4, silently
    ignoring the num parameter; it now honors num.'''
    coords = list(self.all_2x2_addrs())
    for a in sample_without_replacement(coords, k=num):
        self.blank_addr(a)
def do_timestep(self, num=1):
    '''Runs num timesteps. Each timestep: decays saliences, propagates
    support, collects Responses from all watchers, filters them by
    salience threshold, and runs up to 10 of them chosen randomly,
    weighted by salience. Sets the model "done" with
    TooManyTimestepsWithNoResponse after 60 consecutive timesteps in
    which no Response cleared its threshold.'''
    for local_t in range(num):
        self.graph['t'] += 1
        print('t=%s' % self.graph['t'])  #TODO Set a global flag for this
        self.decay_saliences()
        for i in range(1):
            #support.propagate(self, max_total_support=300)
            self.propagate_support()
        support.log_support(self)
        # responses = list(chain.from_iterable(
        #     self.datum(watcher).look(self, watcher)
        #     for watcher in self.watchers()
        # ))
        # Gather candidate Responses from every watcher.
        responses = []
        for watcher in self.watchers():
            for response in self.datum(watcher).look(self, watcher):
                if response is not None:
                    #print('RESP', response)
                    #HACK: Overriding the Response object's salience
                    if isinstance(response, ConsumeOperands):
                        response.salience = max(self.support_for(watcher),
                            response.salience)
                    responses.append(response)
        if ShowResponseList.is_logging():
            print('Responses generated:')
            for response in sorted(responses, key=attrgetter('salience')):
                print(' %.3f (%.3f) %s' % (response.salience,
                    response.action_threshold, response.gstr(self)))
        # Only Responses that clear their own threshold may act.
        responses = [
            r for r in responses if r.salience >= r.action_threshold
        ]
        if len(responses) == 0:
            #TODO Better criterion for backtracking
            #responses = [Backtrack()]
            self.consecutive_timesteps_with_no_response += 1
            if self.consecutive_timesteps_with_no_response >= 60:
                self.set_done(
                    TooManyTimestepsWithNoResponse(
                        self.consecutive_timesteps_with_no_response))
                if ShowResponseResults.is_logging():
                    print(self.done())
        else:
            self.consecutive_timesteps_with_no_response = 0
        #for response in responses:
        #response = choice(responses)
        # response = choices(
        #     responses, weights=[r.salience for r in responses], k=1
        # )[0]
        # TODO global parameter: k (number of responses to do each timestep)
        for response in sample_without_replacement(
                responses, k=10, weights=[r.salience for r in responses]):
            #print('RESPONSE', response)
            response.go(self)
            if ShowResponseResults.is_logging():
                ann = response.annotation(self)
                #print('IS', ann.__class__, isinstance(ann, FargDone))
                #if not isinstance(ann, FargDone) or not self['running']:
                print(' ', ann)
            # A Decision ends the timestep early: no further Responses run.
            if isinstance(response, Decision):
                break
        self.do_touches()
        self.update_all_support()
def choose(self, k=1):
    '''Returns a generator of k nodes chosen randomly, weighted by
    salience.'''
    return sample_without_replacement(
        self.nodes, k=k, weights=self.weights
    )
def choose_agent_by_activation(self, pred: Callable):
    # TODO OAOO .search_ws
    '''Randomly chooses one agent matching pred, weighted by the square
    of each agent's activation.'''
    candidates = list(self.ws_query(pred))
    # GLOBAL constant in next line (the exponent 2.0)
    weights = [self.a(c) ** 2.0 for c in candidates]
    return first(sample_without_replacement(candidates, weights=weights))
def partial_eqn(eqn: Tuple[BaseValue, ...], k: int = 3) -> Tuple[BaseValue, ...]:
    '''Returns a copy of eqn in which only k randomly chosen cells are
    kept; every other cell is replaced by None.'''
    indices = range(len(eqn))
    kept = set(sample_without_replacement(indices, k=k))
    return tuple(v if i in kept else None for i, v in enumerate(eqn))
def choose_most_similar(avails: Sequence[Value], target: Value) \
        -> Sequence[Value]:
    '''Returns avails sampled without replacement, weighted by each
    value's similarity to target, as a list.'''
    similarities = {a: similarity_to(target, a) for a in avails}
    return list(sample_without_replacement(
        similarities.keys(), weights=similarities.values()
    ))