def model(self, p):
    """Build the spiking RAT model driven by a learned association matrix.

    Loads the association matrix, semantic pointers and RAT problems named
    in *p*, wires a ``StimulusModule`` into a ``FfwdConnectionsRat``
    network, and attaches probes for the model output, the three cues,
    spikes, and a Direct-mode ideal response for comparison.

    Parameters
    ----------
    p : parameter object
        Provides ``exp_seed``, ``model_seed``, ``assocmat``, ``sp_file``,
        ``ratfile``, ``d`` (dimensionality) and ``neurons_per_dimension``.

    Returns
    -------
    spa.SPA
        The constructed model (also stored on ``self.model``).
    """
    random.seed(p.exp_seed)

    data_dir = os.path.join(
        os.path.dirname(__file__), os.pardir, os.pardir, 'data')
    sp_path = os.path.join(data_dir, 'associationmatrices')
    assoc, i2w, _ = load_assoc_mat(sp_path, p.assocmat)
    sp_path = os.path.join(data_dir, 'semanticpointers')
    pointers, _, _ = load_pointers(sp_path, p.sp_file)
    rat_path = os.path.join(data_dir, 'rat', p.ratfile)
    self.rat_items = list(filter_valid(load_rat_items(rat_path), i2w))

    # Characters that are invalid in semantic pointer names are mapped to
    # '_' in a single translate pass (replaces five chained .replace calls).
    sanitize = str.maketrans(" +-&'", "_____")

    with spa.SPA(seed=p.model_seed) as model:
        self.model = model

        # set up vocab
        self.vocab = model.get_default_vocab(p.d)
        for i, pointer in enumerate(pointers):
            self.vocab.add(i2w[i].upper().translate(sanitize), pointer)

        # set up model
        self.stimulus = Stimulus(self.rat_items)
        model.stimulus = StimulusModule(
            self.stimulus, self.vocab, p.neurons_per_dimension)
        model.rat_model = FfwdConnectionsRat(
            assoc, self.vocab,
            neurons_per_dimension=p.neurons_per_dimension)
        nengo.Connection(model.stimulus.cue1.output, model.rat_model.cue1)
        nengo.Connection(model.stimulus.cue2.output, model.rat_model.cue2)
        nengo.Connection(model.stimulus.cue3.output, model.rat_model.cue3)

        self.p_output = nengo.Probe(
            model.rat_model.rat_state.output, synapse=0.003)
        self.p_cue1 = nengo.Probe(
            model.stimulus.cue1.output, synapse=0.003)
        self.p_cue2 = nengo.Probe(
            model.stimulus.cue2.output, synapse=0.003)
        self.p_cue3 = nengo.Probe(
            model.stimulus.cue3.output, synapse=0.003)
        self.p_spikes = nengo.Probe(
            model.rat_model.rat_state.state_ensembles.ensembles[0].neurons,
            'spikes')

        # Ideal (noise-free) response: mean association of the three cues,
        # computed exactly in a Direct-mode ensemble for comparison with
        # the spiking output. The transform maps vocab space through the
        # association matrix; /3 averages over the three cue connections.
        tr = np.dot(
            self.vocab.vectors.T, np.dot(assoc.T, self.vocab.vectors)) / 3.
        direct_result = nengo.Ensemble(
            n_neurons=1, dimensions=p.d, neuron_type=nengo.Direct())
        for cue in (model.stimulus.cue1, model.stimulus.cue2,
                    model.stimulus.cue3):
            nengo.Connection(cue.output, direct_result, transform=tr)
        self.p_direct = nengo.Probe(direct_result, synapse=0.003)

    return model
def model(self, p): random.seed(p.exp_seed) data_dir = os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, 'data') sp_path = os.path.join(data_dir, 'semanticpointers') pointers, i2w, _ = load_pointers(sp_path, p.sp_file) rat_path = os.path.join(data_dir, 'rat', 'problems.txt') self.rat_items = list(filter_valid(load_rat_items(rat_path), i2w)) shuffle(self.rat_items) with spa.SPA(seed=p.model_seed) as model: # set up vocab vocab = model.get_default_vocab(p.d) for i, pointer in enumerate(pointers): vocab.add(i2w[i].upper(), pointer) # set up model self.stimulus = Stimulus(self.rat_items) model.stimulus = StimulusModule(self.stimulus, p.d) model.rat_model = FfwdRat(p.d) nengo.Connection(model.stimulus.cue1.output, model.rat_model.cue1) nengo.Connection(model.stimulus.cue2.output, model.rat_model.cue2) nengo.Connection(model.stimulus.cue3.output, model.rat_model.cue3) self.p_output = nengo.Probe(model.rat_model.rat_state.output) return model
def compute_similarity(method, quiet=True): '''For a given method, computes the similarity between all the words and the cues and finds the position of the target in sorted similarity values. Parameters ---------- method : str Method used to create vectors. quiet : bool Print statistics Returns: -------- sim_target : ndarray Similarity between the cues and the target for every RAT problem sim_everything : ndarray Similarity between the cues and all other words in vocabulary for every RAT problem targets : ndarray Position of target for every problem. ''' rat_problems = load_rat_items( os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'data', 'rat', 'problems.txt')) vectors, w2i, words = load_vectors(method) similarities_target = [] similarities_everything = [] target_positions = [] total_problems = len(rat_problems) for problem in rat_problems: if (any(c not in words for c in problem.cues) or problem.target not in words): continue sims = get_similarities(vectors, w2i, problem, method) similarities_everything.append(sims.mean()) similarities_target.append(sims[w2i[problem.target]]) # target position target_pos = lambda s, t: np.where(s.argsort()[::-1] == w2i[t])[0][0] target_positions.append(target_pos(sims, problem.target)) sim_target = np.array(similarities_target, dtype=np.float) sim_everything = np.array(similarities_everything, dtype=np.float) targets = np.array(target_positions, dtype=np.int) if not quiet: print('%d/%d problems exist with the %s vocabulary.' % (len(targets), total_problems, method)) print('Average similarity with the target: %.5f (std=%.3f)' % (sim_target.mean(), sim_target.std())) print('Average similarity with all words: %.5f (std=%.3f)' % (sim_everything.mean(), sim_everything.std())) return sim_target, sim_everything, targets
def compute_similarity(method, quiet=True): '''For a given method, computes the similarity between all the words and the cues and finds the position of the target in sorted similarity values. Parameters ---------- method : str Method used to create vectors. quiet : bool Print statistics Returns: -------- sim_target : ndarray Similarity between the cues and the target for every RAT problem sim_everything : ndarray Similarity between the cues and all other words in vocabulary for every RAT problem targets : ndarray Position of target for every problem. ''' rat_problems = load_rat_items(os.path.join( os.path.dirname(__file__), os.pardir, os.pardir, 'data', 'rat', 'problems.txt')) vectors, w2i, words = load_vectors(method) similarities_target = [] similarities_everything = [] target_positions = [] total_problems = len(rat_problems) for problem in rat_problems: if (any(c not in words for c in problem.cues) or problem.target not in words): continue sims = get_similarities(vectors, w2i, problem, method) similarities_everything.append(sims.mean()) similarities_target.append(sims[w2i[problem.target]]) # target position target_pos = lambda s, t: np.where(s.argsort()[::-1] == w2i[t])[0][0] target_positions.append(target_pos(sims, problem.target)) sim_target = np.array(similarities_target, dtype=np.float) sim_everything = np.array(similarities_everything, dtype=np.float) targets = np.array(target_positions, dtype=np.int) if not quiet: print('%d/%d problems exist with the %s vocabulary.' % (len(targets), total_problems, method)) print('Average similarity with the target: %.5f (std=%.3f)' % (sim_target.mean(), sim_target.std())) print('Average similarity with all words: %.5f (std=%.3f)' % (sim_everything.mean(), sim_everything.std())) return sim_target, sim_everything, targets