# === script 1: four-letter words, 100 frequency-binned samples ===
import random
from copy import deepcopy

import numpy as np
from tqdm import tqdm

# read_elp_format and BinnedSampler are project-local helpers; their
# import lines are not part of this excerpt.

# Tail of the accuracy helper shared by all four scripts; its loop
# head is elided here (a hedged reconstruction is sketched after the
# last script).
            continue
        else:
            score.append(False)

    return np.sum(score) / len(score), score


if __name__ == "__main__":

    header = ["word", "iteration", "rt", "freq", "cycles"]
    results = []
    random.seed(44)

    path = "../../corpora/lexicon_projects/elp-items.csv"

    words = np.array(list(read_elp_format(path, lengths=[4])))
    # Add-one smoothing before taking log frequencies.
    freqs = [x['frequency'] + 1 for x in words]
    freqs = np.log10(freqs)
    sampler = BinnedSampler(words, freqs)
    np.random.seed(44)

    n_cyc = 1000

    for idx in tqdm(range(100)):
        # Deep-copy the sample so downstream processing cannot mutate
        # the underlying lexicon.
        w = deepcopy(sampler.sample(1000))
        rt = np.array([x['rt'] for x in w])
        inputs = ('letters-features',)
        # ... (model setup and the simulation itself continue beyond
        # this excerpt)
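# ---------------------------------------------------------------
# BinnedSampler is not defined in this excerpt. The following is a
# minimal sketch of a stratified sampler that is consistent with how
# it is called above (constructed from items plus their log
# frequencies, then .sample(n)); the equal-width binning and the
# per-bin balancing are assumptions, not the project's verified
# implementation.
import numpy as np


class BinnedSampler:
    """Stratified sampler: items are binned by a value (here log
    frequency) and samples are spread evenly across the bins."""

    def __init__(self, items, values, num_bins=10):
        self.items = np.asarray(items)
        values = np.asarray(values)
        # Equal-width bin edges over the observed value range; each
        # item is assigned the index of the bin it falls into.
        edges = np.linspace(values.min(), values.max(), num_bins + 1)
        self.bin_ids = np.clip(np.digitize(values, edges) - 1,
                               0, num_bins - 1)
        self.num_bins = num_bins

    def sample(self, n):
        """Draw up to n items, balanced over the bins."""
        per_bin = max(1, n // self.num_bins)
        chosen = []
        for b in range(self.num_bins):
            idx = np.flatnonzero(self.bin_ids == b)
            if idx.size == 0:
                continue
            take = np.random.choice(idx, size=min(per_bin, idx.size),
                                    replace=False)
            chosen.extend(take.tolist())
        return [self.items[i] for i in chosen[:n]]
# ---------------------------------------------------------------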
# === script 2: four-letter words, full item set (no sampling) ===
import random

import numpy as np

# read_elp_format and process_data are project-local helpers; their
# import lines are not part of this excerpt.

# Tail of the shared accuracy helper; head elided (see the sketch
# after the last script).
            continue
        else:
            score.append(False)

    return np.sum(score) / len(score), score


if __name__ == "__main__":

    header = ["word", "iteration", "rt", "freq", "cycles"]
    results = []
    random.seed(44)

    path = "../../corpora/lexicon_projects/elp-items.csv"

    # Materialize eagerly: w is iterated more than once below.
    w = list(read_elp_format(path, lengths=[4]))
    np.random.seed(44)

    n_cyc = 1000
    rt = np.array([x['rt'] for x in w])
    inputs = ('letters-features',)

    w = process_data(w,
                     decomposable=('orthography',),
                     decomposable_names=('letters',),
                     feature_layers=('letters',),
                     feature_sets=('fourteen',),
                     negative_features=True,
                     length_adaptation=True)
    # (the simulation itself continues beyond this excerpt)
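# ---------------------------------------------------------------
# feature_sets=('fourteen',) selects a fourteen-segment letter
# feature scheme in the style of Rumelhart and Siple (1974). The toy
# encoder below only illustrates the *shape* of such a
# representation: 14 binary segments per letter, with
# negative_features adding one unit per absent segment. The segment
# values and the helper itself are hypothetical, not the project's
# process_data.
import numpy as np

TOY_FOURTEEN = {
    # Hypothetical segment vectors; the real tables differ.
    'a': np.array([1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1]),
    'b': np.array([1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0]),
}


def featurize(word, table=TOY_FOURTEEN, negative=True):
    """Concatenate per-letter segment vectors; with negative
    features, each letter also activates units for its absent
    segments."""
    vecs = []
    for letter in word:
        v = table[letter]
        vecs.append(np.concatenate([v, 1 - v]) if negative else v)
    return np.concatenate(vecs)


print(featurize('ab').shape)  # 2 letters * (14 + 14) units -> (56,)
# ---------------------------------------------------------------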
# === script 3: words of length 3-10, full item set ===
import numpy as np

# read_elp_format and process_data are project-local helpers; their
# import lines are not part of this excerpt.

# Tail of the shared accuracy helper; head elided (see the sketch
# after the last script).
            score.append(True)
            continue
        else:
            score.append(False)

    return np.sum(score) / len(score), score


if __name__ == "__main__":

    header = ["word", "rt", "freq", "cycles"]
    results = []

    path = "../../corpora/lexicon_projects/elp-items.csv"

    # Materialize eagerly: words is iterated more than once below.
    words = list(read_elp_format(path, lengths=list(range(3, 11))))
    for x in words:
        # Add-one smoothing before taking the log frequency.
        x['frequency'] += 1
        x['log_frequency'] = np.log10(x['frequency'])

    n_cyc = 1000
    rt = np.array([x['rt'] for x in words])

    w = process_data(words,
                     decomposable=('orthography',),
                     decomposable_names=('letters',),
                     feature_layers=('letters',),
                     feature_sets=('fourteen',),
                     negative_features=True,
                     length_adaptation=True)
    # (the simulation itself continues beyond this excerpt)
# === script 4: ablation over length adaptation, negative evidence,
#     and the space character (2**3 = 8 configurations) ===
import random
from itertools import product

import numpy as np

# read_elp_format and BinnedSampler are project-local helpers; their
# import lines are not part of this excerpt.

# Tail of the shared accuracy helper; head elided (see the sketch
# below). NOTE: unlike the other scripts, this variant returns the
# raw count of correct items rather than the proportion.
        score.append(False)

    return np.sum(score), score


if __name__ == "__main__":

    header = ['word', 'iteration', 'rt', 'freq', 'cycles', 'le', 'ne', 'spa']
    results = []
    random.seed(44)
    threshold = .7

    path = "../../corpora/lexicon_projects/elp-items.csv"

    words = np.array(list(read_elp_format(path, lengths=list(range(3, 11)))))
    num_to_sample = len(words) // 4
    # Add-one smoothing before taking log frequencies.
    freqs = [x['frequency'] + 1 for x in words]
    freqs = np.log10(freqs)
    sampler = BinnedSampler(words, freqs)

    # 2**3 boolean configurations, 100 iterations each.
    total = (2 ** 3) * 100
    n_cyc = 350

    for idx, (le, ne, spa) in enumerate(product([True, False], repeat=3)):
        length_adaptation = le
        negative_evidence = ne
        space_character = spa
        # ... (sampling and the simulation itself continue beyond
        # this excerpt)
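# ---------------------------------------------------------------
# Every fragment above is the tail of the same accuracy helper,
# whose head is not part of this excerpt. The reconstruction below
# is a hedged sketch: the winner-take-all comparison against the
# target word and the threshold check are assumptions based on how
# the scripts use threshold and per-word results. Script 4's variant
# would return np.sum(score) (the raw count) instead of the
# proportion.
import numpy as np


def accuracy(words, results, threshold=.7):
    """Return (proportion correct, per-word booleans)."""
    score = []
    for word, result in zip(words, results):
        if not result:
            # The model never activated a candidate for this word.
            score.append(False)
            continue
        keys, values = zip(*result.items())
        winner = keys[int(np.argmax(values))]
        if max(values) >= threshold and winner == word:
            score.append(True)
            continue
        else:
            score.append(False)

    return np.sum(score) / len(score), score
# ---------------------------------------------------------------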