# Imports assumed by this section (process_data, Builder and IA_WEIGHTS are
# provided by the surrounding package):
from copy import deepcopy
from itertools import chain
import numpy as np

# Decompose orthography into letter features and build the interactive
# activation model with built-in weight adaptation enabled.
w = process_data(w,
                 decomposable=('orthography',),
                 decomposable_names=('letters',),
                 feature_layers=('letters',),
                 feature_sets=('fourteen',),
                 negative_features=True,
                 length_adaptation=False)

rla = {k: 'global' for k in {'letters-features', 'letters'}}
rla['orthography'] = 'frequency'

s = Builder(IA_WEIGHTS,
            rla,
            -.05,
            outputs=('orthography',),
            monitors=('orthography',),
            step_size=.5,
            weight_adaptation=True)
m = s.build_model(w)

result = m.activate(w, max_cycles=n_cyc, threshold=.7, strict=False)

# Cycles each item took to cross the threshold; items that used all n_cyc
# cycles never converged and are marked with -1.
cycles = np.array([len(x['orthography']) for x in result])
right = cycles == n_cyc
cycles[right] = -1

for word, c in zip(w, cycles):
    results.append([word['orthography'][0],
                    c])  # assumed final field: the cycle count (original line truncated)
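# Illustrative sketch (not in the original script): with non-converged items
# marked -1 above, `cycles` directly yields a convergence rate and a mean
# settling time for the converged items.
converged = cycles != -1
print(f"converged: {converged.mean():.1%} of items")
if converged.any():
    print(f"mean cycles to threshold: {cycles[converged].mean():.2f}")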
weights = deepcopy(IA_WEIGHTS)

# Manually adapt the weights to words of length 4: scale the excitatory
# weight down and the inhibitory weight up by the word length.
if not length_adaptation:
    weights[("letters", "orthography")][0] /= 4
    weights[("letters", "orthography")][1] *= 4
    weights[("orthography", "letters")][0] /= 4
    weights[("orthography", "letters")][1] *= 4

names = set(chain.from_iterable(weights))
rla = {k: 'global' for k in names}
rla['orthography'] = 'frequency'

s = Builder(weights,
            rla,
            -.05,
            outputs=('orthography',),
            monitors=('orthography',),
            step_size=.5,
            weight_adaptation=length_adaptation)
m = s.build_model(w)

result = m.activate(w, max_cycles=n_cyc, threshold=.7, strict=False)

# As above: mark items that never reached threshold with -1.
cycles = np.array([len(x['orthography']) for x in result])
right = cycles == n_cyc
cycles[right] = -1

for x, word, c in zip(result, w, cycles):
    results.append([word['orthography'],
                    (idx * 100) + idx_2,
                    word['rt'],
                    c])  # assumed final field: the cycle count (original line truncated)
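# Hedged sketch: the block above hard-codes a word length of 4. A small helper
# generalizing it to any length n (illustrative only; adapt_weights_for_length
# is not part of the package API) could look like this:
def adapt_weights_for_length(base_weights, n):
    """Return a copy of the weights with excitatory terms scaled down by n
    and inhibitory terms scaled up by n for the letters<->orthography pairs."""
    adapted = deepcopy(base_weights)
    for pair in (("letters", "orthography"), ("orthography", "letters")):
        adapted[pair][0] /= n   # excitatory weight shrinks with length
        adapted[pair][1] *= n   # inhibitory weight grows with length
    return adapted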