def fig4(name, func, eps):
    """Makes figure 4.

    Args:
        name (str): Descriptive name of the model. Posterior samples,
            statistics, and figures are generated and saved in a
            subdirectory with this name.
        func (:obj:`function`): Function for model construction. Should
            return a formatted copy of the data.
        eps (bool): If True, saves the figures to the manuscript
            subdirectory in .eps format.

    """
    with pm.Model() as m:

        fit_model(name, func)
        trace = pm.load_trace(name)
        params = sorted(
            [p.name for p in m.deterministics if "Lambda" in p.name])

        set_fig_defaults()
        rcParams["figure.figsize"] = (3, 3 * 2)
        fig, axes = plt.subplots(5, 1, constrained_layout=True)

        for p, ax in zip(params, axes):

            vals, bins, _ = ax.hist(
                trace[p], bins=50, density=True, histtype="step",
                color="lightgray")
            ax.set_xlabel(p)

            if ax == axes[0]:
                ax.set_ylabel("Posterior density")

            # Shade the histogram bars that fall within the 95% HPD interval.
            start, stop = pm.stats.hpd(trace[p])
            for n, l, r in zip(vals, bins, bins[1:]):
                if l > start:
                    if r < stop:
                        ax.fill_between([l, r], 0, [n, n], color="lightgray")
                    elif l < stop < r:
                        ax.fill_between(
                            [l, stop], 0, [n, n], color="lightgray")
                elif l < start < r:
                    ax.fill_between([start, r], 0, [n, n], color="lightgray")

            # Overlay a skew-normal approximation of the posterior and the
            # standard normal prior; the two dots mark their densities at
            # zero, whose ratio is the Savage-Dickey Bayes factor reported
            # in table 2.
            x = np.linspace(min([bins[0], 0]), max([0, bins[-1]]))
            theta = skewnorm.fit(trace[p])
            ax.plot(x, skewnorm.pdf(x, *theta), "k", label="Normal approx.")
            ax.plot(x, norm.pdf(x), "k--", label="Prior")
            ax.plot([0, 0], [skewnorm.pdf(0, *theta), norm.pdf(0)], "ko")

        fig.savefig(f"{name}/fig4.png")
        if eps is True:
            fig.savefig("manuscript/fig4.eps")
def table3(name, func, tex):
    """Makes table 3.

    Args:
        name (str): Descriptive name of the model. Posterior samples,
            statistics, and figures are generated and saved in a
            subdirectory with this name.
        func (:obj:`function`): Function for model construction. Should
            return a formatted copy of the data.
        tex (bool): If True, saves the table to the manuscript
            subdirectory.

    """
    with pm.Model():

        data = fit_model(name, func)

        # Per-listener means of the five parameters in each condition,
        # reshaped so that listeners are rows, plus a group-mean row.
        df = data.groupby(
            ["listener", "condition"])[list("abdls")].mean().reset_index()
        df = df.pivot(
            index="listener", columns="condition", values=list("abdls")
        ).reset_index()
        df = df.set_index("listener")
        df.loc["Group mean"] = df.mean(axis=0)
        df = df.applymap(latexify)

        df.to_latex(f"{name}/table3.tex", escape=False)
        if tex is True:
            df.to_latex("manuscript/table3.tex", escape=False)
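# `latexify` is used above and below but not defined in this excerpt. A
# minimal sketch of what it might look like, assuming it rounds a value to
# two decimal places and wraps it in math mode (the name exists in the
# source; the behaviour here is an assumption):
def latexify(x):
    """Format a number for inclusion in a LaTeX table."""
    return "$%.2f$" % x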
def table2(name, func, tex):
    """Makes table 2.

    Args:
        name (str): Descriptive name of the model. Posterior samples,
            statistics, and figures are generated and saved in a
            subdirectory with this name.
        func (:obj:`function`): Function for model construction. Should
            return a formatted copy of the data.
        tex (bool): If True, saves the table to the manuscript
            subdirectory.

    """
    with pm.Model() as m:

        fit_model(name, func)
        trace = pm.load_trace(name)
        params = sorted(
            [p.name for p in m.deterministics if "Lambda" in p.name])
        df = pm.summary(trace, var_names=params)

        table = []
        for p, i in zip(params, interps):

            # Savage-Dickey density ratio: the prior density at zero divided
            # by a skew-normal approximation of the posterior density at
            # zero gives the Bayes factor for a nonzero effect.
            theta = skewnorm.fit(trace[p])
            p0 = norm.pdf(0)
            p1 = skewnorm.pdf(0, *theta)
            bf = p0 / p1

            a, b, c = df.loc[p, ["mean", "hpd_2.5", "hpd_97.5"]]
            dic = {
                "Variable": p,
                "Posterior mean (95% HPD)": "%s (%s, %s)" % (
                    latexify(a), latexify(b), latexify(c)),
                "During roved-frequency trials ...": i,
                "BF": latexify(bf),
                "Evidence": interpret(bf),
            }
            table.append(dic)

        df = pd.DataFrame(table)[list(dic.keys())]
        df.to_latex(f"{name}/table2.tex", escape=False, index=False)
        if tex is True:
            df.to_latex("manuscript/table2.tex", escape=False, index=False)
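# `interpret` and `interps` are likewise defined elsewhere in the source. A
# hedged sketch of `interpret`, assuming it maps a Bayes factor onto
# Jeffreys-style verbal evidence categories (the exact labels and
# thresholds are assumptions):
def interpret(bf):
    """Verbal label for the strength of evidence given a Bayes factor."""
    if bf < 1:
        return "None"
    if bf < 3:
        return "Anecdotal"
    if bf < 10:
        return "Moderate"
    if bf < 30:
        return "Strong"
    if bf < 100:
        return "Very strong"
    return "Extreme"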
def main():

    print("performing all analyses for the 'perceptual anchors' paper")
    started = time()
    details = [
        ("modela", modela, True),
        ("modelb", modelb, False),
    ]

    if not exists("manuscript"):
        makedirs("manuscript")

    for name, func, eps in details:

        print("fitting or loading model ... ", end="")
        data = fit_model(name, func)
        print(f"done in {time() - started:.2f} s")

        print("creating figs 1 and 2 ... ", end="")
        fig12(data, name, eps)
        print(f"done in {time() - started:.2f} s")

        print("creating fig 3 ... ", end="")
        fig3(data, name, eps)
        print(f"done in {time() - started:.2f} s")

        print("creating fig 4 ... ", end="")
        fig4(name, func, eps)
        print(f"done in {time() - started:.2f} s")

        print("creating table 2 ... ", end="")
        table2(name, func, eps)
        print(f"done in {time() - started:.2f} s")

        print("creating table 3 ... ", end="")
        table3(name, func, eps)
        print(f"done in {time() - started:.2f} s")
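# Assumed entry point; the original excerpt does not show how `main` is
# invoked, so this guard is an assumption:
if __name__ == "__main__":
    main()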
raw_x, raw_y = load_training_data('data/raw_maps.npz')
x_train, x_test, y_train, y_test, _, _ = train_test_split(raw_x, raw_y, raw_y)

denses = [5, 20, 50, 100]
neofs = range(1, 30)
epochs = 1000

# Dense networks of varying width, trained on the raw maps.
for hidden_layer_neurons in denses:
    model = dense_model(
        raw_x, raw_y, hidden_layer_neurons=hidden_layer_neurons,
        name='dense_%i_trained_on_raw_maps' % hidden_layer_neurons,
        optimizer=optimizers.Adam(learning_rate=0.004))
    fit_model(x_train, x_test, y_train, y_test, model, epochs=epochs)

# Convolutional network trained on the raw maps.
model = conv_model(raw_x, raw_y, name='conv_trained_on_raw_maps',
                   optimizer=optimizers.Adam(learning_rate=0.004))
fit_model(x_train, x_test, y_train, y_test, model, epochs=epochs)

# The same dense architectures, trained on EOF-reconstructed maps with an
# increasing number of retained EOFs.
for n in neofs:
    x, y, pcs, eofs = load_reconstructed_training_data(
        'data/reconstructed_maps(neofs=%i).npz' % n)
    x_train, x_test, y_train, y_test, real_train, real_test = train_test_split(
        x, y, raw_y)
    for hidden_layer_neurons in denses:
        # The source is truncated mid-call here; the call is completed
        # following the raw-maps pattern above (the name and optimizer
        # arguments are assumptions).
        model = dense_model(
            x, y, hidden_layer_neurons=hidden_layer_neurons,
            name='dense_%i_trained_on_reconstructed_maps(neofs=%i)' % (
                hidden_layer_neurons, n),
            optimizer=optimizers.Adam(learning_rate=0.004))
        fit_model(x_train, x_test, y_train, y_test, model, epochs=epochs)
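# The three-argument `train_test_split` above is a project-specific wrapper,
# not sklearn's two-array version. A minimal sketch, assuming it splits the
# inputs, targets, and raw ("real") targets with one shared shuffle (the
# signature, split fraction, and seed are assumptions):
from sklearn import model_selection

def train_test_split(x, y, real_y, test_size=0.2, seed=0):
    """Split inputs, targets, and raw targets into consistent train/test sets."""
    return model_selection.train_test_split(
        x, y, real_y, test_size=test_size, random_state=seed)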
hyperpars = {
    'drop_rate': 0.4,
    'learning_rate': 0.0001,
    'dense_size': 64,
    'conv_filters': [16, 32],
    'batch_size': 512,
    'epochs': 25,
}

# Train the "colour" classification head.
pos = 0
cnn = models.CNN(im.shape, 3, hyperpars, name="colour")
cnn.build_layers()
models.fit_model(
    cnn, [img_train, class_train[:, pos], img_test, class_test[:, pos]])
del cnn

# The other two heads, kept here for reference:
# hyperpars['dense_size'] = 128
# pos = 1
# cnn = models.CNN(im.shape, 3, hyperpars, name="count")
# cnn.build_layers()
# models.fit_model(
#     cnn, [img_train, class_train[:, pos], img_test, class_test[:, pos]])
# del cnn

# hyperpars['dense_size'] = 512
# pos = 2
# cnn = models.CNN(im.shape, 3, hyperpars, name="fill")
if _SAVE_BLENDED_:
    train_to_save = train_data.copy(deep=True)
    train_to_save["label"] = train_labels
    train_to_save.to_csv("blended_train_data.csv")
    holdout_to_save = holdout.copy(deep=True)
    holdout_to_save["label"] = holdout_labels
    holdout_to_save.to_csv("blended_holdout_data.csv")
    test_data.to_csv("blended_test_data.csv")

## final steps
# reinstantiate
model = md._ESTIMATORS_META_[_MAIN_ESTIMATOR_]()
err = md.fit_model(model, train_data, train_labels)
print("###############################################")
print("MODEL:", model)
print("Training error rate:", err)
print("###############################################")

if _BLENDING_ or _HOLDOUT_:
    holdout_preds = model.predict(holdout)
    holdout_err = 1 - md.evaluate(holdout_preds, holdout_labels.ravel())
    print("###############################################")
    print("Holdout error rate:", holdout_err)
    print("###############################################")

    ## now re-instantiate and train on concatenated holdout + train
    train_data = pd.concat([train_data, holdout], axis=0)
    train_labels = np.concatenate([train_labels, holdout_labels], axis=0)
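# `md.evaluate` is not shown in this excerpt. A minimal sketch, assuming it
# returns the proportion of correct predictions (so `1 - md.evaluate(...)`
# above is an error rate); the name exists in the source, the signature and
# behaviour here are assumptions:
import numpy as np

def evaluate(preds, labels):
    """Fraction of predictions that match the true labels."""
    return np.mean(np.asarray(preds) == np.asarray(labels))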
# Split test data into input (X) and output (Y) variables.
X_test = test[:, 1:3197]
y_test = test[:, 0]

# Normalize train and test features.
X_train, X_test = normalize_data(X_train, X_test)

# Create model.
model = build_model(gpus, units, dropout)

# Compile model.
model = compile_model(model, lr_rate)

# Fit model.
model = fit_model(model, loss_patience, X_train, y_train, X_test, y_test)

# Evaluate training data on the model.
validate_data("Train", X_train, y_train)

# Evaluate test data on the model.
validate_data("Test", X_test, y_test)

# Predict our test dataset.
predictions = model.predict(X_test)

# Output our test dataset for visualization.
print_predictions(predictions, print_results)

# Print script execution time.
print("\nExecution time: %s %s\n" % (time() - startTime, "seconds"))
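# `normalize_data` is not defined in this excerpt. A plausible sketch,
# assuming it standardises features using statistics from the training set
# only (the exact scaling used in the source is an assumption):
from sklearn.preprocessing import StandardScaler

def normalize_data(X_train, X_test):
    """Fit a scaler on the training data and apply it to both sets."""
    scaler = StandardScaler()
    return scaler.fit_transform(X_train), scaler.transform(X_test)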