# Running averages of train/validation metrics over all ensemble members.
av_train_perf = {"acc": 0, "prec": 0, "rec": 0, "f1": 0}
av_val_perf = {"acc": 0, "prec": 0, "rec": 0, "f1": 0}

# Train `ensemble_size` independently initialised models and persist the
# per-epoch learning curves of each member.
for i in range(args.ensemble_size):
    print("Building model")
    model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers,
                       dense_layer_units, args.weights)
    model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
    print("Model built")
    history = model.fit(
        X=Xs["train"],
        y=ys["train"],
        batch_size=batch_size,
        nb_epoch=epochs,
        verbose=1,
        validation_data=(Xs["val"], ys["val"]),
        shuffle=True,
        show_accuracy=True,
        callbacks=[
            # `epochs` and `lr` are fixed for the whole run, so the
            # late-binding lambda closure is safe here.
            LearningRateScheduler(lambda e: lr_schedule(epochs, lr, e))
        ])

    # Write one value per line. Use context managers so the file handles
    # are closed deterministically — print(file=open(...)) leaks handles.
    with open(os.path.join(save_path, "train_accs{}.txt".format(i)), "w") as fh:
        print("\n".join(map(str, history.history["acc"])), file=fh)
    with open(os.path.join(save_path, "train_losses{}.txt".format(i)), "w") as fh:
        print("\n".join(map(str, history.history["loss"])), file=fh)
    with open(os.path.join(save_path, "val_accs{}.txt".format(i)), "w") as fh:
        print("\n".join(map(str, history.history["val_acc"])), file=fh)
# One run directory per hyper-parameter combination.
save_path = os.path.join(
    base_save_dir,
    "lr{};epochs{};dropout{};dense_layers{};dense_layer_units{};batch_size{}".format(*params))
os.makedirs(save_path)

# Pre-allocated per-member prediction matrices (rows = samples,
# columns = ensemble members).
train_preds = np.zeros((Xs["train"].shape[0], args.ensemble_size))
val_preds = np.zeros((Xs["val"].shape[0], args.ensemble_size))

print("Building model")
model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers,
                   dense_layer_units, args.weights)
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
print("Model built")
history = model.fit(
    X=Xs["train"],
    y=ys["train"],
    batch_size=batch_size,
    nb_epoch=epochs,
    verbose=1,
    validation_data=(Xs["val"], ys["val"]),
    shuffle=True,
    show_accuracy=True,
)

# Drop the final (output) layer and recompile so predict() yields the
# penultimate-layer activations — the old-Keras feature-extraction hack.
# NOTE(review): with newer Keras, popping `model.layers` alone does not
# rewire the graph — confirm this behaves as intended on this version.
model.layers.pop()
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
train_pred = model.predict(X=Xs["train"], batch_size=batch_size, verbose=0)
val_pred = model.predict(X=Xs["val"], batch_size=batch_size, verbose=0)

# Dump one comma-separated feature vector per training sample.
cou = 0
with open(args.last_layer_file, "w") as layer_file:
    for vec in train_pred:
        layer_file.write(",".join(str(v) for v in vec))
        layer_file.write("\n")
        cou += 1
os.makedirs(save_path)

# Pre-allocated per-member prediction matrices (rows = samples,
# columns = ensemble members).
train_preds = np.zeros((Xs["train"].shape[0], args.ensemble_size))
val_preds = np.zeros((Xs["val"].shape[0], args.ensemble_size))

print("Building model")
model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers,
                   dense_layer_units, args.weights)
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
print("Model built")
history = model.fit(
    X=Xs["train"],
    y=ys["train"],
    batch_size=batch_size,
    nb_epoch=epochs,
    verbose=1,
    validation_data=(Xs["val"], ys["val"]),
    shuffle=True,
    show_accuracy=True,
)

# Drop the final (output) layer and recompile so predict() yields the
# penultimate-layer activations — the old-Keras feature-extraction hack.
# NOTE(review): with newer Keras, popping `model.layers` alone does not
# rewire the graph — confirm this behaves as intended on this version.
model.layers.pop()
model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
train_pred = model.predict(X=Xs["train"], batch_size=batch_size, verbose=0)
val_pred = model.predict(X=Xs["val"], batch_size=batch_size, verbose=0)

# Dump one comma-separated feature vector per training sample.
cou = 0
with open(args.last_layer_file, "w") as layer_file:
    for vec in train_pred:
        layer_file.write(",".join(str(v) for v in vec))
os.makedirs(save_path)

# Running averages of train/validation metrics over all ensemble members.
av_train_perf = {"acc": 0, "prec": 0, "rec": 0, "f1": 0}
av_val_perf = {"acc": 0, "prec": 0, "rec": 0, "f1": 0}

# Train `ensemble_size` independently initialised models, persist their
# learning curves, and accumulate averaged training metrics.
for i in range(args.ensemble_size):
    print("Building model")
    model = ShallowNet(Xs["train"].shape[1], dropout, dense_layers,
                       dense_layer_units, args.weights)
    model.compile(optimizer=Adam(lr=lr), loss="binary_crossentropy")
    print("Model built")
    history = model.fit(
        X=Xs["train"],
        y=ys["train"],
        batch_size=batch_size,
        nb_epoch=epochs,
        verbose=1,
        validation_data=(Xs["val"], ys["val"]),
        shuffle=True,
        show_accuracy=True,
        callbacks=[
            # `epochs` and `lr` are fixed for the whole run, so the
            # late-binding lambda closure is safe here.
            LearningRateScheduler(lambda e: lr_schedule(epochs, lr, e))
        ])

    # Persist the four learning curves, one value per line. Context
    # managers close the handles — print(file=open(...)) leaks them.
    curves = (
        ("train_accs", history.history["acc"]),
        ("train_losses", history.history["loss"]),
        ("val_accs", history.history["val_acc"]),
        ("val_losses", history.history["val_loss"]),
    )
    for stem, values in curves:
        with open(os.path.join(save_path, "{}{}.txt".format(stem, i)), "w") as fh:
            print("\n".join(map(str, values)), file=fh)

    # Accumulate this member's contribution to the averaged train metrics.
    train_pred = model.predict_classes(X=Xs["train"], batch_size=batch_size,
                                       verbose=0)
    train_perf = eval_pred(ys["train"], train_pred)
    for met in ["acc", "prec", "rec", "f1"]:
        av_train_perf[met] += float(train_perf[met]) / args.ensemble_size