# Grid search over classifier-training hyperparameters.  Every axis currently
# holds a single value, so the "sweep" runs exactly one configuration; widen
# the lists to search more settings.
options = [(opt, cl, dr, gbs, gsf, l2)
           for opt in ['adadelta']
           for cl in ['categorical_crossentropy']
           for dr in [0.001]
           for gbs in [0.001]
           for gsf in [2]
           for l2 in [0.001]]

for (opt, cl, dr, gbs, gsf, l2) in options:
    # Configure the autoencoder's classifier stage for this run.
    ae.cls_opt = opt        # classifier optimizer
    ae.cls_lss = cl         # classifier loss
    ae.drop_rate = dr       # dropout rate
    ae.sigma_base = gbs     # Gaussian-noise base sigma
    ae.sigma_fact = gsf     # Gaussian-noise sigma factor
    # NOTE(review): `l2` only appears in the run name below; it is never
    # assigned onto `ae` in this chunk -- confirm that is intentional.
    name = ("o-" + str(opt) + "-cl-" + str(cl) + "-dr-" + str(dr)
            + "-gbs-" + str(gbs) + "-gsf-" + str(gsf) + "-l2-" + str(l2))
    update_status(ae, "current", "training classif " + name)
    # Fine-tune only the classifier head (encoders/decoders stay frozen),
    # stopping once validation accuracy plateaus for 10 epochs.
    history = ae.finetune(train_encdecs=False,
                          early_stopping={"monitor": "val_acc",
                                          "patience": 10,
                                          "verbose": 1})
    # Final-epoch metrics; assumes each `history` entry is a (loss, acc)
    # pair -- TODO confirm against `ae.finetune`.
    # NOTE(review): `info` is built but never persisted in this chunk
    # (cf. `ae.update_catalog` in the pretrain chunk) -- confirm.
    info = {"history": history,
            "loss_fine": history[-1][0],
            "acc_fine": history[-1][1]}
    # Per-phase evaluation: each key of `previous_data` appears to be a phase
    # name whose samples all share that one label -- verify against caller.
    for key in ae.previous_data:
        print(key)
        arr = np.array(ae.previous_data[key])
        phl = len(phase_names)
        eye = np.identity(phl)
        tc = to_categorical
        # Every row of `arr` carries the same label `key`.  The list form
        # (instead of a lazy `map`) also works on Python 3.
        labels = [phase_names.index(key)] * len(arr)
        eva = ae.model.evaluate(arr, tc(labels, phl), show_accuracy=True)
        # Predicted-class histogram: one-hot-encode each argmax and sum.
        counts = np.sum([eye[np.argmax(p)] for p in ae.model.predict(arr)],
                        axis=0)
        print(eva)
        print(counts)
# NOTE(review): this chunk is a sweep-loop *body* fragment -- `opt`, `dr`,
# `gbs`, `gsf`, `l1`, `l2`, and `name` must come from an enclosing `for`
# header that is not visible here; recover it before running.
# Configure encoder/decoder pretraining for this run.
ae.enc_opt = opt        # encoder/decoder optimizer
ae.drop_rate = dr       # dropout rate
ae.sigma_base = gbs     # Gaussian-noise base sigma
ae.sigma_fact = gsf     # Gaussian-noise sigma factor
ae.l1 = l1              # L1 regularization weight
ae.l2 = l2              # L2 regularization weight
# Rebuild the encoder/decoder stack; enable dropout/noise only when the
# corresponding rate is nonzero.
ae.new_encdecs(use_dropout=(dr != 0), use_noise=(gbs != 0))
print("training encdec " + name)
h = ae.pretrain(name=name)
# Final-epoch accuracy and loss per pretrained layer, keyed by layer index;
# assumes each `h` entry ends with a (loss, acc) pair -- TODO confirm.
info = {"acc_pre_" + str(i): float(e[-1][1]) for (i, e) in enumerate(h)}
info.update({"loss_pre_" + str(i): float(e[-1][0]) for (i, e) in enumerate(h)})
ae.update_catalog("ed_" + name, info)

### FINETUNE ###
# Leave-one-out cross-validation: the currently held-out key's data serves
# as the test set.
cur_left_out = nsm.cross_val_keys[nsm.cross_val_index]
ae.load_encdecs("best")
ae.finetune(name="x-" + cur_left_out + "-val", train_encdecs=True,
            test_data=nsm.data[cur_left_out])
info = {}
# Per-phase evaluation: each key of `previous_data` appears to be a phase
# name whose samples all share that one label -- verify against caller.
for key in ae.previous_data:
    print(key)
    arr = np.array(ae.previous_data[key])
    phl = len(phase_names)
    eye = np.identity(phl)
    tc = to_categorical
    # Every row of `arr` carries the same label `key`.  The list form
    # (instead of a lazy `map`) also works on Python 3.
    labels = [phase_names.index(key)] * len(arr)
    eva = ae.model.evaluate(arr, tc(labels, phl), show_accuracy=True)
    # Predicted-class histogram: one-hot-encode each argmax and sum.
    counts = np.sum([eye[np.argmax(p)] for p in ae.model.predict(arr)],
                    axis=0)
    print(eva)
    print(counts)
# NOTE(review): truncated duplicate of the pretrain/finetune chunk above: it
# starts mid-loop-body (`dr`, `gbs`, `name`, `l2` come from a sweep header
# not visible here) and is cut off mid-expression at the end
# (`np.sum(map(...),` has no closing paren or `axis` argument).  Recover the
# missing text before attempting to run or reformat this line.
ae.l2 = l2 ae.new_encdecs(use_dropout=(dr != 0), use_noise=(gbs != 0)) print("training encdec " + name) h = ae.pretrain(name=name) info = { "acc_pre_" + str(i): float(e[-1][1]) for (i, e) in enumerate(h) } info.update({("loss_pre_" + str(i)): float(e[-1][0]) for (i, e) in enumerate(h)}) ae.update_catalog("ed_" + name, info) ### FINETUNE ### cur_left_out = nsm.cross_val_keys[nsm.cross_val_index] ae.load_encdecs("best") ae.finetune(name="x-" + cur_left_out + "-val", train_encdecs=True, test_data=nsm.data[cur_left_out]) info = {} for key in ae.previous_data: print(key) arr = np.array(ae.previous_data[key]) phl = len(phase_names) eye = np.identity(phl) tc = to_categorical eva = ae.model.evaluate(arr, tc( map(lambda n: phase_names.index(n), list(repeat(key, len(arr)))), phl), show_accuracy=True) counts = np.sum(map(lambda p: eye[np.argmax(p)], ae.model.predict(arr)),