def multi_win_during_val(val_fn, inputs, augs, targets, idraws=(20,), jdraws=(20,)):
    """Evaluate ``val_fn`` over multiple deterministic crop windows and return
    the Gaussian-weighted average of the per-window error and output.

    Parameters
    ----------
    val_fn : callable
        Validation function taking ``(inputs, augs, targets)`` and returning
        ``(error, output)``.
    inputs, augs, targets :
        Batch data; ``inputs`` is re-cropped per window via ``data_aug``
        (module-level ``mu`` / ``sigma`` are used for normalization).
    idraws, jdraws : iterable of int, optional
        Window offsets to evaluate. Defaults reproduce the original single
        window at (20, 20).

    Returns
    -------
    tuple
        ``(err, output)`` — weighted averages over all evaluated windows.
    """
    # NOTE(review): the original seeded the accumulators via a fragile
    # "'weight' in locals()" check and used sigma=1.0 for the first window
    # but sigma=0.5 for every later one. With the default single window the
    # weight cancels in the final division, so one consistent sigma
    # preserves observable behavior while fixing the inconsistency.
    sigma_w = 0.5
    weight = 0.0
    err = 0.0
    output = 0.0
    for idraw in idraws:
        for jdraw in jdraws:
            # Deterministic crop at (idraw, jdraw).
            inpt_multiwin = data_aug(inputs, mu, sigma, deterministic=True,
                                     idraw=idraw, jdraw=jdraw)
            err_pat, output_pat = val_fn(inpt_multiwin, augs, targets)
            # Gaussian weight by the window's normalized distance from the
            # canonical offset (100, 100).
            dis = ((idraw / 100.0 - 1.0) ** 2 + (jdraw / 100.0 - 1.0) ** 2) ** 0.5
            wei = np.exp(-np.square(dis) / 2.0 / sigma_w ** 2)
            weight += wei
            err += err_pat * wei
            output += output_pat * wei
    return err / weight, output / weight
def train_round(num_epochs, network, valid_num, train_fn, val_fn, classn,
                X_train, a_train, y_train, X_test, a_test, y_test):
    """Run the training loop for ``network``.

    Each epoch: train over shuffled minibatches, run a full validation pass,
    and print loss / AUC / confusion matrices at thresholds 0.4, 0.5, 0.6.
    Every 5 epochs the network parameters are pickled to
    ``model_dump + "_e{epoch}_cv{valid_num}.pkl"``. The module-level
    ``LearningRate`` shared variable is decayed 10x at fixed epochs.

    Parameters
    ----------
    num_epochs : int
        Number of epochs; the loop runs ``num_epochs + 1`` iterations
        (epoch 0 through num_epochs, preserving original behavior).
    network : Lasagne network whose parameters are checkpointed.
    valid_num : int
        Cross-validation fold index, used in checkpoint filenames.
    train_fn, val_fn : compiled Theano train / validation functions.
    classn : int
        Number of classes, forwarded to ``val_fn_epoch``.
    X_train, a_train, y_train, X_test, a_test, y_test : dataset arrays.
    """
    print("Starting training...")
    print("TrLoss\tVaLoss\tAUC\tCMatrix0\tCMatrix1\tCMatrix2\tEpochs\tTime")
    start_time = time.time()
    # Epochs at which the learning rate is decayed by 10x.
    lr_decay_epochs = (20, 40, 100)
    for epoch in range(num_epochs + 1):
        train_err = 0
        train_batches = 0
        for batch in iterate_minibatches(X_train, a_train, y_train,
                                         BatchSize, shuffle=True):
            inputs, augs, targets = batch
            inputs = data_aug(inputs, mu, sigma)
            train_err += train_fn(inputs, augs, targets)
            train_batches += 1
        train_err = train_err / train_batches
        # Full pass over the validation data (the original guarded this with
        # an always-true "epoch % 1 == 0"; validation runs every epoch).
        test_err, _, _, _, Or, Tr = val_fn_epoch(classn, val_fn,
                                                 X_test, a_test, y_test)
        tpos0, tneg0, fpos0, fneg0 = confusion_matrix(Or, Tr, 0.4)
        tpos1, tneg1, fpos1, fneg1 = confusion_matrix(Or, Tr, 0.5)
        tpos2, tneg2, fpos2, fneg2 = confusion_matrix(Or, Tr, 0.6)
        val_auc = auc_roc(Or, Tr)
        # Report this epoch's results; timer restarts per epoch.
        print(
            "{:.4f}\t{:.4f}\t{:.4f}\t{}/{}/{}/{}\t{}/{}/{}/{}\t{}/{}/{}/{}\t{}/{}\t{:.3f}"
            .format(train_err, test_err, val_auc,
                    tpos0, tneg0, fpos0, fneg0,
                    tpos1, tneg1, fpos1, fneg1,
                    tpos2, tneg2, fpos2, fneg2,
                    epoch + 1, num_epochs,
                    time.time() - start_time))
        start_time = time.time()
        if epoch % 5 == 0:
            # BUG FIX: pickle streams must be written in binary mode ('wb');
            # text mode fails on Python 3 and corrupts the dump on Windows.
            # A context manager also guarantees the handle is closed (the
            # original leaked it).
            param_values = layers.get_all_param_values(network)
            checkpoint = model_dump + "_e{}_cv{}.pkl".format(epoch, valid_num)
            with open(checkpoint, 'wb') as f:
                pickle.dump(param_values, f)
        if epoch in lr_decay_epochs:
            LearningRate.set_value(np.float32(0.10 * LearningRate.get_value()))