def augment_data_ext(x, y):
    # wrapper around data.augment_data_extended using the module-level augmentation settings
    return data.augment_data_extended(x, y,
                                      saturation=aug_sat,
                                      use_grayscale=aug_gs,
                                      blur_amount=aug_blur,
                                      num_random_rotations=aug_rr)
def cross_val(model, model_name, epochs=100, batch_size=8, verbose=2):
    x, y = data.get_training_data()
    kf = KFold(n_splits=5, shuffle=True, random_state=random_state)
    histories = []
    best_losses = []
    index = 0

    for train_index, test_index in kf.split(x):
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]

        print('augment data')
        x_train, y_train = data.augment_data_extended(x_train, y_train, num_random_rotations=3)

        current_name = model_name + '_crossval-k' + str(index)
        crt_history = fit(model, x_train, y_train,
                          epochs=epochs,
                          validation_data=(x_test, y_test),
                          checkpoint_suffix=current_name,
                          batch_size=batch_size,
                          verbose=verbose)
        histories.append(crt_history)

        # track the best (lowest) training loss of this fold
        best_epoch = get_min_index(crt_history.history['loss'])
        best_losses.append(crt_history.history['loss'][best_epoch])

        # reload the best checkpoint of this fold (saved by fit() as cps/ckp_<suffix>.h5);
        # the model is not re-initialized, so the next fold continues from these weights
        model.load_weights("cps/ckp_" + current_name + ".h5")
        index += 1

    # get used metrics
    keys = histories[0].history.keys()

    # average each metric (taken at the best-loss epoch) over all data splits
    average = {}
    for h in histories:
        best_index = get_min_index(h.history['loss'])
        for k in keys:
            if k not in average:
                average[k] = h.history[k][best_index]
            else:
                average[k] += h.history[k][best_index]
    for k in average:
        average[k] /= len(histories)

    print("\nCross-Validation")
    print("model_name: " + model_name)
    print("optimizer: " + str(model.optimizer))
    print("loss: " + str(model.loss))
    print("epochs: " + str(epochs) + ", early_stopping_patience = 9")
    print("cross_val_seed: " + str(random_state))
    print("AVERAGE-METRICS")
    print(average)

    # reload best model weights
    #best_model_index = get_min_index(best_losses)
    #model.load_weights("cps/ckp_" + model_name + '_crossval-k' + str(best_model_index) + ".h5")
    #print("best model: cps/ckp_" + model_name + '_crossval-k' + str(best_model_index) + ".h5")

    create_sub(model, model_name)
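# Hypothetical usage sketch: build_model() and the model name below are placeholders,
# not part of this script; they only illustrate how cross_val() is meant to be called
# with a compiled Keras model once data, fit() and create_sub() are available.
#
#   model = build_model()  # placeholder for this project's model constructor
#   cross_val(model, model_name='baseline_cnn', epochs=100, batch_size=8, verbose=2)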