import datetime
import os

import numpy as np
import tensorflow as tf
from sklearn.model_selection import KFold
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

# Project-local modules used by these examples (data, data_g, submission,
# unet, cnn, models.sdf_model) are assumed to be importable.


def train_sub(model,
              model_name,
              train,
              val,
              epochs=100,
              batch_size=8,  # unused: the train/val iterators are expected to be batched already
              verbose=2):
    # Reset the graph and seed TensorFlow for reproducible runs.
    tf.compat.v1.reset_default_graph()
    tf.random.set_seed(42424242)
    tf.compat.v1.set_random_seed(42424242)

    print('\n\n\nMODEL: ' + model_name)
    # Stop when validation loss stops improving; keep only the best checkpoint.
    earlystopper = EarlyStopping(patience=20, verbose=2)
    os.makedirs("checkpoints", exist_ok=True)
    model_path_name = 'checkpoints/ckp_{}.h5'.format(model_name)
    checkpointer = ModelCheckpoint(model_path_name,
                                   verbose=1,
                                   save_best_only=True)

    os.makedirs("tf_logs", exist_ok=True)
    log_dir = "tf_logs/" + model_name + "_" + datetime.datetime.now().strftime(
        "%Y%m%d-%H%M%S")
    tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir,
                                                          histogram_freq=1)

    history = model.fit(
        train,
        validation_data=val,
        epochs=epochs,
        callbacks=[earlystopper, checkpointer, tensorboard_callback],
        verbose=verbose)

    # Restore the best checkpoint before predicting for the submission.
    model.load_weights(model_path_name)

    submission.create(model, model_name)
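The seeding in train_sub covers TensorFlow only; if a data pipeline also shuffles with NumPy or Python's random module, those generators need their own seeds. A minimal sketch, using a helper name of our own choosing (GPU nondeterminism can still cause small run-to-run differences):

import random

def seed_everything(seed=42424242):
    # Hypothetical helper: seed Python, NumPy and TensorFlow together
    # (np and tf as imported above).
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)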
Code Example #2
def main():
    x1, y1 = data.get_training_data()
    x2, y2 = data.get_training_data2()

    # Combine both training sets into one array pair.
    x = np.concatenate((x1, x2), axis=0)
    y = np.concatenate((y1, y2), axis=0)

    from models.sdf_model import get_CNN_SDFt, get_flat_tanh_CNN_SDFt

    # # sdf flat tanh
    # train, val = data_g.get_train_val_iterators(aug=None)

    # model = get_flat_tanh_CNN_SDFt()
    # model_name = 'SDF_cnn_scaled_tanh_EXTDATA_noaug'

    # train_sub(model, model_name, x, y, epochs=100)
    # submission.create(model, model_name)

    # sdf normal tanh
    model = get_CNN_SDFt()
    model_name = 'SDF_cnn_tanh_EXTDATA_noaug'

    # train_sub expects batched (train, val) iterators rather than raw arrays,
    # so shuffle and wrap them in tf.data pipelines here (the 90/10 split and
    # batch size are assumptions).
    perm = np.random.RandomState(42424242).permutation(len(x))
    x, y = x[perm], y[perm]
    split = int(0.9 * len(x))
    train = tf.data.Dataset.from_tensor_slices((x[:split], y[:split])).batch(8)
    val = tf.data.Dataset.from_tensor_slices((x[split:], y[split:])).batch(8)

    train_sub(model, model_name, train, val, epochs=100)
    submission.create(model, model_name)
Code Example #3
def main():
    # Train one U-Net per augmentation level and create a submission for each.
    for aug in [None, 'small', 'medium', 'large']:
        train, val = data_g.get_train_val_iterators(aug=aug)

        model = unet.get_model(None, None, 3, do_compile=False)
        model.compile(optimizer='adam', loss='binary_crossentropy',
                      metrics=['accuracy', f1,
                               tf.keras.metrics.MeanIoU(num_classes=2)])
        model_name = ('unet2_crossentropy_augmentation_'
                      + ('none' if aug is None else aug))
        train_sub(model, model_name, train, val, epochs=100, verbose=1)

        submission.create(model, model_name)
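The f1 metric passed to model.compile above is not defined in these snippets. Below is a plausible Keras-compatible sketch for binary masks with sigmoid outputs; it illustrates one common definition and is not necessarily the project's implementation:

import tensorflow.keras.backend as K

def f1(y_true, y_pred):
    # F1 = 2 * precision * recall / (precision + recall), computed on
    # predictions rounded to {0, 1}.
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())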
Code Example #4
def cross_val(model, model_name, load_training_data=True, x=None, y=None,
              augment_data_func=None, use_class_weight=False, epochs=100,
              batch_size=4, verbose=2):
    print('\n\n\n5-Cross-Validation: ' + model_name)

    if load_training_data:
        x, y = data.get_training_data()

    kf = KFold(n_splits=5, shuffle=True, random_state=42424242)

    reset_weights = model.get_weights()  # for resetting the model weights between folds

    histories = []
    index = 0
    best_losses = []
    for train_index, test_index in kf.split(x):
        print('\nSplit k=' + str(index))
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]

        if augment_data_func is not None:
            x_train, y_train = augment_data_func(x_train, y_train)

        name = model_name + '_crossval-k' + str(index)
        crt_history = fit(model, x_train, y_train, epochs=epochs,
                          validation_data=(x_test, y_test),
                          checkpoint_suffix=name, batch_size=batch_size,
                          verbose=verbose, use_class_weight=use_class_weight)
        histories.append(crt_history)
        
        # Best epoch is chosen by training loss here (not val_loss, which the
        # checkpoint monitors).
        best_epoch = get_min_index(crt_history.history['loss'])
        best_loss = crt_history.history['loss'][best_epoch]
        best_losses.append(best_loss)

        index += 1
        model.set_weights(reset_weights)  # reset the weights for the next fold



    # EVALUATION

    print("\nCROSS-VALIDATION-RESULTS")
    print("model_name: " + model_name)
    #print("optimizer: " + str(model.optimizer))
    #print("loss: " + str(model.loss))
    print("epochs: " + str(epochs) + ", early_stopping_patience = " + str(PATIENCE))


    print('\nMETRICS')
    # Metrics recorded by Keras during training.
    keys = histories[0].history.keys()
    # Sum each metric at its fold's best epoch, then average over the splits.
    average_metrics = {}
    for index, h in enumerate(histories):
        best_index = get_min_index(h.history['loss'])
        current_metrics = {}
        for k in keys:
            if k not in average_metrics:
                average_metrics[k] = h.history[k][best_index]
            else:
                average_metrics[k] += h.history[k][best_index]

            current_metrics[k] = h.history[k][best_index]
        print('k=' + str(index), current_metrics)

    for k in average_metrics:
        average_metrics[k] /= len(histories)


    print("\nAVERAGE-METRICS")
    print(average_metrics)

    # Reload the first split's best checkpoint and create a submission from it.
    model.load_weights("checkpoints/ckp_" + model_name + '_crossval-k0' + ".h5")
    submission.create(model, model_name + '_crossval-k0')
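cross_val references three names these snippets never define: PATIENCE, get_min_index, and the fit helper. The following is a minimal sketch of plausible implementations, modeled on train_sub above; the class-weight scheme in particular is invented for illustration and is not the project's actual code:

PATIENCE = 20  # assumed to match the patience hard-coded in train_sub

def get_min_index(values):
    # Index of the epoch with the lowest loss.
    return int(np.argmin(values))

def fit(model, x, y, epochs, validation_data, checkpoint_suffix,
        batch_size=4, verbose=2, use_class_weight=False):
    callbacks = [
        EarlyStopping(patience=PATIENCE, verbose=2),
        ModelCheckpoint('checkpoints/ckp_{}.h5'.format(checkpoint_suffix),
                        verbose=1, save_best_only=True),
    ]
    class_weight = None
    if use_class_weight:
        # Inverse-frequency weighting for a binary target (an assumption).
        pos = float(np.mean(y))
        class_weight = {0: pos, 1: 1.0 - pos}
    return model.fit(x, y, validation_data=validation_data, epochs=epochs,
                     batch_size=batch_size, class_weight=class_weight,
                     callbacks=callbacks, verbose=verbose)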
Code Example #5
def main():
    # Create a submission from an already trained checkpoint, without retraining.
    model = cnn.get_model(None, None, 3, do_compile=False)
    model.load_weights('checkpoints/ckp_cnn_dice_SPECIALDATA.h5')
    submission.create(model, 'ckp_cnn_dice_SPECIALDATA')