Example #1
0
def set_up_train(path_model_id='', config_names=None, bindings=None):
    """
    Set up the complete environment (gin config, run folders, loggers,
    input pipelines, model) and start meta-training.

    :param path_model_id: path of an existing model to continue from
        (empty string starts a fresh run)
    :param config_names: name(s) of gin config file(s); defaults to
        ['config.gin']
    :param bindings: gin binding strings that overwrite single parameters
        defined in the config file(s),
        e.g. ['train_and_eval.n_epochs = 3', 'train_and_eval.save_period = 1']
    :return: the trainer's result (last validation metric)
    """
    # Avoid mutable default arguments: module-level list objects would be
    # shared across calls and could leak state if ever mutated.
    if config_names is None:
        config_names = ['config.gin']
    if bindings is None:
        bindings = []

    # inject config
    utils_params.inject_gin(
        config_names, path_model_id=path_model_id, bindings=bindings
    )

    # generate folder structures
    run_paths = utils_params.gen_run_folder(path_model_id=path_model_id)

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # Define input pipeline depending on the type of training
    logging.info('Setup input pipeline...')
    train_ds, train_ds_info, repetition = gen_pipeline_train_meta()
    eval_ds, eval_ds_info = gen_pipeline_eval_meta()

    # Define model factory — deliberately NOT an instance, since the
    # trainer needs to create multiple instances later.
    logging.info("Setup model...")
    model = model_fn.gen_model

    # Train and eval
    logging.info('Init M3T trainer...')
    trainer = train_meta_obj.MT3Trainer(model, train_ds, train_ds_info,
                                        eval_ds, run_paths, repetition)
    logging.info('Start M3T trainer...')
    results = trainer.train()
    return results
Example #2
0
def train_func(config):
    """
    Ray Tune trial entry point: convert the hyperparameter dict into gin
    bindings, set up folders/loggers/pipeline, train the transformer and
    report validation accuracy after each epoch.

    :param config: dict mapping hyperparameter name -> sampled value
    """
    # Hyperparameters as gin binding strings ('key=value')
    bindings = [f'{key}={value}' for key, value in config.items()]

    # generate folder structures
    # BUG FIX: ','.join(bindings[2]) joined the *characters* of the third
    # binding string with commas (e.g. 'a=1' -> 'a,=,1') and assumed at
    # least three bindings; join the whole list instead, matching the
    # other train_func variants.
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test = load_from_tfrecords()

    # model
    model = TransformerS2S()

    # train; the generator yields validation accuracy once per epoch
    trainer = Trainer(model, ds_train, ds_val, run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
def train_func(config):
    """
    Ray Tune trial entry point: build gin bindings from the trial config,
    set up run folders and loggers, train DenseNet121 and report the
    validation accuracy after each epoch.

    :param config: dict mapping hyperparameter name -> sampled value
    """
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    # NOTE(review): only the *third* binding is used as the run-folder id,
    # which assumes config has >= 3 entries -- confirm this is intended
    # (sibling variants use ','.join(bindings)).
    run_paths = utils_params.gen_run_folder(bindings[2])

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    # gin dir should be replaced by your own dir
    gin.parse_config_files_and_bindings([r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'],
                                        bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # model
    model = DenseNet121(IMG_SIZE=256)

    # NOTE(review): the *test* split is passed as the validation set here
    # (ds_val=test_ds) -- confirm this is intended.
    trainer = Trainer(model=model, ds_train=train_ds, ds_val=test_ds, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
Example #4
0
def train_func(config):
    """Train one Ray Tune trial and report per-epoch validation accuracy.

    :param config: mapping of hyperparameter name to value; each entry
        becomes a gin binding string of the form 'key=value'.
    """
    # Turn the trial configuration into gin binding strings.
    bindings = [f'{name}={setting}' for name, setting in config.items()]

    # One run folder per unique binding combination.
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # Route log output into the run folder.
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # Parse the gin config with the trial's overrides and persist it.
    gin.parse_config_files_and_bindings(
        ['/mnt/home/repos/dl-lab-skeleton/diabetic_retinopathy/configs/config.gin'],
        bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # Input pipeline.
    ds_train, ds_val, ds_test, ds_info = load()

    # Build the model from the dataset metadata.
    model = vgg_like(input_shape=ds_info.features["image"].shape,
                     n_classes=ds_info.features["label"].num_classes)

    # Train, reporting validation accuracy to Tune once per epoch.
    trainer = Trainer(model, ds_train, ds_val, ds_info, run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
def tuning(config):
    """
    Ray Tune trial: train the configured model, report per-epoch validation
    accuracy (in percent), then compile and evaluate the trained model on
    the test split and report the test accuracy.

    :param config: dict mapping hyperparameter name -> sampled value
    """
    # set hyperparameters as gin binding strings
    bindings = ['{}={}'.format(str(key), str(value))
                for key, value in config.items()]

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model: dispatch table replaces the long if/elif chain; unknown
    # names fall back to the VGG16-like baseline, exactly as before.
    model_builders = {
        'VGG16': vgg_like,
        'Simplified Inception': simplified_inception,
        'Simplified SEResNeXt': simplified_seresnext,
        'RepVGG': rep_vgg,
        'DenseNet201': densenet201,
        'EfficientNetB3': efficientnetb3,
    }
    build_model = model_builders.get(model_name, vgg_like)
    model = build_model(input_shape=(256, 256, 3), model_type=model_type)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model; the generator yields validation accuracy per epoch
    trainer = Trainer(model, ds_train, ds_val, ds_info,
                      model_type=model_type, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model: the binary_classification and multi_classification
    # branches were byte-identical, so they are merged; only regression uses
    # the Huber loss.
    trained_model = trainer.model_output()
    if model_type == 'regression':
        loss = tf.keras.losses.Huber(delta=0.3)
    else:
        loss = tf.keras.losses.SparseCategoricalCrossentropy()
    trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                          loss=loss,
                          metrics=[BinaryAccuracy(model_type=model_type)])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['binary_accuracy']
    tune.report(test_accuracy=test_accuracy * 100)
Example #6
0
def main(argv):
    """Train or evaluate the configured sequence model, depending on FLAGS.train."""
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(num_categories=num_categories)

    # setup model: constructor lookup by name; unknown names fall back to
    # the Sequence_LSTM baseline, mirroring the original if/elif chain.
    builders = {
        'Sequence_LSTM': sequence_LSTM_model,
        'Sequence_BiLSTM': sequence_BiLSTM_model,
        'Sequence_GRU': sequence_GRU_model,
        'Sequence_BiGRU': sequence_BiGRU_model,
        'Sequence_Conv1D': sequence_Conv1D_model,
        'Sequence_BiConv1D': sequence_BiConv1D_model,
        'Sequence_Ensemble': sequence_Ensemble_model,
        'Sequence_RNN_Fourier': sequence_RNN_Fourier_model,
    }
    if model_name == 'Seq2Seq':
        # Seq2Seq takes no input_shape argument, so it is special-cased.
        model = Seq2Seq(num_categories=num_categories)
    else:
        build = builders.get(model_name, sequence_LSTM_model)
        model = build(input_shape=[windows_size, 6], num_categories=num_categories)

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

        # train the model; the generator is drained for its side effects
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_name, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

        # evaluate the model and visualize its predictions
        evaluate(model, ds_test, ds_info, model_name, run_paths=run_paths, num_categories=num_categories)
        visulization(model, run_paths, ds_test, model_name, num_categories=num_categories)
Example #7
0
def main(argv):
    """Train the transformer model, or restore a checkpoint and evaluate it."""
    # One fresh run folder per invocation.
    run_paths = utils_params.gen_run_folder()

    # Log output goes into the run folder.
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # Parse and persist the gin configuration.
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # Input pipeline from TFRecord files.
    ds_train, ds_val, ds_test = load_tfrecords.load_from_tfrecords()

    # print number of available GPUs
    n_gpus = len(tf.config.experimental.list_physical_devices('GPU'))
    print("Num GPUs Available: ", n_gpus)

    if FLAGS.train:
        # Build, summarize and train a fresh model; the training generator
        # is drained purely for its side effects.
        model = TransformerS2S()
        model.build((None, 250, 6))
        model.summary()
        trainer = Trainer(model, ds_train, ds_val, run_paths)
        for _ in trainer.train():
            continue

    else:
        # get one completely trained model to do evaluating
        model = TransformerS2S()
        opt = tf.keras.optimizers.Adam()
        ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                                   optimizer=opt,
                                   net=model)

        # change ckpt dir to load the ckpt you want
        manager = tf.train.CheckpointManager(
            ckpt,
            "/content/drive/MyDrive/experiments/run_2021-01-24T13-52-22-787253/ckpts",
            max_to_keep=3)
        ckpt.restore(manager.latest_checkpoint)
        print("Restored from {}".format(manager.latest_checkpoint))
        evaluate(model, ds_test)
def main(argv):
    """Train or evaluate the selected retinopathy model, depending on FLAGS.train."""
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model via a name -> constructor table; unknown names fall back
    # to the VGG16-like baseline, exactly like the original if/elif chain.
    constructors = {
        'VGG16': vgg_like,
        'Simplified Inception': simplified_inception,
        'Simplified SEResNeXt': simplified_seresnext,
        'RepVGG': rep_vgg,
        'DenseNet201': densenet201,
        'EfficientNetB3': efficientnetb3,
    }
    make_model = constructors.get(model_name, vgg_like)
    model = make_model(input_shape=(256, 256, 3), model_type=model_type)
    model.summary()

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)
        # train the model; the generator is drained for its side effects
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)
        # evaluate the model
        evaluate(model, ds_test, ds_info, model_type=model_type, run_paths=run_paths)
Example #9
0
def set_up_train(path_model_id='', config_names=None, bindings=None):
    """
    Set up the complete environment (gin config, run folders, loggers,
    BYOL input pipelines, online/target models) and start BYOL training.

    :param path_model_id: path of an existing model to continue from
        (empty string starts a fresh run)
    :param config_names: name(s) of gin config file(s); defaults to
        ['config.gin']
    :param bindings: gin binding strings that overwrite single parameters
        defined in the config file(s),
        e.g. ['train_and_eval.n_epochs = 3', 'train_and_eval.save_period = 1']
    :return: result of train_and_eval_byol (last validation metric)
    """
    # Avoid mutable default arguments: module-level list objects would be
    # shared across calls and could leak state if ever mutated.
    if config_names is None:
        config_names = ['config.gin']
    if bindings is None:
        bindings = []

    # inject config
    utils_params.inject_gin(
        config_names, path_model_id=path_model_id, bindings=bindings
    )

    # generate folder structures
    run_paths = utils_params.gen_run_folder(path_model_id=path_model_id)

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # Define input pipeline depending on the type of training
    logging.info('Setup input pipeline...')
    train_ds, train_ds_info = gen_pipeline_train_byol()
    eval_ds, eval_ds_info = gen_pipeline_eval_byol()
    test_ds, test_info = gen_pipeline_test_byol()

    # Define models: BYOL needs two networks with the same architecture
    # (online and target); hoist the shared class count.
    logging.info("Setup model...")
    n_classes = train_ds_info.features['label'].num_classes
    online_model = model_fn.gen_model(n_classes=n_classes)
    target_model = model_fn.gen_model(n_classes=n_classes)

    # Train and eval
    logging.info('Start training...')
    results = train_byol.train_and_eval_byol(online_model, target_model,
                                             train_ds, train_ds_info, eval_ds,
                                             test_ds, run_paths)
    return results
Example #10
0
def main(argv):
    """
    Train (including fine-tuning) DenseNet121, or load a saved model and
    evaluate it, depending on FLAGS.train.
    """
    # generate folder structures
    run_paths = utils_params.gen_run_folder()
    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings([
        r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'
    ], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # training including fine tuning
    # (a redundant nested `if FLAGS.train:` check was removed here)
    if FLAGS.train:
        # model
        model = DenseNet121(IMG_SIZE=256)
        model.summary()

        # training and fine tuning; the generator is drained for its
        # side effects
        trainer = Trainer(model=model,
                          ds_train=train_ds,
                          ds_val=valid_ds,
                          run_paths=run_paths)
        for _ in trainer.train():
            continue

    else:
        # evaluation
        # model dir should be replaced by saved model dir
        model_dir = r"\diabetic_retinopathy\logs\20201221-225335\saved_model_ft"
        model = tf.keras.models.load_model(model_dir)
        # NOTE(review): evaluation runs on the validation split rather than
        # test_ds -- confirm this is intended.
        evaluate(model, valid_ds)
Example #11
0
        f'cifar10_corrupted/defocus_blur_{LEVEL}',
        f'cifar10_corrupted/elastic_{LEVEL}', f'cifar10_corrupted/fog_{LEVEL}',
        f'cifar10_corrupted/frost_{LEVEL}',
        f'cifar10_corrupted/frosted_glass_blur_{LEVEL}',
        f'cifar10_corrupted/gaussian_noise_{LEVEL}',
        f'cifar10_corrupted/impulse_noise_{LEVEL}',
        f'cifar10_corrupted/jpeg_compression_{LEVEL}',
        f'cifar10_corrupted/motion_blur_{LEVEL}',
        f'cifar10_corrupted/pixelate_{LEVEL}',
        f'cifar10_corrupted/shot_noise_{LEVEL}',
        f'cifar10_corrupted/snow_{LEVEL}',
        f'cifar10_corrupted/zoom_blur_{LEVEL}'
    ]

    # generate folder structures
    run_paths = utils_params.gen_run_folder(path_model_id=path_model_id)

    # gin config files and bindings
    config_names = []
    bindings = []

    # inject config
    utils_params.inject_gin([run_paths['path_gin']],
                            path_model_id=path_model_id,
                            bindings=bindings)

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_test'], logging.INFO)

    # run testing over all datasets
    results = {}
Example #12
0
def tuning(config):
    """
    Ray Tune trial for the sequence models: train, report per-epoch
    validation accuracy (in percent), then evaluate the trained model on
    the test split, visualize predictions and report test accuracy.

    :param config: dict mapping hyperparameter name -> sampled value
    """
    # hyperparameters as gin binding strings
    bindings = ['{}={}'.format(str(key), str(value))
                for key, value in config.items()]

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(
        num_categories=num_categories)

    # setup model: constructor lookup replaces the long if/elif chain;
    # unknown names fall back to Sequence_LSTM, exactly as before.
    builders = {
        'Sequence_LSTM': sequence_LSTM_model,
        'Sequence_BiLSTM': sequence_BiLSTM_model,
        'Sequence_GRU': sequence_GRU_model,
        'Sequence_BiGRU': sequence_BiGRU_model,
        'Sequence_Conv1D': sequence_Conv1D_model,
        'Sequence_BiConv1D': sequence_BiConv1D_model,
        'Sequence_Ensemble': sequence_Ensemble_model,
        'Sequence_RNN_Fourier': sequence_RNN_Fourier_model,
    }
    if model_name == 'Seq2Seq':
        # Seq2Seq takes no input_shape argument, so it is special-cased.
        model = Seq2Seq(num_categories=num_categories)
    else:
        build = builders.get(model_name, sequence_LSTM_model)
        model = build(input_shape=[windows_size, 6],
                      num_categories=num_categories)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model
    trainer = Trainer(model,
                      ds_train,
                      ds_val,
                      ds_info,
                      model_name,
                      run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model
    trained_model = trainer.model_output()
    # BUG FIX: the original compiled `model` but evaluated `trained_model`;
    # compile the model that is actually evaluated.
    trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                          metrics=[[Accuracy()],
                                   [ConfusionMatrix(num_categories=num_categories)]])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['accuracy']
    visulization(model,
                 run_paths,
                 ds_test,
                 model_name,
                 num_categories=num_categories)
    tune.report(test_accuracy=test_accuracy * 100)
# gin-config
# Parse the project config with no extra bindings.
gin.parse_config_files_and_bindings(['configs/config.gin'], [])

# record the predictions and labels of each model
regression_predictions_list = []
regression_label_list = []
multi_predictions_list = []
multi_label_list = []

# evaluate each model
# NOTE(review): model_list is defined outside this excerpt; presumably it
# maps a saved model's name to its model_type -- confirm. The loop body
# appears to continue beyond the end of this excerpt.
for model_name, model_type in model_list.items():
    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)
    # generate folder structures
    run_paths = utils_params.gen_run_folder(model_name)
    # setup model: substring matching on the model name selects the
    # architecture; anything unrecognized falls back to the VGG16-like model
    if model_name.find('VGG16') != -1:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name.find('Inception') != -1:
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name.find('SEResNeXt') != -1:
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        # NOTE(review): exact-match here, unlike the substring checks above
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name.find('DenseNet201') != -1:
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name.find('EfficientNetB3') != -1:
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)