Example #1
def train_func(config):
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test = load_from_tfrecords()

    # model
    model = TransformerS2S()

    trainer = Trainer(model, ds_train, ds_val, run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
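The train_func above is a Ray Tune trainable: Tune supplies the config dict, the function translates it into gin bindings, and every tune.report call feeds the current validation accuracy back to the search. A minimal launch sketch, assuming the Ray Tune 1.x API used in these examples and purely hypothetical gin parameter names (the real names live in config.gin):

from ray import tune

analysis = tune.run(
    train_func,
    config={
        # Hypothetical gin-configurable parameters; replace with the ones in config.gin.
        'Trainer.total_steps': tune.grid_search([5000, 10000]),
        'TransformerS2S.d_model': tune.choice([64, 128]),
    },
    metric='val_accuracy',
    mode='max',
    num_samples=1,
    resources_per_trial={'cpu': 2, 'gpu': 1},
)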
Example #2
def train_func(config):
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(bindings[2])

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    # gin dir should be replaced by your own dir
    gin.parse_config_files_and_bindings([r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'],
                                        bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # model
    model = DenseNet121(IMG_SIZE=256)

    trainer = Trainer(model=model, ds_train=train_ds, ds_val=valid_ds, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
Example #3
def train_func(config):
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['/mnt/home/repos/dl-lab-skeleton/diabetic_retinopathy/configs/config.gin'], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = load()

    # model
    model = vgg_like(input_shape=ds_info.features["image"].shape, n_classes=ds_info.features["label"].num_classes)

    trainer = Trainer(model, ds_train, ds_val, ds_info, run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
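All of these trainables start by turning the Tune config dict into gin binding strings of the form parameter=value, which gin.parse_config_files_and_bindings then applies on top of config.gin. A standalone sketch of that mapping, with made-up parameter names:

import gin

config = {'Trainer.total_steps': 10000, 'vgg_like.dropout_rate': 0.3}  # hypothetical keys
bindings = [f'{key}={value}' for key, value in config.items()]
# bindings == ['Trainer.total_steps=10000', 'vgg_like.dropout_rate=0.3']

# The bindings are parsed after the config file, so they override its values.
gin.parse_config_files_and_bindings(['configs/config.gin'], bindings)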
Example #4
def tuning(config):
    # set hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append('{}={}'.format(str(key), str(value)))

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model
    if model_name == 'VGG16':
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified Inception':
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified SEResNeXt':
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'DenseNet201':
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'EfficientNetB3':
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model
    trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model
    trained_model = trainer.model_output()
    if model_type == 'regression':
        trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                              loss=tf.keras.losses.Huber(delta=0.3),
                              metrics=[BinaryAccuracy(model_type=model_type)])
    elif model_type == 'binary_classification':
        trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                              metrics=[BinaryAccuracy(model_type=model_type)])
    elif model_type == 'multi_classification':
        trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                              metrics=[BinaryAccuracy(model_type=model_type)])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['binary_accuracy']
    tune.report(test_accuracy=test_accuracy * 100)
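Since tuning reports val_accuracy for every value yielded by trainer.train(), a Tune scheduler can terminate unpromising trials early. Extending the launch sketch after Example #1, again assuming the Ray Tune 1.x API and a hypothetical search space:

from ray import tune
from ray.tune.schedulers import ASHAScheduler

search_space = {
    # Hypothetical gin-configurable parameters.
    'Trainer.learning_rate': tune.loguniform(1e-4, 1e-2),
    'Trainer.batch_size': tune.choice([16, 32, 64]),
}

analysis = tune.run(
    tuning,
    config=search_space,
    metric='val_accuracy',
    mode='max',
    scheduler=ASHAScheduler(max_t=100, grace_period=10),
    num_samples=20,
    resources_per_trial={'cpu': 4, 'gpu': 1},
)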
Example #5
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(num_categories=num_categories)

    # setup model
    if model_name == 'Sequence_LSTM':
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiLSTM':
        model = sequence_BiLSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_GRU':
        model = sequence_GRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiGRU':
        model = sequence_BiGRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Conv1D':
        model = sequence_Conv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiConv1D':
        model = sequence_BiConv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Ensemble':
        model = sequence_Ensemble_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Seq2Seq':
        model = Seq2Seq(num_categories=num_categories)
    elif model_name == 'Sequence_RNN_Fourier':
        model = sequence_RNN_Fourier_model(input_shape=[windows_size, 6], num_categories=num_categories)
    else:
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_name, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

        # evaluate the model
        evaluate(model, ds_test, ds_info, model_name, run_paths=run_paths, num_categories=num_categories)
        visulization(model, run_paths, ds_test, model_name, num_categories=num_categories)
Example #6
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder()

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test = load_tfrecords.load_from_tfrecords()

    # print number of available GPUs
    print("Num GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))

    if FLAGS.train:
        model = TransformerS2S()
        model.build((None, 250, 6))
        model.summary()
        trainer = Trainer(model, ds_train, ds_val, run_paths)
        for _ in trainer.train():
            continue

    else:
        # get one completely trained model to do evaluating
        opt = tf.keras.optimizers.Adam()
        model = TransformerS2S()
        ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                                   optimizer=opt,
                                   net=model)

        # change ckpt dir to load the ckpt you want
        manager = tf.train.CheckpointManager(
            ckpt,
            "/content/drive/MyDrive/experiments/run_2021-01-24T13-52-22-787253/ckpts",
            max_to_keep=3)
        ckpt.restore(manager.latest_checkpoint)
        print("Restored from {}".format(manager.latest_checkpoint))
        evaluate(model, ds_test)
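The evaluation branch above only restores checkpoints; the matching save side (presumably inside the project's Trainer) would use the same Checkpoint/CheckpointManager pair. A minimal sketch of that saving pattern, with a placeholder directory and save interval:

import tensorflow as tf

model = TransformerS2S()
opt = tf.keras.optimizers.Adam()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=model)
manager = tf.train.CheckpointManager(ckpt, './ckpts', max_to_keep=3)

# Inside the training loop: advance the step counter and save periodically.
ckpt.step.assign_add(1)
if int(ckpt.step) % 1000 == 0:
    save_path = manager.save()
    print('Saved checkpoint for step {}: {}'.format(int(ckpt.step), save_path))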
Example #7
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model
    if model_name == 'VGG16':
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified Inception':
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified SEResNeXt':
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'DenseNet201':
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'EfficientNetB3':
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    model.summary()

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)
        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)
        # evaluate the model
        evaluate(model, ds_test, ds_info, model_type=model_type, run_paths=run_paths)
Example #8
def main(argv):

    # generate folder structures
    run_paths = utils_params.gen_run_folder()
    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings([
        r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'
    ], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # training including fine tuning
    if FLAGS.train:
        # model
        if FLAGS.train:
            model = DenseNet121(IMG_SIZE=256)
            model.summary()

            # training and fine tuning
            trainer = Trainer(model=model,
                              ds_train=train_ds,
                              ds_val=valid_ds,
                              run_paths=run_paths)
            for _ in trainer.train():
                continue

    else:
        # evaluation
        # model dir should be replaced by saved model dir
        model_dir = r"\diabetic_retinopathy\logs\20201221-225335\saved_model_ft"
        model = tf.keras.models.load_model(model_dir)
        evaluate(model, valid_ds)
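The else branch relies on a Keras SavedModel already existing on disk; the training run would have produced it with model.save. A minimal sketch of that save/load round trip, with a placeholder path:

import tensorflow as tf

# After training / fine tuning: export the full model as a SavedModel.
model.save('logs/saved_model_ft')  # placeholder path

# Later, for evaluation: restore it without rebuilding the architecture in code.
restored_model = tf.keras.models.load_model('logs/saved_model_ft')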
Example #9
def tuning(config):
    # hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append('{}={}'.format(str(key), str(value)))

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(
        num_categories=num_categories)

    # setup model
    if model_name == 'Sequence_LSTM':
        model = sequence_LSTM_model(input_shape=[windows_size, 6],
                                    num_categories=num_categories)
    elif model_name == 'Sequence_BiLSTM':
        model = sequence_BiLSTM_model(input_shape=[windows_size, 6],
                                      num_categories=num_categories)
    elif model_name == 'Sequence_GRU':
        model = sequence_GRU_model(input_shape=[windows_size, 6],
                                   num_categories=num_categories)
    elif model_name == 'Sequence_BiGRU':
        model = sequence_BiGRU_model(input_shape=[windows_size, 6],
                                     num_categories=num_categories)
    elif model_name == 'Sequence_Conv1D':
        model = sequence_Conv1D_model(input_shape=[windows_size, 6],
                                      num_categories=num_categories)
    elif model_name == 'Sequence_BiConv1D':
        model = sequence_BiConv1D_model(input_shape=[windows_size, 6],
                                        num_categories=num_categories)
    elif model_name == 'Sequence_Ensemble':
        model = sequence_Ensemble_model(input_shape=[windows_size, 6],
                                        num_categories=num_categories)
    elif model_name == 'Seq2Seq':
        model = Seq2Seq(num_categories=num_categories)
    elif model_name == 'Sequence_RNN_Fourier':
        model = sequence_RNN_Fourier_model(input_shape=[windows_size, 6],
                                           num_categories=num_categories)
    else:
        model = sequence_LSTM_model(input_shape=[windows_size, 6],
                                    num_categories=num_categories)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model
    trainer = Trainer(model,
                      ds_train,
                      ds_val,
                      ds_info,
                      model_name,
                      run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model
    trained_model = trainer.model_output()
    trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                          metrics=[Accuracy(),
                                   ConfusionMatrix(num_categories=num_categories)])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['accuracy']
    visulization(model,
                 run_paths,
                 ds_test,
                 model_name,
                 num_categories=num_categories)
    tune.report(test_accuracy=test_accuracy * 100)
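After tune.run finishes, the reported metrics can be inspected on the returned ExperimentAnalysis object. A short sketch, assuming the Ray Tune 1.x API and the analysis object returned by the launch sketches above:

# scope='all' picks the best val_accuracy over every report, which matters here
# because the final tune.report of each trial only contains test_accuracy.
best_config = analysis.get_best_config(metric='val_accuracy', mode='max', scope='all')
print('Best hyperparameters:', best_config)

# One row per trial with its last reported metrics, for a quick overview.
df = analysis.results_df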