Example #1
def train_func(config):
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    # replace the gin config path below with your own
    gin.parse_config_files_and_bindings([r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'],
                                        bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # model
    model = DenseNet121(IMG_SIZE=256)

    trainer = Trainer(model=model, ds_train=train_ds, ds_val=valid_ds, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
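
train_func yields a validation accuracy after each epoch and forwards it to Ray Tune via tune.report, so a scheduler can compare and stop trials. Below is a minimal launch sketch using the legacy tune.run API; the gin parameter names in the search space are illustrative assumptions and must match configurables declared in config.gin.

from ray import tune

# Minimal launch sketch (not from the original repo): run train_func as a
# Ray Tune trainable. The gin parameter names below are hypothetical
# placeholders for configurables defined in config.gin.
analysis = tune.run(
    train_func,
    config={
        'Trainer.total_steps': tune.grid_search([10000, 20000]),
        'Trainer.learning_rate': tune.loguniform(1e-4, 1e-2),
    },
    resources_per_trial={'cpu': 4, 'gpu': 1},
)
print('Best config:', analysis.get_best_config(metric='val_accuracy', mode='max'))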
Example #2
def train_func(config):
    # Hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['/mnt/home/repos/dl-lab-skeleton/diabetic_retinopathy/configs/config.gin'], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = load()

    # model
    model = vgg_like(input_shape=ds_info.features["image"].shape, n_classes=ds_info.features["label"].num_classes)

    trainer = Trainer(model, ds_train, ds_val, ds_info, run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy)
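
Each entry of the Tune config dict is flattened into a gin binding string of the form 'key=value'; gin.parse_config_files_and_bindings then applies these bindings on top of the base config file, so sampled hyperparameters override the defaults. A small self-contained illustration, with hypothetical parameter names:

# Illustration of the config-to-binding flattening used above.
# The keys are hypothetical; real keys must name gin configurables.
config = {'Trainer.total_steps': 10000, 'vgg_like.base_filters': 16}
bindings = [f'{key}={value}' for key, value in config.items()]
print(bindings)  # ['Trainer.total_steps=10000', 'vgg_like.base_filters=16']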
Example #3
def tuning(config):
    # set hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model
    if model_name == 'VGG16':
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified Inception':
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified SEResNeXt':
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'DenseNet201':
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'EfficientNetB3':
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model
    trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model
    trained_model = trainer.model_output()
    if model_type == 'regression':
        trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                              loss=tf.keras.losses.Huber(delta=0.3),
                              metrics=[BinaryAccuracy(model_type=model_type)])
    elif model_type in ('binary_classification', 'multi_classification'):
        trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                              metrics=[BinaryAccuracy(model_type=model_type)])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['binary_accuracy']
    tune.report(test_accuracy=test_accuracy * 100)
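
In the regression branch above, Huber loss with delta=0.3 is quadratic for residuals below 0.3 and linear beyond it, so a few grossly wrong predictions cannot dominate the loss. A quick standalone check, with values following from the Huber definition (0.5*a**2 for |a| <= delta, delta*(|a| - 0.5*delta) otherwise):

import tensorflow as tf

# Sanity check of Huber(delta=0.3): quadratic inside the delta band,
# linear outside it.
huber = tf.keras.losses.Huber(delta=0.3)
print(huber([[0.0]], [[0.1]]).numpy())  # 0.5 * 0.1**2          = 0.005
print(huber([[0.0]], [[2.0]]).numpy())  # 0.3 * (2.0 - 0.5*0.3) = 0.555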
Example #4
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(num_categories=num_categories)

    # setup model
    if model_name == 'Sequence_LSTM':
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiLSTM':
        model = sequence_BiLSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_GRU':
        model = sequence_GRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiGRU':
        model = sequence_BiGRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Conv1D':
        model = sequence_Conv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiConv1D':
        model = sequence_BiConv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Ensemble':
        model = sequence_Ensemble_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Seq2Seq':
        model = Seq2Seq(num_categories=num_categories)
    elif model_name == 'Sequence_RNN_Fourier':
        model = sequence_RNN_Fourier_model(input_shape=[windows_size, 6], num_categories=num_categories)
    else:
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_name, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

        # evaluate the model
        evaluate(model, ds_test, ds_info, model_name, run_paths=run_paths, num_categories=num_categories)
        visulization(model, run_paths, ds_test, model_name, num_categories=num_categories)
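
This main(argv) entry point relies on absl: FLAGS.train implies a boolean --train flag defined at module level, and app.run parses the command line before handing control to main. A minimal sketch of that surrounding boilerplate, assuming absl-py:

from absl import app, flags

# Minimal absl boilerplate assumed by main(argv) above: a boolean --train
# flag toggles between the training and evaluation branches.
FLAGS = flags.FLAGS
flags.DEFINE_boolean('train', True, 'Train a model (True) or evaluate one (False).')

if __name__ == '__main__':
    app.run(main)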
Example #5
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model
    if model_name == 'VGG16':
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified Inception':
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified SEResNeXt':
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'DenseNet201':
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'EfficientNetB3':
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    model.summary()

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)
        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)
        # evaluate the model
        evaluate(model, ds_test, ds_info, model_type=model_type, run_paths=run_paths)
Example #6
def main(argv):

    # generate folder structures
    run_paths = utils_params.gen_run_folder()
    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings([
        r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'
    ], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # training including fine-tuning
    if FLAGS.train:
        # model
        model = DenseNet121(IMG_SIZE=256)
        model.summary()

        # training and fine-tuning
        trainer = Trainer(model=model,
                          ds_train=train_ds,
                          ds_val=valid_ds,
                          run_paths=run_paths)
        for _ in trainer.train():
            continue

    else:
        # evaluation
        # replace model_dir with the path to your saved model
        model_dir = r"\diabetic_retinopathy\logs\20201221-225335\saved_model_ft"
        model = tf.keras.models.load_model(model_dir)
        evaluate(model, test_ds)
Example #7
def main(argv):
    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])

    if FLAGS.hparam_tune:
        # for hyperparameter tuning, choose this mode
        from hyper_parameter_tuning.hparam_tuning import run_hparam_tuning
        run_hparam_tuning()

    else:
        if FLAGS.ds2:
            # setup pipeline without image data generator
            ds_train, ds_val, ds_test = datasets2.load_data()

            if FLAGS.Transfer_learning:
                # for fine-tuning a pretrained model
                epochs = constants.H_TRANSFER_LEARNING_EPOCHS
                model = transfer_learning((256, 256, 3))
            else:
                # for using the custom model
                epochs = constants.H_EPOCHS
                model = vgg_base_3custom((256, 256, 3))

        else:
            # pipeline using the image data generator
            ds_train, ds_val, ds_test = datasets.load()
            if FLAGS.Transfer_learning:
                epochs = constants.H_TRANSFER_LEARNING_EPOCHS
                model = transfer_learning((256, 256, 3))
            else:
                epochs = constants.H_EPOCHS
                model = vgg_base_3custom((256, 256, 3))

        opt = tf.optimizers.Adam(constants.H_LEARNING_RATE, name='ADAM')

        if FLAGS.train:

            # Build and compile the model
            model.build((constants.N_BATCH_SIZE, constants.ip_shape[0],
                         constants.ip_shape[1], 3))

            model.compile(
                optimizer=opt,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'],
            )
            model.summary()

            # TensorBoard callback
            if not os.path.isdir(constants.dir_fit):
                os.makedirs(constants.dir_fit)
            log_dir = os.path.join(
                constants.dir_fit,
                datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
            tensorboard_callbk = tf.keras.callbacks.TensorBoard(
                log_dir=log_dir,
                histogram_freq=1,
                write_graph=True,
                write_images=True,
                update_freq='epoch',
                # profile_batch=2,
                embeddings_freq=1)

            # checkpoint callback
            cpt_dir = os.path.join(
                constants.dir_cpts,
                datetime.datetime.now().strftime("%Y%m%d-%H%M"))
            if not os.path.isdir(cpt_dir):
                os.makedirs(cpt_dir)
            print(cpt_dir)
            checkpoint_dir = os.path.join(
                cpt_dir,
                'epochs:{epoch:03d}-val_accuracy:{val_accuracy:.3f}.h5')
            # checkpoint callback to save the model after every epoch
            # (the filename records the validation accuracy)
            checkpoint_callbk = tf.keras.callbacks.ModelCheckpoint(
                checkpoint_dir,
                monitor='val_accuracy',
                verbose=1,
                save_best_only=False,
                mode='max',
                save_weights_only=False,
                save_freq='epoch')
            # CSV logger callback; create the directory if it doesn't exist
            if not os.path.isdir(constants.dir_csv):
                os.makedirs(constants.dir_csv)
            log_file_name = os.path.join(
                constants.dir_csv,
                (datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.csv'))
            csv_callbk = tf.keras.callbacks.CSVLogger(log_file_name,
                                                      separator=',',
                                                      append=True)

            callbacks_list = [
                checkpoint_callbk, tensorboard_callbk, csv_callbk
            ]

            # train the first half of the epochs, saving via the checkpoint callback
            history_model = model.fit(ds_train,
                                      verbose=1,
                                      epochs=int(epochs / 2),
                                      batch_size=constants.N_BATCH_SIZE,
                                      validation_data=ds_val,
                                      callbacks=callbacks_list)
            # continue training for the remaining epochs
            history_model = model.fit(ds_train,
                                      verbose=1,
                                      initial_epoch=int(epochs / 2),
                                      epochs=epochs,
                                      batch_size=constants.N_BATCH_SIZE,
                                      validation_data=ds_val,
                                      callbacks=callbacks_list)

            # save final model
            if not os.path.isdir(constants.WEIGHTS_PATH):
                os.makedirs(constants.WEIGHTS_PATH)
            model_save_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            model_name = (model_save_time + '_' +
                          model.optimizer.get_config()['name'] +
                          '_epochs_' + str(epochs) + '.h5')
            model_save_path = os.path.join(constants.WEIGHTS_PATH, model_name)
            print(model_save_path)
            model.save(model_save_path)

            # plot final training curves; for runtime progress, see the TensorBoard logs
            plt.figure()
            plt.subplot(1, 2, 1)
            plt.plot(history_model.history["loss"])
            plt.plot(history_model.history["val_loss"])
            plt.legend(["loss", "val_loss"])
            # plt.xticks(range(constants.H_EPOCHS))
            plt.xlabel("epochs")
            plt.title("Train and val loss")

            plt.subplot(1, 2, 2)
            plt.plot(history_model.history["accuracy"])
            plt.plot(history_model.history["val_accuracy"])
            plt.legend(["accuracy", "val_accuracy"])
            plt.title("Train and Val acc")
            plt.show()
            '''
            test_history = model.evaluate(ds_test,
                                          batch_size=constants.N_BATCH_SIZE,
                                          verbose=1, steps=4)

            '''

            eval.evaluate(model=model,
                          ds_test=ds_test,
                          opt=opt,
                          is_training=FLAGS.train,
                          SAVE_RESULT=True,
                          checkpoint_path=None)

        else:

            # Load checkpoint model to evaluate
            check_point_path = constants.trained_model_name

            # check_point_path = 'weights/20201222-220802_ADAM_epochs_100_test_acc_78.h5'
            eval.evaluate(model=model,
                          ds_test=ds_test,
                          opt=opt,
                          is_training=FLAGS.train,
                          SAVE_RESULT=True,
                          checkpoint_path=check_point_path)
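
Because the ModelCheckpoint above writes a full .h5 model every epoch with save_best_only=False, an interrupted run can be resumed by reloading the last checkpoint and continuing with initial_epoch. A resume sketch that reuses ds_train, ds_val, callbacks_list, and constants from the example above; the checkpoint filename and epoch number are placeholder assumptions:

import tensorflow as tf

# Resume sketch (checkpoint filename and epoch are placeholders): reload the
# last saved model and continue training from where it stopped.
resume_epoch = 37
model = tf.keras.models.load_model('cpts/epochs:037-val_accuracy:0.812.h5')
model.fit(ds_train,
          initial_epoch=resume_epoch,
          epochs=constants.H_EPOCHS,
          validation_data=ds_val,
          callbacks=callbacks_list)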
Example #8
def tuning(config):
    # hyperparameters
    bindings = []
    for key, value in config.items():
        bindings.append(f'{key}={value}')

    # generate folder structures
    run_paths = utils_params.gen_run_folder(','.join(bindings))

    # gin-config
    gin.parse_config_files_and_bindings([config_path], bindings)
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(
        num_categories=num_categories)

    # setup model
    if model_name == 'Sequence_LSTM':
        model = sequence_LSTM_model(input_shape=[windows_size, 6],
                                    num_categories=num_categories)
    elif model_name == 'Sequence_BiLSTM':
        model = sequence_BiLSTM_model(input_shape=[windows_size, 6],
                                      num_categories=num_categories)
    elif model_name == 'Sequence_GRU':
        model = sequence_GRU_model(input_shape=[windows_size, 6],
                                   num_categories=num_categories)
    elif model_name == 'Sequence_BiGRU':
        model = sequence_BiGRU_model(input_shape=[windows_size, 6],
                                     num_categories=num_categories)
    elif model_name == 'Sequence_Conv1D':
        model = sequence_Conv1D_model(input_shape=[windows_size, 6],
                                      num_categories=num_categories)
    elif model_name == 'Sequence_BiConv1D':
        model = sequence_BiConv1D_model(input_shape=[windows_size, 6],
                                        num_categories=num_categories)
    elif model_name == 'Sequence_Ensemble':
        model = sequence_Ensemble_model(input_shape=[windows_size, 6],
                                        num_categories=num_categories)
    elif model_name == 'Seq2Seq':
        model = Seq2Seq(num_categories=num_categories)
    elif model_name == 'Sequence_RNN_Fourier':
        model = sequence_RNN_Fourier_model(input_shape=[windows_size, 6],
                                           num_categories=num_categories)
    else:
        model = sequence_LSTM_model(input_shape=[windows_size, 6],
                                    num_categories=num_categories)

    # set training loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # train the model
    trainer = Trainer(model,
                      ds_train,
                      ds_val,
                      ds_info,
                      model_name,
                      run_paths=run_paths)
    for val_accuracy in trainer.train():
        tune.report(val_accuracy=val_accuracy * 100)

    # set validation loggers
    utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

    # evaluate the model
    trained_model = trainer.model_output()
    trained_model.compile(optimizer=tf.keras.optimizers.Adam(),
                          loss=tf.keras.losses.SparseCategoricalCrossentropy(),
                          metrics=[Accuracy(),
                                   ConfusionMatrix(num_categories=num_categories)])

    result = trained_model.evaluate(ds_test, return_dict=True)
    test_accuracy = result['accuracy']
    visulization(model,
                 run_paths,
                 ds_test,
                 model_name,
                 num_categories=num_categories)
    tune.report(test_accuracy=test_accuracy * 100)
Example #9
assert model_list != {}

# gin-config
gin.parse_config_files_and_bindings(['configs/config.gin'], [])

# record the predictions and labels of each model
regression_predictions_list = []
regression_label_list = []
multi_predictions_list = []
multi_label_list = []

# evaluate each model
for model_name, model_type in model_list.items():
    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)
    # generate folder structures
    run_paths = utils_params.gen_run_folder(model_name)
    # setup model
    if 'VGG16' in model_name:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif 'Inception' in model_name:
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif 'SEResNeXt' in model_name:
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif 'DenseNet201' in model_name:
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif 'EfficientNetB3' in model_name:
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)