Example #1
def evaluate_NGTree(model,
                    pre_wordscount,
                    post_wordscount,
                    lang,
                    max_pred=1,
                    trace=False):
    print(f"{model.__name__}:")
    tree = model(pre_wordscount, post_wordscount)
    tree.train(get_training_file(lang))
    evaluate(tree, pre_wordscount, post_wordscount, lang, words[lang], trace,
             max_pred)
    print()
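A minimal call sketch for the helper above, assuming the surrounding project supplies the model classes; NGramTree is a hypothetical stand-in, and 'en' assumes an English entry in the module-level words and training-file tables:

# Hypothetical usage (NGramTree is an assumed class name, not from the source):
evaluate_NGTree(NGramTree, pre_wordscount=2, post_wordscount=1,
                lang='en', max_pred=3, trace=True)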
Example #2
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(num_categories=num_categories)

    # setup model
    if model_name == 'Sequence_LSTM':
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiLSTM':
        model = sequence_BiLSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_GRU':
        model = sequence_GRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiGRU':
        model = sequence_BiGRU_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Conv1D':
        model = sequence_Conv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_BiConv1D':
        model = sequence_BiConv1D_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Sequence_Ensemble':
        model = sequence_Ensemble_model(input_shape=[windows_size, 6], num_categories=num_categories)
    elif model_name == 'Seq2Seq':
        model = Seq2Seq(num_categories=num_categories)
    elif model_name == 'Sequence_RNN_Fourier':
        model = sequence_RNN_Fourier_model(input_shape=[windows_size, 6], num_categories=num_categories)
    else:
        model = sequence_LSTM_model(input_shape=[windows_size, 6], num_categories=num_categories)

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_name, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)

        # evaluate the model
        evaluate(model, ds_test, ds_info, model_name, run_paths=run_paths, num_categories=num_categories)
        visulization(model, run_paths, ds_test, model_name, num_categories=num_categories)
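The main(argv) entry points in Examples #2 and #4 through #7 rely on absl-py flags such as FLAGS.train. A minimal sketch of the boilerplate they assume (the flag name follows the usage above; the default and help string are assumptions):

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_boolean('train', True, 'Train a new model; otherwise evaluate.')

if __name__ == '__main__':
    app.run(main)  # absl parses command-line flags, then calls main(argv)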
Example #3
def evaluate_NGM(modelclass,
                 pre_wordscount,
                 post_wordscount,
                 lang,
                 max_pred=1,
                 crash=False,
                 trace=False):
    print(f"{modelclass.__name__} ({pre_wordscount}c{post_wordscount}):")
    model = modelclass(pre_wordscount, post_wordscount)
    model.train(get_training_file(lang))
    if crash:
        evaluatecrash(model, pre_wordscount, post_wordscount, lang,
                      words[lang])
    else:
        evaluate(model, pre_wordscount, post_wordscount, lang, words[lang],
                 trace, crash, max_pred)
    print()
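As in Example #1, a hedged invocation sketch; passing crash=True routes evaluation through evaluatecrash instead of evaluate (NGramModel is an assumed class name):

# Hypothetical usage: crash-mode evaluation of a 2-before/0-after model.
evaluate_NGM(NGramModel, pre_wordscount=2, post_wordscount=0,
             lang='fr', crash=True)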
Example #4
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder()

    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test = load_tfrecords.load_from_tfrecords()

    # print number of available GPUs
    print("Num GPUs Available: ",
          len(tf.config.experimental.list_physical_devices('GPU')))

    if FLAGS.train:
        model = TransformerS2S()
        model.build((None, 250, 6))
        model.summary()
        trainer = Trainer(model, ds_train, ds_val, run_paths)
        for _ in trainer.train():
            continue

    else:
        # build the model and restore a fully trained checkpoint for evaluation
        opt = tf.keras.optimizers.Adam()
        model = TransformerS2S()
        ckpt = tf.train.Checkpoint(step=tf.Variable(1),
                                   optimizer=opt,
                                   net=model)

        # change ckpt dir to load the ckpt you want
        manager = tf.train.CheckpointManager(
            ckpt,
            "/content/drive/MyDrive/experiments/run_2021-01-24T13-52-22-787253/ckpts",
            max_to_keep=3)
        ckpt.restore(manager.latest_checkpoint)
        print("Restored from {}".format(manager.latest_checkpoint))
        evaluate(model, ds_test)
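For context, a minimal sketch of the checkpoint-writing side that the restore logic above pairs with; the directory and save cadence are assumptions, not taken from the source Trainer:

# Hypothetical training-side counterpart of the restore code above.
import tensorflow as tf

opt = tf.keras.optimizers.Adam()
model = TransformerS2S()
ckpt = tf.train.Checkpoint(step=tf.Variable(1), optimizer=opt, net=model)
manager = tf.train.CheckpointManager(ckpt, './ckpts', max_to_keep=3)

ckpt.step.assign_add(1)     # advance the step counter after a training step
save_path = manager.save()  # writes ./ckpts/ckpt-1, keeping only the 3 newest
print(f'Saved checkpoint: {save_path}')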
Example #5
def main(argv):
    # generate folder structures
    run_paths = utils_params.gen_run_folder(folder)

    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    ds_train, ds_val, ds_test, ds_info = datasets.load(model_type=model_type)

    # setup model
    if model_name == 'VGG16':
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified Inception':
        model = simplified_inception(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'Simplified SEResNeXt':
        model = simplified_seresnext(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'RepVGG':
        model = rep_vgg(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'DenseNet201':
        model = densenet201(input_shape=(256, 256, 3), model_type=model_type)
    elif model_name == 'EfficientNetB3':
        model = efficientnetb3(input_shape=(256, 256, 3), model_type=model_type)
    else:
        model = vgg_like(input_shape=(256, 256, 3), model_type=model_type)
    model.summary()

    if FLAGS.train:
        # set training loggers
        utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)
        # train the model
        trainer = Trainer(model, ds_train, ds_val, ds_info, model_type=model_type, run_paths=run_paths)
        for _ in trainer.train():
            continue
    else:
        # set validation loggers
        utils_misc.set_loggers(run_paths['path_logs_eval'], logging.INFO)
        # evaluate the model
        evaluate(model, ds_test, ds_info, model_type=model_type, run_paths=run_paths)
Example #6
def main(argv):

    # generate folder structures
    run_paths = utils_params.gen_run_folder()
    # set loggers
    utils_misc.set_loggers(run_paths['path_logs_train'], logging.INFO)

    # gin-config
    gin.parse_config_files_and_bindings([
        r'D:\Uni Stuttgart\Deep learning lab\Diabetic Retinopathy Detection\dl-lab-2020-team08\diabetic_retinopathy\configs\config.gin'
    ], [])
    utils_params.save_config(run_paths['path_gin'], gin.config_str())

    # setup pipeline
    train_ds, valid_ds, test_ds = datasets.load()

    # training including fine tuning
    if FLAGS.train:
        # model
        model = DenseNet121(IMG_SIZE=256)
        model.summary()

        # training and fine tuning
        trainer = Trainer(model=model,
                          ds_train=train_ds,
                          ds_val=valid_ds,
                          run_paths=run_paths)
        for _ in trainer.train():
            continue

    else:
        # evaluation
        # model_dir should be replaced with the directory of your saved model
        model_dir = r"\diabetic_retinopathy\logs\20201221-225335\saved_model_ft"
        model = tf.keras.models.load_model(model_dir)
        evaluate(model, valid_ds)
Example #7
def main(argv):
    # gin-config
    gin.parse_config_files_and_bindings(['configs/config.gin'], [])

    if FLAGS.hparam_tune:
        # choose this mode for hyperparameter tuning
        from hyper_parameter_tuning.hparam_tuning import run_hparam_tuning
        run_hparam_tuning()

    else:
        if FLAGS.ds2:
            # setup pipeline without image data generator
            ds_train, ds_val, ds_test = datasets2.load_data()

            if FLAGS.Transfer_learning:
                # for fine-tuning a pretrained model
                epochs = constants.H_TRANSFER_LEARNING_EPOCHS
                model = transfer_learning((256, 256, 3))
            else:
                # for using the custom model
                epochs = constants.H_EPOCHS
                model = vgg_base_3custom((256, 256, 3))

        else:
            # use the pipeline based on the image data generator
            ds_train, ds_val, ds_test = datasets.load()
            if FLAGS.Transfer_learning:
                epochs = constants.H_TRANSFER_LEARNING_EPOCHS
                model = transfer_learning((256, 256, 3))
            else:
                epochs = constants.H_EPOCHS
                model = vgg_base_3custom((256, 256, 3))

        opt = tf.optimizers.Adam(constants.H_LEARNING_RATE, name='ADAM')

        if FLAGS.train:

            # Build and compile the model
            model.build((constants.N_BATCH_SIZE, constants.ip_shape[0],
                         constants.ip_shape[1], 3))

            model.compile(
                optimizer=opt,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy'],
            )
            model.summary()  # summary() prints directly and returns None

            # TensorBoard callback
            if not os.path.isdir(constants.dir_fit):
                os.makedirs(constants.dir_fit)
            log_dir = os.path.join(
                constants.dir_fit,
                datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
            tensorboard_callbk = tf.keras.callbacks.TensorBoard(
                log_dir=log_dir,
                histogram_freq=1,
                write_graph=True,
                write_images=True,
                update_freq='epoch',
                # profile_batch=2,
                embeddings_freq=1)

            # checkpoint callback
            cpt_dir = os.path.join(
                constants.dir_cpts,
                datetime.datetime.now().strftime("%Y%m%d-%H%M"))
            if not os.path.isdir(cpt_dir):
                os.makedirs(cpt_dir)
            print(cpt_dir)
            checkpoint_dir = os.path.join(
                cpt_dir,
                'epochs:{epoch:03d}-val_accuracy:{val_accuracy:.3f}.h5')
            # save the model every epoch (save_best_only=False), monitoring val_accuracy
            checkpoint_callbk = tf.keras.callbacks.ModelCheckpoint(
                checkpoint_dir,
                monitor='val_accuracy',
                verbose=1,
                save_best_only=False,
                mode='max',
                save_weights_only=False,
                save_freq='epoch')
            # CSV logger callback; create the directory if it doesn't exist
            if not os.path.isdir(constants.dir_csv):
                os.makedirs(constants.dir_csv)
            log_file_name = os.path.join(
                constants.dir_csv,
                (datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + '.csv'))
            csv_callbk = tf.keras.callbacks.CSVLogger(log_file_name,
                                                      separator=',',
                                                      append=True)

            callbacks_list = [
                checkpoint_callbk, tensorboard_callbk, csv_callbk
            ]

            # train the model for the first half of the epochs, saving via the checkpoint callback
            history_model = model.fit(ds_train,
                                      verbose=1,
                                      epochs=int(epochs / 2),
                                      batch_size=constants.N_BATCH_SIZE,
                                      validation_data=ds_val,
                                      callbacks=callbacks_list)
            # continue training the saved model for the remaining epochs
            history_model = model.fit(ds_train,
                                      verbose=1,
                                      initial_epoch=int(epochs / 2),
                                      epochs=epochs,
                                      batch_size=constants.N_BATCH_SIZE,
                                      validation_data=ds_val,
                                      callbacks=callbacks_list)

            # save final model
            if not os.path.isdir(constants.WEIGHTS_PATH):
                os.makedirs(constants.WEIGHTS_PATH)
            model_save_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
            opt_name = model.optimizer.get_config()['name']
            model_name = f"{model_save_time}_{opt_name}_epochs_{epochs}.h5"
            model_save_path = os.path.join(constants.WEIGHTS_PATH, model_name)
            print(model_save_path)
            # WEIGHTS_PATH was created above, so save directly
            model.save(model_save_path)

            # plot final training curves; for runtime progress, see the TensorBoard logs
            plt.figure()
            plt.subplot(1, 2, 1)
            plt.plot(history_model.history["loss"])
            plt.plot(history_model.history["val_loss"])
            plt.legend(["loss", "val_loss"])
            # plt.xticks(range(constants.H_EPOCHS))
            plt.xlabel("epochs")
            plt.title("Train and val loss")

            plt.subplot(1, 2, 2)
            plt.plot(history_model.history["accuracy"])
            plt.plot(history_model.history["val_accuracy"])
            plt.legend(["accuracy", "val_accuracy"])
            plt.title("Train and Val acc")
            plt.show()
            # optional: raw Keras evaluation on the test set
            # test_history = model.evaluate(ds_test,
            #                               batch_size=constants.N_BATCH_SIZE,
            #                               verbose=1, steps=4)

            eval.evaluate(model=model,
                          ds_test=ds_test,
                          opt=opt,
                          is_training=FLAGS.train,
                          SAVE_RESULT=True,
                          checkpoint_path=None)

        else:

            # Load checkpoint model to evaluate
            check_point_path = constants.trained_model_name

            # check_point_path = 'weights/20201222-220802_ADAM_epochs_100_test_acc_78.h5'
            eval.evaluate(model=model,
                          ds_test=ds_test,
                          opt=opt,
                          is_training=FLAGS.train,
                          SAVE_RESULT=True,
                          checkpoint_path=check_point_path)
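Example #7 reads most of its settings from a project-level constants module that is not shown. A minimal sketch of the fields it references (every value here is an assumed placeholder, not from the source):

# Hypothetical constants.py matching the names used in Example #7.
H_EPOCHS = 100                      # epochs for the custom model
H_TRANSFER_LEARNING_EPOCHS = 20     # epochs when fine-tuning
H_LEARNING_RATE = 1e-4              # Adam learning rate
N_BATCH_SIZE = 32                   # batch size for fit/evaluate
ip_shape = (256, 256)               # input height/width
dir_fit = 'logs/fit'                # TensorBoard log root
dir_cpts = 'checkpoints'            # checkpoint root
dir_csv = 'logs/csv'                # CSVLogger output dir
WEIGHTS_PATH = 'weights'            # final .h5 save dir
trained_model_name = 'weights/trained_model.h5'  # model to evaluate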