Example 1
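All three examples are excerpts of the main(argv) entry point of a training script; helpers such as VGGDrop, VGGFlipout, predict and plotting are defined elsewhere in the project. The surrounding module presumably looks roughly like the following (a minimal sketch: the flag names are taken from the code, the default values are placeholders):

import datetime
import functools
import os

from absl import app, flags
import tensorflow as tf

import bdlb  # Bayesian Deep Learning Benchmarks

tfk = tf.keras  # alias used throughout the examples
FLAGS = flags.FLAGS

flags.DEFINE_string("output_dir", "/tmp/output", "Directory for logs, checkpoints and results.")
flags.DEFINE_string("level", "medium", "Benchmark level: 'medium' or 'realworld'.")
flags.DEFINE_integer("batch_size", 64, "Batch size.")
flags.DEFINE_integer("num_epochs", 50, "Number of training epochs.")
flags.DEFINE_integer("num_mc_samples", 10, "Monte Carlo samples at test time.")
flags.DEFINE_integer("num_base_filters", 32, "Base number of convolutional filters.")
flags.DEFINE_float("learning_rate", 1e-4, "Optimizer learning rate.")
flags.DEFINE_float("dropout_rate", 0.1, "Dropout rate.")
flags.DEFINE_float("l2_reg", 5e-5, "L2 regularization strength.")
flags.DEFINE_string("model_dir", None, "Optional checkpoint directory to restore weights from.")
flags.DEFINE_string("uncertainty", "entropy", "Uncertainty measure passed to predict().")
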
def main(argv):

    # print(argv)
    # print(FLAGS)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    out_dir = os.path.join(FLAGS.output_dir, 'EDL', current_time)

    ###########################
    # Hyperparameters & Model #
    ###########################
    input_shape = dict(medium=(256, 256, 3),
                       realworld=(512, 512, 3))[FLAGS.level]

    hparams = dict(dropout_rate=FLAGS.dropout_rate,
                   num_base_filters=FLAGS.num_base_filters,
                   learning_rate=FLAGS.learning_rate,
                   l2_reg=FLAGS.l2_reg,
                   input_shape=input_shape)

    classifier = VGGDrop(**hparams)
    # classifier.summary()
    print('********** Output dir: {} ************'.format(out_dir))

    if FLAGS.model_dir:
        FLAGS.num_epochs = 0  # skip training when restoring from a checkpoint
        latest = tf.train.latest_checkpoint(FLAGS.model_dir)
        print('********** Loading checkpoint weights: {}'.format(latest))
        classifier.load_weights(latest)

    #############
    # Load Task #
    #############
    dtask = bdlb.load(
        benchmark="diabetic_retinopathy_diagnosis",
        level=FLAGS.level,
        batch_size=FLAGS.batch_size,
        download_and_prepare=False,  # do not download data from this script
    )
    ds_train, ds_validation, ds_test = dtask.datasets

    #################
    # Training Loop #
    #################
    history = classifier.fit(
        ds_train,
        epochs=FLAGS.num_epochs,
        validation_data=ds_validation,
        class_weight=dtask.class_weight(),
        callbacks=[
            tfk.callbacks.TensorBoard(
                log_dir=os.path.join(out_dir, "tensorboard"),
                update_freq="epoch",
                write_graph=True,
                histogram_freq=1,
            ),
            tfk.callbacks.ModelCheckpoint(
                filepath=os.path.join(out_dir, "checkpoints",
                                      "weights-{epoch}.ckpt"),
                verbose=1,
                save_weights_only=True,
            )
        ],
    )
    plotting.tfk_history(history, output_dir=os.path.join(out_dir, "history"))

    ##############
    # Evaluation #
    ##############
    additional_metrics = []
    try:
        import sail.metrics
        additional_metrics.append(
            ('ECE', sail.metrics.GPleissCalibrationError()))
    except ImportError:
        import warnings
        warnings.warn('Could not import SAIL metrics.')
    dtask.evaluate(functools.partial(predict,
                                     model=classifier,
                                     type=FLAGS.uncertainty),
                   dataset=ds_test,
                   output_dir=os.path.join(out_dir, 'evaluation'),
                   additional_metrics=additional_metrics)
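
The predict callable handed to dtask.evaluate is not part of this excerpt. For this single-forward-pass model it might look roughly like the sketch below, assuming the benchmark expects the estimator to return a (prediction, uncertainty) pair for each batch of images and that the classifier outputs a sigmoid probability:

import numpy as np

def predict(x, model, type="entropy"):
    """Sketch: one deterministic forward pass, uncertainty from predictive entropy."""
    p = model(x, training=False).numpy().squeeze()  # probability of disease in [0, 1]
    if type == "entropy":
        # Binary predictive entropy as the per-example uncertainty score.
        uncertainty = -(p * np.log(p + 1e-12) + (1.0 - p) * np.log(1.0 - p + 1e-12))
    else:
        uncertainty = np.zeros_like(p)  # unknown measure: report no uncertainty
    return p, uncertainty
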
Example 2
def main(argv):

    print(argv)
    print(FLAGS)

    ###########################
    # Hyperparameters & Model #
    ###########################
    input_shape = dict(medium=(256, 256, 3),
                       realworld=(512, 512, 3))[FLAGS.level]

    hparams = dict(num_base_filters=FLAGS.num_base_filters,
                   learning_rate=FLAGS.learning_rate,
                   input_shape=input_shape)
    classifier = VGGFlipout(**hparams)
    classifier.summary()

    #############
    # Load Task #
    #############
    dtask = bdlb.load(
        benchmark="diabetic_retinopathy_diagnosis",
        level=FLAGS.level,
        batch_size=FLAGS.batch_size,
        download_and_prepare=False,  # do not download data from this script
    )
    ds_train, ds_validation, ds_test = dtask.datasets

    #################
    # Training Loop #
    #################
    history = classifier.fit(
        ds_train,
        epochs=FLAGS.num_epochs,
        validation_data=ds_validation,
        class_weight=dtask.class_weight(),
        callbacks=[
            tfk.callbacks.TensorBoard(
                log_dir=os.path.join(FLAGS.output_dir, "tensorboard"),
                update_freq="epoch",
                write_graph=True,
                histogram_freq=1,
            ),
            tfk.callbacks.ModelCheckpoint(
                filepath=os.path.join(
                    FLAGS.output_dir,
                    "checkpoints",
                    "weights-{epoch}.ckpt",
                ),
                verbose=1,
                save_weights_only=True,
            )
        ],
    )
    plotting.tfk_history(history,
                         output_dir=os.path.join(FLAGS.output_dir, "history"))

    ##############
    # Evaluation #
    ##############
    dtask.evaluate(functools.partial(predict,
                                     model=classifier,
                                     num_samples=FLAGS.num_mc_samples,
                                     type=FLAGS.uncertainty),
                   dataset=ds_test,
                   output_dir=FLAGS.output_dir)
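
Here predict also receives num_samples; a plausible sketch is to average over several stochastic forward passes of the Flipout network (whose variational layers resample their weights on every call) and to score uncertainty with the entropy of the averaged prediction or the spread of the samples:

import numpy as np

def predict(x, model, num_samples, type="entropy"):
    """Sketch: Monte Carlo estimate over num_samples stochastic forward passes."""
    samples = np.stack(
        [model(x, training=False).numpy().squeeze() for _ in range(num_samples)],
        axis=0)                      # shape: (num_samples, batch)
    mean = samples.mean(axis=0)      # MC-averaged probability of disease
    if type == "entropy":
        uncertainty = -(mean * np.log(mean + 1e-12)
                        + (1.0 - mean) * np.log(1.0 - mean + 1e-12))
    else:
        uncertainty = samples.std(axis=0)  # e.g. sample spread as the uncertainty
    return mean, uncertainty
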
Example 3
def main(argv):

    print(argv)
    print(FLAGS)

    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
    out_dir = os.path.join(FLAGS.output_dir, 'MCdropout', current_time)
    file_writer = tf.summary.create_file_writer(
        os.path.join(out_dir, 'summary'))
    file_writer.set_as_default()

    ###########################
    # Hyperparameters & Model #
    ###########################
    input_shape = dict(medium=(256, 256, 3),
                       realworld=(512, 512, 3))[FLAGS.level]

    hparams = dict(dropout_rate=FLAGS.dropout_rate,
                   num_base_filters=FLAGS.num_base_filters,
                   learning_rate=FLAGS.learning_rate,
                   l2_reg=FLAGS.l2_reg,
                   input_shape=input_shape)
    classifier = VGGDrop(**hparams)
    classifier.summary()
    print('********** Output dir: {} ************'.format(out_dir))

    #############
    # Load Task #
    #############
    dtask = bdlb.load(
        benchmark="diabetic_retinopathy_diagnosis",
        level=FLAGS.level,
        batch_size=FLAGS.batch_size,
        download_and_prepare=False,  # do not download data from this script
    )
    ds_train, ds_validation, ds_test = dtask.datasets

    #################
    # Training Loop #
    #################
    history = classifier.fit(
        ds_train,
        epochs=FLAGS.num_epochs,
        validation_data=ds_validation,
        class_weight=dtask.class_weight(),
        callbacks=[
            tfk.callbacks.TensorBoard(
                log_dir=os.path.join(out_dir, 'fit'),
                update_freq="epoch",
                write_graph=True,
                histogram_freq=1,
            ),
            tfk.callbacks.ModelCheckpoint(
                filepath=os.path.join(out_dir, "checkpoints",
                                      "weights-{epoch}.ckpt"),
                verbose=1,
                save_weights_only=True,
            )
        ],
    )
    plotting.tfk_history(history, output_dir=os.path.join(out_dir, "history"))

    ##############
    # Evaluation #
    ##############
    dtask.evaluate(functools.partial(predict,
                                     model=classifier,
                                     num_samples=FLAGS.num_mc_samples,
                                     type=FLAGS.uncertainty),
                   dataset=ds_test,
                   output_dir=os.path.join(out_dir, 'evaluation'))
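
For the MC dropout model the same Monte Carlo predict applies, except each forward pass must run with training=True so that dropout stays active at test time. Each example is run as an absl app, so the module presumably ends with something like this (the script name and flag values in the comment are placeholders):

# Typical invocation (illustrative only):
#   python main.py --level medium --batch_size 64 --num_epochs 50 \
#       --num_mc_samples 10 --uncertainty entropy --output_dir /tmp/output
if __name__ == "__main__":
    app.run(main)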