# Example #1
# 0
def predict_epistemic_uncertainties(batch_size, verbose,
                                    epistemic_monte_carlo_simulations, debug,
                                    full_model, x_train, y_train, x_test,
                                    y_test, encoder, dataset, model_batch_size,
                                    model_epochs,
                                    model_monte_carlo_simulations):
    """Predict epistemic uncertainty for the train and test sets.

    Builds a testable epistemic-uncertainty model from the saved Bayesian
    config and runs Monte Carlo dropout predictions on both splits.

    Returns:
        Tuple of (epistemic_uncertainties_train, epistemic_uncertainties_test),
        each shaped (N,).
    """
    # set learning phase to 1 so that Dropout is on. In keras master you can set this
    # on the TimeDistributed layer
    K.set_learning_phase(1)
    min_image_size = list(encoder_min_input_size(encoder))
    min_image_size.append(3)  # add the RGB channel dimension

    config = BayesianConfig(encoder, dataset, model_batch_size, model_epochs,
                            model_monte_carlo_simulations)
    epistemic_model = load_testable_epistemic_uncertainty_model(
        full_model, min_image_size, config, epistemic_monte_carlo_simulations)

    # Shape (N)
    print("Predicting epistemic_uncertainties.")
    if hasattr(x_train, 'shape'):
        # In-memory arrays: predict directly.
        epistemic_uncertainties_train = epistemic_model.predict(
            x_train, batch_size=batch_size, verbose=verbose)[0]
        epistemic_uncertainties_test = epistemic_model.predict(
            x_test, batch_size=batch_size, verbose=verbose)[0]
    else:
        # Generator input: compute the number of steps from the label count.
        # BUG FIX: was len(y_train / batch_size) — the division must happen
        # on the length, not on the data, or the step count is wrong.
        epistemic_uncertainties_train = epistemic_model.predict_generator(
            x_train,
            int(math.ceil(len(y_train) / batch_size)),
            verbose=verbose)[0]
        epistemic_uncertainties_test = epistemic_model.predict_generator(
            x_test, int(math.ceil(len(y_test) / batch_size)),
            verbose=verbose)[0]

    return (epistemic_uncertainties_train, epistemic_uncertainties_test)
def main(_):
    """Encode the train/test images with the chosen encoder and persist them.

    Runs the dataset through a compiled (but untrained) encoder, pickles the
    encoded features alongside their labels, and optionally uploads the
    pickles to S3 and stops the instance when running on AWS.
    """
    config = BatchConfig(FLAGS.encoder, FLAGS.dataset)
    config.info()

    # BUG FIX: config.batch_folder is a method and was previously passed
    # uncalled to makedirs; also prefer truthiness over "== False".
    if not os.path.exists(full_path(config.batch_folder())):
        os.makedirs(full_path(config.batch_folder()))

    min_image_size = encoder_min_input_size(FLAGS.encoder)

    ((x_train, y_train),
     (x_test, y_test)) = test_train_data(FLAGS.dataset,
                                         min_image_size,
                                         FLAGS.debug,
                                         augment_data=FLAGS.augment,
                                         batch_size=FLAGS.batch_size)

    input_shape = list(min_image_size)
    input_shape.append(3)  # add the RGB channel dimension

    encoder = create_encoder_model(FLAGS.encoder, input_shape)

    print("Compiling model.")
    # Compilation is required before predict; the loss/optimizer are unused.
    encoder.compile(optimizer='sgd', loss='mean_squared_error')

    print("Encoding training data.")
    x_train_encoded = encoder.predict_generator(
        x_train,
        int(ceil(len(y_train) / FLAGS.batch_size)),
        verbose=FLAGS.verbose)

    print("Encoding test data.")
    x_test_encoded = encoder.predict_generator(
        x_test,
        int(ceil(len(y_test) / FLAGS.batch_size)),
        verbose=FLAGS.verbose)

    print("Finished encoding data.")

    # Augmented data is stored under distinct file names.
    if FLAGS.augment:
        train_file_name = "/augment-train.p"
        test_file_name = "/augment-test.p"
    else:
        train_file_name = "/train.p"
        test_file_name = "/test.p"

    train_file = config.batch_folder() + train_file_name
    test_file = config.batch_folder() + test_file_name
    save_pickle_file(train_file, (x_train_encoded, y_train))
    save_pickle_file(test_file, (x_test_encoded, y_test))

    # Only sync artifacts to S3 for real (non-debug) runs on AWS.
    if isAWS() and not FLAGS.debug:
        upload_s3(train_file)
        upload_s3(test_file)

    if isAWS() and FLAGS.stop:
        stop_instance()
# Example #3
# 0
def predict(batch_size, verbose, epistemic_monte_carlo_simulations, debug,
            full_model, encoder, dataset, model_batch_size, model_epochs,
            model_monte_carlo_simulations):
    """Load the appropriate train/test data and delegate to predict_on_data.

    A full model predicts on raw images sized for the encoder; otherwise the
    pre-encoded batch data for the encoder/dataset pair is used.
    """
    min_image_size = encoder_min_input_size(encoder)

    if full_model:
        train_pair, test_pair = test_train_data(dataset,
                                                min_image_size[0:2],
                                                debug,
                                                augment_data=False,
                                                batch_size=batch_size)
    else:
        train_pair, test_pair = test_train_batch_data(dataset,
                                                      encoder,
                                                      debug,
                                                      augment_data=False)

    (x_train, y_train) = train_pair
    (x_test, y_test) = test_pair

    return predict_on_data(batch_size, verbose,
                           epistemic_monte_carlo_simulations, debug,
                           full_model, x_train, y_train, x_test, y_test,
                           encoder, dataset, model_batch_size, model_epochs,
                           model_monte_carlo_simulations)
# Example #4
# 0
def main(_):
    """Train the Bayesian classifier head on pre-encoded batch data.

    Builds a two-headed model (logits-with-variance and softmax outputs),
    trains it with the aleatoric-uncertainty loss on the logits head, and —
    when running on AWS outside debug mode — uploads the model artifacts.
    """
    config = BayesianConfig(FLAGS.encoder, FLAGS.dataset, FLAGS.batch_size,
                            FLAGS.epochs, FLAGS.monte_carlo_simulations)
    config.info()

    min_image_size = encoder_min_input_size(FLAGS.encoder)

    ((x_train, y_train), (x_test,
                          y_test)) = test_train_batch_data(FLAGS.dataset,
                                                           FLAGS.encoder,
                                                           FLAGS.debug,
                                                           augment_data=True)

    min_image_size = list(min_image_size)
    min_image_size.append(3)  # add the RGB channel dimension
    num_classes = y_train.shape[-1]

    model = create_bayesian_model(FLAGS.encoder, min_image_size, num_classes)

    if FLAGS.debug:
        # Debug runs: inspect the model, skip checkpoints/logging.
        print(model.summary())
        callbacks = None
    else:
        callbacks = [
            ModelCheckpoint(config.model_file(),
                            verbose=FLAGS.verbose,
                            save_best_only=True),
            CSVLogger(config.csv_log_file()),
            EarlyStopping(monitor='val_logits_variance_loss',
                          min_delta=FLAGS.min_delta,
                          patience=FLAGS.patience,
                          verbose=1)
        ]

    print("Compiling model.")
    model.compile(optimizer=Adam(lr=1e-3, decay=0.001),
                  loss={
                      'logits_variance':
                      bayesian_categorical_crossentropy(
                          FLAGS.monte_carlo_simulations, num_classes),
                      'softmax_output':
                      'categorical_crossentropy'
                  },
                  metrics={'softmax_output': metrics.categorical_accuracy},
                  # The variance head is down-weighted relative to the
                  # standard softmax classification loss.
                  loss_weights={
                      'logits_variance': .2,
                      'softmax_output': 1.
                  })

    print("Starting model train process.")
    # Both heads train against the same one-hot labels.
    model.fit(x_train, {
        'logits_variance': y_train,
        'softmax_output': y_train
    },
              callbacks=callbacks,
              verbose=FLAGS.verbose,
              epochs=FLAGS.epochs,
              batch_size=FLAGS.batch_size,
              validation_data=(x_test, {
                  'logits_variance': y_test,
                  'softmax_output': y_test
              }))

    print("Finished training model.")

    # Prefer truthiness over "== False" (idiom fix; behavior unchanged).
    if isAWS() and not FLAGS.debug:
        upload_s3(config.model_file())
        upload_s3(config.csv_log_file())

    if isAWS() and FLAGS.stop:
        stop_instance()
# Example #5
# 0
def predict_softmax_aleatoric_uncertainties(batch_size, verbose, debug,
                                            full_model, x_train, y_train,
                                            x_test, y_test, encoder, dataset,
                                            model_batch_size, model_epochs,
                                            model_monte_carlo_simulations):
    """Predict softmax outputs and aleatoric uncertainty per example.

    Loads the testable model for the saved config and returns, for each
    split, a list of per-example dicts holding raw softmax/logits, the
    predicted and true labels, the aleatoric uncertainty, and correctness.

    Returns:
        Tuple of (train_results, test_results), each a list of dicts.
    """
    num_classes = len(y_train[0])
    min_image_size = encoder_min_input_size(encoder)
    min_image_size = list(min_image_size)
    min_image_size.append(3)  # add the RGB channel dimension
    config = BayesianConfig(encoder, dataset, model_batch_size, model_epochs,
                            model_monte_carlo_simulations)
    model = load_testable_model(encoder, config, model_monte_carlo_simulations,
                                num_classes, min_image_size, full_model)

    print("Predicting softmax and aleatoric_uncertainties.")
    if hasattr(x_train, 'shape'):
        # In-memory arrays: predict directly.
        predictions_train = model.predict(x_train,
                                          batch_size=batch_size,
                                          verbose=verbose)
        predictions_test = model.predict(x_test,
                                         batch_size=batch_size,
                                         verbose=verbose)
    else:
        # Generator input: compute the number of steps from the label count.
        # BUG FIX: was len(y_train / batch_size) — the division must happen
        # on the length, not on the data, or the step count is wrong.
        predictions_train = model.predict_generator(
            x_train,
            int(math.ceil(len(y_train) / batch_size)),
            verbose=verbose)
        predictions_test = model.predict_generator(
            x_test, int(math.ceil(len(y_test) / batch_size)), verbose=verbose)

    # Output head 0 packs [logits | variance]; split it apart.
    # Shape (N)
    aleatoric_uncertainties_train = np.reshape(
        predictions_train[0][:, num_classes:], (-1))
    aleatoric_uncertainties_test = np.reshape(
        predictions_test[0][:, num_classes:], (-1))

    logits_train = predictions_train[0][:, 0:num_classes]
    logits_test = predictions_test[0][:, 0:num_classes]

    # Shape (N, C): output head 1 is the softmax distribution.
    softmax_train = predictions_train[1]
    softmax_test = predictions_test[1]

    p_train = np.argmax(softmax_train, axis=1)
    p_test = np.argmax(softmax_test, axis=1)
    l_train = np.argmax(y_train, axis=1)
    l_test = np.argmax(y_test, axis=1)
    # Shape (N): 1 where the prediction matches the label, else 0.
    prediction_comparison_train = np.equal(p_train, l_train).astype(int)
    prediction_comparison_test = np.equal(p_test, l_test).astype(int)

    train_results = [{
        'softmax_raw': softmax_train[i],
        'softmax': p_train[i],
        'logits_raw': logits_train[i],
        'label': np.argmax(y_train[i]),
        'label_expanded': y_train[i],
        'aleatoric_uncertainty': aleatoric_uncertainties_train[i],
        'is_correct': prediction_comparison_train[i]
    } for i in range(len(prediction_comparison_train))]

    test_results = [{
        'softmax_raw': softmax_test[i],
        'softmax': p_test[i],
        'logits_raw': logits_test[i],
        'label': np.argmax(y_test[i]),
        'label_expanded': y_test[i],
        'aleatoric_uncertainty': aleatoric_uncertainties_test[i],
        'is_correct': prediction_comparison_test[i]
    } for i in range(len(prediction_comparison_test))]

    return (train_results, test_results)