Example 1
import argparse

import numpy as np

from geometry_processing.globals import MODEL_WEIGHTS
from geometry_processing.utils.helpers import load_weights
from geometry_processing.models.multiview_cnn import load_model, test

parser = argparse.ArgumentParser(description='Test MVCNN classification.')

parser.add_argument('--matrix_path',
                    required=True,
                    type=str,
                    help='Path to save the confusion matrix.')

args = parser.parse_args()
matrix_path = args.matrix_path

if __name__ == '__main__':
    # Initialize model.
    mvcnn = load_model()
    load_weights(mvcnn, MODEL_WEIGHTS)

    # Run through entire test dataset.
    matrix = test(mvcnn)
    print('Mean Class Accuracy %.4f' % np.mean(np.diag(matrix) / np.sum(matrix, axis=1)))

    # Save matrix to disk.
    if matrix_path:
        print('Saving to %s.' % matrix_path)
        np.save(matrix_path, matrix)
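The accuracy printed above is the mean per-class accuracy: the diagonal of the confusion matrix (correct counts per class) divided by the row sums (true counts per class), then averaged. A minimal sketch of the same arithmetic on a made-up 2x2 matrix:

import numpy as np

# Hypothetical confusion matrix: rows are true classes, columns predictions.
matrix = np.array([[8, 2],    # class 0: 8 of 10 correct
                   [1, 9]])   # class 1: 9 of 10 correct

per_class = np.diag(matrix) / np.sum(matrix, axis=1)  # [0.8, 0.9]
print('Mean Class Accuracy %.4f' % np.mean(per_class))  # 0.8500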
Example 2
import argparse

import numpy as np

from geometry_processing.globals import SALIENCY_MODEL
from geometry_processing.utils.helpers import load_weights
from geometry_processing.models.saliency import build_model, test


parser = argparse.ArgumentParser(description='Test saliency classification.')

parser.add_argument('--matrix_path',
                    required=True,
                    type=str,
                    help='Path to save the confusion matrix.')

args = parser.parse_args()
matrix_path = args.matrix_path


if __name__ == '__main__':
    # Initialize model.
    saliency_cnn = build_model()
    load_weights(saliency_cnn, SALIENCY_MODEL)

    # Run through entire test dataset.
    matrix = test(saliency_cnn)
    per_class_accuracy = np.diag(matrix) / np.sum(matrix, axis=1)
    print('Per Class Accuracy %s' % per_class_accuracy)
    print('Mean Class Accuracy: %.4f' % np.mean(per_class_accuracy))

    # Save matrix to disk.
    if matrix_path:
        print('Saving to %s.' % matrix_path)
        np.save(matrix_path, matrix)
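Because both test scripts persist the matrix with np.save, the metrics can be recomputed offline without rerunning the model. A minimal sketch, assuming a hypothetical saved file 'confusion.npy':

import numpy as np

# Reload a confusion matrix written by either test script above.
matrix = np.load('confusion.npy')  # hypothetical path

per_class = np.diag(matrix) / np.sum(matrix, axis=1)
print('Per Class Accuracy %s' % per_class)
print('Mean Class Accuracy: %.4f' % np.mean(per_class))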
Example 3
import argparse

from geometry_processing.globals import SALIENCY_MODEL, MODEL_WEIGHTS
from geometry_processing.utils.helpers import load_weights
from geometry_processing.models.saliency import build_model, train

parser = argparse.ArgumentParser(description='Train a saliency NN.')

parser.add_argument('--verbose',
                    required=False,
                    type=int,
                    default=1,
                    help='[1] for ncurses, [2] for per epoch.')
parser.add_argument('--log_file',
                    required=False,
                    type=str,
                    default='',
                    help='File to log training, validation loss and accuracy.')

args = parser.parse_args()
verbose = args.verbose
log_file = args.log_file

if __name__ == '__main__':
    # Build and load cached weights.
    saliency_cnn = build_model()
    load_weights(saliency_cnn, MODEL_WEIGHTS)

    # Update model.
    train(saliency_cnn, save_path=SALIENCY_MODEL)
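This training script warm-starts the saliency network from the MVCNN classification weights (MODEL_WEIGHTS) and writes the fine-tuned result to SALIENCY_MODEL. The load_weights helper itself is not shown in these examples; a minimal sketch of what such a helper might do in Keras, assuming name-based layer matching:

# Hypothetical sketch only; the repository's actual load_weights is not shown.
def load_weights_sketch(model, weights_path):
    if weights_path:
        # by_name=True copies weights wherever layer names match, so a
        # partially overlapping architecture can still be warm-started.
        model.load_weights(weights_path, by_name=True)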
Example 4
import argparse

from scipy.stats import entropy
from keras import backend as K

from geometry_processing.globals import (MODEL_WEIGHTS, IMAGE_MEAN, IMAGE_STD,
                                         TRAIN_DIR, VALID_DIR)
from geometry_processing.utils.helpers import load_weights, samplewise_normalize
# Assumed import path for FilenameImageDatagen (not shown in the source).
from geometry_processing.utils.custom_datagen import FilenameImageDatagen
from geometry_processing.models.multiview_cnn import load_model

# Flags reconstructed from how the variables are used below.
parser = argparse.ArgumentParser(description='Label images by model confidence.')

parser.add_argument('--generate_dataset', required=True, type=str,
                    help="Dataset to label: 'train' or 'test'.")
parser.add_argument('--confidence_threshold', required=False, type=float,
                    default=1.0, help='Entropy cutoff for a confident label.')

args = parser.parse_args()
generate_dataset = args.generate_dataset
confidence_threshold = args.confidence_threshold


def generate(datagen, functor):
    # The head of this function was truncated in the source; it is
    # reconstructed from the surviving branch: an image is labeled 1.0 when
    # its prediction entropy is at most the threshold, 0.0 otherwise.
    for full_paths, images in datagen:
        predictions = functor([images, 0])[0]

        for i in range(predictions.shape[0]):
            if entropy(predictions[i]) <= confidence_threshold:
                print(full_paths[i], 1.0)
            else:
                print(full_paths[i], 0.0)

if __name__ == '__main__':
    # Data source and image normalization.
    img_normalize = samplewise_normalize(IMAGE_MEAN, IMAGE_STD)

    # Directory of images.
    if generate_dataset == 'train':
        datagen = FilenameImageDatagen(TRAIN_DIR, preprocess=img_normalize)
    elif generate_dataset == 'test':
        datagen = FilenameImageDatagen(VALID_DIR, preprocess=img_normalize)
    else:
        raise ValueError('Invalid input for --generate_dataset.')

    # Use the fc activations as features.
    model = load_model()
    load_weights(model, MODEL_WEIGHTS)

    # Wrapper around Tensorflow run operation.
    functor = K.function([model.layers[0].input, K.learning_phase()],
                         [model.get_layer('predictions').output])

    generate(datagen, functor)
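K.function compiles a callable mapping input tensors to output tensors; feeding K.learning_phase() as an extra input lets the caller force test-time behavior (dropout off, batch norm in inference mode). A hedged usage sketch, assuming a 224x224 RGB input and the functor defined above:

import numpy as np
from scipy.stats import entropy

# Hypothetical batch; the shape must match the model's input layer.
batch = np.zeros((1, 224, 224, 3), dtype=np.float32)

# Second input: 0 = test phase (dropout disabled), 1 = train phase.
probabilities = functor([batch, 0])[0]

# Low entropy over the class distribution means a confident prediction.
print(probabilities.shape, entropy(probabilities[0]))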