Example #1
def get_prediction(img, convolutional_model: BaselineModel,
                   tensorflow_session: tf.Session):
    """
    Get the prediction for a given input image
    
    :param convolutional_model: The convolutional neural network model
    :param tensorflow_session: The tensorflow session
    :param img: The image for which to generate the prediction
    
    :return: The prediction
    """
    # Split the input image into IMG_PATCH_SIZE x IMG_PATCH_SIZE patches
    data = numpy.asarray(img_crop(img, IMG_PATCH_SIZE, IMG_PATCH_SIZE))

    data_indices = range(data.shape[0])
    img_predictions = []

    # Placeholder for a batch of patches and the softmax output of the network
    data_node = tf.placeholder(tf.float32,
                               shape=(None, EFFECTIVE_INPUT_SIZE,
                                      EFFECTIVE_INPUT_SIZE, NUM_CHANNELS))
    output = tf.nn.softmax(convolutional_model.model_func()(data_node))

    # Run the network over the patches in batches
    for i in range(0, data.shape[0], BATCH_SIZE):
        batch_data = data[data_indices[i:i + BATCH_SIZE]]
        output_prediction = tensorflow_session.run(
            output, feed_dict={data_node: batch_data})
        img_predictions.append(output_prediction)

    # Concatenate the per-batch outputs into one array of per-patch predictions
    stacked_predictions = [
        numpy.stack(batch_predictions_list)
        for batch_predictions_list in img_predictions
    ]
    stacked_batches = numpy.vstack(stacked_predictions)

    return label_to_img(img.shape[0], img.shape[1], IMG_PATCH_SIZE,
                        IMG_PATCH_SIZE, stacked_batches)
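
A minimal usage sketch for get_prediction; how BaselineModel is constructed, how the trained weights are loaded, and the image path are all assumptions about the surrounding project rather than part of the snippet above:

# Hypothetical usage; model construction, weight loading and the image path
# are assumptions, not part of the snippet above.
import matplotlib.image as mpimg
import tensorflow as tf

model = BaselineModel()
with tf.Session() as session:
    # restore or initialise the trained weights here before predicting
    img = mpimg.imread("test_images/test_1.png")
    predicted_mask = get_prediction(img, model, session)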
Example #2
def extract_all_labels(path, num_images=-1, convert_to_1hot=True):
    """
    Extract from ground truth images the class labels and convert them 
    into a 1-hot matrix of the form [image index, label index

    :param path: The common path for all images
    :return: A tensor of 1-hot matrix representation of the class labels
    """
    print("Extracting all labels from " + path)
    # Number of ground truth images to read (-1 means all files in the directory)
    image_count = num_images if num_images != -1 else len(
        next(os.walk(path))[2])

    gt_imgs = []
    for filename in os.listdir(path)[:image_count]:
        print(filename)
        gt_imgs.append(mpimg.imread(os.path.join(path, filename)))

    data = []
    if len(gt_imgs) > 0:
        IMG_WIDTH = gt_imgs[0].shape[0]

        if IMG_WIDTH > EFFECTIVE_INPUT_SIZE:
            # List formed by consecutive series of patches of each image (patches ordered in row order)
            gt_patches = [
                img_crop(i, IMG_PATCH_SIZE, IMG_PATCH_SIZE, is_2d=True)
                for i in gt_imgs
            ]

            # List of all the patches, ordered by image
            data = numpy.asarray([
                gt_patches[i][j] for i in range(len(gt_patches))
                for j in range(len(gt_patches[i]))
            ])

        else:
            # Images are already cropped to patch size
            data = numpy.asarray(gt_imgs)

    # Compute the class label of each patch based on the mean value
    if convert_to_1hot:
        labels = numpy.asarray(
            [value_to_class(numpy.mean(data[i])) for i in range(len(data))])
    else:
        labels = data

    return labels.astype(numpy.float32)
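
The value_to_class helper used above is not shown in these snippets; a plausible sketch, assuming the usual two-class road/background encoding with a hypothetical foreground threshold, is:

# Hypothetical sketch of value_to_class; the threshold value is an assumption.
FOREGROUND_THRESHOLD = 0.25

def value_to_class(mean_value):
    # Map the mean intensity of a ground truth patch to a 1-hot label pair
    if mean_value > FOREGROUND_THRESHOLD:
        return [0, 1]  # foreground (road)
    else:
        return [1, 0]  # background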
Example #3
def extract_labels(filename, permutations):
    """
    Extract from ground truth images the class labels and convert them 
    into a 1-hot matrix of the form [image index, label index
    
    :param filename: The common path for all images
    :return: A tensor of 1-hot matrix representation of the class labels
    """
    """Extract the labels into a 1-hot matrix [image index, label index]."""
    gt_imgs = []
    for i in range(1, NUM_IMAGES + 1):
        imageid = "satImage_%.3d" % i
        image_filename = filename + imageid + ".png"
        if os.path.isfile(image_filename):
            print('Loading ' + image_filename)
            img = mpimg.imread(image_filename)
            gt_imgs.append(img)
        else:
            print('File ' + image_filename + ' does not exist')

    gt_imgs = numpy.array(gt_imgs)

    if SHUFFLE_DATA:
        gt_imgs = gt_imgs[permutations]

    # List formed by consecutive series of patches of each image (patches ordered in row order)
    gt_patches = [
        img_crop(gt_imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, is_2d=True)
        for i in range(len(gt_imgs))
    ]

    # List of all the patches, ordered by image
    data = numpy.asarray([
        gt_patches[i][j] for i in range(len(gt_patches))
        for j in range(len(gt_patches[i]))
    ])

    # Compute the class label of each patch based on the mean value
    labels = numpy.asarray(
        [value_to_class(numpy.mean(data[i])) for i in range(len(data))])

    # Convert to dense 1-hot representation.
    return labels.astype(numpy.float32)
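
The img_crop helper used throughout these snippets is also external; a sketch of what it presumably does, assuming non-overlapping patches and no padding, is:

# Hypothetical sketch of img_crop; the real helper may differ in traversal
# order or padding behaviour.
def img_crop(im, w, h, is_2d=False):
    patches = []
    for i in range(0, im.shape[1], h):
        for j in range(0, im.shape[0], w):
            if is_2d:
                patches.append(im[j:j + w, i:i + h])  # single-channel patch
            else:
                patches.append(im[j:j + w, i:i + h, :])  # multi-channel patch
    return patches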
Example #4
def extract_data(filename, permutations):
    """
    Extract the images into a 4D tensor [image index, y, x, channels]
    Values are rescales from [0, 255] down to [-0.5, 0.5]
    
    :param filename: The common path of all images
    :return: The extracted 4D tensor
    """

    imgs = []
    for i in range(1, NUM_IMAGES + 1):
        imageid = "satImage_%.3d" % i
        image_filename = filename + imageid + ".png"
        if os.path.isfile(image_filename):
            print('Loading ' + image_filename)
            img = mpimg.imread(image_filename)
            imgs.append(img)
        else:
            print('File ' + image_filename + ' does not exist')

    IMG_WIDTH = imgs[0].shape[0]
    IMG_HEIGHT = imgs[0].shape[1]
    N_PATCHES_PER_IMAGE = ((IMG_WIDTH // IMG_PATCH_SIZE) *
                           (IMG_HEIGHT // IMG_PATCH_SIZE))

    imgs = numpy.array(imgs)

    if SHUFFLE_DATA:
        imgs = imgs[permutations]

    # List formed by consecutive series of patches of each image (patches ordered in row order)
    img_patches = [
        img_crop(imgs[i], IMG_PATCH_SIZE, IMG_PATCH_SIZE, is_2d=False)
        for i in range(len(imgs))
    ]

    # List of all the patches, ordered by image
    data = [
        img_patches[i][j] for i in range(len(img_patches))
        for j in range(len(img_patches[i]))
    ]

    return numpy.asarray(data)
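
extract_data and extract_labels take the same permutations argument so that shuffled images and ground truth stay aligned; a minimal sketch of how that permutation might be produced and shared, with hypothetical directory names:

# Hypothetical wiring of the two extractors; the directory names are assumptions.
import numpy

permutations = numpy.random.permutation(NUM_IMAGES)
train_data = extract_data("training/images/", permutations)
train_labels = extract_labels("training/groundtruth/", permutations)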
Example #5
def extract_all_data(path, num_images=-1):
    """
    Extract the images into a 4D tensor [image index, y, x, channels]
    Values are rescales from [0, 255] down to [-0.5, 0.5]

    :param path: The common path of all images
    :return: The extracted 4D tensor
    """

    imgs = []
    print("Extracting all patches from " + path)

    # Number of images to read (-1 means all files in the directory)
    filenames = os.listdir(path)
    image_count = num_images if num_images != -1 else len(filenames)

    for filename in filenames[:image_count]:
        print(filename)
        imgs.append(mpimg.imread(os.path.join(path, filename)))

    data = []
    if len(imgs) > 0:
        IMG_WIDTH = imgs[0].shape[0]

        # Check whether the images are already cropped
        if IMG_WIDTH > EFFECTIVE_INPUT_SIZE:
            # List formed by consecutive series of patches of each image (patches ordered in row order)
            img_patches = [
                img_crop(i, IMG_PATCH_SIZE, IMG_PATCH_SIZE, is_2d=False)
                for i in imgs
            ]

            # List of all the patches, ordered by image
            data = [
                img_patches[i][j] for i in range(len(img_patches))
                for j in range(len(img_patches[i]))
            ]
        else:
            data = imgs
            print("Detected already cropped image")

    return numpy.asarray(data)
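
A sketch of how the directory-based extractors might be combined into a training set; the directory names are assumptions:

# Hypothetical usage of the directory-based extractors; paths are assumptions.
train_data = extract_all_data("training/images/")
train_labels = extract_all_labels("training/groundtruth/")
assert len(train_data) == len(train_labels)  # one label per patch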