+ str(name) + "') ..."

    # Load large aerial view image and corresponding building and highway labels.
    img = np.asarray(Image.open(GROUND_TRUTH_IMAGES_ROOT + name))

    # np.unique(img[5471:6000, 5471:6000, :])
    # for i in range(tiles_img.shape[0]):
    #     for j in range(tiles_img.shape[1]):
    #         print str(i * 12 + j + 1) + " unique: " + str(len(np.unique(tiles_img[i, j, :, :, :])))

    # Get name of label file that corresponds to img.
    label_file_name = name.replace('RGB.png', 'label_ETH_transformed.npy')
    labels = np.load(GROUND_TRUTH_LABELS_ROOT + label_file_name)

    # Split large aerial view image and np.ndarray of labels into tiles.
    tiles_img = lib.image_to_tiles(img, lng_tiles, hgh_tiles)
    tiles_labels = lib.image_to_tiles(labels, lng_tiles, hgh_tiles)

    # Determine shape of the split of 'img' into tiles.
    dim_tiles = lib.tiles_in_image(img, lng_tiles, hgh_tiles)
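    # 'lib.tiles_in_image' is one of the author's own helpers; the line below is only a sketch of
    # its assumed behaviour (how many whole tiles of size hgh_tiles x lng_tiles fit into 'img'),
    # not the actual implementation:
    _dim_tiles_sketch = (img.shape[0] // hgh_tiles, img.shape[1] // lng_tiles)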

    for i in range(0, dim_tiles[0]):
        for j in range(0, dim_tiles[1]):
            # Calculate the zero-padded identifier for this tile, e.g. str(7).rjust(4, '0') -> '0007'.
            identifier = str(number_tile_out).rjust(nr_of_digits_identifier, '0')
            # Save tile image as numpy.ndarray.
            np.save(TILES_STORE_ROOT + 'img_npy/' + identifier + 't_' + name.replace('/', '') + '_img',
                    tiles_img[i, j].astype(np.uint8))

    # Reload the stored tile to sanity-check the np.save / np.load round trip.
    asdf = np.load(TILES_STORE_ROOT + 'img_npy/' + identifier + 't_' + name.replace('/', '') + '_img.npy')
Example No. 2
                                  lab=labels_predicted,
                                  alpha=0.5,
                                  plot_img=True,
                                  store_img=False,
                                  plot_with='pyplot',
                                  dir_name=RESULTS_ROOT,
                                  file_name='test_image_x_' + area_name + RUN_NAME + str(number_of_training_steps),
                                  cmap=cmap1)

    img = lib.rgb_to_bgr(img)
    blue_mean = channel_means['blue_mean']
    green_mean = channel_means['green_mean']
    red_mean = channel_means['red_mean']
    img = lib.subtract_bgr_channel_means(img, blue_mean, green_mean, red_mean)
    tiles_of_image = lib.image_to_tiles(img=img,
                                        hgh=500,
                                        wdt=500)

    # Run the FCN on the first tile; the 'score' blob then holds the per-class score maps.
    net.predict([tiles_of_image[0, 0, :, :, :]], oversample=False)
    caffe_out = net.blobs['score'].data[0, :, :, :]
    # Per-pixel predicted labels: the index of the highest-scoring class at each pixel.
    asdf = np.argmax(caffe_out, 0).astype(np.uint8)
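    # A minimal illustration of the argmax step above (toy data, not from the source): 'score' is
    # assumed to be a (num_classes, height, width) volume, so taking argmax over axis 0 picks the
    # highest-scoring class for every pixel.
    _toy_scores = np.array([[[0.1, 0.9]], [[0.8, 0.2]]])  # shape (2, 1, 2): 2 classes, 1x2 pixels
    _toy_labels = np.argmax(_toy_scores, 0)               # -> array([[1, 0]]): class per pixel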

    lib.plot_sat_image_and_labels(img=img_cropped,
                                  lab=asdf,
                                  alpha=0.5,
                                  plot_img=False,
                                  store_img=True,
                                  plot_with='Image',
                                  dir_name=RESULTS_ROOT,
                                  file_name='test_image_xx_' + area_name + RUN_NAME + str(number_of_training_steps),
                                  cmap=cmap1)
Example No. 3
img_ori = Image.open(image_file)

# Cast image to numpy.ndarray.
img = np.array(img_ori)

# Bring the image into the input format required by the FCN, i.e. swap RGB to BGR and subtract the
# corresponding per-channel means. These per-channel means can be used for the FCN models
# FCN-32s_PASCAL_Context, FCN-16s_PASCAL_Context and FCN-8s_PASCAL_Context.
img = lib.rgb_to_bgr(img)
blue_mean = channel_means['blue_mean']
green_mean = channel_means['green_mean']
red_mean = channel_means['red_mean']
img = lib.subtract_bgr_channel_means(img, blue_mean, green_mean, red_mean)
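
# The two 'lib' helpers above come from the author's own module; the sketches below only show what
# they are assumed to do here (names, signatures and behaviour are illustrative, not the actual
# implementation):
def _rgb_to_bgr_sketch(rgb_img):
    # Reverse the channel axis: (..., R, G, B) -> (..., B, G, R).
    return rgb_img[:, :, ::-1]

def _subtract_bgr_channel_means_sketch(bgr_img, b_mean, g_mean, r_mean):
    # Subtract the per-channel training means so the input matches what the FCN expects.
    return bgr_img.astype(np.float32) - np.array([b_mean, g_mean, r_mean], dtype=np.float32)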

# Partition image into tiles.
tiles_of_image = lib.image_to_tiles(img, tile_hgh, tile_wdt)
# Inspect the shape of the resulting tile grid.
print(tiles_of_image.shape)
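
# 'lib.image_to_tiles' is also the author's helper. A minimal sketch of the assumed behaviour,
# splitting an (H, W, C) image into a (rows, cols, tile_hgh, tile_wdt, C) grid of non-overlapping
# tiles (border pixels that do not fill a whole tile are simply dropped in this sketch):
def _image_to_tiles_sketch(image, hgh, wdt):
    rows, cols = image.shape[0] // hgh, image.shape[1] // wdt
    cropped = image[:rows * hgh, :cols * wdt]
    return cropped.reshape(rows, hgh, cols, wdt, -1).swapaxes(1, 2)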

# ----------------------------------------------------------------------------------------------------------------------
# Classify using the pre-trained FCN. Note that it is unfortunately not possible to move the following 60 or so lines
# of code into a function of their own. When that is done and the function is called from the remote Python interpreter
# on Jan's machine, a problem occurs: a pointer needed while deconstructing the Caffe objects on the GPU is somehow not
# accessible, which causes the remote Python interpreter to crash. We don't want that, so the code stays inline below.
# ----------------------------------------------------------------------------------------------------------------------
# Classify using pre-trained FCN.
data = tiles_of_image
caffe_root = CAFFE_ROOT
model_root = MODEL_ROOT
caffe_mod = CAFFE_MODEL
caffe_mod_par = CAFFE_MODEL_PAR