Code Example #1
File: utils.py Project: abhyantrika/tile_decoder
import numpy as np
import patchify
import torch
from PIL import Image


def decode_all_cae(encoders, model, config):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    enc_shape = encoders.shape

    decoded = []
    for i in range(enc_shape[0]):
        for j in range(enc_shape[1]):
            with torch.no_grad():
                out = model.decode(encoders[i][j])
            decoded.append(out)

    decoded = torch.stack(decoded)
    dec_shape = decoded.shape
    decoded = decoded.view(enc_shape[0], enc_shape[1], dec_shape[1],
                           dec_shape[2], dec_shape[3], dec_shape[4])
    decoded = decoded.permute(0, 1, 2, 4, 5, 3).cpu().detach().numpy()

    out_img = patchify.unpatchify(
        decoded, (config['img_size'][1], config['img_size'][0],
                  3))  # config stores (W, H); unpatchify expects (H, W, C)
    out_img = out_img * 255
    out_img = out_img.astype(np.uint8)

    pil_img = Image.fromarray(out_img)
    pil_img.save('resources/out_decoded.jpg')
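
This example leans on the 6-D patch layout that the patchify package produces for 3-D (H, W, C) arrays. A minimal round-trip sketch of that contract, using hypothetical sizes (non-overlapping tiles, so the step equals the patch size):

import numpy as np
from patchify import patchify, unpatchify

img = np.random.rand(512, 512, 3)                  # hypothetical H, W, C
patches = patchify(img, (128, 128, 3), step=128)   # shape (4, 4, 1, 128, 128, 3)
restored = unpatchify(patches, img.shape)
assert np.allclose(img, restored)                  # exact when tiles do not overlap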
Code Example #2
def save_priorinfo_image(patch_size=10, window_step=10, batch_size=5):
    # requires torch, numpy (np), patchify/unpatchify and tqdm's trange;
    # load_mat_file and alpha_prior are project-local helpers
    cuda = torch.device('cuda')
    roi, t1_landsat, t2_sentinel = load_mat_file(
        path=r'resources/Flood_UiT_HCD_California_2017_Luppino.mat')

    t1_patches = patchify(t1_landsat, (patch_size, patch_size, 11),
                          window_step)
    h, w, _, _, _, _ = t1_patches.shape
    t1_patches = torch.tensor(
        np.reshape(t1_patches, (-1, patch_size, patch_size, 11)))

    t2_patches = patchify(t2_sentinel, (patch_size, patch_size, 3),
                          window_step)
    t2_patches = torch.tensor(
        np.reshape(t2_patches, (-1, patch_size, patch_size, 3)))

    assert t1_patches.shape[0] == t2_patches.shape[0]

    full_image_alpha_prior = torch.zeros(
        [t1_patches.shape[0], patch_size, patch_size]).to(cuda)

    for patch_idx in trange(0, t1_patches.shape[0], batch_size):
        var = alpha_prior(
            t1_patches[patch_idx:patch_idx + batch_size].to(cuda),
            t2_patches[patch_idx:patch_idx + batch_size].to(cuda))
        full_image_alpha_prior[patch_idx:patch_idx + batch_size] = var

    full_image_alpha_prior = full_image_alpha_prior.cpu().detach().numpy()
    full_image_alpha_prior = unpatchify(
        np.reshape(full_image_alpha_prior, (h, w, patch_size, patch_size)),
        (3500, 2000))
    save_image(torch.tensor(full_image_alpha_prior),
               'resources/alpha_prior_image.png')
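
Note that for 2-D inputs unpatchify expects a 4-D array (grid_rows, grid_cols, patch_h, patch_w); the reshape above restores the grid that patchify produced before the patches were flattened for batching. A tiny sketch of that 2-D contract with hypothetical sizes:

import numpy as np
from patchify import patchify, unpatchify

gray = np.random.rand(100, 60)
grid = patchify(gray, (10, 10), step=10)       # (10, 6, 10, 10)
flat = grid.reshape(-1, 10, 10)                # flattened, e.g. for batched inference
back = unpatchify(flat.reshape(grid.shape), gray.shape)
assert np.allclose(gray, back)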
Code Example #3
def merge(self, patches: list):
    patches = np.concatenate(patches, 0)
    chunk_main_patches = patches[:self.patchcount[0]]
    chunk_right_patches = patches[self.patchcount[0]:self.patchcount[0] +
                                  self.patchcount[1]]
    chunk_lower_patches = patches[self.patchcount[0] +
                                  self.patchcount[1]:self.patchcount[0] +
                                  self.patchcount[1] + self.patchcount[2]]
    chunk_corner_patches = patches[-1]
    chunk_main_patches = rearrange(
        chunk_main_patches,
        '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
        p1=self.chunkcountshape[0][0],
        p2=self.chunkcountshape[0][1],
        p3=self.chunkcountshape[0][2])
    chunk_right_patches = rearrange(
        chunk_right_patches,
        '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
        p1=self.chunkcountshape[1][0],
        p2=self.chunkcountshape[1][1],
        p3=self.chunkcountshape[1][2])
    chunk_lower_patches = rearrange(
        chunk_lower_patches,
        '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
        p1=self.chunkcountshape[2][0],
        p2=self.chunkcountshape[2][1],
        p3=self.chunkcountshape[2][2])
    chunk_main = unpatchify(chunk_main_patches, self.chunkshape[0])
    chunk_right = unpatchify(chunk_right_patches, self.chunkshape[1])
    chunk_lower = unpatchify(chunk_lower_patches, self.chunkshape[2])
    chunk_corner = chunk_corner_patches
    # accumulate the four chunks into the full image buffer
    img = np.zeros((self.h, self.w, self.c), dtype=float)  # np.float was removed in NumPy 1.24
    img[:self.chunk_h, :self.chunk_w, :] += chunk_main
    img[:self.chunk_h, -self.ps_w:, :] += chunk_right
    img[-self.ps_h:, :self.chunk_w, :] += chunk_lower
    img[-self.ps_h:, -self.ps_w:, :] += chunk_corner
    # count how many chunks contributed to each pixel, then average the overlaps
    weight = np.zeros((self.h, self.w, self.c), dtype=float)
    weight[:self.chunk_h, :self.chunk_w, :] += np.ones_like(chunk_main)
    weight[:self.chunk_h, -self.ps_w:, :] += np.ones_like(chunk_right)
    weight[-self.ps_h:, :self.chunk_w, :] += np.ones_like(chunk_lower)
    weight[-self.ps_h:, -self.ps_w:, :] += np.ones_like(chunk_corner)
    img = img / weight
    return img
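
The merge above (and its variant in Code Example #16) blends overlapping chunks by accumulating values into an image buffer and dividing by a weight map that counts how many chunks covered each pixel. A stripped-down sketch of that averaging idea with two hypothetical overlapping tiles:

import numpy as np

img = np.zeros((8, 12))
weight = np.zeros((8, 12))
tile_a = np.full((8, 8), 2.0)     # covers columns 0..7
tile_b = np.full((8, 8), 4.0)     # covers columns 4..11, overlapping tile_a
img[:, :8] += tile_a
weight[:, :8] += 1
img[:, 4:] += tile_b
weight[:, 4:] += 1
blended = img / weight            # overlap region (columns 4..7) averages to 3.0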
Code Example #4
import numpy as np
from patchify import unpatchify


def reconstruct_residual(crops, H, W, D, pshape0, pshape1, crop_size):

    # restore the Fortran-ordered patch grid: (rows, cols, crop, crop, 3)
    patchify_patches = np.reshape(crops,
                                  (pshape0, pshape1, crop_size, crop_size, 3),
                                  order='F')

    c1 = patchify_patches[:, :, :, :, 0]
    c2 = patchify_patches[:, :, :, :, 1]
    c3 = patchify_patches[:, :, :, :, 2]

    dec1 = unpatchify(c1, (H, W))
    dec2 = unpatchify(c2, (H, W))
    dec3 = unpatchify(c3, (H, W))

    img = np.stack((dec1, dec2, dec3), axis=-1)
    img = img.astype('int16')

    return img
Code Example #5
def Dic_proj_one(data, n_coef):
    """
    The dictionary projection method.
    Relies on module-level globals: patch_size, step, dico, V,
    initial_patch_size and image_size.
    """
    data = patchify(data, patch_size, step)
    data = data.reshape(-1, patch_size[0] * patch_size[1])
    intercept = np.mean(data, axis=0)
    data -= intercept

    dico.set_params(transform_algorithm='omp',
                    transform_n_nonzero_coefs=n_coef)
    code = dico.transform(data)

    patch = np.dot(code, V)
    patch += intercept
    patch = np.reshape(patch, initial_patch_size)

    im_re = unpatchify(np.asarray(patch), image_size)

    return im_re
Code Example #6
def Dic_proj_verso(data, n_coef, alpha):
    """
    The dictionary projection method for the verso side.
    Relies on module-level globals: patch_size, step, dico_verso, V_verso,
    initial_patch_size and image_size; the alpha argument is unused here.
    """
    data = patchify(data, patch_size, step)
    data = data.reshape(-1, patch_size[0] * patch_size[1])
    intercept = np.mean(data, axis=0)
    data -= intercept

    dico_verso.set_params(transform_algorithm='omp',
                          transform_n_nonzero_coefs=n_coef)
    code = dico_verso.transform(data)

    patch = np.dot(code, V_verso)
    patch += intercept

    patch = np.reshape(patch, initial_patch_size)
    # if we use threshold then we have this
    # patch -= patch.min()
    # patch /= patch.max()

    im_re = unpatchify(np.asarray(patch), image_size)
    return im_re
Code Example #7
def dictionary_projection(image, dico, V, n_coef, patch_size, step):
    """
    Function that does the dictionary projection on the input image
    
    Parameters
    ----------
    image: numpy.matrix, the distorted image
    dico: a dictionary (a set of atoms) that can best be used to represent data using a sparse code
    V: array, [n_components, n_features], the components of the fitted data
    n_coef: int, the number of non-zero atoms
    patch_size: (int, int), the size of the patches to be extracted from the image
    step: int, the step of the moving patches, overlap of patches = patch_size - step
    
    Return
    -----------
    reconstructed_image: numpy.matrix, the estimated image reconstructed from the learned patches
    """

    global initial_patch_size

    data = patchify(image, patch_size, step)
    data = data.reshape(-1, patch_size[0] * patch_size[1])
    intercept = np.mean(data, axis=0)
    data -= intercept

    dico.set_params(transform_algorithm='omp',
                    transform_n_nonzero_coefs=n_coef)
    code = dico.transform(data)

    patch = np.dot(code, V)
    patch += intercept

    patch = np.reshape(patch, initial_patch_size)
    reconstructed_image = unpatchify(np.asarray(patch), np.shape(image))

    return reconstructed_image
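
A hypothetical end-to-end use of dictionary_projection with scikit-learn, where the dictionary is learned from clean patches. All names and parameter values below are illustrative rather than taken from the original project, and initial_patch_size is assigned at module level because the function reads it as a global:

import numpy as np
from patchify import patchify
from sklearn.decomposition import MiniBatchDictionaryLearning

clean = np.random.rand(64, 64)
noisy = clean + 0.1 * np.random.randn(64, 64)

train = patchify(clean, (8, 8), 4).reshape(-1, 64)
train = train - np.mean(train, axis=0)
dico = MiniBatchDictionaryLearning(n_components=64, alpha=1.0)
V = dico.fit(train).components_

initial_patch_size = patchify(noisy, (8, 8), 4).shape   # (15, 15, 8, 8)
restored = dictionary_projection(noisy, dico, V, n_coef=5,
                                 patch_size=(8, 8), step=4)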
Code Example #8
File: __main__.py Project: andstor/forest-detection
"""excerpt begins mid call: the keyword arguments below belong to a plotting
helper that was truncated upstream (possibly keras_unet.utils.plot_imgs);
they are kept inside this docstring for context
    org_imgs=x_val[:,:,:,:3], # required - original images
    mask_imgs=y_val[:,:,:,:3], # required - ground truth masks
    pred_imgs=pred[:,:,:,:], # optional - predicted masks
    nm_img_to_plot=10, # optional - number of images to plot
    alpha=1,
    color="red"
    )
"""


print(pred.shape)
print(TCI.shape)
patches2 = pred.reshape(16, 16, 1, *pred.shape[-3:])
print("reconstruct", patches2.shape)

reconstructed_image = unpatchify(patches2, (512,  512, 1))
masked_data = reconstructed_image
#masked_data = np.where((masked_data)>0.85, 1, 0)
masked_data = np.ma.masked_where(masked_data < 0.5, masked_data)
#masked_data = np.ma.masked_where(masked_data < 0.75, masked_data)
#masked_data = np.ma.masked_where(masked_data <= 0, masked_data)

plt.figure(figsize=(10, 10))
plt.imshow(TCI, 'jet', interpolation='none')
plt.savefig('foo.png', bbox_inches='tight')
plt.show()

plt.figure(figsize=(10, 10))
#plt.imshow(TCI, cmap='jet', interpolation='none')
im = plt.imshow(masked_data, cmap='gray', interpolation='none', alpha=1)
#plt.colorbar(im)
Code Example #9
      # (fragment: inside a loop over patches extracted from the large volume)
      single_patch_prediction = my_model.predict(single_patch_3ch_input)
      single_patch_prediction_argmax = np.argmax(single_patch_prediction, axis=4)[0,:,:,:]
      predicted_patches.append(single_patch_prediction_argmax)

#Convert list to numpy array
predicted_patches = np.array(predicted_patches)
print(predicted_patches.shape)

#Reshape to the shape we had after patchifying
predicted_patches_reshaped = np.reshape(predicted_patches, 
                                        (patches.shape[0], patches.shape[1], patches.shape[2],
                                         patches.shape[3], patches.shape[4], patches.shape[5]) )
print(predicted_patches_reshaped.shape)

#Repatch individual patches into the original volume shape
reconstructed_image = unpatchify(predicted_patches_reshaped, large_image.shape)
print(reconstructed_image.shape)

print(reconstructed_image.dtype)

#Convert to uint8 so we can open image in most image viewing software packages
reconstructed_image=reconstructed_image.astype(np.uint8)
print(reconstructed_image.dtype)

#Now save it as segmented volume.
from tifffile import imsave
imsave('/content/drive/MyDrive/Colab Notebooks/data/sandstone_3d/all_images/segmented.tif', reconstructed_image)

#If you would like to save the volume as a multichannel dataset...

print(np.unique(reconstructed_image))
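
The reshape-then-unpatchify dance above exists because patchify on a 3-D volume returns a 6-D grid (grid_z, grid_y, grid_x, pz, py, px), and unpatchify needs that full grid back rather than a flat list of predictions. A minimal 3-D round trip under hypothetical sizes:

import numpy as np
from patchify import patchify, unpatchify

volume = np.random.rand(128, 128, 128)
grid = patchify(volume, (64, 64, 64), step=64)   # (2, 2, 2, 64, 64, 64)
flat = grid.reshape(-1, 64, 64, 64)              # flattened, e.g. for model.predict
restored = unpatchify(flat.reshape(grid.shape), volume.shape)
assert np.allclose(volume, restored)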
Code Example #10
# Extract noisy patches and reconstruct them using the dictionary
print('Extracting noisy patches... ')
t0 = time()
data = patchify(img2_gray, patch_size, step)
data = data.reshape(-1, patch_size[0] * patch_size[1])
print(data.shape)
intercept = np.mean(data, axis=0)
data -= intercept
print('done in %.2fs.' % (time() - t0))

t0 = time()
dico.set_params(transform_algorithm='omp', transform_n_nonzero_coefs=50)
code = dico.transform(data)
print("code shape =", code.shape)
patch = np.dot(code, V)
patch += intercept
patch = np.reshape(patch, initial_patch_size)
print(patch.shape)

im_re = unpatchify(np.asarray(patch), image_size)
dt = time() - t0
print('done in %.2fs.' % dt)

plt.figure()
plt.imshow(im_re, cmap='gray')
plt.title("Estimated source 2")
plt.show()

diff = img2_gray - im_re
print(np.sqrt(np.sum(diff**2)))
Code Example #11
File: patch_utils.py Project: adibMosharrof/medifor
def get_image_from_patches(patches, new_image_shape, patch_window_shape):
    recon_patch_windows = patches.reshape(patch_window_shape)
    try:
        return unpatchify(recon_patch_windows, new_image_shape)
    except ZeroDivisionError:
        # fall back to the lone patch when the grid has one patch per axis
        return patches[0]
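
The ZeroDivisionError fallback is presumably there because unpatchify infers the step from the patch grid, dividing by (patches_per_axis - 1); a grid with a single patch per axis makes that denominator zero, and returning the lone patch is then the right answer. A sketch of the degenerate case (behavior assumed from the PyPI patchify package):

import numpy as np
from patchify import patchify, unpatchify

img = np.random.rand(32, 32)
single = patchify(img, (32, 32), step=32)   # grid shape (1, 1, 32, 32)
# unpatchify(single, img.shape) can hit a division by zero here,
# which is exactly what get_image_from_patches guards against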
Code Example #12
File: test.py Project: summertune/2017Summer_Duke
def main(flags):
    IMG_MEAN = np.zeros(3)

    # parameters of building data set
    citylist = ['Norfolk', 'Arlington', 'Atlanta', 'Austin', 'Seekonk', 'NewHaven']
    image_mean_list = {'Norfolk': [127.07435926, 129.40160709, 128.28713284],
                       'Arlington': [88.30304996, 94.97338776, 93.21268212],
                       'Atlanta': [101.997014375, 108.42171833, 110.044871],
                       'Austin': [97.0896012682, 102.94697026, 100.7540157],
                       'Seekonk': [86.67800904, 93.31221168, 92.1328146],
                       'NewHaven': [106.7092798, 111.4314,
                                    110.74903832]}  # BGR mean for the training data for each city

    num_samples = {'Norfolk': 3,
                   'Arlington': 3,
                   'Atlanta': 3,
                   'Austin': 3,
                   'Seekonk': 3,
                   'NewHaven': 2}  # number of samples for each city

    # set evaluation data
    if flags.training_data == 'SP':
        IMG_MEAN = np.array((121.68045527, 132.14961763, 129.30317439),
                            dtype=np.float32)  # mean of solar panel data in BGR order
        valid_list = ['11ska625680{}', '11ska610860{}', '11ska445890{}', '11ska520695{}', '11ska355800{}',
                      '11ska370755{}',
                      '11ska385710{}', '11ska550770{}', '11ska505740{}', '11ska385800{}', '11ska655770{}',
                      '11ska385770{}',
                      '11ska610740{}', '11ska550830{}', '11ska625830{}', '11ska535740{}', '11ska520815{}',
                      '11ska595650{}',
                      '11ska475665{}', '11ska520845{}']

    elif flags.training_data in citylist:
        IMG_MEAN = image_mean_list[flags.training_data]  # mean of building data (BGR order; converted to RGB below)
        valid_list = ["{}_{:0>2}{{}}".format(flags.testing_data, i) for i in
                      range(1, num_samples[flags.testing_data] + 1)]


    elif 'all_but' in flags.training_data:
        except_city_name = flags.training_data.split('_')[2]
        for cityname in citylist:
            if cityname != except_city_name and cityname != 'Seekonk':
                IMG_MEAN = IMG_MEAN + np.array(image_mean_list[cityname])
        IMG_MEAN = IMG_MEAN / 4
        valid_list = ["{}_{:0>2}{{}}".format(flags.testing_data, i) for i in
                      range(1, num_samples[flags.testing_data] + 1)]

    elif flags.training_data == 'all':
        for cityname in citylist:
            if cityname != 'Seekonk':
                IMG_MEAN = IMG_MEAN + np.array(image_mean_list[cityname])
        IMG_MEAN = IMG_MEAN / 5
        valid_list = ["{}_{:0>2}{{}}".format(flags.testing_data, i) for i in
                      range(1, num_samples[flags.testing_data] + 1)]

    else:
        print("Wrong data option: {}".format(flags.data_option))

    IMG_MEAN = [IMG_MEAN[2], IMG_MEAN[1], IMG_MEAN[0]]  # convert to RGB order

    # setup used GPU
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = flags.GPU

    # presetting
    tf.set_random_seed(1234)

    # input image batch with zero mean
    image_batch = tf.placeholder(tf.float32, shape=[None, 128, 128, 3], name="image_batch")
    # Convert RGB to BGR.
    img_r, img_g, img_b = tf.split(axis=3, num_or_size_splits=3, value=image_batch)
    img_bgr = tf.cast(tf.concat(axis=3, values=[img_b, img_g, img_r]), dtype=tf.float32)

    prediction_batch = tf.placeholder(tf.float32, shape=[None, 128, 128, 1], name="prediction_batch")

    pred_raw = make_unet(img_bgr, training=False)
    pred = tf.nn.sigmoid(pred_raw)
    tf.add_to_collection("inputs", image_batch)
    tf.add_to_collection("outputs", pred)

    # Set up TF session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:

        init = tf.global_variables_initializer()
        sess.run(init)

        saver = tf.train.Saver(var_list=tf.global_variables())

        if os.path.exists(flags.restore_from) and tf.train.get_checkpoint_state(flags.restore_from):
            latest_check_point = tf.train.latest_checkpoint(flags.restore_from)
            print("Loading model: {}".format(latest_check_point))
            saver.restore(sess, latest_check_point)
        else:
            print("No model found at{}".format(flags.restore_from))
            sys.exit()

        if not os.path.exists(flags.save_dir):
            os.makedirs(flags.save_dir)

        # testing model on large images by running over patches
        gfilter = gauss2D(shape=[flags.image_size, flags.image_size], sigma=(flags.image_size - 1) / 4)
        seg_metric = SegMetric(1)
        valid_stride = int(flags.image_size / 2)

        print("Testing {} model on {} data {}{}".format(flags.training_data, flags.testing_data, "with transferring mean" if flags.is_mean_transfer else "", "with CORAL domain adaption" if flags.is_CORAL else ""))

        file = open(os.path.join(flags.restore_from, 'test_log.csv'), 'a')
        file.write("\nTest Model: {}\ntransfer_mean:{} CORAL domain adaption:{}\n".format(latest_check_point, flags.is_mean_transfer, flags.is_CORAL))

        for valid_file in valid_list:
            print("Testing image {}".format(valid_file[0:-2]))
            if flags.testing_data == 'SP':
                valid_image = misc.imread(os.path.join(flags.img_path, valid_file.format('.png')))
            else:
                valid_image = misc.imread(os.path.join(flags.img_path, flags.testing_data, valid_file.format('_RGB_1feet.png')))

            valid_truth = (misc.imread(os.path.join(flags.img_path, flags.testing_data, valid_file.format('_truth_1feet.png'))) / 255).astype(np.uint8)

            if flags.is_CORAL:
                train_image = misc.imread(os.path.join(flags.img_path, flags.training_data, '{}_01_RGB.png'.format(flags.training_data)))
                valid_image = image_adapt(valid_image, train_image, 1)

            valid_image = misc.imresize(valid_image, flags.resolution_ratio, interp='bilinear')
            valid_truth = misc.imresize(valid_truth, flags.resolution_ratio, interp='nearest')

            if flags.is_mean_transfer:
                IMG_MEAN = np.mean(valid_image, axis=(0, 1))  # Image mean of testing data

            valid_image = valid_image - IMG_MEAN  # subtract mean from image

            image_shape = valid_truth.shape

            valid_patches = patchify(valid_image, flags.image_size, valid_stride)
            """divided patches into smaller batch for evaluation"""
            pred_pmap = valid_in_batch(valid_patches, sess, pred, image_batch, step=flags.batch_size)

            # pred_pmap = np.ones(valid_patches.shape[0:-1])

            print("Stiching patches")
            pred_pmap_weighted = pred_pmap * gfilter[None, :, :]
            pred_pmap_weighted_large = unpatchify(pred_pmap_weighted, image_shape, valid_stride)
            gauss_mask_large = unpatchify(np.ones(pred_pmap.shape) * gfilter[None, :, :], image_shape, valid_stride)
            pred_pmap_weighted_large_normalized = np.nan_to_num(pred_pmap_weighted_large / gauss_mask_large)
            pred_binary = (pred_pmap_weighted_large_normalized > flags.pred_threshold).astype(np.uint8)

            # mean IoU
            seg_metric.add_image_pair(pred_binary, valid_truth)
            message_temp = "{}, {:.4f}".format(valid_file[0:-2], mean_IU(pred_binary, valid_truth))
            print(message_temp)
            file.write(message_temp + '\n')

            print("Saving evaluation prediction")

            # misc.imsave(os.path.join(flags.save_dir, '{}_{}pred.png'.format(valid_file[0:-2], 'NT_' if not flags.is_mean_transfer else '')), pred_binary)
            misc.imsave(os.path.join(flags.save_dir, '{}_pred_threshold_{}{}{}.png'.format(valid_file[0:-2],flags.pred_threshold, '_TM' if flags.is_mean_transfer else '', '_CORAL' if flags.is_CORAL else '')), pred_binary * 255)
            misc.toimage(pred_pmap_weighted_large_normalized.astype(np.float32), high=1.0, low=0.0, cmin=0.0, cmax=1.0, mode='F').save(os.path.join(flags.save_dir, '{}_pred_pmap{}{}.tif'.format(valid_file[0:-2], '_TM' if flags.is_mean_transfer else '', '_CORAL' if flags.is_CORAL else '')))



        message_overall = "Overall, {:.4f}".format(seg_metric.mean_IU())
        print(message_overall)
        file.write(message_overall + '\n')
        file.close()
        print('The output file has been saved to {}'.format(flags.save_dir))

        sess.close()
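
Code Examples #12, #14 and #15 share a Gaussian-weighted stitching scheme: each predicted patch is multiplied by a 2-D Gaussian kernel, the weighted patches are accumulated into the full image (their project-local unpatchify takes a stride and sums overlaps, unlike the PyPI version), and the sum is normalized by the stitched Gaussian mask so every pixel becomes a weighted average. A self-contained 1-D sketch of the same idea, with a hypothetical triangular weight standing in for gauss2D:

import numpy as np

def stitch_weighted(patches, length, stride, weight):
    # accumulate weighted overlapping patches, then normalize by the total weight
    acc = np.zeros(length)
    norm = np.zeros(length)
    for i, p in enumerate(patches):
        s = i * stride
        acc[s:s + len(p)] += p * weight
        norm[s:s + len(p)] += weight
    return acc / norm

weight = np.array([1.0, 2.0, 2.0, 1.0])              # stand-in for a Gaussian kernel
signal = np.arange(10, dtype=float)
patches = [signal[s:s + 4] for s in range(0, 7, 2)]  # stride-2 windows of length 4
out = stitch_weighted(patches, 10, 2, weight)
assert np.allclose(out, signal)                      # consistent patches average back exactly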
Code Example #13
    print("******")
    s1 = source_estimated[0,:]
    s1 = np.reshape(s1, patch_size)

    s2 = source_estimated[1,:]
    s2 = np.reshape(s2, patch_size)
    
    estimated_patches1.append(s1)
    estimated_patches2.append(s2)

# reconstruct the estimated sources

estimated_patches1 = np.reshape(estimated_patches1, initial_size1)
estimated_patches2 = np.reshape(estimated_patches2, initial_size2)

es1 = unpatchify(np.asarray(estimated_patches1), image_size)
es2 = unpatchify(np.asarray(estimated_patches2), image_size)

# Show estimated sources

plt.figure()
plt.imshow(es1, cmap='gray')
plt.title("Estimated source 1")
plt.show()

plt.figure()
plt.imshow(es2, cmap='gray')
plt.title("Estimated source 2")
plt.show()

est1 = es1.flatten('F')  # column-wise
Code Example #14
def main():
    # get arguments
    args = get_arguments()

    # setup used GPU
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU
    """Create the model and start the evaluation process."""

    # data reader.

    # input image
    input_img = tf.placeholder(tf.float32,
                               shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                               name="input_image")
    # img = tf.image.decode_jpeg(tf.read_file(args.img_path), channels=3)
    # Convert RGB to BGR.
    img_r, img_g, img_b = tf.split(axis=3,
                                   num_or_size_splits=3,
                                   value=input_img)
    img = tf.cast(tf.concat(axis=3, values=[img_b, img_g, img_r]),
                  dtype=tf.float32)
    # Extract mean.
    img -= IMG_MEAN

    img_upscale = tf.image.resize_bilinear(
        img, [IMAGE_SIZE * args.up_scale, IMAGE_SIZE * args.up_scale])

    # Create network.
    net = DeepLabResNetModel({'data': img},
                             is_training=False,
                             num_classes=args.num_classes)

    # Which variables to load.
    restore_var = tf.global_variables()

    # Predictions.
    res5c_relu = net.layers['res5c_relu']
    fc1_voc12_c0 = net.layers['fc1_voc12_c0']
    fc1_voc12_c1 = net.layers['fc1_voc12_c1']
    fc1_voc12_c2 = net.layers['fc1_voc12_c2']
    fc1_voc12_c3 = net.layers['fc1_voc12_c3']

    raw_output = net.layers['fc1_voc12']

    raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[1:3])
    # raw_output_up_argmax = tf.argmax(raw_output_up, dimension=3)
    # pred = tf.expand_dims(raw_output_up_argmax, dim=3)
    pmap = tf.nn.softmax(raw_output_up, name="probability_map")

    # Set up TF session and initialize variables.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()

    sess.run(init)

    # Load weights.
    loader = tf.train.Saver(var_list=restore_var)

    if os.path.isdir(args.restore_from):
        # search checkpoint at given path
        ckpt = tf.train.get_checkpoint_state(args.restore_from)
        if ckpt and ckpt.model_checkpoint_path:
            # load checkpoint file
            load(loader, sess, ckpt.model_checkpoint_path)
            print("Model restored from {}".format(ckpt.model_checkpoint_path))
        else:
            print("No model found at{}".format(args.restore_from))
    elif os.path.isfile(args.restore_from):
        # load checkpoint file
        load(loader, sess, args.restore_from)
    else:
        print("No model found at{}".format(args.restore_from))
    '''Perform validation on large images.'''
    # preds, scoremap, pmap, cnn_out, fc0, fc1, fc2, fc3 = sess.run([pred, raw_output, raw_output_up, res5c_relu, fc1_voc12_c0, fc1_voc12_c1, fc1_voc12_c2, fc1_voc12_c3], feed_dict={input_img})

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # gaussian weight kernel
    gfilter = gauss2D(shape=[IMAGE_SIZE, IMAGE_SIZE],
                      sigma=(IMAGE_SIZE - 1) / 4)

    seg_metric = SegMetric(1)

    for valid_file in valid_list:
        print("Validate image {}".format(valid_file[0:-2]))
        valid_image = misc.imread(
            os.path.join(args.img_path, valid_file.format('.png')))
        valid_truth = (misc.imread(
            os.path.join(args.img_path, valid_file.format('_truth.png'))) /
                       255).astype(np.uint8)
        image_shape = valid_truth.shape

        valid_patches = patchify(valid_image, IMAGE_SIZE, valid_stride)
        """divided patches into smaller batch for validation"""
        pred_pmap = valid_in_batch(valid_patches,
                                   sess,
                                   pmap,
                                   input_img,
                                   step=valid_batch_size)

        # pred_pmap = np.ones(valid_patches.shape[0:-1])

        print("Stiching patches")
        pred_pmap_weighted = pred_pmap * gfilter[None, :, :]
        pred_pmap_weighted_large = unpatchify(pred_pmap_weighted, image_shape,
                                              valid_stride)
        gauss_mask_large = unpatchify(
            np.ones(pred_pmap.shape) * gfilter[None, :, :], image_shape,
            valid_stride)
        pred_pmap_weighted_large_normalized = np.nan_to_num(
            pred_pmap_weighted_large / gauss_mask_large)
        pred_binary = (pred_pmap_weighted_large_normalized > 0.5).astype(
            np.uint8)

        # mean IoU
        seg_metric.add_image_pair(pred_binary, valid_truth)
        print("mean_IU: {:.4f}".format(mean_IU(pred_binary, valid_truth)))

        # print("Save validation prediction")
        misc.imsave(
            os.path.join(args.save_dir,
                         '{}_valid_pred.png'.format(valid_file[0:-2])),
            pred_binary)
        misc.imsave(
            os.path.join(args.save_dir,
                         '{}_valid_pred_255.png'.format(valid_file[0:-2])),
            pred_binary * 255)
        misc.toimage(pred_pmap_weighted_large_normalized.astype(np.float32),
                     high=1.0,
                     low=0.0,
                     cmin=0.0,
                     cmax=1.0,
                     mode='F').save(
                         os.path.join(
                             args.save_dir,
                             '{}_valid_pmap.tif'.format(valid_file[0:-2])))

        # # Plot PR curve
        # precision, recall, thresholds = precision_recall_curve(valid_truth.flatten(), pred_pmap_weighted_large_normalized.flatten(), 1)
        # plt.figure()
        # plt.plot(recall, precision, lw=2, color='navy',
        #          label='Precision-Recall curve')
        # plt.xlabel('Recall')
        # plt.ylabel('Precision')
        # plt.ylim([0.0, 1.05])
        # plt.xlim([0.0, 1.0])
        # plt.title('Precision-Recal')
        # # plt.legend(loc="lower left")
        # plt.savefig(os.path.join(args.save_dir, '{}_PR_curve.png'.format(valid_file[0:-2])))

    # msk = decode_labels(preds, num_classes=args.num_classes)
    # im = Image.fromarray(msk[0])

    # im.save(args.save_dir + 'pred.png')

    print("Overal mean IoU: {:.4f}".format(seg_metric.mean_IU()))
    print('The output file has been saved to {}'.format(args.save_dir))
Code Example #15
def main():
    # get arguments
    args = get_arguments()

    IMG_MEAN = np.zeros(3)
    valid_list=[]

    # parameters of building data set
    citylist = ['Norfolk', 'Arlington', 'Atlanta', 'Austin', 'Seekonk', 'NewHaven']
    image_mean_list = {'Norfolk': [127.07435926, 129.40160709, 128.28713284],
                       'Arlington': [88.30304996, 94.97338776, 93.21268212],
                       'Atlanta': [101.997014375, 108.42171833, 110.044871],
                       'Austin': [97.0896012682, 102.94697026, 100.7540157],
                       'Seekonk': [86.67800904, 93.31221168, 92.1328146],
                       'NewHaven': [106.7092798, 111.4314, 110.74903832]} # BGR mean for the training data for each city
    num_samples = {'Norfolk': 3,
                      'Arlington': 3,
                      'Atlanta': 3,
                      'Austin': 3,
                      'Seekonk': 3,
                      'NewHaven': 2} # number of samples for each city
    # set evaluation data
    if args.evaluation_data == 'SP':
        IMG_MEAN = np.array((121.68045527, 132.14961763, 129.30317439),
                        dtype=np.float32)  # mean of solar panel data in BGR order
        IMG_MEAN = [IMG_MEAN[2], IMG_MEAN[1], IMG_MEAN[0]] # convert to RGB order

        # valid_list = [ '11ska505665{}', '11ska580710{}', '11ska475635{}', '11ska475875{}', '11ska565905{}', '11ska490860{}', '11ska325740{}', '11ska460725{}', '11ska490605{}', '11ska430815{}', '11ska400740{}', '11ska580875{}', '11ska655725{}', '11ska595860{}', '11ska460890{}', '11ska655695{}', '11ska640605{}', '11ska580605{}', '11ska595665{}', '11ska505755{}', '11ska475650{}', '11ska595755{}', '11ska625755{}', '11ska490740{}', '11ska565755{}', '11ska520725{}', '11ska595785{}', '11ska580755{}', '11ska445785{}', '11ska625710{}', '11ska520830{}', '11ska640800{}', '11ska535785{}', '11ska430905{}', '11ska505695{}', '11ska565770{}']
        # valid_list = ['11ska580860{}', '11ska565845{}']
        valid_list = ['11ska625680{}', '11ska610860{}', '11ska445890{}', '11ska520695{}', '11ska355800{}', '11ska370755{}',
                  '11ska385710{}', '11ska550770{}', '11ska505740{}', '11ska385800{}', '11ska655770{}', '11ska385770{}',
                  '11ska610740{}', '11ska550830{}', '11ska625830{}', '11ska535740{}', '11ska520815{}', '11ska595650{}',
                  '11ska475665{}', '11ska520845{}']

    elif args.training_data in citylist:
        IMG_MEAN = image_mean_list[args.training_data]
        IMG_MEAN = [IMG_MEAN[2], IMG_MEAN[1], IMG_MEAN[0]] # convert to RGB order
        valid_list = ["{}_{:0>2}{{}}".format(args.evaluation_data, i) for i in range(1,num_samples[args.evaluation_data]+1)]

    else:
        print("Wrong data option: {}".format(args.training_data))

    # set image mean

    # setup used GPU
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.GPU

    """Create the model and start the evaluation process."""

    # data reader.

    # input image
    input_img = tf.placeholder(tf.float32, shape=[None, args.image_size, args.image_size, 3], name="input_image")
    # img = tf.image.decode_jpeg(tf.read_file(args.img_path), channels=3)
    # Convert RGB to BGR.
    img_r, img_g, img_b = tf.split(axis=3, num_or_size_splits=3, value=input_img)
    img = tf.cast(tf.concat(axis=3, values=[img_b, img_g, img_r]), dtype=tf.float32)
    # Extract mean.

    # Create network.
    net = DeepLabResNetModel({'data': img}, is_training=False, num_classes=args.num_classes)

    # Which variables to load.
    restore_var = tf.global_variables()

    # Predictions.
    res5c_relu = net.layers['res5c_relu']
    fc1_voc12_c0 = net.layers['fc1_voc12_c0']
    fc1_voc12_c1 = net.layers['fc1_voc12_c1']
    fc1_voc12_c2 = net.layers['fc1_voc12_c2']
    fc1_voc12_c3 = net.layers['fc1_voc12_c3']

    raw_output = net.layers['fc1_voc12']

    raw_output_up = tf.image.resize_bilinear(raw_output, tf.shape(img)[1:3])
    # raw_output_up_argmax = tf.argmax(raw_output_up, dimension=3)
    # pred = tf.expand_dims(raw_output_up_argmax, dim=3)
    pmap = tf.nn.softmax(raw_output_up, name="probability_map")

    # Set up TF session and initialize variables. 
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    init = tf.global_variables_initializer()

    sess.run(init)

    # Load weights.
    loader = tf.train.Saver(var_list=restore_var)

    if os.path.isdir(args.restore_from):
        # search checkpoint at given path
        ckpt = tf.train.get_checkpoint_state(args.restore_from)
        if ckpt and ckpt.model_checkpoint_path:
            # load checkpoint file
            load(loader, sess, ckpt.model_checkpoint_path)
            file = open(os.path.join(args.restore_from, 'test.csv'), 'a')
            file.write("\nTest Model: {}\ntransfer_mean:{}\n".format(ckpt.model_checkpoint_path, args.is_mean_transfer))
        else:
            print("No model found at{}".format(args.restore_from))
            sys.exit()
    elif os.path.isfile(args.restore_from):
        # load checkpoint file
        load(loader, sess, args.restore_from)
        file = open(os.path.join(args.restore_from, 'test.csv'), 'a')
        file.write("\nTest Model: {}\ntransfer_mean:{}\n".format(args.restore_from, args.is_mean_transfer))
    else:
        print("No model found at{}".format(args.restore_from))
        sys.exit()

    '''Perform evaluation on large images.'''
    # preds, scoremap, pmap, cnn_out, fc0, fc1, fc2, fc3 = sess.run([pred, raw_output, raw_output_up, res5c_relu, fc1_voc12_c0, fc1_voc12_c1, fc1_voc12_c2, fc1_voc12_c3], feed_dict={input_img})

    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    # gaussian weight kernel
    gfilter = gauss2D(shape=[args.image_size, args.image_size], sigma=(args.image_size - 1) / 4)

    seg_metric = SegMetric(1)

    valid_stride = int(args.image_size/2)


    for valid_file in valid_list:
        print("evaluate image {}".format(valid_file[0:-2]))
        if args.evaluation_data == 'SP':
            valid_image = misc.imread(os.path.join(args.img_path, valid_file.format('.png')))
        else:
            valid_image = misc.imread(os.path.join(args.img_path, valid_file.format('_RGB.png')))
        valid_truth = (misc.imread(os.path.join(args.img_path, valid_file.format('_truth.png')))/255).astype(np.uint8)

        valid_image = misc.imresize(valid_image, args.resolution_ratio, interp='bilinear')
        valid_truth = misc.imresize(valid_truth, args.resolution_ratio, interp='nearest')

        if args.is_mean_transfer:
            IMG_MEAN = np.mean(valid_image, axis=(0,1)) # Image mean of testing data

        valid_image = valid_image - IMG_MEAN # subtract mean from image

        image_shape = valid_truth.shape

        valid_patches = patchify(valid_image, args.image_size, valid_stride)
        """divided patches into smaller batch for evaluation"""
        pred_pmap = valid_in_batch(valid_patches, sess, pmap, input_img, step=args.batch_size)

        # pred_pmap = np.ones(valid_patches.shape[0:-1])

        print("Stiching patches")
        pred_pmap_weighted = pred_pmap * gfilter[None, :, :]
        pred_pmap_weighted_large = unpatchify(pred_pmap_weighted, image_shape, valid_stride)
        gauss_mask_large = unpatchify(np.ones(pred_pmap.shape) * gfilter[None, :, :], image_shape, valid_stride)
        pred_pmap_weighted_large_normalized = np.nan_to_num(pred_pmap_weighted_large / gauss_mask_large)
        pred_binary = (pred_pmap_weighted_large_normalized > 0.5).astype(np.uint8)
        
        print("Save evaluation prediction")

        misc.imsave(os.path.join(args.save_dir, '{}_valid_pred.png'.format(valid_file[0:-2])), pred_binary)
        misc.imsave(os.path.join(args.save_dir, '{}_valid_pred_255.png'.format(valid_file[0:-2])), pred_binary*255)
        misc.toimage(pred_pmap_weighted_large_normalized.astype(np.float32), high=1.0, low=0.0, cmin=0.0, cmax=1.0, mode='F').save(
            os.path.join(args.save_dir, '{}_valid_pmap.tif'.format(valid_file[0:-2])))

        # mean IoU
        seg_metric.add_image_pair(pred_binary, valid_truth)
        message_temp = "{}, {:.4f}".format(valid_file[0:-2], mean_IU(pred_binary, valid_truth))
        print(message_temp)
        file.write(message_temp+'\n')
        # # Plot PR curve
        # precision, recall, thresholds = precision_recall_curve(valid_truth.flatten(), pred_pmap_weighted_large_normalized.flatten(), 1)
        # plt.figure()
        # plt.plot(recall, precision, lw=2, color='navy',
        #          label='Precision-Recall curve')
        # plt.xlabel('Recall')
        # plt.ylabel('Precision')
        # plt.ylim([0.0, 1.05])
        # plt.xlim([0.0, 1.0])
        # plt.title('Precision-Recal')
        # # plt.legend(loc="lower left")
        # plt.savefig(os.path.join(args.save_dir, '{}_PR_curve.png'.format(valid_file[0:-2])))

    # msk = decode_labels(preds, num_classes=args.num_classes)
    # im = Image.fromarray(msk[0])

    # im.save(args.save_dir + 'pred.png')
    message_overall = "Overall, {:.4f}".format(seg_metric.mean_IU())
    print(message_overall)
    file.write(message_overall + '\n')
    file.close()
    print('The output file has been saved to {}'.format(args.save_dir))

    sess.close()
Code Example #16
    def merge(self, patches: list):
        patches = np.concatenate(patches, 0)
        chunk_main_patches = patches[:self.patchcount[0]]
        chunk_right_patches = patches[self.patchcount[0]:self.patchcount[0] +
                                      self.patchcount[1]]
        chunk_lower_patches = patches[self.patchcount[0] +
                                      self.patchcount[1]:self.patchcount[0] +
                                      self.patchcount[1] + self.patchcount[2]]
        right = chunk_right_patches.size > 0
        lower = chunk_lower_patches.size > 0
        chunk_corner_patches = patches[-1]
        chunk_main_patches = rearrange(
            chunk_main_patches,
            '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
            p1=self.chunkcountshape[0][0],
            p2=self.chunkcountshape[0][1],
            p3=self.chunkcountshape[0][2])
        if right:
            chunk_right_patches = rearrange(
                chunk_right_patches,
                '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
                p1=self.chunkcountshape[1][0],
                p2=self.chunkcountshape[1][1],
                p3=self.chunkcountshape[1][2])
        if lower:
            chunk_lower_patches = rearrange(
                chunk_lower_patches,
                '(p1 p2 p3) s1 s2 s3 -> p1 p2 p3 s1 s2 s3',
                p1=self.chunkcountshape[2][0],
                p2=self.chunkcountshape[2][1],
                p3=self.chunkcountshape[2][2])
        chunk_main = unpatchify(chunk_main_patches, self.chunkshape[0])
        if right:
            chunk_right = unpatchify(chunk_right_patches, self.chunkshape[1])
        if lower:
            chunk_lower = unpatchify(chunk_lower_patches, self.chunkshape[2])
        chunk_corner = chunk_corner_patches
        #
        img = np.zeros((self.h, self.w, self.c), dtype=float)  # np.float was removed in NumPy 1.24
        img[:self.chunk_h, :self.chunk_w, :] += chunk_main
        if right:
            img[:self.chunk_h, -self.ps_w:, :] += chunk_right
        if lower:
            img[-self.ps_h:, :self.chunk_w, :] += chunk_lower
        img[-self.ps_h:, -self.ps_w:, :] += chunk_corner
        #
        weight = np.zeros((self.h, self.w, self.c), dtype=float)
        weight[:self.chunk_h, :self.chunk_w, :] += np.ones_like(chunk_main)
        if right:
            weight[:self.chunk_h, -self.ps_w:, :] += np.ones_like(chunk_right)
        if lower:
            weight[-self.ps_h:, :self.chunk_w, :] += np.ones_like(chunk_lower)
        weight[-self.ps_h:, -self.ps_w:, :] += np.ones_like(chunk_corner)
        img = img / weight

        return img


# if __name__=='__main__':
#     path=r'C:\Users\Pictures\Lenna.png'
#     img=cv2.imread(path,-1)[:92,:92,::-1]
#     croppatch=ImgPatches(img,48,48,0.1)
#     patches=croppatch.crop(img,32)
#     img_merge = croppatch.merge(patches)
#     fig,ax=plt.subplots(1,3)
#     ax[0].imshow(img)
#     ax[1].imshow(img_merge/255)
#     ax[2].imshow(img_merge-img)
#     plt.show()
Code Example #17
File: infer.py Project: abhyantrika/tile_decoder
output = []
encoded = []
all_patches = dataset.patches
for i in range(len(all_patches)):
	x = all_patches[i].unsqueeze(0).cuda().float()
	with torch.no_grad():
		out = model(x)
	output.append(out.cpu())
	encoded.append(model.encoded)


output = torch.stack(output)
output = output.permute(0,1,3,4,2)
output = output.reshape(33,32,1,128,128,3)
out = patchify.unpatchify(output.numpy(),(4224,4096,3))
cv2.imwrite('out.png',out*255)  # note: cv2.imwrite expects BGR channel order

encoded = torch.stack(encoded).squeeze()
encoded = encoded.bool().cpu().numpy()
np.save('encoded',encoded)

#out = out.reshape(33*32,3,128,128)
#out = np.transpose(out, (0, 3, 1, 4, 2))
#out = np.reshape(out, (4096, 4224, 3))
#out = np.transpose(out, (2, 0, 1))

# y = T.cat((img[0], out), dim=2).unsqueeze(0)
# save_imgs(
#     imgs=y,
#     to_size=(3, 4096, 2 * 4224),
Code Example #18
t0 = time()
data = patchify(img1_gray, patch_size, step)
data = data.reshape(-1, patch_size[0] * patch_size[1])
intercept = np.mean(data, axis=0)
data -= intercept

n_coef = 3
dico_recto.set_params(transform_algorithm='omp',
                      transform_n_nonzero_coefs=n_coef)
code_recto = dico_recto.transform(data)

patch = np.dot(code_recto, V_recto)
patch += intercept
patch = np.reshape(patch, initial_patch_size)

im_re_recto = unpatchify(np.asarray(patch), image_size)
print('done in %.2fs.' % (time() - t0))

difference_recto = img1_gray - im_re_recto
print('Difference is ', np.sqrt(np.sum(difference_recto**2)))

######################################
print('Extracting noisy patches... ')
t0 = time()
data = patchify(img1_gray, patch_size, step)
data = data.reshape(-1, patch_size[0] * patch_size[1])
intercept = np.mean(data, axis=0)
data -= intercept

n_coef = 3
dico_verso.set_params(transform_algorithm='omp',
Code Example #19
                # (fragment: inside a loop plotting per-patch results)
                axs[1, 0].imshow((x_hat[:, :, [3, 2, 1]] + 1) / (x_hat.max()))
                axs[1, 1].set_title("y_hat")
                axs[1, 1].imshow(y_hat)
                axs[2, 0].set_title("difference image")
                axs[2, 0].imshow(diff_patch)
                axs[2, 1].set_title("ground truth")
                axs[2, 1].imshow(ground_truth_patch[0])

                plt.show()

            patches.append(diff_patch.detach().numpy())

        diff_image = np.array(patches)
        diff_image = unpatchify(
            np.array(diff_image).reshape(
                (ground_truth.shape[0] // args.patch_size,
                 ground_truth.shape[1] // args.patch_size, args.patch_size,
                 args.patch_size)), ground_truth.shape)

        diff_image[diff_image > np.mean(diff_image) +
                   3 * np.std(diff_image)] = np.mean(
                       diff_image) + 3 * np.std(diff_image)
        Image.fromarray(
            (diff_image * 255).astype(np.uint8)).save("full_diff.png", "PNG")
        plt.figure(), plt.imshow(diff_image), plt.colorbar(), plt.show()

        threshold = threshold_otsu(diff_image)
        threshold += 0.65 * threshold
        img_segm = diff_image > threshold
        plt.figure(), plt.imshow(img_segm, cmap='binary'), plt.show()
        Image.fromarray(
Code Example #20
##
%load_ext autoreload
%autoreload 2
##
import sys

sys.path.append(".")
##
from skimage.io import imread, imshow
from patchify import patchify, unpatchify
##
img = imread("wp2559551.jpg")

##
patches = patchify(img, (900, 256, 3), step=1)

##
imshow(unpatchify(patches, img.shape))
##
patches = patchify(img[:,:,1], (645, 256), step=1)

##
imshow(img[:,:,1])
##
imshow(unpatchify(patches, img[:,:,1].shape))

##
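
A closing note on the step=1 round trips above: unpatchify infers an integer step from the grid shape, and with step=1 that inference always succeeds, so even heavily overlapping windows reconstruct the source exactly (all overlapping values agree because they come from one image). A quick sanity check under that assumption:

import numpy as np
from patchify import patchify, unpatchify

a = np.arange(30).reshape(5, 6)
p = patchify(a, (3, 3), step=1)   # (3, 4, 3, 3) overlapping windows
assert np.array_equal(unpatchify(p, a.shape), a)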