Exemple #1
0
def load_depth(file_name):
    """Load a Sintel depth map and convert it to a torch.Tensor.

    Args:
        file_name: path to a Sintel ``.dpt`` depth file.

    Returns:
        A float tensor of shape ``(1, 1, H, W)``.

    Raises:
        AssertionError: if ``file_name`` is not an existing file.
    """
    # Explicit raise instead of `assert`: asserts are stripped under
    # `python -O`, which would silently skip the validation.
    if not os.path.isfile(file_name):
        raise AssertionError(f"Invalid file {file_name}")
    # Local import: the Sintel SDK is only needed when depth is loaded.
    import sintel_io
    depth = sintel_io.depth_read(file_name)
    return torch.from_numpy(depth).view(1, 1, *depth.shape).float()
Exemple #2
0
def depth_read_and_extend(depth_path):
    """Read a depth map, pad its height when not a multiple of 32, and
    shift values up by 5 when any are negative.

    NOTE(review): the (38, 38) pad assumes the 436-row Sintel frame
    height (436 + 76 = 512) — confirm for other inputs.
    """
    dmap = depth_read(depth_path)
    rows = dmap.shape[0]
    if rows % 32:
        dmap = np.pad(dmap, ((38, 38), (0, 0)), 'constant')
    if dmap.min() < 0:
        dmap += 5
    return dmap
Exemple #3
0
def sintel_iter(rgb, depth, bs=6):
    """Yield mini-batches of (rgb images, exp(clipped depth)) arrays.

    ``rgb`` and ``depth`` are parallel sequences of file paths; ``bs``
    is the batch size.
    """
    start = 0
    total = len(depth)
    while start < total:
        rgb_paths = rgb[start:start + bs]
        depth_paths = depth[start:start + bs]
        # Stack depth maps, clip the log-range to [0, 20], then exponentiate.
        depth_stack = [depth_read(p)[None, :, :, None] for p in depth_paths]
        depths = np.exp(np.clip(np.concatenate(depth_stack), 0, 20))
        rgb_stack = [open_rgb(p)[None, :, :] for p in rgb_paths]
        yield np.concatenate(rgb_stack), depths
        start += bs
Exemple #4
0
def load_depth(file_name):
    """Read a Sintel ``.dpt`` depth map and return a (1, 1, H, W) float tensor.

    Raises:
        AssertionError: if ``file_name`` does not point to an existing file.
    """
    if not os.path.isfile(file_name):
        raise AssertionError(f"Invalid file {file_name}")
    # The Sintel SDK is imported lazily, only when depth is actually loaded.
    import sintel_io

    raw = sintel_io.depth_read(file_name)
    tensor = torch.from_numpy(raw).float()
    return tensor.view(1, 1, *raw.shape)
Exemple #5
0
def get_histo_for_discrete_depth(train_dir):
    """Accumulate a 15-bin count of discrete depth labels (1..15) over
    every ``.dpt`` file in ``train_dir``."""
    counts = np.zeros(15, dtype=np.int32)
    depth_files = (f for f in os.listdir(train_dir) if f.endswith('.dpt'))
    for fname in depth_files:
        dpt = depth_read(path.join(train_dir, fname))
        flat = np.concatenate(dpt).astype(int)
        # minlength=16 then drop bin 0 so labels 1..15 map to indices 0..14.
        counts += np.bincount(flat, minlength=16)[1:]
    return counts
Exemple #6
0
def get_depth_histogram(depth_dir):
    """Plot a histogram of every depth value found under ``depth_dir``."""
    depth_maps = [depth_read(os.path.join(depth_dir, fname))
                  for fname in os.listdir(depth_dir)]
    # Double concatenate flattens the list of 2-D maps into one 1-D array.
    values = np.concatenate(np.concatenate(depth_maps))
    histo = np.histogram(values, np.arange(1, np.max(values)))
    plt.plot(histo[0])
Exemple #7
0
def get_depth_stats(depth_dir):
    """Return (min, max) depth over every file in ``depth_dir``.

    NOTE(review): the initial minimum of 10000 assumes no depth exceeds
    that value — confirm against the dataset.
    """
    lo = 10000
    hi = 0
    for fname in os.listdir(depth_dir):
        dpt = depth_read(os.path.join(depth_dir, fname))
        hi = max(hi, np.max(dpt))
        lo = min(lo, np.min(dpt))
    return lo, hi
Exemple #8
0
import numpy as np
from matplotlib import pyplot as plt
import sys
sys.path.append('../')
import sintel_io as sio

# Sanity-check script: load one Sintel depth frame plus its camera data
# and print the shapes/matrices.
folder_name = 'alley_1'
frame_no = 1  # must stay below 10: the file name uses a single digit
BASE_DIR = '/cluster/scratch/takmaza/CVL/MPI-Sintel-complete/training'
DEPTHFILE = f'{BASE_DIR}/depth/{folder_name}/frame_000{frame_no}.dpt'
CAMFILE = f'{BASE_DIR}/camdata_left/{folder_name}/frame_0001.cam'

# Load the depth map and the intrinsic/extrinsic camera matrices.
depth = sio.depth_read(DEPTHFILE)
I, E = sio.cam_read(CAMFILE)

print(depth.shape)
print(I)
print(E)
 def __init__(self,
              root,
              train=True,
              transform=None,
              target_transform=None,
              load_pickle=True,
              train_dir=None,
              label_dir=None,
              test_dir=None,
              add_noise=True):
     """Build the train or test split for a Sintel segmentation-style dataset.

     When ``load_pickle`` is True, data/labels are read from ``.pickle``
     files under ``<train_dir|test_dir>/pickle``. Otherwise the split is
     built from ``.png`` patches (labels via ``depth_read``) and cached
     back to a pickle file.

     Args:
         root: dataset root; expanded with ``os.path.expanduser``.
         train: select the training split (else the test split).
         transform: optional transform applied to images (stored only).
         target_transform: optional transform for labels (stored only).
         load_pickle: load pre-pickled arrays instead of rebuilding.
         train_dir: directory of training patches (and pickle cache).
         label_dir: directory holding the label files.
         test_dir: directory of test patches (and pickle cache).
         add_noise: stored flag; not used inside this constructor.
     """
     self.root = os.path.expanduser(root)
     self.transform = transform
     self.target_transform = target_transform
     self.train = train  # training set or test set
     self.add_noise = add_noise
     # The pickle cache lives next to the split it belongs to.
     if train:
         pickle_dir = path.join(train_dir, "pickle")
     else:
         pickle_dir = path.join(test_dir, "pickle")
     # now load the picked numpy arrays
     if self.train:
         self.train_data = []
         self.train_labels = []
         if load_pickle:
             # Fast path: concatenate every cached pickle entry.
             for pckl in os.listdir(pickle_dir):
                 if pckl.endswith(".pickle"):
                     pckl_path = path.join(pickle_dir, pckl)
                     pckl_file = open(pckl_path, 'rb')
                     # latin1 encoding keeps Python-2-era pickles loadable.
                     entry = pickle.load(pckl_file, encoding='latin1')
                     self.train_data.append(entry['data'])
                     self.train_labels.append(entry['labels'])
                     pckl_file.close()
             self.train_data = np.concatenate(self.train_data)
             self.train_labels = np.concatenate(self.train_labels)
         else:
             # Slow path: rebuild arrays from raw patches + labels.
             for patch in os.listdir(train_dir):
                 if patch.endswith('.png'):
                     patch_path = path.join(train_dir, patch)
                     base = path.splitext(patch)[0]
                     # NOTE(review): the test branch below uses a '.dpt'
                     # label extension; here it is '.png' — confirm which
                     # one the training labels actually use.
                     label = base + '.png'
                     label_path = path.join(label_dir, label)
                     patch_file = Image.open(patch_path)
                     patch_arr = np.array(patch_file)
                     label_file = depth_read(label_path)
                     label_arr = np.array(label_file)
                     label_arr = label_arr + 4  # Fix from -4:10 to 0:14
                     # Zero-pad 436-row Sintel frames to 512x1024
                     # (38 rows top and bottom: 436 + 76 = 512).
                     if patch_arr.shape[0] == 436:
                         patch_arr_extended = np.zeros((512, 1024, 3),
                                                       dtype=np.uint8)
                         patch_arr_extended[38:-38, :, :] = patch_arr
                         patch_arr = patch_arr_extended
                         label_arr_extended = np.zeros((512, 1024),
                                                       dtype=np.uint8)
                         label_arr_extended[38:-38, :] = label_arr
                         label_arr = label_arr_extended
                     #Make the classes 0-14 instead of 1-15
                     # label_arr -= 1
                     self.train_data.append(patch_arr)
                     self.train_labels.append(label_arr)
             # expand_dims + squeeze on axis 0 leaves a stacked ndarray
             # built from the list of per-patch arrays.
             self.train_data = np.expand_dims(self.train_data, axis=0)
             self.train_labels = np.expand_dims(self.train_labels, axis=0)
             self.train_data = np.squeeze(self.train_data, axis=0)
             self.train_labels = np.squeeze(self.train_labels, axis=0)
             # Cache the rebuilt arrays so the next run can use load_pickle.
             entry = dict()
             entry['data'] = self.train_data
             entry['labels'] = self.train_labels
             pckl_file = path.join(pickle_dir, 'train_seg.pickle')
             if not path.isdir(pickle_dir):
                 os.mkdir(pickle_dir)
             with open(pckl_file, 'wb') as pckl:
                 pickle.dump(entry, pckl)
     else:
         # Test split: mirrors the training logic above.
         self.test_data = []
         self.test_labels = []
         if load_pickle:
             for pckl in os.listdir(pickle_dir):
                 if pckl.endswith(".pickle"):
                     pckl_path = path.join(pickle_dir, pckl)
                     pckl_file = open(pckl_path, 'rb')
                     entry = pickle.load(pckl_file, encoding='latin1')
                     self.test_data.append(entry['data'])
                     self.test_labels.append(entry['labels'])
                     pckl_file.close()
             self.test_data = np.concatenate(self.test_data)
             self.test_labels = np.concatenate(self.test_labels)
         else:
             for patch in os.listdir(test_dir):
                 if patch.endswith('.png'):
                     patch_path = path.join(test_dir, patch)
                     base = path.splitext(patch)[0]
                     label = base + '.dpt'
                     label_path = path.join(label_dir, label)
                     patch_file = Image.open(patch_path)
                     patch_arr = np.array(patch_file)
                     label_file = depth_read(label_path)
                     label_arr = np.array(label_file)
                     label_arr = label_arr + 4  #Fix from -4:10 to 0:14
                     if patch_arr.shape[0] == 436:
                         patch_arr_extended = np.zeros((512, 1024, 3),
                                                       dtype=np.uint8)
                         patch_arr_extended[38:-38, :, :] = patch_arr
                         patch_arr = patch_arr_extended
                         label_arr_extended = np.zeros((512, 1024),
                                                       dtype=np.uint8)
                         label_arr_extended[38:-38, :] = label_arr
                         label_arr = label_arr_extended
                     # Make the classes 0-14 instead of 1-15
                     # label_arr -= 1
                     self.test_data.append(patch_arr)
                     self.test_labels.append(label_arr)
             self.test_data = np.expand_dims(self.test_data, axis=0)
             self.test_labels = np.expand_dims(self.test_labels, axis=0)
             self.test_data = np.squeeze(self.test_data, axis=0)
             self.test_labels = np.squeeze(self.test_labels, axis=0)
             entry = dict()
             entry['data'] = self.test_data
             entry['labels'] = self.test_labels
             pckl_file = path.join(pickle_dir, 'test_seg.pickle')
             if not path.isdir(pickle_dir):
                 os.mkdir(pickle_dir)
             with open(pckl_file, 'wb') as pckl:
                 pickle.dump(entry, pckl)
Exemple #10
0
    def load_dpt(self, dpt_path):
        """Read a Sintel ``.dpt`` depth map, shifted by +1, as float32."""
        raw = sintel_io.depth_read(dpt_path)
        return np.float32(raw) + 1.0
Exemple #11
0
def depth_loader(path):
    """Thin adapter around ``depth_read`` so loaders share one interface."""
    return depth_read(path)
Exemple #12
0
def get_depth_histogram(depth_dir):
    """Histogram every depth value found in ``depth_dir``.

    Fixes in this revision:
      * the original computed ``np.histogram`` and discarded the result
        (the function always returned None) — the histogram is now returned;
      * depth maps of differing sizes are flattened and concatenated, so
        ragged inputs no longer break ``np.histogram``;
      * an empty directory returns an empty-data histogram instead of
        raising from ``np.concatenate``.

    Returns:
        The ``(counts, bin_edges)`` tuple from ``np.histogram``.
    """
    all_values = []
    for fname in os.listdir(depth_dir):
        dmap = depth_read(os.path.join(depth_dir, fname))
        all_values.append(np.asarray(dmap).ravel())
    if not all_values:
        return np.histogram(np.empty(0))
    return np.histogram(np.concatenate(all_values))
Exemple #13
0
def evaluate_exp3():
    """Generate the paper's qualitative figures: depth predictions under
    four sparsity masks for Synthia, NYU and Sintel samples, plus error
    maps, saved as PNGs under ``Results/Dense/``.

    Relies on many module-level globals (paths, sizes, checkpoint dirs,
    helper functions) and a TF1 session; order of operations matters.
    """
    print("Start of generating of png files, attached to paper")
    # --- Load one RGB/depth pair per dataset, resized to synthia_size ---
    synthia_list = get_file_list(synthia_dir)
    synthia_depth_list = filter_files(synthia_list, ['Stereo_Left', 'Depth', 'png'])
    synthia_rgb_list =  filter_files(synthia_list, ['Stereo_Left','RGB','png'])

    synthia_rgb = open_rgb(synthia_rgb_list[230])
    synthia_depth = open_depth(synthia_depth_list[230])

    # cv2.resize expects (width, height), hence the [::-1] reversal.
    synthia_rgb   = cv2.resize(synthia_rgb, tuple(synthia_size[::-1]))
    synthia_depth = cv2.resize(synthia_depth, tuple(synthia_size[::-1]))

    mat_f = h5py.File(nyu_dir)
    NYU_depth_list = mat_f['depths']
    NYU_rgb_list = mat_f['images']

    # NYU mat arrays are stored transposed; fix the axis order here.
    nyu_rgb = NYU_rgb_list[18][None,:,:,:].transpose((0,3,2,1))[0]
    nyu_depth = NYU_depth_list[18].T

    nyu_rgb  = cv2.resize(nyu_rgb, tuple(synthia_size[::-1]))
    nyu_depth = cv2.resize(nyu_depth, tuple(synthia_size[::-1]))

    sintel_depth_list = sorted(get_file_list(sintel_depth_dir))
    sintel_rgb_list = sorted(get_file_list(sintel_rgb_dir))

    sintel_rgb   = open_rgb(sintel_rgb_list[169])
    sintel_depth = depth_read(sintel_depth_list[169])

    sintel_rgb   = cv2.resize(sintel_rgb, tuple(synthia_size[::-1]))
    sintel_depth = cv2.resize(sintel_depth, tuple(synthia_size[::-1]))

    # --- Build the four sampling masks per dataset ---
    mask_uni = get_mask(synthia_size, 0.4)
    mask_reg = get_regular_grid_mask(synthia_size)

    mask_grad_synthia = combined_mask(synthia_rgb, synthia_size, combined = False)
    mask_comb_synthia = combined_mask(synthia_rgb, synthia_size, combined = True)

    print("Mask shapes: ",mask_uni.shape, mask_reg.shape, mask_grad_synthia.shape, mask_comb_synthia.shape)

    mask_grad_nyu = combined_mask(nyu_rgb, synthia_size, combined = False)
    mask_comb_nyu = combined_mask(nyu_rgb, synthia_size, combined = True)

    mask_grad_sintel = combined_mask(sintel_rgb, synthia_size, combined = False)
    mask_comb_sintel = combined_mask(sintel_rgb, synthia_size, combined = True)

    print("Passed stage of generating masks")
    # --- Build the TF1 graph and session ---
    tf.reset_default_graph()
    input_rgb = tf.placeholder(tf.float32, shape=(None,None,None, 3))
    target = tf.placeholder(tf.float32, shape=(None,None,None, 1))
    mask_t = tf.placeholder(tf.float32, shape=(None,None,None,1))
    d_flg = tf.placeholder(tf.bool)  # dropout/training flag
    y_true, gr_input, d_input, m = preprocess(input_rgb, target, mask_t, synthia_size, eps, maxi)

    G_output = G_Depth(d_input, m, d_flg)

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    #saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir2))

    # NOTE(review): synthia_masks repeats mask_grad_synthia in the first
    # three slots (unlike nyu/sintel which use reg/uni masks) — confirm
    # this is intentional.
    synthia_masks = [mask_grad_synthia, mask_grad_synthia, mask_grad_synthia, mask_comb_synthia]
    nyu_masks     = [mask_reg, mask_uni, mask_grad_nyu, mask_comb_nyu]
    sintel_masks  = [mask_reg, mask_uni, mask_grad_sintel, mask_comb_sintel]
    model_list = [checkpoint_dir5, checkpoint_dir6, checkpoint_dir7, checkpoint_dir2]
    modell_list = [checkpoint_dir3, checkpoint_dir4, checkpoint_dir7, checkpoint_dir2]
    synthia_depth_outcome = []
    nyu_depth_outcome = []
    sintel_depth_outcome = []

    #getting depth for each of mask
    # Each iteration restores a checkpoint per dataset group and runs
    # inference; i = (synthia_mask, nyu_mask, sintel_mask, model, modell).
    for i in zip(synthia_masks,nyu_masks,sintel_masks, model_list, modell_list):
        saver.restore(sess, tf.train.latest_checkpoint(i[4]))
        synthia_temp = sess.run(G_output, feed_dict={input_rgb:synthia_rgb[None,:,:,:], target:synthia_depth[None,:,:,None], mask_t:i[0], d_flg:False})
        saver.restore(sess, tf.train.latest_checkpoint(i[3]))
        nyu_temp     = sess.run(G_output, feed_dict={input_rgb:nyu_rgb[None,:,:,:], target:nyu_depth[None,:,:,None], mask_t:i[1], d_flg:False})
        sintel_temp  = sess.run(G_output, feed_dict={input_rgb:sintel_rgb[None,:,:,:], target:sintel_depth[None,:,:,None], mask_t:i[2], d_flg:False})
        print(synthia_temp.shape)
        synthia_depth_outcome.append(synthia_temp[0,:,:,0])
        nyu_depth_outcome.append(nyu_temp[0,:,:,0])
        sintel_depth_outcome.append(sintel_temp[0,:,:,0])

    # Prepend the log ground-truth depth as the first panel of each row.
    synthia_depth_outcome.insert(0, np.log(synthia_depth))
    nyu_depth_outcome.insert(0, np.log(nyu_depth))
    sintel_depth_outcome.insert(0, np.log(sintel_depth))

    # Top row of each figure: RGB plus the four mask visualisations.
    synthia_upper = [synthia_rgb.astype(int), mask_reg[0,:,:,0]*np.log(synthia_depth), mask_uni[0,:,:,0]*np.log(synthia_depth),
                     mask_grad_synthia[0,:,:,0], mask_comb_synthia[0,:,:,0]]
    nyu_upper     = [nyu_rgb.astype(int), mask_reg[0,:,:,0]*np.log(nyu_depth), mask_uni[0,:,:,0]*np.log(nyu_depth),
                    mask_grad_nyu[0,:,:,0], mask_comb_nyu[0,:,:,0]]
    sintel_upper  = [sintel_rgb.astype(int), mask_reg[0,:,:,0]*np.log(sintel_depth), mask_uni[0,:,:,0]*np.log(sintel_depth),
                     mask_grad_sintel[0,:,:,0], mask_comb_sintel[0,:,:,0]]

    synthia_upper.extend(synthia_depth_outcome)
    nyu_upper.extend(nyu_depth_outcome)
    sintel_upper.extend(sintel_depth_outcome)

    # --- Render 2x5 panel figures per dataset ---
    print("Before final generate of synthia")
    cmap = sns.cubehelix_palette(1, light=1, as_cmap=True,reverse=True)
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(synthia_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/synthia_results.png', bbox_inches='tight')

    print("Before final generate of nyu")
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(nyu_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/nyu_results.png', bbox_inches='tight')

    print("Before final generate of sintel")
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(sintel_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/sintel_results.png', bbox_inches='tight')

    print("Finished generating png-s")

    print("Start of generating error_maps")

    # --- Error maps: |log GT - prediction| for the combined-mask model ---
    saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir2))
    rgb_list = [synthia_rgb[None,:,:,:], nyu_rgb[None,:,:,:], sintel_rgb[None,:,:,:]]
    depth_list = [synthia_depth[None,:,:,None], nyu_depth[None,:,:,None], sintel_depth[None,:,:,None]]
    errs = []
    for rgb, d in zip(rgb_list, depth_list):
        mask_c = get_grad_mask(rgb, synthia_size, combined=True)
        out_c = sess.run(G_output, feed_dict={input_rgb:rgb, target:d, mask_t:mask_c, d_flg:False})[0,:,:,0]
        # Clip before log to avoid -inf at zero depth.
        out_gt = resize(np.log(np.clip(d[0,:,:,0], 1e-3, 1e4)), synthia_size, preserve_range=True)
        errs.append(get_colors(abs(out_gt - out_c)))

    fig, axes = plt.subplots(nrows=1, ncols=3)
    fig.set_size_inches(15,5)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(3):
        axes[i].imshow(errs[i])

    fig.savefig('Results/Dense/errors_maps.png', bbox_inches='tight')

    sess.close()