    def assign_test_data(self, meta_data):
        ## set up TESTING data properties --------------------------------------------------
        class_count = {}
        for single_class in self.label_white_list:
            class_count[single_class] = 0
        self.test_rgb = []
        self.test_depth = []
        self.test_points = []
        self.test_2D_bb = []     
        self.test_3D_bb = []
        self.test_labels = []
        self.test_labels_image = []
        test_count = 0
        print('Processing test data')
        for entry in tqdm(range(self.train_size, self.train_size+self.test_size)):
            self.test_2D_bb.append(0) 
            # TODO: could convert the rotation matrix (bbs_3D[0][objekt][0:2, 0:2]) into a theta angle
            self.test_3D_bb.append(0)
            self.test_labels_image.append(0)   # keep track of which labels match each image
            self.test_labels.append(0)

            self.test_rgb.append(utils.cv2.resize(utils.open_rgb(meta_data, entry), self.rgbd_size))
            depth_16bit = utils.open_depth(meta_data, entry)
            # build a point cloud from the raw depth and randomly downsample it to a fixed size
            point_cloud = utils.make_point_cloud(depth_16bit, meta_data, entry)
            self.test_points.append(utils.rand_down_sample(point_cloud, self.point_cloud_size))
            # normalise the 16-bit depth map before resizing it to rgbd_size on the next line
            self.test_depth.append(utils.normalise_depth(depth_16bit))
            self.test_depth[test_count] = utils.cv2.resize(self.test_depth[test_count], self.rgbd_size)
            test_count += 1
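        # After the loop above, test_rgb and test_depth hold images resized to
        # rgbd_size, test_points holds point clouds downsampled to
        # point_cloud_size, and the 2D/3D bounding-box and label lists hold one
        # zero per test entry.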
Example #2
import numpy as np

def sintel_iter(rgb, depth, bs=6):
    # Yield batches of `bs` Sintel RGB/depth pairs from parallel lists of file
    # paths; depth_read and open_rgb are provided elsewhere in the project.
    for i in range(0, len(depth), bs):
        b_r = rgb[i:i + bs]
        b_d = depth[i:i + bs]
        # stack the depth maps as (batch, H, W, 1), clip to [0, 20], then exponentiate
        depths = np.exp(np.clip(np.concatenate([depth_read(d)[None, :, :, None] for d in b_d]), 0, 20))
        # stack the RGB images as (batch, H, W, 3)
        rgbs = np.concatenate([open_rgb(r)[None, :, :] for r in b_r])
        yield rgbs, depths
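# A minimal usage sketch for sintel_iter (assumes get_file_list, sintel_rgb_dir
# and sintel_depth_dir from the evaluation script below; the batch size of 4 is
# arbitrary):
def iterate_sintel(batch_size=4):
    rgb_files = sorted(get_file_list(sintel_rgb_dir))
    depth_files = sorted(get_file_list(sintel_depth_dir))
    for rgbs, depths in sintel_iter(rgb_files, depth_files, bs=batch_size):
        # each batch is a pair of 4-D arrays, e.g. (4, H, W, 3) and (4, H, W, 1)
        print(rgbs.shape, depths.shape)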
    def _read(self, files):
        from utils import open_depth_synthia, open_rgb, open_depth
        if any('depth' in f for f in files):
            # depth maps get an explicit trailing channel axis
            images = np.asarray([open_depth(f) for f in files])[..., np.newaxis]
        else:
            images = np.asarray([open_rgb(f) for f in files])
        return images
    def _read(self, files):
        from utils import open_depth_synthia, open_rgb
        if any('Depth' in f for f in files):
            # open_depth_synthia returns depth in meters
            images = np.asarray([open_depth_synthia(f) for f in files])
        else:
            images = np.asarray([open_rgb(f) for f in files])
        return images
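# The two _read variants above differ only in how depth files are detected and
# loaded: the first routes paths containing 'depth' to utils.open_depth and adds
# a trailing channel axis, the second routes paths containing 'Depth' to
# utils.open_depth_synthia (depth already in meters). A standalone sketch of the
# same dispatch logic, outside any class (numpy assumed imported as np):
def read_images(files, synthia=False):
    from utils import open_depth_synthia, open_rgb, open_depth
    key = 'Depth' if synthia else 'depth'
    if any(key in f for f in files):
        if synthia:
            return np.asarray([open_depth_synthia(f) for f in files])
        return np.asarray([open_depth(f) for f in files])[..., np.newaxis]
    return np.asarray([open_rgb(f) for f in files])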
Example #5
def evaluate_exp3():
    print("Start of generating of png files, attached to paper")
    synthia_list = get_file_list(synthia_dir)
    synthia_depth_list = filter_files(synthia_list, ['Stereo_Left', 'Depth', 'png'])
    synthia_rgb_list =  filter_files(synthia_list, ['Stereo_Left','RGB','png'])

    synthia_rgb = open_rgb(synthia_rgb_list[230])
    synthia_depth = open_depth(synthia_depth_list[230])

    synthia_rgb   = cv2.resize(synthia_rgb, tuple(synthia_size[::-1]))
    synthia_depth = cv2.resize(synthia_depth, tuple(synthia_size[::-1]))

    mat_f = h5py.File(nyu_dir, 'r')  # open the NYU .mat file read-only
    NYU_depth_list = mat_f['depths']
    NYU_rgb_list = mat_f['images']

    nyu_rgb = NYU_rgb_list[18][None,:,:,:].transpose((0,3,2,1))[0]
    nyu_depth = NYU_depth_list[18].T

    nyu_rgb  = cv2.resize(nyu_rgb, tuple(synthia_size[::-1]))
    nyu_depth = cv2.resize(nyu_depth, tuple(synthia_size[::-1]))

    sintel_depth_list = sorted(get_file_list(sintel_depth_dir))
    sintel_rgb_list = sorted(get_file_list(sintel_rgb_dir))

    sintel_rgb   = open_rgb(sintel_rgb_list[169])
    sintel_depth = depth_read(sintel_depth_list[169]) 

    sintel_rgb   = cv2.resize(sintel_rgb, tuple(synthia_size[::-1]))
    sintel_depth = cv2.resize(sintel_depth, tuple(synthia_size[::-1]))

    mask_uni = get_mask(synthia_size, 0.4)
    mask_reg = get_regular_grid_mask(synthia_size)
    
    mask_grad_synthia = combined_mask(synthia_rgb, synthia_size, combined = False)
    mask_comb_synthia = combined_mask(synthia_rgb, synthia_size, combined = True)

    print("Mask shapes: ",mask_uni.shape, mask_reg.shape, mask_grad_synthia.shape, mask_comb_synthia.shape)

    mask_grad_nyu = combined_mask(nyu_rgb, synthia_size, combined = False)
    mask_comb_nyu = combined_mask(nyu_rgb, synthia_size, combined = True)

    mask_grad_sintel = combined_mask(sintel_rgb, synthia_size, combined = False)
    mask_comb_sintel = combined_mask(sintel_rgb, synthia_size, combined = True)

    print("Passed stage of generating masks")
    tf.reset_default_graph()
    input_rgb = tf.placeholder(tf.float32, shape=(None,None,None, 3))
    target = tf.placeholder(tf.float32, shape=(None,None,None, 1))
    mask_t = tf.placeholder(tf.float32, shape=(None,None,None,1))
    d_flg = tf.placeholder(tf.bool)
    y_true, gr_input, d_input, m = preprocess(input_rgb, target, mask_t, synthia_size, eps, maxi)

    G_output = G_Depth(d_input, m, d_flg)

    sess = tf.InteractiveSession()
    saver = tf.train.Saver()
    #saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir2))

    synthia_masks = [mask_grad_synthia, mask_grad_synthia, mask_grad_synthia, mask_comb_synthia]
    nyu_masks     = [mask_reg, mask_uni, mask_grad_nyu, mask_comb_nyu]
    sintel_masks  = [mask_reg, mask_uni, mask_grad_sintel, mask_comb_sintel]
    model_list = [checkpoint_dir5, checkpoint_dir6, checkpoint_dir7, checkpoint_dir2]
    modell_list = [checkpoint_dir3, checkpoint_dir4, checkpoint_dir7, checkpoint_dir2]
    synthia_depth_outcome = []
    nyu_depth_outcome = []
    sintel_depth_outcome = []

    # predict depth for each mask / checkpoint pair
    for synthia_mask, nyu_mask, sintel_mask, ckpt, synthia_ckpt in zip(synthia_masks, nyu_masks, sintel_masks, model_list, modell_list):
        # restore the checkpoint used for the SYNTHIA prediction
        saver.restore(sess, tf.train.latest_checkpoint(synthia_ckpt))
        synthia_temp = sess.run(G_output, feed_dict={input_rgb: synthia_rgb[None, :, :, :], target: synthia_depth[None, :, :, None], mask_t: synthia_mask, d_flg: False})
        # restore the checkpoint used for the NYU and Sintel predictions
        saver.restore(sess, tf.train.latest_checkpoint(ckpt))
        nyu_temp     = sess.run(G_output, feed_dict={input_rgb: nyu_rgb[None, :, :, :], target: nyu_depth[None, :, :, None], mask_t: nyu_mask, d_flg: False})
        sintel_temp  = sess.run(G_output, feed_dict={input_rgb: sintel_rgb[None, :, :, :], target: sintel_depth[None, :, :, None], mask_t: sintel_mask, d_flg: False})
        print(synthia_temp.shape)
        synthia_depth_outcome.append(synthia_temp[0, :, :, 0])
        nyu_depth_outcome.append(nyu_temp[0, :, :, 0])
        sintel_depth_outcome.append(sintel_temp[0, :, :, 0])

    synthia_depth_outcome.insert(0, np.log(synthia_depth))
    nyu_depth_outcome.insert(0, np.log(nyu_depth))
    sintel_depth_outcome.insert(0, np.log(sintel_depth))

    synthia_upper = [synthia_rgb.astype(int), mask_reg[0,:,:,0]*np.log(synthia_depth), mask_uni[0,:,:,0]*np.log(synthia_depth),
                     mask_grad_synthia[0,:,:,0], mask_comb_synthia[0,:,:,0]]
    nyu_upper     = [nyu_rgb.astype(int), mask_reg[0,:,:,0]*np.log(nyu_depth), mask_uni[0,:,:,0]*np.log(nyu_depth), 
                    mask_grad_nyu[0,:,:,0], mask_comb_nyu[0,:,:,0]]
    sintel_upper  = [sintel_rgb.astype(int), mask_reg[0,:,:,0]*np.log(sintel_depth), mask_uni[0,:,:,0]*np.log(sintel_depth),
                     mask_grad_sintel[0,:,:,0], mask_comb_sintel[0,:,:,0]]

    synthia_upper.extend(synthia_depth_outcome)
    nyu_upper.extend(nyu_depth_outcome)
    sintel_upper.extend(sintel_depth_outcome)
    
    print("Before final generate of synthia")
    cmap = sns.cubehelix_palette(1, light=1, as_cmap=True, reverse=True)
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(synthia_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/synthia_results.png', bbox_inches='tight')

    print("Before final generate of nyu")
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(nyu_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/nyu_results.png', bbox_inches='tight')

    print("Before final generate of sintel")
    fig, axes = plt.subplots(nrows=2, ncols=5)
    fig.set_size_inches(30,8)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(2):
        for j in range(5):
            axes[i,j].imshow(sintel_upper[i*5 + j], cmap)

    fig.savefig('Results/Dense/sintel_results.png', bbox_inches='tight')

    print("Finished generating png-s")

    print("Start of generating error_maps")

    saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir2))
    rgb_list = [synthia_rgb[None,:,:,:], nyu_rgb[None,:,:,:], sintel_rgb[None,:,:,:]]
    depth_list = [synthia_depth[None,:,:,None], nyu_depth[None,:,:,None], sintel_depth[None,:,:,None]]
    errs = []
    for rgb, d in zip(rgb_list, depth_list):
        mask_c = get_grad_mask(rgb, synthia_size, combined=True)
        out_c = sess.run(G_output, feed_dict={input_rgb:rgb, target:d, mask_t:mask_c, d_flg:False})[0,:,:,0]
        out_gt = resize(np.log(np.clip(d[0,:,:,0], 1e-3, 1e4)), synthia_size, preserve_range=True)
        errs.append(get_colors(abs(out_gt - out_c)))

    fig, axes = plt.subplots(nrows=1, ncols=3)
    fig.set_size_inches(15,5)
    plt.setp(axes, xticks=[], yticks=[])
    fig.tight_layout()
    for i in range(3):
        axes[i].imshow(errs[i])

    fig.savefig('Results/Dense/errors_maps.png', bbox_inches='tight')

    sess.close()
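# Hypothetical entry point for the example above: evaluate_exp3 reads its
# configuration from module-level globals (synthia_dir, nyu_dir, the
# sintel_*_dir paths, the checkpoint_dir* paths, synthia_size, eps, maxi),
# so it is normally run as a script once those are set.
if __name__ == '__main__':
    evaluate_exp3()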
Example #6
    def csv_train_data(self, meta_data):
        ## set up TRAINING data properties -------------------------------------------------
        class_count = {}
        for single_class in self.label_white_list:
            class_count[single_class] = 0
        # we don't know the size of these:
        self.filename = []
        self.label = []
        self.label_int = []
        self.width = []
        self.height = []
        self.xmin = []
        self.ymin = []
        self.xmax = []
        self.ymax = []

        im_count = 0
        for entry in range(7751):  # first 7751 entries, roughly the 75% training split
            if im_count >= self.train_num:
                break
            current_rgb = utils.open_rgb(meta_data, entry)
            bbs_2D = utils.get_2D_bb(meta_data, entry)
            labels = utils.get_label(meta_data, entry)
            checked_im = 0
            for objekt in range(len(bbs_2D)):
                if labels[objekt] not in self.label_white_list:  # only take the objects we care about
                    continue
                if checked_im == 0:
                    im_count += 1
                    checked_im = 1
                # if class_count[labels[objekt]] >= self.train_num_per_class:  # limit examples per class
                #     continue
                # class_count[labels[objekt]] += 1

                bb_2D = bbs_2D[objekt]
                w, h = imagesize.get(meta_data[entry][0])  # get width and height without loading the image into RAM

                self.filename.append(meta_data[entry][0])
                self.label.append(labels[objekt])
                self.label_int.append(self.label_white_list.index(labels[objekt]) + 1)
                # self.label.append(self.label_white_list.index(labels[objekt])+1)
                self.width.append(w)
                self.height.append(h)
                # bb_2D is treated as [x, y, width, height]; TODO: double-check this conversion
                self.xmin.append(int(bb_2D[0]))
                self.ymin.append(int(bb_2D[1]))
                self.xmax.append(int(bb_2D[0]) + int(bb_2D[2]))
                self.ymax.append(int(bb_2D[1]) + int(bb_2D[3]))
        data = {
            'filename': self.filename,
            'class': self.label,
            'class_int': self.label_int,
            'width': self.width,
            'height': self.height,
            'xmin': self.xmin,
            'ymin': self.ymin,
            'xmax': self.xmax,
            'ymax': self.ymax
        }
        df = pd.DataFrame.from_dict(data)
        self.train_df = df
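    # Hypothetical follow-up (not part of the original class): once
    # csv_train_data has built self.train_df, the annotations can be written
    # out as a CSV for a detection pipeline; `output_path` is an assumed name.
    def write_train_csv(self, output_path='train_labels.csv'):
        # pandas handles the serialisation; the index column is dropped
        self.train_df.to_csv(output_path, index=False)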