def load_verify_sample(self):
    if self.__sample_index >= self.__training_samples:
        self.__sample_index = 0
        self.epoch += 1
    img = scipy.misc.imread(
        self.__params.left_path + self.__img_contets[self.__sample_index]).astype(np.float32)
    # RGB -> grayscale (ITU-R BT.601 luma weights)
    img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    model = self.__params.kitti_disp_path
    disp = pfm.load(model + self.__contents[self.__sample_index])[0].astype(float)
    gt = pfm.load(self.__params.gt_path + self.__contents[self.__sample_index])[0].astype(float)
    gt_noc = pfm.load(self.__params.gt_path_noc + self.__contents[self.__sample_index])[0].astype(float)
    s = img.shape
    height, width = img.shape
    # edge-pad on top / left so every sample reaches the resize target
    if s[0] < self.__heightresize:
        padding = self.__heightresize - s[0]
        img = np.lib.pad(img, [(padding, 0), (0, 0)], 'edge')
        disp = np.lib.pad(disp, [(padding, 0), (0, 0)], 'edge')
        gt = np.lib.pad(gt, [(padding, 0), (0, 0)], 'edge')
        gt_noc = np.lib.pad(gt_noc, [(padding, 0), (0, 0)], 'edge')
    if s[1] < self.__widthresize:
        padding = self.__widthresize - s[1]
        img = np.lib.pad(img, [(0, 0), (padding, 0)], 'edge')
        disp = np.lib.pad(disp, [(0, 0), (padding, 0)], 'edge')
        gt = np.lib.pad(gt, [(0, 0), (padding, 0)], 'edge')
        gt_noc = np.lib.pad(gt_noc, [(0, 0), (padding, 0)], 'edge')
    # network input: disparity and grayscale image stacked as a 2-channel tensor
    data = np.stack([disp, img], axis=2)
    data = np.reshape(data, [1, data.shape[0], data.shape[1], data.shape[2]])
    gt = np.reshape(gt, [1, gt.shape[0], gt.shape[1], 1])
    gt_noc = np.reshape(gt_noc, [1, gt_noc.shape[0], gt_noc.shape[1], 1])
    self.__sample_index += 1
    return data, gt, gt_noc, self.__sample_index, height, width, \
        self.__img_contets[self.__sample_index - 1]
def load_training_sample(self):
    if self.__sample_index >= self.__training_samples:
        self.__sample_index = 0
        self.epoch += 1
        self.shuffle_data()
    img = scipy.misc.imread(
        self.__params.left_path + self.__img_contets[self.__sample_index]).astype(np.float32)
    # RGB -> grayscale (ITU-R BT.601 luma weights)
    img = img[:, :, 0] * 0.299 + img[:, :, 1] * 0.587 + img[:, :, 2] * 0.114
    # randomly alternate between the two KITTI disparity sources
    model = self.__params.kitti_disp_path
    if bool(random.getrandbits(1)):
        model = self.__params.kitti15_disp_path
    disp = pfm.load(model + self.__contents[self.__sample_index])[0].astype(float)
    gt = pfm.load(self.__params.gt_path + self.__contents[self.__sample_index])[0].astype(float)
    gt_noc = pfm.load(self.__params.gt_path_noc + self.__contents[self.__sample_index])[0].astype(float)
    # random 256x256 crop shared by the input and both ground-truth maps
    s = img.shape
    maxheight = s[0] - 256
    maxwidth = s[1] - 256
    x = random.randint(0, maxheight)
    y = random.randint(0, maxwidth)
    disp = disp[x:x + 256, y:y + 256]
    img = img[x:x + 256, y:y + 256]
    gt = gt[x:x + 256, y:y + 256]
    gt_noc = gt_noc[x:x + 256, y:y + 256]
    data = np.stack([disp, img], axis=2)
    data = np.reshape(data, [1, data.shape[0], data.shape[1], data.shape[2]])
    gt = np.reshape(gt, [1, gt.shape[0], gt.shape[1], 1])
    gt_noc = np.reshape(gt_noc, [1, gt_noc.shape[0], gt_noc.shape[1], 1])
    self.__sample_index += 1
    return data, gt, gt_noc, self.__sample_index
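# Standalone sketch of the packing used by both loaders above, with dummy NumPy
# arrays instead of real dataset files: the disparity map and the grayscale image
# become a 2-channel [1, H, W, 2] input, and each ground-truth map becomes
# [1, H, W, 1]. This is only an illustration of the shapes, not part of the loader.
import numpy as np

disp = np.random.rand(256, 256).astype(np.float32)  # stand-in disparity crop
img = np.random.rand(256, 256).astype(np.float32)   # stand-in grayscale crop
gt = np.random.rand(256, 256).astype(np.float32)    # stand-in ground truth

data = np.stack([disp, img], axis=2)
data = np.reshape(data, [1, data.shape[0], data.shape[1], data.shape[2]])
gt = np.reshape(gt, [1, gt.shape[0], gt.shape[1], 1])

print(data.shape)  # (1, 256, 256, 2)
print(gt.shape)    # (1, 256, 256, 1)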
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
import sys
import math
import random
import os

sys.path.insert(0, '../pylibs')
sys.path.insert(0, '../src')
import cpputils
import pfmutil as pfm

l_gt_p = "..../Freiburg/driving/disparity/15mm_focallength/scene_forwards/slow/left/"
r_gt_p = "..../Freiburg/driving/disparity/15mm_focallength/scene_forwards/slow/right/"
save_p = ".../Freiburg/driving/disparity/15mm_focallength/scene_forwards/slow/left_nonocc/"

ims = os.listdir(l_gt_p)
for im in ims:
    l_gt = pfm.load(l_gt_p + im)[0]
    r_gt = pfm.load(r_gt_p + im)[0]
    occ = cpputils.make_occ(l_gt, r_gt)
    pfm.save(save_p + im, occ)
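# The compiled helper cpputils.make_occ is not shown in this excerpt. Purely as a
# hypothetical reference (an assumption about its behavior, not the actual
# implementation), a left-right consistency check along these lines produces a
# similar non-occluded mask from a pair of ground-truth disparity maps:
import numpy as np

def lr_consistency_mask(l_disp, r_disp, thresh=1.0):
    """Hypothetical stand-in: a left pixel is kept (non-occluded) when the
    right-view disparity it maps to agrees within `thresh` pixels."""
    h, w = l_disp.shape
    xs = np.tile(np.arange(w), (h, 1))
    # column each left pixel lands on in the right image: x_r = x_l - d_l
    xr = np.clip(np.round(xs - l_disp).astype(np.int64), 0, w - 1)
    r_at_match = np.take_along_axis(r_disp, xr, axis=1)
    return (np.abs(l_disp - r_at_match) <= thresh).astype(np.float32)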
# error maps of the cbmv-gcnet disparity
PATH_TO_ERR_CBMV_IMAGES = "/media/ccjData2/research-projects/kitti-devkit/results/cbmvnet-gc-F8-RMSp-sf-epo26Based-epo30-4dsConv-k5-testKT15/errors_disp_img_0"
# error maps of the gcnet disparity
PATH_TO_ERR_GC_IMAGES = "/media/ccjData2/research-projects/kitti-devkit/results/gcnet-F8-RMSp-sf-epo30-4dsConv-k5-testKT15/errors_disp_img_0"

if 0:
    for i in range(0, 200):
        src_limg_name = PATH_TO_LEFT_IMAGES + '/%06d_10.png' % i
        dst_limg_name = 'results/msnet-supp-figs/kt15/left/%06d_L_10.png' % i
        os.system("cp {} {}".format(src_limg_name, dst_limg_name))
        src_rimg_name = PATH_TO_RIGHT_IMAGES + '/%06d_10.png' % i
        dst_rimg_name = 'results/msnet-supp-figs/kt15/right/%06d_R_10.png' % i
        os.system("cp {} {}".format(src_rimg_name, dst_rimg_name))
        dispGT = pfm.load(PATH_TO_LEFT_DISP_GT + '/%06d_10.pfm' % i)
        dispGT[dispGT == np.inf] = .0
        dispGT = kitti_colormap(dispGT)
        # here we directly read the pfm disparity
        #disp0 = kitti_colormap( pfm.load(PATH_TO_DISP_GC_IMAGES+'/%06d.pfm'%i) )
        #disp1 = kitti_colormap( pfm.load(PATH_TO_DISP_CBMV_IMAGES+'/0%09d.pfm'%i) )
        disp0 = cv2.imread(PATH_TO_DISP_GC_IMAGES + '/%06d_10.png' % i)
        print(PATH_TO_DISP_GC_IMAGES + '/%06d_10.png' % i)
        disp1 = cv2.imread(PATH_TO_DISP_CBMV_IMAGES + '/%06d_10.png' % i)
        w = disp0.shape[1]
        disp0_err = cv2.imread(PATH_TO_ERR_GC_IMAGES + '/%06d_10.png' % i)
        disp1_err = cv2.imread(PATH_TO_ERR_CBMV_IMAGES + '/%06d_10.png' % i)
        cv2.imwrite('results/msnet-supp-figs/kt15/disp_gc/%06d_10_gc.png' % i, disp0)
# index ranges used at different times; exactly one should be active
# (range(0, 400) is activated here only so the loop body below runs)
#for i in range(503, 504):
#for i in range(990, 1100):
#for i in range(1115, 1161):
#for i in range(599):
for i in range(0, 400):
    src_limg_name = PATH_TO_LEFT_IMAGES + '/0%09d.png' % i
    left = cv2.imread(src_limg_name)[:, :, ::-1]
    dst_limg_name = 'results/msnet-supp-figs/left/0%09d_L.png' % i
    os.system("cp {} {}".format(src_limg_name, dst_limg_name))
    src_rimg_name = PATH_TO_RIGHT_IMAGES + '/0%09d.png' % i
    right = cv2.imread(src_rimg_name)[:, :, ::-1]
    dst_rimg_name = 'results/msnet-supp-figs/right/0%09d_R.png' % i
    os.system("cp {} {}".format(src_rimg_name, dst_rimg_name))
    # here we directly read the pfm disparity
    disp0 = kitti_colormap(pfm.load(PATH_TO_PSM_IMAGES + '/0%09d.pfm' % i))
    disp1 = kitti_colormap(pfm.load(PATH_TO_CBMV_IMAGES + '/0%09d.pfm' % i))
    w = disp0.shape[1]
    # channel order is flipped ([2, 1, 0]) because cv2.imwrite expects BGR
    cv2.imwrite('results/msnet-supp-figs/disp_gc/0%06d_gc.png' % i, disp0[:, :, [2, 1, 0]])
    cv2.imwrite('results/msnet-supp-figs/disp_msgc/0%06d_msgc.png' % i, disp1[:, :, [2, 1, 0]])
    # stack left, right and the two disparity maps into one collage,
    # separated by thin black boards
    board_tmp = np.zeros((10, w, 3))
    collage = np.concatenate(
        (left, board_tmp, right, board_tmp, disp0, board_tmp, disp1), 0)
    cv2.imwrite('results/msnet-supp-figs/concat/0%06d_all.png' % i, collage[:, :, [2, 1, 0]])

sys.exit()
def __create_samples_mem(self, iml, imr, index):
    w, h, ndisp = self.__read_calib(index)
    gt = pfm.load(self.__data_path + self.__trainset[index] + "/disp0GT.pfm")[0]
    gt = np.reshape(gt, [gt.shape[0] * gt.shape[1], 1])
    infs = np.concatenate((np.argwhere(gt == np.inf), np.argwhere(gt < 0)), axis=0)
    # infs = np.empty((0,2))
    gt = np.delete(gt, infs[:, 0], axis=0)
    gt = np.round(gt)
    gt = gt.astype(np.int32)
    #print ("loading gt ... gt shape = {}".format(gt.shape))
    random_samples = fte.generate_d_indices(gt, ndisp, 1)
    assert random_samples.shape[1] == 3  # here: 3 means 1 positive sample + 2 negative ones
    samples = np.empty((random_samples.shape[0] * random_samples.shape[1], 21))
    #print ("samples shape = {}".format(samples.shape))
    #print ("starting census ...")
    ################## Census compute ##########################################
    #print ('w = {}, h = {}, ndisp = {}, censW = {}'.format(w, h, ndisp, self.__censw))
    #print ('last iml = {}, last imr = {}'.format(iml[h-1,w-1], imr[h-1,w-1]))
    costcensus = mtc.census(iml, imr, ndisp, self.__censw).astype(np.float64)
    #print ('costcensus shape = {}'.format(costcensus.shape))
    costcensusR = fte.get_right_cost(costcensus)
    costcensus = np.reshape(
        costcensus, [costcensus.shape[0] * costcensus.shape[1], costcensus.shape[2]])
    costcensusR = np.reshape(
        costcensusR, [costcensusR.shape[0] * costcensusR.shape[1], costcensusR.shape[2]])
    costcensus = np.delete(costcensus, infs[:, 0], axis=0)
    samples[:, 0] = fte.get_samples(costcensus, random_samples)
    samples[:, 4] = fte.extract_ratio(costcensus, random_samples, .01)
    samples[:, 8] = fte.extract_likelihood(costcensus, random_samples, self.__cens_sigma)
    del costcensus
    #print ("census done!")
    r_pkrn = fte.extract_ratio(costcensusR, .01)
    r_pkrn = np.reshape(r_pkrn, [h, w, ndisp])
    r_pkrn = fte.get_left_cost(r_pkrn)
    r_pkrn = np.reshape(r_pkrn, [r_pkrn.shape[0] * r_pkrn.shape[1], r_pkrn.shape[2]])
    r_pkrn = np.delete(r_pkrn, infs[:, 0], axis=0)
    samples[:, 12] = fte.get_samples(r_pkrn, random_samples)
    del r_pkrn
    r_aml = fte.extract_likelihood(costcensusR, self.__cens_sigma)
    r_aml = np.reshape(r_aml, [h, w, ndisp])
    r_aml = fte.get_left_cost(r_aml)
    r_aml = np.reshape(r_aml, [r_aml.shape[0] * r_aml.shape[1], r_aml.shape[2]])
    r_aml = np.delete(r_aml, infs[:, 0], axis=0)
    samples[:, 16] = fte.get_samples(r_aml, random_samples)
    del r_aml
    del costcensusR
    ############################################################################
    ################## NCC compute #############################################
    costncc = mtc.nccNister(iml, imr, ndisp, self.__nccw)
    costncc = fte.swap_axes(costncc)
    costnccR = fte.get_right_cost(costncc)
    costncc = np.reshape(
        costncc, [costncc.shape[0] * costncc.shape[1], costncc.shape[2]])
    costnccR = np.reshape(
        costnccR, [costnccR.shape[0] * costnccR.shape[1], costnccR.shape[2]])
    costncc = np.delete(costncc, infs[:, 0], axis=0)
    samples[:, 1] = fte.get_samples(costncc, random_samples)
    samples[:, 5] = fte.extract_ratio(costncc, random_samples, 1.01)
    samples[:, 9] = fte.extract_likelihood(costncc, random_samples, self.__ncc_sigma)
    del costncc
    r_pkrn = fte.extract_ratio(costnccR, 1.01)
    r_pkrn = np.reshape(r_pkrn, [h, w, ndisp])
    r_pkrn = fte.get_left_cost(r_pkrn)
    r_pkrn = np.reshape(r_pkrn, [r_pkrn.shape[0] * r_pkrn.shape[1], r_pkrn.shape[2]])
    r_pkrn = np.delete(r_pkrn, infs[:, 0], axis=0)
    samples[:, 13] = fte.get_samples(r_pkrn, random_samples)
    del r_pkrn
    r_aml = fte.extract_likelihood(costnccR, self.__ncc_sigma)
    r_aml = np.reshape(r_aml, [h, w, ndisp])
    r_aml = fte.get_left_cost(r_aml)
    r_aml = np.reshape(r_aml, [r_aml.shape[0] * r_aml.shape[1], r_aml.shape[2]])
    r_aml = np.delete(r_aml, infs[:, 0], axis=0)
    samples[:, 17] = fte.get_samples(r_aml, random_samples)
    del r_aml
    del costnccR
    ############################################################################
    ################## Sob compute #############################################
    sobl = mtc.sobel(iml)
    sobr = mtc.sobel(imr)
    costsob = mtc.sadsob(sobl, sobr, ndisp, 5).astype(np.float64)
    costsob = fte.swap_axes(costsob)
    costsobR = fte.get_right_cost(costsob)
    costsob = np.reshape(
        costsob, [costsob.shape[0] * costsob.shape[1], costsob.shape[2]])
    costsobR = np.reshape(
        costsobR, [costsobR.shape[0] * costsobR.shape[1], costsobR.shape[2]])
    costsob = np.delete(costsob, infs[:, 0], axis=0)
    samples[:, 2] = fte.get_samples(costsob, random_samples)
    samples[:, 6] = fte.extract_ratio(costsob, random_samples, .01)
    samples[:, 10] = fte.extract_likelihood(costsob, random_samples, self.__sad_sigma)
    del costsob
    r_pkrn = fte.extract_ratio(costsobR, .01)
    r_pkrn = np.reshape(r_pkrn, [h, w, ndisp])
    r_pkrn = fte.get_left_cost(r_pkrn)
    r_pkrn = np.reshape(r_pkrn, [r_pkrn.shape[0] * r_pkrn.shape[1], r_pkrn.shape[2]])
    r_pkrn = np.delete(r_pkrn, infs[:, 0], axis=0)
    samples[:, 14] = fte.get_samples(r_pkrn, random_samples)
    del r_pkrn
    r_aml = fte.extract_likelihood(costsobR, self.__sad_sigma)
    r_aml = np.reshape(r_aml, [h, w, ndisp])
    r_aml = fte.get_left_cost(r_aml)
    r_aml = np.reshape(r_aml, [r_aml.shape[0] * r_aml.shape[1], r_aml.shape[2]])
    r_aml = np.delete(r_aml, infs[:, 0], axis=0)
    samples[:, 18] = fte.get_samples(r_aml, random_samples)
    del r_aml
    del costsobR
    ############################################################################
    ################## Sad compute #############################################
    costsad = mtc.zsad(iml, imr, ndisp, self.__sadw).astype(np.float64)
    costsad = fte.swap_axes(costsad)
    costsadR = fte.get_right_cost(costsad)
    costsad = np.reshape(
        costsad, [costsad.shape[0] * costsad.shape[1], costsad.shape[2]])
    costsadR = np.reshape(
        costsadR, [costsadR.shape[0] * costsadR.shape[1], costsadR.shape[2]])
    costsad = np.delete(costsad, infs[:, 0], axis=0)
    samples[:, 3] = fte.get_samples(costsad, random_samples)
    samples[:, 7] = fte.extract_ratio(costsad, random_samples, .01)
    samples[:, 11] = fte.extract_likelihood(costsad, random_samples, self.__sad_sigma)
    del costsad
    r_pkrn = fte.extract_ratio(costsadR, .01)
    r_pkrn = np.reshape(r_pkrn, [h, w, ndisp])
    r_pkrn = fte.get_left_cost(r_pkrn)
    r_pkrn = np.reshape(r_pkrn, [r_pkrn.shape[0] * r_pkrn.shape[1], r_pkrn.shape[2]])
    r_pkrn = np.delete(r_pkrn, infs[:, 0], axis=0)
    samples[:, 15] = fte.get_samples(r_pkrn, random_samples)
    del r_pkrn
    r_aml = fte.extract_likelihood(costsadR, self.__sad_sigma)
    r_aml = np.reshape(r_aml, [h, w, ndisp])
    r_aml = fte.get_left_cost(r_aml)
    r_aml = np.reshape(r_aml, [r_aml.shape[0] * r_aml.shape[1], r_aml.shape[2]])
    r_aml = np.delete(r_aml, infs[:, 0], axis=0)
    samples[:, 19] = fte.get_samples(r_aml, random_samples)
    del r_aml
    del costsadR
    samples[:, 20] = fte.generate_labels(random_samples)
    return samples
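# Column layout of `samples` in __create_samples_mem, read directly off the
# indexing above (for documentation only; no new behavior):
#   0-3   left-view matching costs: census, NCC, Sobel-SAD, ZSAD
#   4-7   left-view ratio (r_pkrn-style) features for the same four matchers
#   8-11  left-view likelihood (r_aml-style) features
#   12-15 right-view ratio features mapped back to the left view via get_left_cost
#   16-19 right-view likelihood features mapped back to the left view
#   20    class label from fte.generate_labels (positive vs. negative sample)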
import os
import sys
import numpy as np

sys.path.insert(0, '../../pylibs')
import pfmutil as pfm

freiburg_35mm_forward_fast = '/media/kbatsos/Data2/datasets/Freiburg/disparity/35mm_focallength/scene_forwards/fast/left/'
freiburg_35mm_forward_slow = '/media/kbatsos/Data2/datasets/Freiburg/disparity/35mm_focallength/scene_forwards/slow/left/'
freiburg_35mm_backward_fast = '/media/kbatsos/Data2/datasets/Freiburg/disparity/35mm_focallength/scene_backwards/fast/left/'
freiburg_35mm_backward_slow = '/media/kbatsos/Data2/datasets/Freiburg/disparity/35mm_focallength/scene_backwards/slow/left/'
freiburg_15mm_forward_fast = '/media/kbatsos/Data2/datasets/Freiburg/disparity/15mm_focallength/scene_forwards/fast/left/'
freiburg_15mm_forward_slow = '/media/kbatsos/Data2/datasets/Freiburg/disparity/15mm_focallength/scene_forwards/slow/left/'
freiburg_15mm_backward_fast = '/media/kbatsos/Data2/datasets/Freiburg/disparity/15mm_focallength/scene_backwards/fast/left/'
freiburg_15mm_backward_slow = '/media/kbatsos/Data2/datasets/Freiburg/disparity/15mm_focallength/scene_backwards/slow/left/'

set_search = freiburg_15mm_backward_slow
freiburg_sets = os.listdir(set_search)

# scan every disparity map in the chosen set and report the global maximum disparity
max_d = 0
for set_n in freiburg_sets:
    disp = pfm.load(set_search + set_n)
    dmax = np.max(disp[0])
    print("##################### set " + set_n + " ############################")
    print(dmax)
    if max_d < dmax:
        max_d = dmax

print(max_d)
print(len(freiburg_sets))