# Shared imports for the functions below. These functions are collected from
# several training/evaluation scripts in this repo; the model classes
# (DCGAN/EVAL/FCN/Deep_DCGAN/Deep_EVAL), the tf flags object (flags/FLAGS), and
# helpers such as read_files(), mean_read_files(), searchminmax() and
# center_crop() are defined elsewhere.
import glob
import os
import pdb
import pickle
import pprint
import random
import time

import numpy as np
import scipy.misc
import scipy.io as sio
from scipy import ndimage
from natsort import natsorted
from matplotlib import cm
import matplotlib.pyplot as plt
import tensorflow as tf

pp = pprint.PrettyPrinter()


def load_train_val_files():
    """Split the resized ECCV dataset into train/val file lists and pickle them."""
    datapath = '/research2/ECCV_dataset_resized'
    val_data = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
    dataset = {}
    train_input, train_gt = [], []
    val_input, val_gt = [], []
    folders = natsorted(glob.glob(os.path.join(datapath, '*')))
    count = 1
    for ff in range(len(folders)):
        subfolders = natsorted(glob.glob(os.path.join(folders[ff], '*')))
        for ss in range(len(subfolders)):
            print('folder:%d subfolder:%d' % (ff, ss))
            files = natsorted(glob.glob(os.path.join(subfolders[ss], '*.bmp')))
            # The last .bmp in each subfolder is the ground-truth normal map;
            # the preceding files are the NIR inputs.
            if count not in val_data:
                train_input.append(files[:-1])
                train_gt.append(files[-1])
            else:
                val_input.append(files[:-1])
                val_gt.append(files[-1])
            count += 1
    dataset['train_input'] = train_input
    dataset['train_gt'] = train_gt
    dataset['val_input'] = val_input
    dataset['val_gt'] = val_gt
    with open('ECCV_256size.pickle', 'wb') as f:
        pickle.dump(dataset, f)

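# Editorial sketch (not part of the original script): reading the split back.
# The pickle filename matches the one written by load_train_val_files() above.
def load_split(picklepath='ECCV_256size.pickle'):
    with open(picklepath, 'rb') as f:
        dataset = pickle.load(f)
    # dataset['train_input'][i] is the list of input .bmp paths for sample i;
    # dataset['train_gt'][i] is the matching ground-truth normal-map path.
    return dataset
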
def draw_mean(csvpath):
    """Plot mean error / mean angle / good-degree curves for every CSV in csvpath."""
    min_idx = 16  # number of epochs plotted per curve
    files = natsorted(glob.glob(os.path.join(csvpath, '*.csv')))
    num_files = len(files)
    color = iter(cm.rainbow(np.linspace(0, 1, num_files)))
    mean_error, mean_ang, mean_deg = [], [], []
    for f in range(num_files):
        err, ang, deg = mean_read_files(files[f], csvpath)
        mean_error.append(err)
        mean_ang.append(ang)
        mean_deg.append(deg)
        if len(err) < min_idx:
            print('Minimum epochs is %d, current file has %d epochs' % (min_idx, len(err)))
            pdb.set_trace()
    for d in range(num_files):
        labelname = os.path.splitext(os.path.basename(files[d]))[0]
        plt.plot(range(min_idx), mean_error[d][:min_idx], color=next(color), label=labelname)
    plt.legend(loc='best')
    #plt.legend(loc='upper left', bbox_to_anchor=[0, 1], ncol=1)
    plt.savefig(os.path.join(csvpath, 'mean_err.png'))
    plt.close()
    color = iter(cm.rainbow(np.linspace(0, 1, num_files)))
    for d in range(num_files):
        labelname = os.path.splitext(os.path.basename(files[d]))[0]
        plt.plot(range(min_idx), mean_ang[d][:min_idx], color=next(color), label=labelname)
    plt.legend(loc='upper left', bbox_to_anchor=[0, 1], ncol=1)
    plt.savefig(os.path.join(csvpath, 'mean_ang.png'))
    plt.close()
    color = iter(cm.rainbow(np.linspace(0, 1, num_files)))
    for d in range(num_files):
        labelname = os.path.splitext(os.path.basename(files[d]))[0]
        plt.plot(range(min_idx), mean_deg[d][:min_idx], color=next(color), label=labelname)
    plt.legend(loc='upper left', bbox_to_anchor=[0, 1], ncol=1)
    plt.savefig(os.path.join(csvpath, 'good_deg.png'))
    plt.close()

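# mean_read_files() (and read_files() used by drawplot below) are defined
# elsewhere in the repo. A hypothetical stand-in showing the contract draw_mean()
# assumes: one CSV per experiment, one row per epoch, with mean error / angle /
# good-degree series. The column layout here is an assumption, not the repo's
# actual format.
def mean_read_files_sketch(csvfile, csvpath):
    err, ang, deg = [], [], []
    with open(csvfile) as f:
        rows = [line.strip().split(',') for line in f.readlines()[1:]]  # skip header
    for row in rows:  # assumed layout: epoch, mean_err, mean_ang, good_deg, ...
        err.append(float(row[1]))
        ang.append(float(row[2]))
        deg.append(float(row[3]))
    return err, ang, deg
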
def diff_lights(mainpath):
    """Average the per-row light-difference column (5th field) over every CSV in mainpath."""
    sum_diff = 0.0
    num = 0
    files = natsorted(glob.glob(os.path.join(mainpath, '*.csv')))
    for d in range(len(files)):
        print('dir %d/%d' % (d, len(files)))
        with open(files[d], 'r') as f:
            content = f.readlines()
        for line in range(1, len(content)):  # skip the header row
            values = content[line].strip().split(',')
            sum_diff += float(values[4])
            num += 1
    print('mean different lights: %0.6f' % (sum_diff / num))

def finding_bestview(mainpath):
    """Scan every per-view CSV and report the views with the best error / angle / degree."""
    min_err = 100.0
    min_arg = 100.0
    max_deg = 0.0
    err_view, arg_view, deg_view = 'err', 'arg', 'deg'
    files = natsorted(glob.glob(os.path.join(mainpath, '*.csv')))
    for d in range(len(files)):
        print('dir %d/%d' % (d, len(files)))
        file_ = files[d]
        min_err, min_arg, max_deg, err_view, arg_view, deg_view = searchminmax(
            file_, min_err, min_arg, max_deg, err_view, arg_view, deg_view, file_)
    print('min_err:%f view:%s' % (min_err, err_view))
    print('min_arg:%f view:%s' % (min_arg, arg_view))
    print('max_deg:%f view:%s' % (max_deg, deg_view))

def compute_average(path_, num_imgs):
    """Average num_imgs randomly chosen views per object/material and save the result."""
    main_path = '/home/yjyoon/Dropbox/ECCV_result/avg_view/%03d/' % num_imgs
    if not os.path.exists(main_path):
        os.makedirs(main_path)
    dirs = os.listdir(path_)
    for d in range(len(dirs)):
        subp = os.path.join(path_, dirs[d])
        subdirs = os.listdir(subp)
        for subd in range(len(subdirs)):
            images = natsorted(
                glob.glob(os.path.join(path_, dirs[d], subdirs[subd], '*.bmp')))
            rand_idx = np.random.permutation(len(images))
            avg_img = np.zeros((600, 800, 3))  # float64 accumulator
            for idx in range(num_imgs):
                avg_img += scipy.misc.imread(images[rand_idx[idx]])
            avg_img /= num_imgs
            savepath = os.path.join(
                main_path, 'avg_img_%s_%03d.bmp' % (dirs[d], int(subdirs[subd])))
            scipy.misc.imsave(savepath, avg_img)

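# A more idiomatic alternative to the accumulation loop in compute_average().
# Illustrative only (not used by the original scripts); assumes every image in
# `paths` shares the same HxWxC shape.
def average_images(paths):
    stack = np.stack([scipy.misc.imread(p).astype(np.float64) for p in paths])
    return stack.mean(axis=0)
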
def drawplot(csvpath):
    """Plot per-file curves via read_files() for every CSV in csvpath."""
    files = natsorted(glob.glob(os.path.join(csvpath, '*.csv')))
    for f in range(len(files)):
        read_files(files[f], csvpath)

def main(_):
    width_size = 905
    height_size = 565
    #width_size = 1104
    #height_size = 764
    #width_size = 1123
    #height_size = 900
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists('./logs_multifreq_noskip'):
        os.makedirs('./logs_multifreq_noskip')
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          num_block=FLAGS.num_block, dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, batch_size=1, num_block=FLAGS.num_block,
                         ir_image_shape=[None, None, 1], dataset_name=FLAGS.dataset,
                         is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
        print('deep model test')
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            print('1: Estimating normal maps from arbitrary objects')
            print('2: Estimating normal maps according to object tilt angles only (light direction fixed, e.g. 3)')
            print('3: Estimating normal maps according to light directions and object tilt angles')
            VAL_OPTION = int(input('Select an evaluation mode: '))
            if VAL_OPTION == 1:  # arbitrary dataset
                print('Computing arbitrary dataset')
                trained_models = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                model = os.path.basename(trained_models[6])
                print('Load trained network: %s' % model)
                dcgan.load(FLAGS.checkpoint_dir, model)
                datapath = '/research3/datain/gmchoe_normal/0403/IR_0.25'
                #datapath = '/research3/dataout/ECCV_2018/2130/centerview'
                #datapath = '/research2/proposal_linux/dataset/coin'
                savepath = datapath
                mean_nir = -0.3313
                img_files = natsorted(glob.glob(os.path.join(datapath, '*.png')))
                #listdir = natsorted(os.listdir(datapath))
                for idx in range(len(img_files)):
                    print('Processing %d/%d' % (idx + 1, len(img_files)))
                    input_ = scipy.misc.imread(img_files[idx], 'F').astype(float)
                    height_size = input_.shape[0]
                    width_size = input_.shape[1]
                    #input_ = scipy.misc.imresize(input_, [565, 905])
                    input_ = np.reshape(input_, (height_size, width_size, 1))  # LF size: 383 x 552
                    #input_ = np.power(input_, 0.6)
                    # Split the NIR image into a low-frequency (Gaussian-blurred)
                    # component and a high-frequency residual detail component.
                    nondetail_input_ = ndimage.gaussian_filter(input_, sigma=(1, 1, 0), order=0)
                    input_ = input_ / 127.5 - 1.0
                    nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize to [-1, 1]
                    detail_input_ = input_ - nondetail_input_
                    nondetail_input_ = np.reshape(nondetail_input_, (1, height_size, width_size, 1))
                    detail_input_ = np.reshape(detail_input_, (1, height_size, width_size, 1))
                    start_time = time.time()
                    sample = sess.run(dcgan.G,
                                      feed_dict={dcgan.nondetail_images: nondetail_input_,
                                                 dcgan.detail_images: detail_input_})
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)
                    # Normalize the predicted normals to unit length, then map to [0, 1].
                    norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                    norm = np.expand_dims(norm, axis=-1)
                    output = sample / norm
                    output = (output + 1.) / 2.
                    if not os.path.exists(os.path.join(savepath, 'result')):
                        os.makedirs(os.path.join(savepath, 'result'))
                    savename = os.path.join(savepath, 'result/%s.bmp' % img_files[idx][-10:])
                    #savename = os.path.join(savepath, 'single_normal_%02d.bmp' % (idx + 1))
                    #savename = os.path.join(savepath, '%s/%s/%s/single_normal.bmp' % (FLAGS.dataset, model, listdir[idx]))
                    scipy.misc.imsave(savename, output)
            elif VAL_OPTION == 2:  # light source fixed
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                savepath = './skip/L1_ang'
                load = dcgan.load(FLAGS.checkpoint_dir)
                if load:
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):
                            print('Selected material %03d/%d' % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = np.reshape(input_, (600, 800, 1))
                            nondetail_input_ = ndimage.gaussian_filter(input_, sigma=(1, 1, 0), order=0)
                            input_ = input_ / 127.5 - 1.0
                            nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize to [-1, 1]
                            detail_input_ = input_ - nondetail_input_
                            nondetail_input_ = np.reshape(nondetail_input_, (1, 600, 800, 1))
                            detail_input_ = np.reshape(detail_input_, (1, 600, 800, 1))
                            start_time = time.time()
                            sample = sess.run([dcgan.G],
                                              feed_dict={dcgan.nondetail_images: nondetail_input_,
                                                         dcgan.detail_images: detail_input_})
                            print('time: %.8f' % (time.time() - start_time))
                            sample = np.squeeze(sample).astype(np.float32)
                            # Normalize the predicted normals channel-wise to unit length.
                            norm = np.sqrt(np.sum(np.power(sample, 2), axis=2, keepdims=True))
                            output = sample / norm
                            output[output == np.inf] = 0.0
                            sample = (output + 1.) / 2.
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal.bmp' % (list_val[idx], idx2))
                            scipy.misc.imsave(savename, sample)
                else:
                    print('Failed to load network')
            elif VAL_OPTION == 3:  # depends on light sources
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                mean_nir = -0.3313  # in [-1, 1]
                savepath = './angle_light_result'
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                load = dcgan.load(FLAGS.checkpoint_dir)
                if load:
                    print('Load success')
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):      # tilt angles 1~9
                            for idx3 in range(1, 13):  # light sources 1~12
                                print('Selected material %03d/%d' % (list_val[idx], idx2))
                                img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                                input_ = scipy.misc.imread(img + '/%d.bmp' % idx3).astype(np.float32)  # input NIR image
                                input_ = scipy.misc.imresize(input_, [600, 800], 'nearest')
                                input_ = np.reshape(input_, (600, 800, 1))
                                nondetail_input_ = ndimage.gaussian_filter(input_, sigma=(1, 1, 0), order=0)
                                input_ = input_ / 127.5 - 1.0
                                nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize to [-1, 1]
                                detail_input_ = input_ - nondetail_input_
                                nondetail_input_ = np.reshape(nondetail_input_, (1, 600, 800, 1))
                                detail_input_ = np.reshape(detail_input_, (1, 600, 800, 1))
                                start_time = time.time()
                                sample = sess.run([dcgan.G],
                                                  feed_dict={dcgan.nondetail_images: nondetail_input_,
                                                             dcgan.detail_images: detail_input_})
                                sample = np.squeeze(sample[-1]).astype(np.float32)
                                print('time: %.8f' % (time.time() - start_time))
                                norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                                norm = np.expand_dims(norm, axis=-1)
                                output = sample / norm
                                output = (output + 1.) / 2.
                                outdir = os.path.join(savepath, '%s/%03d/%d' % (FLAGS.dataset, list_val[idx], idx2))
                                if not os.path.exists(outdir):
                                    os.makedirs(outdir)
                                savename = os.path.join(
                                    savepath, '%s/%03d/%d/single_normal_%03d.bmp'
                                    % (FLAGS.dataset, list_val[idx], idx2, idx3))
                                scipy.misc.imsave(savename, output)
                else:
                    print('Failed to load network')

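# The unit-length normalization step recurs in every evaluation branch of these
# scripts. An editorial helper that factors it out, guarding against zero-length
# normals up front instead of patching inf values afterwards:
def normalize_normals(sample, eps=1e-10):
    """Scale an HxWx3 predicted normal map so every pixel's normal has unit length."""
    norm = np.sqrt(np.sum(np.square(sample), axis=2, keepdims=True))
    return sample / np.maximum(norm, eps)
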
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(os.path.join('./logs', time.strftime('%d%m'))):
        os.makedirs(os.path.join('./logs', time.strftime('%d%m')))
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            fcn = FCN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                      dataset_name=FLAGS.dataset, is_crop=FLAGS.is_crop,
                      checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            fcn = EVAL(sess, batch_size=1, rgb_image_shape=[224, 224, 3],
                       dataset_name=FLAGS.dataset, is_crop=False,
                       checkpoint_dir=FLAGS.checkpoint_dir)
        if FLAGS.is_train:
            fcn.train(FLAGS)
        else:
            VGG_mean = [103.939, 116.779, 123.68]  # VGG BGR channel means (kept from the original script)
            with open('db/Oxford_data1_RGB_train.txt') as f:
                train_rgblist = f.readlines()
            with open('db/Oxford_data1_depth_train.txt') as f:
                train_depthlist = f.readlines()
            # Pick 10 random training samples for a qualitative check.
            shuf = list(range(len(train_rgblist)))
            random.shuffle(shuf)
            shuf = shuf[:10]
            save_files = natsorted(glob.glob(
                os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'FCN.model*')))
            savepath = './Depth_seg'
            if not os.path.exists(savepath):
                os.makedirs(savepath)
            model = os.path.basename(save_files[-2])
            fcn.load(FLAGS.checkpoint_dir, model)
            for m in range(len(shuf)):
                rgbpath = train_rgblist[shuf[m]]
                rgb_img = scipy.misc.imread(rgbpath[:-1]).astype(np.float32)  # [:-1] strips the newline
                rgb_img = center_crop(rgb_img, 224)
                rgb_img = np.reshape(rgb_img, (1, 224, 224, 3))
                depthpath = train_depthlist[shuf[m]]
                depth_img = sio.loadmat(depthpath[:-1])['depth']
                depth_img = np.reshape(depth_img, [depth_img.shape[0], depth_img.shape[1], 1])
                depth_img = center_crop(depth_img, 224)
                start_time = time.time()
                predict = sess.run(fcn.pred_seg,
                                   feed_dict={fcn.rgb_images: rgb_img, fcn.keep_prob: 1.0})
                predict = np.squeeze(predict).astype(np.float32)
                print('time: %.8f' % (time.time() - start_time))
                if not os.path.exists(os.path.join(savepath, model)):
                    os.makedirs(os.path.join(savepath, model))
                savename = os.path.join(savepath, '%s/predict_%03d.jpg' % (model, shuf[m]))
                scipy.misc.imsave(savename, predict.astype(np.uint8))
                savename = os.path.join(savepath, '%s/gt_%03d.jpg' % (model, shuf[m]))
                scipy.misc.imsave(savename, np.squeeze(depth_img).astype(np.uint8))

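# center_crop() is imported from the repo's utilities and not shown here. A
# minimal sketch of the behavior the FCN evaluation above assumes (take the
# central crop_size x crop_size window of an HxW[xC] array):
def center_crop_sketch(img, crop_size):
    top = int(round((img.shape[0] - crop_size) / 2.0))
    left = int(round((img.shape[1] - crop_size) / 2.0))
    return img[top:top + crop_size, left:left + crop_size]
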
def main(_):
    width_size = 905
    height_size = 565
    #width_size = 1104
    #height_size = 764
    #width_size = 1123
    #height_size = 900
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                          num_block=FLAGS.num_block, dataset_name=FLAGS.dataset,
                          is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, batch_size=1, num_block=FLAGS.num_block,
                         ir_image_shape=[None, None, 1], dataset_name=FLAGS.dataset,
                         is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
        print('deep model test')
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            print('1: Estimating normal maps from arbitrary objects')
            print('2: Estimating normal maps of the NIR dataset')
            VAL_OPTION = int(input('Select an evaluation mode: '))
            if VAL_OPTION == 1:  # arbitrary dataset
                print('Computing arbitrary dataset')
                trained_models = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                model = os.path.basename(trained_models[6])
                print('Load trained network: %s' % model)
                dcgan.load(FLAGS.checkpoint_dir, model)
                datapath = '/research3/datain/gmchoe_normal/0403/IR_0.25'
                savepath = datapath
                mean_nir = -0.3313
                img_files = natsorted(glob.glob(os.path.join(datapath, '*.png')))
                for idx in range(len(img_files)):
                    print('Processing %d/%d' % (idx + 1, len(img_files)))
                    input_ = scipy.misc.imread(img_files[idx], 'F').astype(float)
                    height_size = input_.shape[0]
                    width_size = input_.shape[1]
                    input_ = np.reshape(input_, (height_size, width_size, 1))  # LF size: 383 x 552
                    # Split the NIR image into a low-frequency (Gaussian-blurred)
                    # component and a high-frequency residual detail component.
                    nondetail_input_ = ndimage.gaussian_filter(input_, sigma=(1, 1, 0), order=0)
                    input_ = input_ / 127.5 - 1.0
                    nondetail_input_ = nondetail_input_ / 127.5 - 1.0  # normalize to [-1, 1]
                    detail_input_ = input_ - nondetail_input_
                    nondetail_input_ = np.reshape(nondetail_input_, (1, height_size, width_size, 1))
                    detail_input_ = np.reshape(detail_input_, (1, height_size, width_size, 1))
                    start_time = time.time()
                    sample = sess.run(dcgan.G,
                                      feed_dict={dcgan.nondetail_images: nondetail_input_,
                                                 dcgan.detail_images: detail_input_})
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)
                    # Normalize the predicted normals to unit length, then map to [0, 1].
                    norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                    norm = np.expand_dims(norm, axis=-1)
                    output = sample / norm
                    output = (output + 1.) / 2.
                    if not os.path.exists(os.path.join(savepath, 'result')):
                        os.makedirs(os.path.join(savepath, 'result'))
                    savename = os.path.join(savepath, 'result/%s.bmp' % img_files[idx][-10:])
                    #savename = os.path.join(savepath, 'single_normal_%02d.bmp' % (idx + 1))
                    scipy.misc.imsave(savename, output)
            elif VAL_OPTION == 2:  # light source fixed, multi-view patches
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                load, iteration = dcgan.load(FLAGS.checkpoint_dir)
                savepath = './singleview_nir/L2ang/%d' % iteration
                obj = 1
                count = 1
                if load:
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):  # tilt angles
                            print('Selected material %03d/%d' % (list_val[idx], idx2))
                            img = './dataset/multi-view/testdata_3579/%03d/%03d/patch_%06d.mat' % (obj, idx2, count)
                            input_ = sio.loadmat(img)['input_'].astype(float)
                            #input_ = input_[:, :, 0:3]
                            input_ = np.reshape(input_, (1, 600, 800, 4))
                            input_ = input_ / 127.5 - 1.0
                            start_time = time.time()
                            sample = sess.run([dcgan.G], feed_dict={dcgan.images: input_})
                            print('time: %.8f' % (time.time() - start_time))
                            sample = np.squeeze(sample).astype(np.float32)
                            # Normalize the predicted normals to unit length, then map to [0, 1].
                            norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                            norm = np.expand_dims(norm, axis=-1)
                            output = sample / norm
                            output = (output + 1.) / 2.
                            outdir = os.path.join(savepath, '%03d/%03d' % (list_val[idx], idx2))
                            if not os.path.exists(outdir):
                                os.makedirs(outdir)
                            savename = os.path.join(savepath, '%03d/%03d/multiview_normal.bmp' % (list_val[idx], idx2))
                            scipy.misc.imsave(savename, output)
                            count = count + 1
                        obj = obj + 1
                else:
                    print('Failed to load network')

def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(os.path.join('./logs', time.strftime('%d%m'))):
        os.makedirs(os.path.join('./logs', time.strftime('%d%m')))
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            dcgan = DCGAN(sess, batch_size=FLAGS.batch_size,
                          dataset_name=FLAGS.dataset, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            dcgan = EVAL(sess, batch_size=1, ir_image_shape=[256, 256, 1],
                         normal_image_shape=[256, 256, 3], dataset_name=FLAGS.dataset,
                         checkpoint_dir=FLAGS.checkpoint_dir)
        print('deep model test')
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            print('1: Estimating normal maps from arbitrary objects')
            print('2: Estimating normal maps according to object tilt angles only (light direction fixed, e.g. 3)')
            print('3: Estimating normal maps according to light directions and object tilt angles')
            VAL_OPTION = int(input('Select an evaluation mode: '))
            if VAL_OPTION == 1:  # arbitrary dataset
                print('Computing arbitrary dataset')
                trained_models = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                datapath = '/research2/Ammonight/*.bmp'
                savepath = '/research2/Ammonight/output'
                mean_nir = -0.3313
                fulldatapath = glob.glob(datapath)
                model = os.path.basename(trained_models[4])
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(fulldatapath)):
                    input_ = scipy.misc.imread(fulldatapath[idx]).astype(float)
                    input_ = scipy.misc.imresize(input_, [600, 800])
                    input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                    input_ = np.reshape(input_, (1, input_.shape[0], input_.shape[1], 1))
                    input_ = np.array(input_).astype(np.float32)
                    mask = (input_ > -1.0).astype(np.float32)  # foreground mask
                    mean_mask = mask * mean_nir
                    #input_ = input_ - mean_mask
                    start_time = time.time()
                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)
                    # Normalize the predicted normals to unit length, then map to [0, 1].
                    norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                    norm = np.expand_dims(norm, axis=-1)
                    output = sample / norm
                    output[output == np.inf] = 0.0
                    sample = (output + 1.0) / 2.0
                    name = os.path.splitext(os.path.basename(fulldatapath[idx]))[0]
                    savename = savepath + '/normal_' + name + '.bmp'
                    scipy.misc.imsave(savename, sample)
            elif VAL_OPTION == 2:  # light source fixed
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                save_files = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                savepath = './RMSS_ang_scale_loss_result'
                # Evaluate every other saved checkpoint.
                for model_idx in range(0, len(save_files), 2):
                    model = os.path.basename(save_files[model_idx])
                    dcgan.load(FLAGS.checkpoint_dir, model)
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):
                            print('Selected material %03d/%d' % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                            gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                            gt_ = scipy.misc.imresize(gt_, [600, 800])
                            gt_ = np.reshape(gt_, (1, 600, 800, 3))
                            gt_ = np.array(gt_).astype(np.float32)
                            input_ = np.reshape(input_, (1, 600, 800, 1))
                            input_ = np.array(input_).astype(np.float32)
                            start_time = time.time()
                            sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                            print('time: %.8f' % (time.time() - start_time))
                            sample = np.squeeze(sample).astype(np.float32)
                            # Normalize the predicted normals channel-wise to unit length.
                            norm = np.sqrt(np.sum(np.power(sample, 2), axis=2, keepdims=True))
                            output = sample / norm
                            output[output == np.inf] = 0.0
                            sample = (output + 1.) / 2.
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal_%s.bmp' % (list_val[idx], idx2, model))
                            scipy.misc.imsave(savename, sample)
            elif VAL_OPTION == 3:  # depends on light sources
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                selec_model = [-2]
                mean_nir = -0.3313  # in [-1, 1]
                save_files = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                savepath = './Deconv_L1_result'
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                for m in range(len(selec_model)):
                    model = os.path.basename(save_files[selec_model[m]])
                    print('Load model: %s' % model)
                    dcgan.load(FLAGS.checkpoint_dir, model)
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):      # tilt angles 1~9
                            for idx3 in range(1, 13):  # light sources 1~12
                                print('Selected material %03d/%d' % (list_val[idx], idx2))
                                img = '/research2/ECCV_dataset_resized/save%03d/%d' % (list_val[idx], idx2)
                                input_ = scipy.misc.imread(img + '/%d.bmp' % idx3).astype(float)  # input NIR image
                                input_ = scipy.misc.imresize(input_, [256, 256])
                                input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1]
                                input_ = np.reshape(input_, (1, 256, 256, 1))
                                input_ = np.array(input_).astype(np.float32)
                                gt_ = scipy.misc.imread(img + '/12_Normal.bmp').astype(float)
                                gt_ = np.sum(gt_, axis=2)
                                gt_ = scipy.misc.imresize(gt_, [256, 256])
                                gt_ = np.reshape(gt_, [1, 256, 256, 1])
                                mask = (gt_ > 0.0).astype(np.float32)  # foreground mask
                                mean_mask = mean_nir * mask
                                #input_ = input_ - mean_mask
                                start_time = time.time()
                                sample = sess.run(dcgan.sampler,
                                                  feed_dict={dcgan.ir_images: input_,
                                                             dcgan.keep_prob: 1.0})
                                print('time: %.8f' % (time.time() - start_time))
                                sample = np.squeeze(sample).astype(np.float32)
                                norm = np.sqrt(np.sum(np.power(sample, 2), axis=2))
                                norm = np.expand_dims(norm, axis=-1)
                                output = sample / norm
                                output = (output + 1.) / 2.
                                outdir = os.path.join(savepath, '%03d/%d/%s' % (list_val[idx], idx2, model))
                                if not os.path.exists(outdir):
                                    os.makedirs(outdir)
                                savename = os.path.join(
                                    savepath, '%03d/%d/%s/single_normal_%03d.bmp'
                                    % (list_val[idx], idx2, model, idx3))
                                scipy.misc.imsave(savename, output)

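# Several branches above build a foreground mask and a mean-NIR offset but leave
# the subtraction commented out. A sketch of that disabled preprocessing step as
# a standalone helper (mean_nir = -0.3313 as in the scripts; background pixels
# sit at -1.0 after the [-1, 1] normalization):
def subtract_mean_nir(input_, mean_nir=-0.3313, background=-1.0):
    mask = (input_ > background).astype(np.float32)  # 1.0 on foreground pixels
    return input_ - mask * mean_nir
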
def main(_):
    pp.pprint(flags.FLAGS.__flags)
    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)
    if not os.path.exists(FLAGS.sample_dir):
        os.makedirs(FLAGS.sample_dir)
    if not os.path.exists(os.path.join('./logs', time.strftime('%d%m'))):
        os.makedirs(os.path.join('./logs', time.strftime('%d%m')))
    gpu_config = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_config)) as sess:
        if FLAGS.is_train:
            if FLAGS.model == "narrow":
                dcgan = DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                              input_size=FLAGS.input_size, dataset_name=FLAGS.dataset,
                              is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
            else:
                dcgan = Deep_DCGAN(sess, image_size=FLAGS.image_size, batch_size=FLAGS.batch_size,
                                   input_size=FLAGS.input_size, dataset_name=FLAGS.dataset,
                                   is_crop=FLAGS.is_crop, checkpoint_dir=FLAGS.checkpoint_dir)
        else:
            if FLAGS.model == "narrow":
                dcgan = EVAL(sess, input_size=600, batch_size=1, ir_image_shape=[None, None, 1],
                             normal_image_shape=[None, None, 3], dataset_name=FLAGS.dataset,
                             is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
            else:
                dcgan = Deep_EVAL(sess, input_size=600, batch_size=1, ir_image_shape=[None, None, 1],
                                  normal_image_shape=[None, None, 3], dataset_name=FLAGS.dataset,
                                  is_crop=False, checkpoint_dir=FLAGS.checkpoint_dir)
        if FLAGS.is_train:
            dcgan.train(FLAGS)
        else:
            OPTION = 2  # for validation
            list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
            VAL_OPTION = 3
            """
            if OPTION == 1:
                data = json.load(open("/research2/IR_normal_small/json/traininput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/traingt_single_224_ori_small.json"))
            elif OPTION == 2:
                data = json.load(open("/research2/IR_normal_small/json/testinput_single_224_ori_small.json"))
                data_label = json.load(open("/research2/IR_normal_small/json/testgt_single_224_ori_small.json"))
            """
            if VAL_OPTION == 1:
                model = 'DCGAN.model-10000'
                dcgan.load(FLAGS.checkpoint_dir, model)
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                for idx in range(len(list_val)):
                    if not os.path.exists(os.path.join('L1_loss_result', '%03d' % list_val[idx])):
                        os.makedirs(os.path.join('L1_loss_result', '%03d' % list_val[idx]))
                    for idx2 in range(1, 10):
                        print('Selected material %03d/%d' % (list_val[idx], idx2))
                        img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                        input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                        gt_ = scipy.misc.imread('/research2/IR_normal_small/save016/1/12_Normal.bmp').astype(float)
                        input_ = scipy.misc.imresize(input_, [600, 800])
                        input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1]
                        gt_ = scipy.misc.imresize(gt_, [600, 800])
                        #input_ = input_[240:840, 515:1315]
                        #gt_ = gt_[240:840, 515:1315]
                        input_ = np.reshape(input_, (1, 600, 800, 1))
                        gt_ = np.reshape(gt_, (1, 600, 800, 3))
                        input_ = np.array(input_).astype(np.float32)
                        gt_ = np.array(gt_).astype(np.float32)
                        start_time = time.time()
                        sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                        print('time: %.8f' % (time.time() - start_time))
                        sample = np.squeeze(sample).astype(np.float32)
                        gt_ = np.squeeze(gt_).astype(np.float32)
                        # Normalize the predicted normals channel-wise to unit length.
                        norm = np.sqrt(np.sum(np.power(sample, 2), axis=2, keepdims=True))
                        output = sample / norm
                        output[output == np.inf] = 0.0
                        sample = (output + 1.) / 2.
                        if not os.path.exists(os.path.join('L1_loss_result', '%03d/%d' % (list_val[idx], idx2))):
                            os.makedirs(os.path.join('L1_loss_result', '%03d/%d' % (list_val[idx], idx2)))
                        savename = './L1_loss_result/%03d/%d/single_normal_L1_%s.bmp' % (list_val[idx], idx2, model)
                        scipy.misc.imsave(savename, sample)
            elif VAL_OPTION == 2:  # arbitrary dataset
                print('Computing arbitrary dataset')
                trained_models = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                datapath = '/home/yjyoon/Dropbox/ECCV_result/smartphone/iphone/input/gray_*.bmp'
                savepath = '/home/yjyoon/Dropbox/ECCV_result/smartphone/iphone/output'
                fulldatapath = glob.glob(datapath)
                model = os.path.basename(trained_models[-2])
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(fulldatapath)):
                    #input_ = cv2.imread(fulldatapath[idx])
                    #input_ = cv2.cvtColor(input_, cv2.COLOR_BGR2YCR_CB)
                    #input_ = cv2.resize(input_[:, :, 0], (600, 800))
                    input_ = scipy.misc.imread(fulldatapath[idx]).astype(float)
                    input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                    input_ = np.reshape(input_, (1, input_.shape[0], input_.shape[1], 1))
                    input_ = np.array(input_).astype(np.float32)
                    start_time = time.time()
                    sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                    print('time: %.8f' % (time.time() - start_time))
                    sample = np.squeeze(sample).astype(np.float32)
                    # Normalize the predicted normals channel-wise to unit length.
                    norm = np.sqrt(np.sum(np.power(sample, 2), axis=2, keepdims=True))
                    output = sample / norm
                    output[output == np.inf] = 0.0
                    sample = (output + 1.) / 2.
                    name = os.path.splitext(os.path.basename(fulldatapath[idx]))[0]
                    savename = savepath + '/normal_' + name + '.bmp'
                    scipy.misc.imsave(savename, sample)
            elif VAL_OPTION == 3:  # light source fixed, sliding-window evaluation
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                save_files = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                savepath = './RMSS_ang_scale_loss_result'
                max_h = 600
                max_w = 800
                for model_idx in range(0, len(save_files), 2):
                    model = os.path.basename(save_files[model_idx])
                    dcgan.load(FLAGS.checkpoint_dir, model)
                    for idx in range(len(list_val)):
                        if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                            os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                        for idx2 in range(1, 10):
                            print('Selected material %03d/%d' % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/3.bmp').astype(float)
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = (input_ / 127.5) - 1.  # normalize to [-1, 1]
                            # Slide a max_h x max_w window over the image with 100 px
                            # of overlap, run the network on each crop, then fuse the
                            # crops back into a full-size map.
                            overlap = min(max_h, max_w) - 100
                            step_h = int(np.ceil(float(input_.shape[0]) / overlap))
                            step_w = int(np.ceil(float(input_.shape[1]) / overlap))
                            result = np.zeros((input_.shape[0], input_.shape[1], 3, step_h * step_w))
                            tmp_result = []
                            for h in range(0, input_.shape[0], overlap):
                                for w in range(0, input_.shape[1], overlap):
                                    crop_input_ = input_[h:h + max_h, w:w + max_w]
                                    crop_input_ = np.reshape(
                                        crop_input_, (1, crop_input_.shape[0], crop_input_.shape[1], 1))
                                    crop_input_ = np.array(crop_input_).astype(np.float32)
                                    start_time = time.time()
                                    sample = sess.run(dcgan.sampler,
                                                      feed_dict={dcgan.ir_images: crop_input_})
                                    print('time: %.8f' % (time.time() - start_time))
                                    sample = np.squeeze(sample).astype(np.float32)
                                    norm = np.sqrt(np.sum(np.square(sample), axis=2))
                                    norm = np.expand_dims(norm, -1)
                                    norm[norm == 0.0] = 1e-10  # avoid division by zero
                                    output = sample / norm
                                    tmp_result.append(output)
                            result = recovering_fullimage(tmp_result, result, overlap, max_h, max_w)
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal_%s.bmp' % (list_val[idx], idx2, model))
                            scipy.misc.imsave(savename, result)
            elif VAL_OPTION == 4:  # depends on light sources
                list_val = [11, 16, 21, 22, 33, 36, 38, 53, 59, 92]
                save_files = natsorted(glob.glob(
                    os.path.join(FLAGS.checkpoint_dir, FLAGS.dataset, 'DCGAN.model*')))
                savepath = './L1_ang_loss_lights_result'
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                model = os.path.basename(save_files[-2])
                dcgan.load(FLAGS.checkpoint_dir, model)
                for idx in range(len(list_val)):
                    if not os.path.exists(os.path.join(savepath, '%03d' % list_val[idx])):
                        os.makedirs(os.path.join(savepath, '%03d' % list_val[idx]))
                    for idx2 in range(1, 10):      # tilt angles 1~9
                        for idx3 in range(1, 13):  # light sources 1~12
                            print('Selected material %03d/%d' % (list_val[idx], idx2))
                            img = '/research2/IR_normal_small/save%03d/%d' % (list_val[idx], idx2)
                            input_ = scipy.misc.imread(img + '/%d.bmp' % idx3).astype(float)  # input NIR image
                            input_ = scipy.misc.imresize(input_, [600, 800])
                            input_ = input_ / 127.5 - 1.0  # normalize to [-1, 1]
                            input_ = np.reshape(input_, (1, 600, 800, 1))
                            input_ = np.array(input_).astype(np.float32)
                            start_time = time.time()
                            sample = sess.run(dcgan.sampler, feed_dict={dcgan.ir_images: input_})
                            print('time: %.8f' % (time.time() - start_time))
                            sample = np.squeeze(sample).astype(np.float32)
                            # Normalize the predicted normals channel-wise to unit length.
                            norm = np.sqrt(np.sum(np.power(sample, 2), axis=2, keepdims=True))
                            output = sample / norm
                            output[output == np.inf] = 0.0
                            sample = (output + 1.) / 2.
                            if not os.path.exists(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2))):
                                os.makedirs(os.path.join(savepath, '%03d/%d' % (list_val[idx], idx2)))
                            savename = os.path.join(savepath, '%03d/%d/single_normal_%d.bmp' % (list_val[idx], idx2, idx3))
                            scipy.misc.imsave(savename, sample)


def recovering_fullimage(output, result, overlap, max_h, max_w):
    """Scatter the per-crop network outputs back onto the full-size canvas.

    output: list of crops (each HxWx3) in row-major scan order;
    result: zero-initialized (H, W, 3, num_crops) buffer.
    Overlapping regions are fused with a per-pixel max over the crop axis.
    """
    count = 0
    full_h, full_w = result.shape[0], result.shape[1]
    for h in range(0, full_h, overlap):
        for w in range(0, full_w, overlap):
            crop = output[count]
            result[h:h + crop.shape[0], w:w + crop.shape[1], :, count] = crop
            count += 1
    return np.max(result, axis=3)

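# recovering_fullimage() fuses overlapping crops with a per-pixel max over the
# stacked canvas. An editorial alternative that averages the nonzero
# contributions instead, which avoids seams when neighboring crops disagree:
def fuse_crops_mean(result):
    hits = (result != 0.0).sum(axis=3).astype(np.float32)  # crops covering each pixel
    hits = np.maximum(hits, 1.0)                           # avoid division by zero
    return result.sum(axis=3) / hits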