def main(source_a, source_b, dataset, size=256, read_img_type='.png', write_img_type='.png'):
    # target_train_file = '../Data/{}/train/'.format(dataset)
    target_val_file = '../Data/{}/val/'.format(dataset)

    # if not os.path.isdir(target_train_file):
    #     os.makedirs(target_train_file)
    if not os.path.isdir(target_val_file):
        os.makedirs(target_val_file)

    data_a = utils.all_files_under(source_a, extension=read_img_type, sort=True)
    data_b = utils.all_files_under(source_b, extension=read_img_type, sort=True)
    print('Number of data A: {}'.format(len(data_a)))
    print('Number of data B: {}'.format(len(data_b)))

    # Make paired validation images
    for idx in range(len(data_a)):
        a_img = imread(data_a[idx], is_grayscale=True)
        b_img = imread(data_b[idx], is_grayscale=True)
        print(data_a[idx])
        print(data_b[idx])
        imsave(a_img, b_img, os.path.join(target_val_file, str(idx) + write_img_type), size=size)

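# Every snippet in this section relies on a project-local `utils.all_files_under`
# helper whose keyword names vary across projects (extension/endswith, condition,
# subfolder, append_path, sort). A minimal sketch compatible with the calls used
# here; treat it as an assumption, not the original implementation:
import os

def all_files_under(folder, subfolder=None, extension=None, endswith=None,
                    condition=None, append_path=True, sort=True):
    # Optionally descend into a fixed subfolder
    if subfolder:
        folder = os.path.join(folder, subfolder)
    suffix = extension or endswith
    names = [f for f in os.listdir(folder)
             if os.path.isfile(os.path.join(folder, f)) and
                (suffix is None or f.endswith(suffix)) and
                (condition is None or condition in f)]
    # Return full paths unless the caller only wants file names
    paths = [os.path.join(folder, f) for f in names] if append_path else names
    return sorted(paths) if sort else paths
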
def main(gt, method):
    # Read gt image addresses
    gt_names = utils.all_files_under(os.path.join('../', gt), extension='.jpg')
    # Read prediction image addresses
    filenames = utils.all_files_under(os.path.join('../', method), extension='.jpg')

    mae_method, rmse_method, psnr_method, ssim_method, pcc_method = [], [], [], [], []
    for idx_name in range(len(gt_names)):
        # Read gt and prediction image
        gt_img = cv2.imread(gt_names[idx_name], cv2.IMREAD_GRAYSCALE).astype(np.float32)
        pred_img = cv2.imread(filenames[idx_name], cv2.IMREAD_GRAYSCALE).astype(np.float32)

        # Check that gt and prediction image names match
        if gt_names[idx_name].split('p0')[-1] != filenames[idx_name].split('p0')[-1]:
            sys.exit(' [!] Image names do not match!')

        # Calculate error measures
        mae = utils.mean_absoulute_error(pred_img, gt_img)
        rmse = utils.root_mean_square_error(pred_img, gt_img)
        psnr = utils.peak_signal_to_noise_ratio(pred_img, gt_img)
        ssim = utils.structural_similarity_index(pred_img, gt_img)
        pcc = utils.pearson_correlation_coefficient(pred_img, gt_img)

        if np.mod(idx_name, 300) == 0:
            print('Method: {}, idx: {}'.format(method, idx_name))

        # Collect per-image results
        mae_method.append(mae)
        rmse_method.append(rmse)
        psnr_method.append(psnr)
        ssim_method.append(ssim)
        pcc_method.append(pcc)

    # Lists to np.array
    mae_method = np.asarray(mae_method)
    rmse_method = np.asarray(rmse_method)
    psnr_method = np.asarray(psnr_method)
    ssim_method = np.asarray(ssim_method)
    pcc_method = np.asarray(pcc_method)

    print(' MAE - mean: {:.3f}, std: {:.3f}'.format(np.mean(mae_method), np.std(mae_method)))
    print('RMSE - mean: {:.3f}, std: {:.3f}'.format(np.mean(rmse_method), np.std(rmse_method)))
    print('PSNR - mean: {:.3f}, std: {:.3f}'.format(np.mean(psnr_method), np.std(psnr_method)))
    print('SSIM - mean: {:.3f}, std: {:.3f}'.format(np.mean(ssim_method), np.std(ssim_method)))
    print(' PCC - mean: {:.3f}, std: {:.3f}'.format(np.mean(pcc_method), np.std(pcc_method)))

    data_list = [mae_method, rmse_method, psnr_method, ssim_method, pcc_method]
    write_to_csv(method, data_list, gt_names)

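# The snippet above assumes metric helpers living in a project-local `utils`
# module. A minimal sketch of the simpler ones (keeping the original helper
# names, including the `absoulute` spelling); SSIM and PCC are easiest to
# delegate to skimage/scipy rather than re-derive here:
import numpy as np

def mean_absoulute_error(pred, gt):
    # Mean of |pred - gt| over all pixels
    return np.mean(np.abs(pred - gt))

def root_mean_square_error(pred, gt):
    # Square root of the mean squared pixel error
    return np.sqrt(np.mean((pred - gt) ** 2))

def peak_signal_to_noise_ratio(pred, gt, max_value=255.0):
    # PSNR = 20 * log10(MAX / RMSE), assuming an 8-bit intensity range
    mse = np.mean((pred - gt) ** 2)
    return 20.0 * np.log10(max_value / np.sqrt(mse))
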
def _read_val_img_path(self):
    if self.mode == 0 or self.mode == 1:
        self.val_left_img_paths = utils.all_files_under(
            folder=os.path.join('../data', 'rg_' + self.domain + '_val_' + self.data),
            endswith=self.img_format,
            condition='L_')

    if self.mode == 0 or self.mode == 2:
        self.val_right_img_paths = utils.all_files_under(
            folder=os.path.join('../data', 'rg_' + self.domain + '_val_' + self.data),
            endswith=self.img_format,
            condition='R_')

    if self.mode == 0:
        assert len(self.val_left_img_paths) == len(self.val_right_img_paths)
        self.num_val = len(self.val_left_img_paths)
    elif self.mode == 1:
        self.num_val = len(self.val_left_img_paths)
    elif self.mode == 2:
        self.num_val = len(self.val_right_img_paths)
    else:
        raise NotImplementedError

def _load_celeba(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.celeba_train_path)
    self.num_trains = len(self.train_data)
    self.val_data = utils.all_files_under(self.celeba_val_path)
    self.num_vals = len(self.val_data)

    with open('../Data/' + self.flags.load_label, 'r') as f:
        celeba_attr_f = f.readlines()

    if self.flags.y_dim:
        # Map the '-1'/'1' attribute flags to 0/1 labels; the first two lines
        # of the attribute file are header lines and are skipped
        self.y_label = [[0 if i == '-1' else 1 for i in x.split()[1:]] for x in celeba_attr_f[2:]]

        train_temp = [[x] for x in self.train_data]
        train_label = [x for x in self.y_label[:self.num_trains]]
        self.train_data_with_label = np.concatenate([train_temp, train_label], axis=1)

        val_temp = [[x] for x in self.val_data]
        val_label = [x for x in self.y_label[self.num_trains:]]
        self.val_data_with_label = np.concatenate([val_temp, val_label], axis=1)
    else:
        self.y_label = None

    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

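# For reference, the CelebA attribute file (list_attr_celeba.txt) begins with
# a count line and a header line of attribute names, then one row per image,
# which is why the parser above skips celeba_attr_f[:2]:
#   202599
#   5_o_Clock_Shadow Arched_Eyebrows ... Young
#   000001.jpg -1  1  1 -1 ...
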
def read_val_data(self):
    bags, shoeses = [], []
    bags_val_path = utils.all_files_under(self.bags_val_path)
    shoes_val_path = utils.all_files_under(self.shoes_val_path)

    # Read bags data
    for bag_path in bags_val_path:
        _, bag = utils.load_data(bag_path, flip=False, is_test=True, is_gray_scale=False,
                                 transform_type='zero_center', img_size=self.ori_image_size)
        # scipy.misc.imresize returns uint8, so cv2.resize is used instead:
        # (256, 256, 3) to (64, 64, 3)
        bag = cv2.resize(bag, dsize=None, fx=0.25, fy=0.25)
        bags.append(bag)

    # Read shoes data
    for shoes_path in shoes_val_path:
        _, shoes = utils.load_data(shoes_path, flip=False, is_test=True, is_gray_scale=False,
                                   transform_type='zero_center', img_size=self.ori_image_size)
        # (256, 256, 3) to (64, 64, 3)
        shoes = cv2.resize(shoes, dsize=None, fx=0.25, fy=0.25)
        shoeses.append(shoes)

    self.data_x = np.asarray(bags).astype(np.float32)      # list to array
    self.data_y = np.asarray(shoeses).astype(np.float32)   # list to array

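# `utils.load_data` above is a project-local loader. A minimal sketch, assuming
# paired images stored side by side and 'zero_center' meaning scaling to
# [-1, 1] (the img / 127.5 - 1. convention used elsewhere in this collection);
# resizing to img_size is omitted here:
import cv2
import numpy as np

def load_data(img_path, flip=False, is_test=False, is_gray_scale=False,
              transform_type='zero_center', img_size=(256, 256, 3)):
    flag = cv2.IMREAD_GRAYSCALE if is_gray_scale else cv2.IMREAD_COLOR
    img = cv2.imread(img_path, flag).astype(np.float32)
    # Split a side-by-side pair into (left, right) halves
    w = img.shape[1] // 2
    img_a, img_b = img[:, :w], img[:, w:]
    if not is_test and flip and np.random.rand() < 0.5:
        img_a, img_b = img_a[:, ::-1], img_b[:, ::-1]
    if transform_type == 'zero_center':
        img_a, img_b = img_a / 127.5 - 1., img_b / 127.5 - 1.
    return img_a, img_b
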
def __init__(self, flags):
    run_config = tf.ConfigProto()
    run_config.gpu_options.allow_growth = True
    self.sess = tf.Session(config=run_config)

    self.flags = flags
    self.style_img_name = flags.style_img.split('/')[-1][:-4]
    self.content_target_paths = utils.all_files_under(self.flags.train_path)
    self.test_targets = utils.all_files_under(self.flags.test_path, extension='.jpg')
    self.test_target_names = utils.all_files_under(self.flags.test_path, append_path=False, extension='.jpg')
    self.test_save_paths = [os.path.join(self.flags.test_dir, self.style_img_name, file[:-4])
                            for file in self.test_target_names]

    self.num_contents = len(self.content_target_paths)
    self.num_iters = int(self.num_contents / self.flags.batch_size) * self.flags.epochs
    self.model = StyleTranser(self.sess, self.flags, self.num_iters)

    self.train_writer = tf.summary.FileWriter('logs/{}'.format(self.style_img_name), graph=self.sess.graph)
    self.saver = tf.train.Saver()

    self.sess.run(tf.global_variables_initializer())
    tf_utils.show_all_variables()

def _read_img_path(self):
    self.train_paths = utils.all_files_under(self.train_folder)
    self.val_paths = utils.all_files_under(self.val_folder)
    self.test_paths = utils.all_files_under(self.test_folder)

    self.num_train_imgs = len(self.train_paths)
    self.num_val_imgs = len(self.val_paths)
    self.num_test_imgs = len(self.test_paths)

def _read_img_path(self):
    # The generation task uses training and validation data together
    self.train_paths = utils.all_files_under(self.train_folder) + utils.all_files_under(self.val_folder)
    self.val_paths = []
    self.test_paths = utils.all_files_under(self.test_folder)

    self.num_train_imgs = len(self.train_paths)
    self.num_val_imgs = len(self.val_paths)
    self.num_test_imgs = len(self.test_paths)

def _load_vub(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.vub_train_path)
    self.num_trains = len(self.train_data)
    self.val_data = utils.all_files_under(self.vub_val_path)
    self.num_vals = len(self.val_data)
    print('Training set size: {}'.format(self.num_trains))
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def _load_fashion(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.fashion_train_path)
    self.num_trains = len(self.train_data)
    self.val_data = utils.all_files_under(self.fashion_val_path)
    self.num_vals = len(self.val_data)
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def main(_):
    os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu_index
    check_opts(FLAGS)

    style_name = ""
    img_paths = utils.all_files_under(FLAGS.in_path)
    out_paths = [os.path.join(FLAGS.out_path, style_name, file)
                 for file in utils.all_files_under(FLAGS.in_path, append_path=False)]

    feed_forward(img_paths, out_paths, FLAGS.checkpoint_dir)

def _load_data(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.train_data_path, extension='.png')
    self.val_data = utils.all_files_under(self.val_data_path, extension='.png')
    self.num_trains = len(self.train_data)
    self.num_vals = len(self.val_data)
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def _read_img_path(self):
    # Collect left/right image paths for the five classes,
    # stored as self.cls01_left ... self.cls05_right
    for i in range(5):
        folder = os.path.join('../data', 'cls_' + self.shape, self.shape + '_' + str(i))
        setattr(self, 'cls{:02d}_left'.format(i + 1),
                utils.all_files_under(folder=folder, endswith=self.img_format, condition='_L_'))
        setattr(self, 'cls{:02d}_right'.format(i + 1),
                utils.all_files_under(folder=folder, endswith=self.img_format, condition='_R_'))

    self._read_train_img_path()  # Read training img paths
    self._read_val_img_path()    # Read val img paths
    self._read_test_img_path()   # Read test img paths

def main(domain, data_type, img_format, num_attri=6):
    data_folder = os.path.join('../data', 'rg_' + domain + '_train_' + data_type)
    left_img_paths = utils.all_files_under(folder=data_folder, endswith=img_format, condition='L_')
    num_imgs = len(left_img_paths)

    data = np.zeros((num_imgs, num_attri), dtype=np.float32)
    min_max_data = np.zeros(num_attri * 2, dtype=np.float32)

    # Parse the six attributes encoded in each file name
    for i, img_path in enumerate(left_img_paths):
        X = float(img_path[img_path.find('_X') + 2:img_path.find('_Y')])
        Y = float(img_path[img_path.find('_Y') + 2:img_path.find('_Z')])
        Ra = float(img_path[img_path.find('_Ra') + 3:img_path.find('_Rb')])
        Rb = float(img_path[img_path.find('_Rb') + 3:img_path.find('_F')])
        F = float(img_path[img_path.find('_F') + 2:img_path.find('_D')])
        D = float(img_path[img_path.find('_D') + 2:img_path.find(img_format)])
        data[i, :] = np.asarray([X, Y, Ra, Rb, F, D])

    for i in range(num_attri):
        min_max_data[2 * i] = data[:, i].min()
        min_max_data[2 * i + 1] = data[:, i].max()

    for i, name in enumerate(['X', 'Y', 'Ra', 'Rb', 'F', 'D']):
        print('Min {}: {}'.format(name, min_max_data[2 * i]))
        print('Max {}: {}'.format(name, min_max_data[2 * i + 1]))

    np.save(data_folder, min_max_data)  # saves to data_folder + '.npy'

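# Hypothetical example of the file-name encoding the parser above assumes
# (exact names in the real dataset may differ); with img_format='.png', a path
# such as
#   L_img_X12.5_Y-3.0_Z0.0_Ra40.0_Rb20.0_F2.4_D7.5.png
# parses to X=12.5, Y=-3.0, Ra=40.0, Rb=20.0, F=2.4, D=7.5 (the _Z field is
# required as a delimiter but not kept).
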
def simple_test(self):
    size = 256
    folder = './test01'
    imgPaths = utils.all_files_under(folder, extension='.png')

    saveFolder = os.path.join(folder, 'results')
    if not os.path.exists(saveFolder):
        os.makedirs(saveFolder)

    for idx, imgPath in enumerate(imgPaths):
        img = cv2.imread(imgPath, cv2.IMREAD_GRAYSCALE)
        img = img / 127.5 - 1.          # zero-center to [-1, 1]
        img = img[:, :, np.newaxis]

        y_imgs = self.model.test_only([img])
        y_img = (y_imgs[0] + 1.) / 2.   # back to [0, 1]

        cv2.imshow('test', y_img)
        cv2.waitKey(0)

        # Write input and prediction side by side
        img = img[:, :, 0]
        y_img = y_img[:, :, 0]
        canvas = np.zeros((size, 2 * size), dtype=np.uint8)
        canvas[:, :size] = (255. * ((img + 1.) / 2.)).astype(np.uint8)
        canvas[:, -size:] = (255. * y_img).astype(np.uint8)
        cv2.imwrite(os.path.join(saveFolder, str(idx) + '.png'), canvas)

def _load_celeba(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.celeba_path)
    self.num_trains = len(self.train_data)
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def read_imgs_and_measurement():
    slice_list = []

    # Read gt image addresses
    gt_names = utils.all_files_under('../gt', extension='.jpg')
    for idx in range(len(gt_names)):
        if np.mod(idx, 300) == 0:
            print('Method: {}, Measure: {}, idx: {}'.format(args.method, args.measure, idx))

        img_name = os.path.basename(gt_names[idx])

        # Read gt and prediction image
        gt_img = cv2.imread(gt_names[idx], cv2.IMREAD_GRAYSCALE).astype(np.float32)
        pred_img = cv2.imread(os.path.join('../{}'.format(args.method), img_name),
                              cv2.IMREAD_GRAYSCALE).astype(np.float32)

        # Calculate the requested measurement
        measure_value = 0.
        if args.measure.lower() == 'mae':
            measure_value = utils.mean_absoulute_error(pred_img, gt_img)
        elif args.measure.lower() == 'rmse':
            measure_value = utils.root_mean_square_error(pred_img, gt_img)
        elif args.measure.lower() == 'psnr':
            measure_value = utils.peak_signal_to_noise_ratio(pred_img, gt_img)
        elif args.measure.lower() == 'ssim':
            measure_value = utils.structural_similarity_index(pred_img, gt_img)

        slice_list.append(SliceExample(img_name, args.method, measure_value))

    return slice_list

def read_imgs(num_objects, target_examples, h, w, data_folder='../test/generation/20191009-091833', num_examples=20):
    target_img_names = list()
    all_img_names = all_files_under(folder=data_folder, subfolder=None, endswith='.png')

    # Keep the first `target_examples` images out of every `num_examples` for each object
    for i, img_name in enumerate(all_img_names):
        if i % num_examples < target_examples:
            target_img_names.append(img_name)

    # Read real, mask, and fake imgs from the side-by-side result images
    real_imgs = np.zeros((target_examples * num_objects, h, w, 1), dtype=np.uint8)
    mask_imgs = np.zeros((target_examples * num_objects, h, w, 3), dtype=np.uint8)
    fake_imgs = np.zeros((target_examples * num_objects, h, w, 1), dtype=np.uint8)

    for i, img_name in enumerate(target_img_names):
        img = cv2.imread(img_name)
        real_img = img[:, :w, 1]       # left block: real image (single channel)
        mask_img = img[:, w:2 * w, :]  # middle block: mask (3 channels)
        fake_img = img[:, -w:, 1]      # right block: fake image (single channel)

        real_imgs[i, :, :, :] = np.expand_dims(real_img, axis=-1)
        mask_imgs[i, :, :, :] = mask_img
        fake_imgs[i, :, :, :] = np.expand_dims(fake_img, axis=-1)

    return real_imgs, mask_imgs, fake_imgs

def data_writer(inputDir, stage, outputName):
    dataPath = os.path.join(inputDir, '{}'.format(stage), 'paired')
    imgPaths = utils.all_files_under(folder=dataPath, subfolder='')
    numImgs = len(imgPaths)

    # Create tfrecords dir if it does not exist
    output_file = '{0}/{1}/{1}.tfrecords'.format(outputName, stage)
    if not os.path.isdir(os.path.dirname(output_file)):
        os.makedirs(os.path.dirname(output_file))

    # Dump to tfrecords file
    writer = tf.io.TFRecordWriter(output_file)
    for idx, img_path in enumerate(imgPaths):
        with tf.io.gfile.GFile(img_path, 'rb') as f:
            img_data = f.read()

        example = _convert_to_example(img_path, img_data)
        writer.write(example.SerializeToString())

        if np.mod(idx, 100) == 0:
            print('Processed {}/{}...'.format(idx, numImgs))

    print('Finished!')
    writer.close()

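# `_convert_to_example` is not shown in the snippet above. A minimal sketch of
# a compatible tf.train.Example builder (the feature keys are assumptions):
def _convert_to_example(img_path, img_data):
    def _bytes_feature(value):
        return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

    return tf.train.Example(features=tf.train.Features(feature={
        'image/file_name': _bytes_feature(os.path.basename(img_path).encode('utf-8')),
        'image/encoded_image': _bytes_feature(img_data),
    }))
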
def read_data(data_path_list):
    with open(data_path_list, 'r') as f:
        paths = f.readlines()

    json_obj = JsonData()
    overall_paths = list()
    overall_user_id = list()
    for i, path in enumerate(paths):
        path = path.strip()
        stage = os.path.dirname(path).split('/')[-2]
        img_paths = all_files_under(folder=path, subfolder=None, endswith='.png')
        overall_paths.extend(img_paths)
        print('{}: {} - num. of images: {}'.format(i, path, len(img_paths)))

        for j, img_path in enumerate(img_paths):
            # TODO: key should be adaptive
            _, user_id = json_obj.find_id(target=os.path.basename(img_path),
                                          data_set=stage,
                                          key='generative_images')
            overall_user_id.extend([user_id])

            if j % 500 == 0:
                print('Reading {}/{}...'.format(j, len(img_paths)))

    return overall_paths, overall_user_id

def __init__(self, FLAGS, LOGGER):
    self.input_size = (33, 33)
    self.FLAGS = FLAGS
    self.batch_size = FLAGS.batch_size

    self.image_dir = os.path.join('..', 'DataTrain', 'Images', 'input')
    self.label_dir = os.path.join('..', 'DataTrain', 'Images', 'label')
    self.checkdata(self.image_dir)
    self.test_dir = os.path.join('..', 'DataTest')

    self.train_names = utils.all_files_under(self.image_dir, extension='bmp', append_path=False)
    self.test_names = utils.all_files_under(self.test_dir, extension='bmp', append_path=False)

    # %% Implement batch fetcher
    self.train_batch_fetcher = TrainBatchFetcher(self.train_names, self.batch_size)

def _load_imagenet64(self):
    logger.info('Load {} dataset...'.format(self.dataset_name))
    self.train_data = utils.all_files_under(self.imagenet64_path, extension='.png')
    self.num_trains = len(self.train_data)
    logger.info('Load {} dataset SUCCESS!'.format(self.dataset_name))
    logger.info('Img size: {}'.format(self.image_size))
    logger.info('Num. of training data: {}'.format(self.num_trains))

def _load_ct_mri(self):
    print('Load {} dataset...'.format(self.dataset_name))
    self.image_size = (256, 256, 1)
    self.train_data = utils.all_files_under(self.ct_mri_path, extension='.png')
    self.num_trains = len(self.train_data)
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def _load_ct_mri(self):
    print('Load {} dataset...'.format(self.dataset_name))
    for idx, p_id in enumerate(self.person_id_list):
        data_path = utils.all_files_under(os.path.join(self.path_file, p_id))
        self.num_vals[idx] = len(data_path)
        self.data_path.append(data_path)
    print('Load {} dataset SUCCESS!'.format(self.dataset_name))

def main(methods, display_names, measure, num_cases_require):
    # Read gt image paths
    gt_names = utils.all_files_under('../gt', extension='.jpg')
    # Sort and group img paths according to subject id
    num_cases, case_dict = count_cases(gt_names)

    # Calculate the chosen measure for each case id
    mean_arrs, var_arrs = cal_meausre(methods, measure, case_dict, num_cases_require)

    # Horizontal bar plot
    horizontal_bar_plot(display_names, mean_arrs, var_arrs, num_cases_require, measure)

def main(data, temp_id, size=256, delay=0, is_save=False):
    save_folder = os.path.join(os.path.dirname(data), 'preprocessing')
    if is_save and not os.path.exists(save_folder):
        os.makedirs(save_folder)

    save_folder2 = os.path.join(os.path.dirname(data), 'post')
    if is_save and not os.path.exists(save_folder2):
        os.makedirs(save_folder2)

    # Read all file paths
    filenames = all_files_under(data, extension='png')

    # Read template image
    temp_filename = filenames[temp_id]
    ref_img = cv2.imread(temp_filename, cv2.IMREAD_GRAYSCALE)
    ref_img = ref_img[:, -size:].copy()
    _, ref_img = n4itk(ref_img)  # N4 bias correction for the reference image

    for idx, filename in enumerate(filenames):
        print('idx: {}, filename: {}'.format(idx, filename))

        img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        ct_img = img[:, :size]
        mr_img = img[:, -size:]

        # N4 bias correction
        ori_img, cor_img = n4itk(mr_img)
        # Dynamic histogram matching between two images
        his_mr = histogram_matching(cor_img, ref_img)
        # Mask estimation based on Otsu auto-thresholding
        mask = get_mask(his_mr, task='m2c')
        # Mask out the background
        masked_ct = ct_img & mask
        masked_mr = his_mr & mask

        canvas = imshow(ori_img, cor_img, his_mr, masked_mr, mask, ct_img, masked_ct, size=size, delay=delay)
        canvas2 = np.hstack((masked_mr, masked_ct, mask))

        if is_save:
            cv2.imwrite(os.path.join(save_folder, os.path.basename(filename)), canvas)
            cv2.imwrite(os.path.join(save_folder2, os.path.basename(filename)), canvas2)

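# `histogram_matching` and `get_mask` above are project helpers. A hedged
# sketch of compatible stand-ins (the real versions may add morphology or
# task-specific handling keyed by `task`):
import cv2
import numpy as np
from skimage.exposure import match_histograms

def histogram_matching(img, ref_img):
    # Match the intensity histogram of img to the reference image
    return match_histograms(img, ref_img).astype(np.uint8)

def get_mask(img, task='m2c'):
    # Otsu auto-thresholding to estimate the foreground mask (values 0 or 255)
    _, mask = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    return mask
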
def main(data, size=256, is_save=False, delay=0):
    save_folder = os.path.join(os.path.dirname(data), 'N4_bias_correction')
    if is_save and not os.path.exists(save_folder):
        os.makedirs(save_folder)

    filenames = all_files_under(data, extension='png')
    for idx, filename in enumerate(filenames):
        print('idx: {}, filename: {}'.format(idx, filename))

        img = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
        mr_img = img[:, -size:]
        ori_img, cor_img = n4itk(mr_img)

        canvas = imshow(ori_img, cor_img, size, delay)
        if is_save:
            imwrite(canvas, save_folder, filename)

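# `n4itk` in the two preprocessing snippets above is a project helper. A
# minimal sketch using SimpleITK's N4 bias field correction (the mask choice
# and the final casting are assumptions, not the author's exact implementation):
import numpy as np
import SimpleITK as sitk

def n4itk(img):
    # Returns (original, bias-corrected) uint8 images
    ori_img = img.copy()
    sitk_img = sitk.GetImageFromArray(img.astype(np.float32))
    mask_img = sitk.OtsuThreshold(sitk_img, 0, 1, 200)
    corrector = sitk.N4BiasFieldCorrectionImageFilter()
    cor_img = corrector.Execute(sitk_img, mask_img)
    cor_img = np.clip(sitk.GetArrayFromImage(cor_img), 0, 255).astype(np.uint8)
    return ori_img, cor_img
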
def get_ident_data(state="validation"):
    if state.lower() == "train":
        paths = ["../../Data/OpenEDS/Generative_Dataset",
                 "../../Data/OpenEDS/Sequence_Dataset"]
    elif state.lower() == "validation":
        paths = ["../../Data/OpenEDS/Semantic_Segmentation_Dataset"]
    else:
        raise NotImplementedError

    # Initialize JsonData to read all of the information from the json files
    json_obj = JsonData(is_statistics=False)

    full_img_paths = list()
    full_cls = list()
    num_imgs = 0
    for path in paths:
        for idx, (root, directories, files) in enumerate(os.walk(path)):
            for directory in directories:
                folder = os.path.join(root, directory)
                img_paths = utils.all_files_under(folder, subfolder=None, endswith='.png')

                if (len(img_paths) != 0) and ('paired' not in folder) and \
                        ('overfitting' not in folder) and ('train_expand' not in folder):
                    # Add img paths
                    full_img_paths.extend(img_paths)

                    data_set = os.path.basename(os.path.dirname(folder))
                    key = os.path.basename(path).lower().replace('dataset', 'images')
                    for img_path in img_paths:
                        # Read user id
                        flag, user_id = json_obj.find_id(target=os.path.basename(img_path),
                                                         data_set=data_set, key=key)
                        for item in json_obj.users_list:
                            if item['id'] == user_id:
                                # Add cls info of the image
                                full_cls.extend([item['cls']])

                        if not flag:
                            exit("Cannot find user id of the image {} !".format(img_path))

                    num_imgs += len(img_paths)

    print('Total number of collected images: {}'.format(num_imgs))
    return full_img_paths, full_cls

def main(data_path, num_tests=20, num_vals=10):
    statistics_num = list()
    files = all_files_under(data_path)

    for id_ in range(111, 234, 1):
        user_id = 'U' + str(id_)
        num = 0
        for img_path in files:
            if user_id in img_path:
                num += 1

        if num < num_tests + num_vals:
            print('ID: {}, num. of images is less than {}'.format(user_id, num_tests + num_vals))
            continue
        else:
            statistics_num.append(num)

    draw_fig(statistics_num)

def main(read_folder, left_pre_fix, save_folder):
    file_names = all_files_under(folder=read_folder, endswith='.jpg', condition=left_pre_fix)
    total_imgs = len(file_names)

    for i, file_name in enumerate(file_names):
        left_img_name = file_name
        right_img_name = (left_img_name.replace('A', 'B')).replace('L', 'R')

        left_img = cv2.imread(left_img_name)
        right_img = cv2.imread(right_img_name)

        # Build save paths, converting from .jpg to .png format
        save_left_path = os.path.join(save_folder, os.path.basename(left_img_name).replace('.jpg', '.png'))
        save_right_path = os.path.join(save_folder, os.path.basename(right_img_name).replace('.jpg', '.png'))

        cv2.imwrite(save_left_path, left_img)
        cv2.imwrite(save_right_path, right_img)

        if i % 200 == 0:
            print('Processing [{0:5}/{1:5}]...'.format(i, total_imgs))