def save_image(input_image, save_path, debug=True, vis=False):
    save_path = safepath(save_path)
    mkdir_if_missing(save_path)
    if debug:
        assert isimage(input_image), 'input data is not image format'
        assert is_path_exists_or_creatable(save_path), 'save path is not correct'

    pil_image = Image.fromarray(input_image)
    # imsave(save_path, input_image)
    pil_image.save(save_path)
def generate_sequence(img_path, net, save_path, debug=True, vis=True):
    if not is_path_exists(save_path):
        mkdir_if_missing(save_path)
    if debug:
        assert is_path_exists(img_path) and isfile(img_path), 'input image path is not correct'
    mkdir_if_missing(os.path.join(save_path, 'images'))
    mkdir_if_missing(os.path.join(save_path, 'activations'))
    mkdir_if_missing(os.path.join(save_path, 'features'))

    img = Image.open(img_path)
    rot_list = range(0, 360)        # rotate through 360 degrees
    index = 0
    for rot_tmp in rot_list:
        print('processing RE-Pooling visualization %d/%d' % (index + 1, len(rot_list)))
        img_rot = np.array(img.rotate(rot_tmp).convert('L')).astype('float32') / 255.
        save_path_tmp = os.path.join(save_path, 'images', 'image_%03d.jpg' % rot_tmp)
        visualize_save_image(image=img_rot, vis=vis, save=True, save_path=save_path_tmp, debug=debug)

        inputdata = preprocess_image_caffe([img_rot], debug=debug)      # process as caffe input
        net.blobs['data'].data[...] = inputdata
        net.forward()
        activation = deepcopy(net.blobs['activations'].data)
        feature = deepcopy(net.blobs['features'].data)

        activationlist = unpreprocess_image_caffe(activation, debug=debug)
        save_path_tmp = os.path.join(save_path, 'activations', 'activations_%03d.jpg' % rot_tmp)
        visualize_save_image(image=activationlist[0], vis=vis, save=True, save_path=save_path_tmp, debug=debug)

        featurelist = unpreprocess_image_caffe(feature, debug=debug)
        save_path_tmp = os.path.join(save_path, 'features', 'features_%03d.jpg' % rot_tmp)
        visualize_save_image(image=featurelist[0], vis=vis, save=True, save_path=save_path_tmp, debug=debug)
        index += 1
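# A minimal usage sketch for generate_sequence (an illustration, not part of the toolbox).
# The model file paths below are hypothetical, and the deploy prototxt is assumed to expose
# blobs named 'data', 'activations' and 'features', which generate_sequence reads above.
def example_generate_sequence():
    import caffe                                                            # standard pycaffe API
    caffe.set_mode_cpu()
    net = caffe.Net('deploy.prototxt', 'model.caffemodel', caffe.TEST)      # hypothetical model files
    generate_sequence('../lena.png', net, save_path='./re_pooling_vis', debug=True, vis=False)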
def save_image(input_image, save_path, resize_factor=None, target_size=None, input_angle=0, warning=True, debug=True):
    '''
    save an image to a given path, with optional resizing and rotation applied first

    parameters:
        resize_factor:      a scalar
        target_size:        a list, tuple or numpy array with 2 elements, representing height and width
        input_angle:        a scalar, counterclockwise rotation in degrees
    '''
    save_path = safe_path(save_path, warning=warning, debug=debug)
    mkdir_if_missing(save_path)
    if debug:
        assert is_path_exists_or_creatable(save_path), 'the path is not good to save'
    np_image, _ = safe_image(input_image, warning=warning, debug=debug)
    if resize_factor is None and target_size is None:
        resize_factor = 1.0         # default: no resizing

    # preprocess the image before saving
    np_image = image_rotate(np_image, input_angle=input_angle, warning=warning, debug=debug)
    np_image = image_resize(np_image, resize_factor=resize_factor, target_size=target_size, warning=warning, debug=debug)

    # saving
    pil_image = Image.fromarray(np_image)
    pil_image.save(save_path)
def test_save_image():
    image_path = '../lena.png'
    img = load_image(image_path)
    assert img.shape == (512, 512, 3)
    mkdir_if_missing('./tmp/')

    print('basic')
    save_image(img, save_path='./tmp/basic.png')

    print('testing for resizing')
    save_image(img, save_path='./tmp/resizing.png', resize_factor=2.0)

    print('testing for target size')
    save_image(img, save_path='./tmp/target_size.png', target_size=[1033, 1033])

    print('testing for rotation')
    save_image(img, save_path='./tmp/rotating.png', input_angle=45)

    print('testing for out-of-range rotation')
    save_image(img, save_path='./tmp/out_rotating.png', input_angle=450)

    print('\n\nDONE! SUCCESSFUL!!\n')
def test_mkdir_if_missing():
    print('test repetitive')
    path = './mnt/dome/adhoc_0.5x/abd'
    mkdir_if_missing(path)

    print('test repetitive')
    path = './'
    mkdir_if_missing(path)

    print('test basic')
    path = 'test_folder'
    mkdir_if_missing(path)

    print('test recursive folder')
    path = 'test_folder1/test3/test4'
    mkdir_if_missing(path)

    print('test recursive file')
    path = 'test_folder1/test2/test3/te.txt'
    mkdir_if_missing(path)

    print('test edge case')
    try:
        path = 2
        mkdir_if_missing(path)
        sys.exit('\nwrong! never should be here\n\n')
    except AssertionError:
        print('the input should be a string')

    print('\n\nDONE! SUCCESSFUL!!\n')
def generate_hdf5(save_dir, data_src, data_name='data', batch_size=1, ext_filter='png',
                  label_src1=None, label_name1='label', label_preprocess_function1=identity, label_range1=None,
                  label_src2=None, label_name2='label2', label_preprocess_function2=identity, label_range2=None,
                  debug=True, vis=False):
    '''
    create data in hdf5 format from an image source

    parameters:
        data_src:       source of image data, which can be a list of image paths, a txt file containing a list of image paths,
                        a folder containing a set of images, or a list of numpy array image data
        label_src:      source of label data, which can be None, a file containing a set of labels, a dictionary of labels,
                        a 1-d numpy array, or a list of label data
        save_dir:       where to store the hdf5 data
        batch_size:     how many images to store in a single hdf5 file
        ext_filter:     what format of data to use for generating hdf5 data
    '''
    # parse input
    assert is_path_exists_or_creatable(save_dir), 'save path should be a folder to save all hdf5 files'
    mkdir_if_missing(save_dir)
    assert isstring(data_name), 'dataset name is not correct'       # name for hdf5 data

    # convert data source to a list of numpy array image data
    if isfolder(data_src):
        print('data is loading from %s with extension .%s' % (data_src, ext_filter))
        filelist, num_data = load_list_from_folder(data_src, ext_filter=ext_filter)
        datalist = None
    elif isfile(data_src):
        print('data is loading from %s with extension .%s' % (data_src, ext_filter))
        filelist, num_data = load_list_from_file(data_src)
        datalist = None
    elif islist(data_src):
        if debug:
            assert all(isimage(data_tmp) for data_tmp in data_src), 'input data source is not a list of numpy array image data'
        datalist = data_src
        num_data = len(datalist)
        filelist = None
    else:
        assert False, 'data source format is not correct.'
    if debug:
        assert (datalist is None and filelist is not None) or (filelist is None and datalist is not None), 'data is not correct'
        if datalist is not None:
            assert len(datalist) == num_data, 'number of data is not equal'
        if filelist is not None:
            assert len(filelist) == num_data, 'number of data is not equal'

    # convert label source 1 to a dictionary or a list of labels
    if label_src1 is None:
        labeldict1 = None
        labellist1 = None
    elif isfile(label_src1):
        assert is_path_exists(label_src1), 'file not found'
        _, _, ext = fileparts(label_src1)
        assert ext == '.json', 'only json extension is supported'
        with open(label_src1, 'r') as label_file1:
            labeldict1 = json.load(label_file1)
        num_label1 = len(labeldict1)
        assert num_data == num_label1, 'number of data and label is not equal.'
        labellist1 = None
    elif isdict(label_src1):
        labeldict1 = label_src1
        labellist1 = None
    elif isnparray(label_src1):
        if debug:
            assert label_src1.ndim == 1, 'only 1-d label is supported'
        labeldict1 = None
        labellist1 = label_src1
    elif islist(label_src1):
        if debug:
            assert all(np.array(label_tmp).size == 1 for label_tmp in label_src1), 'only 1-d label is supported'
        labellist1 = label_src1
        labeldict1 = None
    else:
        assert False, 'label source format is not correct.'
    assert isfunction(label_preprocess_function1), 'label preprocess function is not correct.'

    # convert label source 2 to a dictionary or a list of labels
    if label_src2 is None:
        labeldict2 = None
        labellist2 = None
    elif isfile(label_src2):
        assert is_path_exists(label_src2), 'file not found'
        _, _, ext = fileparts(label_src2)
        assert ext == '.json', 'only json extension is supported'
        with open(label_src2, 'r') as label_file2:
            labeldict2 = json.load(label_file2)
        num_label2 = len(labeldict2)
        assert num_data == num_label2, 'number of data and label is not equal.'
        labellist2 = None
    elif isdict(label_src2):
        labeldict2 = label_src2
        labellist2 = None
    elif isnparray(label_src2):
        if debug:
            assert label_src2.ndim == 1, 'only 1-d label is supported'
        labeldict2 = None
        labellist2 = label_src2
    elif islist(label_src2):
        if debug:
            assert all(np.array(label_tmp).size == 1 for label_tmp in label_src2), 'only 1-d label is supported'
        labellist2 = label_src2
        labeldict2 = None
    else:
        assert False, 'label source format is not correct.'
    assert isfunction(label_preprocess_function2), 'label preprocess function is not correct.'

    # warm up
    if datalist is not None:
        size_data = datalist[0].shape
    else:
        size_data = imread(filelist[0]).shape

    if labeldict1 is not None:
        if debug:
            assert isstring(label_name1), 'label name is not correct'
        labels1 = np.zeros((batch_size, 1), dtype='float32')
        # label_value1 = [float(label_tmp_char) for label_tmp_char in labeldict1.values()]
        # label_range1 = np.array([min(label_value1), max(label_value1)])
    if labellist1 is not None:
        labels1 = np.zeros((batch_size, 1), dtype='float32')
        # label_range1 = [np.min(labellist1), np.max(labellist1)]
    if label_src1 is not None and debug:
        assert label_range1 is not None, 'label range is not correct'
        assert (labeldict1 is not None and labellist1 is None) or (labellist1 is not None and labeldict1 is None), 'label is not correct'

    if labeldict2 is not None:
        if debug:
            assert isstring(label_name2), 'label name is not correct'
        labels2 = np.zeros((batch_size, 1), dtype='float32')
        # label_value2 = [float(label_tmp_char) for label_tmp_char in labeldict2.values()]
        # label_range2 = np.array([min(label_value2), max(label_value2)])
    if labellist2 is not None:
        labels2 = np.zeros((batch_size, 1), dtype='float32')
        # label_range2 = [np.min(labellist2), np.max(labellist2)]
    if label_src2 is not None and debug:
        assert label_range2 is not None, 'label range is not correct'
        assert (labeldict2 is not None and labellist2 is None) or (labellist2 is not None and labeldict2 is None), 'label is not correct'

    # start generating
    count_hdf = 1               # count the number of hdf5 files
    clock = Timer()
    datalist_batch = list()
    for i in range(num_data):
        clock.tic()
        if filelist is not None:
            imagefile = filelist[i]
            _, name, _ = fileparts(imagefile)
            img = imread(imagefile).astype('float32')
            max_value = np.max(img)
            if max_value > 1 and max_value <= 255:
                img = img / 255.0           # [rows, cols, channels, number], scale the image data to (0, 1)
            if debug:
                min_value = np.min(img)
                assert min_value >= 0 and min_value <= 1, 'data is not in [0, 1]'
        if datalist is not None:
            img = datalist[i]
        if debug:
            assert size_data == img.shape
        datalist_batch.append(img)

        # process label
        if labeldict1 is not None:
            if debug:
                assert len(filelist) == len(labeldict1), 'file list is not equal to label dictionary'
            labels1[i % batch_size, 0] = float(labeldict1[name])
        if labellist1 is not None:
            labels1[i % batch_size, 0] = float(labellist1[i])
        if labeldict2 is not None:
            if debug:
                assert len(filelist) == len(labeldict2), 'file list is not equal to label dictionary'
            labels2[i % batch_size, 0] = float(labeldict2[name])
        if labellist2 is not None:
            labels2[i % batch_size, 0] = float(labellist2[i])

        # save to hdf5
        if i % batch_size == 0:
            data = preprocess_image_caffe(datalist_batch, debug=debug, vis=vis)     # swap channel, transfer from list of HxWxC to NxCxHxW

            # write to hdf5 format
            if filelist is not None:
                save_path = os.path.join(save_dir, '%s.hdf5' % name)
            else:
                save_path = os.path.join(save_dir, 'image_%010d.hdf5' % count_hdf)
            h5f = h5py.File(save_path, 'w')
            h5f.create_dataset(data_name, data=data, dtype='float32')
            if (labeldict1 is not None) or (labellist1 is not None):
                labels1 = label_preprocess_function1(data=labels1, data_range=label_range1, debug=debug)
                h5f.create_dataset(label_name1, data=labels1, dtype='float32')
                labels1 = np.zeros((batch_size, 1), dtype='float32')
            if (labeldict2 is not None) or (labellist2 is not None):
                labels2 = label_preprocess_function2(data=labels2, data_range=label_range2, debug=debug)
                h5f.create_dataset(label_name2, data=labels2, dtype='float32')
                labels2 = np.zeros((batch_size, 1), dtype='float32')
            h5f.close()
            count_hdf = count_hdf + 1
            del datalist_batch[:]
            if debug:
                assert len(datalist_batch) == 0, 'list has not been cleared'

        average_time = clock.toc()
        print('saving to %s: %d/%d, average time: %.3f, elapsed time: %s, estimated time remaining: %s' % (save_path, i + 1, num_data, average_time, format_time(average_time * i), format_time(average_time * (num_data - i))))

    return count_hdf - 1, num_data
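# A minimal sketch of a label preprocessing function and a generate_hdf5 call (illustrative only).
# Assumptions: the keyword signature below mirrors how generate_hdf5 invokes label_preprocess_function1,
# i.e. f(data=..., data_range=..., debug=...); the min-max normalization and the file paths are
# hypothetical examples, not part of the toolbox.
def normalize_label(data, data_range, debug=True):
    # linearly map labels from [data_range[0], data_range[1]] into [0, 1]
    if debug:
        assert data_range[1] > data_range[0], 'label range is degenerate'
    return (data - data_range[0]) / float(data_range[1] - data_range[0])

def example_generate_hdf5():
    # hypothetical folder of png images whose labels live in a json dict keyed by file name
    generate_hdf5(save_dir='./hdf5_data', data_src='./images', batch_size=1, ext_filter='png',
                  label_src1='./labels.json', label_name1='label',
                  label_preprocess_function1=normalize_label, label_range1=np.array([0.0, 360.0]))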
def facial_landmark_evaluation(pred_dict_all, anno_dict, num_pts, error_threshold, normalization_ced=True, normalization_vec=False,
                               covariance=True, display_list=None, truncated_list=None, mse=False, display_range=False, xlim=None, ylim=None,
                               debug=True, vis=False, save=True, save_path=None):
    '''
    evaluate the performance of facial landmark detection

    parameters:
        pred_dict_all:      a dictionary for all baseline methods. Each key is a method name and the value is the corresponding
                            prediction dictionary, whose keys are image paths and values are 2 x N prediction results
        anno_dict:          a dictionary whose keys are image paths and values are 2 x N annotation results
        num_pts:            number of points
        vis:                determine if visualizing the pck curve
        save:               determine if saving the visualization results
        save_path:          a directory to save all the results
        mse:                determine if computing and saving the mean square error statistics
        truncated_list, display_range, xlim, ylim:      forwarded to visualize_ced / visualize_pts (the defaults here are conservative assumptions)

    visualization:
        1. 2d pck curve (total and point specific) for all points for all methods
        2. point error vector (total and point specific) for all points and for all methods
        3. mean square error

    return:
        metrics_all:        a list of lists containing detailed metrics over all methods
        ptswise_mse:        a list of lists containing the average MSE over all keypoints for all methods
    '''
    num_methods = len(pred_dict_all)
    if debug:
        assert isdict(pred_dict_all) and num_methods > 0 and all(isdict(pred_dict) for pred_dict in pred_dict_all.values()), 'predictions result format is not correct'
        assert isdict(anno_dict), 'annotation result format is not correct'
        assert ispositiveinteger(num_pts), 'number of points is not correct'
        assert isscalar(error_threshold), 'error threshold is not correct'
        assert islogical(normalization_ced) and islogical(normalization_vec), 'normalization flag is not correct'
        if display_list is not None:
            assert len(display_list) == num_methods, 'display list is not correct %d vs %d' % (len(display_list), num_methods)

    num_images = len(list(pred_dict_all.values())[0])
    if debug:
        assert num_images > 0, 'the predictions are empty'
        assert num_images == len(anno_dict), 'number of images is not equal to number of annotations: %d vs %d' % (num_images, len(anno_dict))
        assert all(num_images == len(pred_dict) for pred_dict in pred_dict_all.values()), 'number of images in results from different methods are not equal'

    # calculate the normalized mean error for each single image based on point-to-point Euclidean distance normalized by the bounding box size
    # calculate the point error vector for each single image based on the error vector normalized by the bounding box size
    normed_mean_error_dict = dict()
    normed_mean_error_pts_specific_dict = dict()
    normed_mean_error_pts_specific_valid_dict = dict()
    pts_error_vec_dict = dict()
    pts_error_vec_pts_specific_dict = dict()
    for method_name, pred_dict in pred_dict_all.items():
        normed_mean_error_total = np.zeros((num_images, ), dtype='float32')
        normed_mean_error_pts_specific = np.zeros((num_images, num_pts), dtype='float32')
        normed_mean_error_pts_specific_valid = np.zeros((num_images, num_pts), dtype='bool')
        pts_error_vec = np.zeros((num_images, 2), dtype='float32')
        pts_error_vec_pts_specific = np.zeros((num_images, 2, num_pts), dtype='float32')
        count = 0
        count_skip_num_images = 0       # some images may have no annotation at all; no error is counted for those images and we track how many there are
        for image_path, pts_prediction in pred_dict.items():
            _, filename, _ = fileparts(image_path)
            pts_anno = anno_dict[filename]              # 2 x N annotation
            pts_keep_index = list(range(num_pts))

            # to avoid list object type, do conversion here
            if islist(pts_anno):
                pts_anno = np.asarray(pts_anno)
            if islist(pts_prediction):
                pts_prediction = np.asarray(pts_prediction)
            if debug:
                assert (is2dptsarray(pts_anno) or is2dptsarray_occlusion(pts_anno)) and pts_anno.shape[1] == num_pts, 'shape of annotations is not correct (%d x %d) vs (%d x %d)' % (2, num_pts, pts_anno.shape[0], pts_anno.shape[1])

            # if the annotation has 3 channels (includes an extra occlusion channel), we keep only the points with annotations
            # occlusion: -1 -> not annotated, 0 -> invisible, 1 -> visible; we keep both visible and invisible points
            if pts_anno.shape[0] == 3:
                pts_keep_index = np.where(np.logical_or(pts_anno[2, :] == 1, pts_anno[2, :] == 0))[0].tolist()
                if len(pts_keep_index) <= 0:            # if no point is annotated in the current image
                    count_skip_num_images += 1
                    continue
                pts_anno = pts_anno[0:2, pts_keep_index]
                pts_prediction = pts_prediction[:, pts_keep_index]

            # to avoid the point location including the score or occlusion channel, only take the first two channels here
            if pts_prediction.shape[0] == 3 or pts_prediction.shape[0] == 4:
                pts_prediction = pts_prediction[0:2, :]

            num_pts_tmp = len(pts_keep_index)
            if debug:
                assert pts_anno.shape[1] <= num_pts, 'number of points is not correct: %d vs %d' % (pts_anno.shape[1], num_pts)
                assert pts_anno.shape == pts_prediction.shape, 'shape of annotations and predictions are not the same {} vs {}'.format(print_np_shape(pts_anno, debug=debug), print_np_shape(pts_prediction, debug=debug))
            # print('number of points to keep is %d' % num_pts_tmp)

            # calculate bbox for normalization
            if normalization_ced or normalization_vec:
                assert len(pts_keep_index) == num_pts, 'some points are not annotated. Normalization on PCK curve is not allowed.'
                bbox_anno = pts2bbox(pts_anno, debug=debug)                     # 1 x 4
                bbox_TLWH = bbox_TLBR2TLWH(bbox_anno, debug=debug)              # 1 x 4
                bbox_size = math.sqrt(bbox_TLWH[0, 2] * bbox_TLWH[0, 3])        # scalar

            # calculate the normalized error for all points
            normed_mean_error, _ = pts_euclidean(pts_prediction, pts_anno, debug=debug)     # scalar
            if normalization_ced:
                normed_mean_error /= bbox_size
            normed_mean_error_total[count] = normed_mean_error

            if normed_mean_error == 0:
                print(pts_prediction)
                print(pts_anno)

            # calculate the normalized error point-specifically
            for pts_index in range(num_pts):
                if pts_index in pts_keep_index:         # if the current point is not annotated in the current image, just keep 0
                    normed_mean_error_pts_specific_valid[count, pts_index] = True
                else:
                    continue

                pts_index_from_keep_list = pts_keep_index.index(pts_index)
                pts_prediction_tmp = np.reshape(pts_prediction[:, pts_index_from_keep_list], (2, 1))
                pts_anno_tmp = np.reshape(pts_anno[:, pts_index_from_keep_list], (2, 1))
                normed_mean_error_pts_specific_tmp, _ = pts_euclidean(pts_prediction_tmp, pts_anno_tmp, debug=debug)
                if normalization_ced:
                    normed_mean_error_pts_specific_tmp /= bbox_size
                normed_mean_error_pts_specific[count, pts_index] = normed_mean_error_pts_specific_tmp

            # calculate the point error vector
            error_vector = pts_prediction - pts_anno        # 2 x num_pts_tmp
            if normalization_vec:
                error_vector /= bbox_size
            pts_error_vec_pts_specific[count, :, pts_keep_index] = np.transpose(error_vector)
            pts_error_vec[count, :] = np.sum(error_vector, axis=1) / num_pts_tmp

            count += 1

        assert count + count_skip_num_images == num_images, 'all cells in the array must be filled %d vs %d' % (count + count_skip_num_images, num_images)
        # print(normed_mean_error_total)
        # time.sleep(1000)

        # save results to dictionary
        normed_mean_error_dict[method_name] = normed_mean_error_total[:count]
        normed_mean_error_pts_specific_dict[method_name] = normed_mean_error_pts_specific[:count, :]
        normed_mean_error_pts_specific_valid_dict[method_name] = normed_mean_error_pts_specific_valid[:count, :]
        pts_error_vec_dict[method_name] = np.transpose(pts_error_vec[:count, :])                            # 2 x num_images
        pts_error_vec_pts_specific_dict[method_name] = pts_error_vec_pts_specific[:count, :, :]

    # calculate mean value
    if mse:
        mse_value = dict()          # dictionary recording the average MSE for each method
        mse_dict = dict()           # dictionary recording the point-wise MSE for each keypoint
        for method_name, error_array in normed_mean_error_dict.items():
            mse_value[method_name] = np.mean(error_array)
    else:
        mse_value = None

    # visualize the ced (cumulative error distribution curve)
    print('visualizing pck curve....\n')
    pck_savedir = os.path.join(save_path, 'pck')
    mkdir_if_missing(pck_savedir)
    pck_savepath = os.path.join(pck_savedir, 'pck_curve_overall.png')
    table_savedir = os.path.join(save_path, 'metrics')
    mkdir_if_missing(table_savedir)
    table_savepath = os.path.join(table_savedir, 'detailed_metrics_overall.txt')
    _, metrics_all = visualize_ced(normed_mean_error_dict, error_threshold=error_threshold, normalized=normalization_ced, truncated_list=truncated_list,
                                   title='2D PCK curve (all %d points)' % num_pts, display_list=display_list, debug=debug, vis=vis, save=save,
                                   pck_savepath=pck_savepath, table_savepath=table_savepath)
    metrics_title = ['Method Name / Point Index']
    ptswise_mse_table = [[list(normed_mean_error_pts_specific_dict.keys())[index_tmp]] for index_tmp in range(num_methods)]
    for pts_index in range(num_pts):
        metrics_title.append(str(pts_index + 1))
        normed_mean_error_dict_tmp = dict()
        for method_name, error_array in normed_mean_error_pts_specific_dict.items():
            normed_mean_error_pts_specific_valid_temp = normed_mean_error_pts_specific_valid_dict[method_name]
            # some points on certain images might not be annotated; when calculating MSE for a specific point,
            # we remove those images to avoid a "false" mean average error
            valid_array_per_pts_per_method = np.where(normed_mean_error_pts_specific_valid_temp[:, pts_index] == True)[0].tolist()
            error_array_per_pts = error_array[:, pts_index]
            error_array_per_pts = error_array_per_pts[valid_array_per_pts_per_method]
            num_image_tmp = len(valid_array_per_pts_per_method)
            normed_mean_error_dict_tmp[method_name] = np.reshape(error_array_per_pts, (num_image_tmp, ))
        pck_savepath = os.path.join(pck_savedir, 'pck_curve_pts_%d.png' % (pts_index + 1))
        table_savepath = os.path.join(table_savedir, 'detailed_metrics_pts_%d.txt' % (pts_index + 1))
        metrics_dict, _ = visualize_ced(normed_mean_error_dict_tmp, error_threshold=error_threshold, normalized=normalization_ced, truncated_list=truncated_list,
                                        display2terminal=False, title='2D PCK curve for point %d' % (pts_index + 1), display_list=display_list,
                                        debug=debug, vis=vis, save=save, pck_savepath=pck_savepath, table_savepath=table_savepath)
        for method_index in range(num_methods):
            method_name = list(normed_mean_error_pts_specific_dict.keys())[method_index]
            ptswise_mse_table[method_index].append('%.1f' % metrics_dict[method_name]['MSE'])

    # reorder the table
    order_index_list = [display_list.index(method_name_tmp) for method_name_tmp in normed_mean_error_pts_specific_dict.keys()]
    order_index_list = [0] + [order_index_tmp + 1 for order_index_tmp in order_index_list]

    # print table to terminal
    ptswise_mse_table = list_reorder([metrics_title] + ptswise_mse_table, order_index_list, debug=debug)
    table = AsciiTable(ptswise_mse_table)
    print('\nprint point-wise average MSE')
    print(table.table)

    # save table to file
    ptswise_savepath = os.path.join(table_savedir, 'pointwise_average_MSE.txt')
    table_file = open(ptswise_savepath, 'w')
    table_file.write(table.table)
    table_file.close()
    print('\nsave point-wise average MSE to %s' % ptswise_savepath)

    # visualize the error vector map
    print('visualizing error vector distribution map....\n')
    error_vec_save_dir = os.path.join(save_path, 'error_vec')
    mkdir_if_missing(error_vec_save_dir)
    savepath_tmp = os.path.join(error_vec_save_dir, 'error_vector_distribution_all.png')
    visualize_pts(pts_error_vec_dict, title='Point Error Vector Distribution (all %d points)' % num_pts, mse=mse, mse_value=mse_value,
                  display_range=display_range, display_list=display_list, xlim=xlim, ylim=ylim, covariance=covariance,
                  debug=debug, vis=vis, save=save, save_path=savepath_tmp)
    for pts_index in range(num_pts):
        pts_error_vec_pts_specific_dict_tmp = dict()
        for method_name, error_vec_dict in pts_error_vec_pts_specific_dict.items():
            pts_error_vec_pts_specific_valid = normed_mean_error_pts_specific_valid_dict[method_name]                           # get valid flag
            valid_image_index_per_pts = np.where(pts_error_vec_pts_specific_valid[:, pts_index] == True)[0].tolist()            # get images where the points with the current index are annotated
            pts_error_vec_pts_specific_dict_tmp[method_name] = np.transpose(error_vec_dict[valid_image_index_per_pts, :, pts_index])        # 2 x num_images
        savepath_tmp = os.path.join(error_vec_save_dir, 'error_vector_distribution_pts_%d.png' % (pts_index + 1))
        if mse:
            mse_dict_tmp = visualize_pts(pts_error_vec_pts_specific_dict_tmp, title='Point Error Vector Distribution for Point %d' % (pts_index + 1),
                                         mse=mse, display_range=display_range, display_list=display_list, xlim=xlim, ylim=ylim, covariance=covariance,
                                         debug=debug, vis=vis, save=save, save_path=savepath_tmp)
            mse_best = min(mse_dict_tmp.values())
            mse_single = dict()
            mse_single['mse'] = mse_best
            mse_single['num_images'] = len(valid_image_index_per_pts)       # assume the number of valid images is equal for all methods
            mse_dict[pts_index] = mse_single
        else:
            visualize_pts(pts_error_vec_pts_specific_dict_tmp, title='Point Error Vector Distribution for Point %d' % (pts_index + 1),
                          mse=mse, display_range=display_range, display_list=display_list, xlim=xlim, ylim=ylim, covariance=covariance,
                          debug=debug, vis=vis, save=save, save_path=savepath_tmp)

    # save mse to a json file for further use
    if mse:
        json_path = os.path.join(save_path, 'mse_pts.json')
        with open(json_path, 'w') as file:
            print('save mse for all keypoints to {}'.format(json_path))
            json.dump(mse_dict, file)

    print('\ndone!!!!!\n')
    return metrics_all, ptswise_mse_table
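# A minimal usage sketch for facial_landmark_evaluation (the synthetic data below is purely
# illustrative and not part of the toolbox). pred_dict_all maps each method name to a dictionary
# of {image path: 2 x num_pts array}; anno_dict is assumed to be keyed by the bare file name,
# since the body above looks annotations up via fileparts(image_path).
def example_facial_landmark_evaluation():
    num_pts = 68
    anno_dict = {'image_0001': np.random.rand(2, num_pts)}
    pred_dict_all = {
        'baseline_A': {'image_0001': np.random.rand(2, num_pts)},
        'baseline_B': {'image_0001': np.random.rand(2, num_pts)},
    }
    metrics_all, ptswise_mse_table = facial_landmark_evaluation(
        pred_dict_all, anno_dict, num_pts=num_pts, error_threshold=0.1,
        display_list=['baseline_A', 'baseline_B'], vis=False, save=True, save_path='./evaluation')
    return metrics_all, ptswise_mse_table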