def fetch_from_images_in_directory(dir_path):
    print("Fetching for images in {}".format(dir_path))
    gt_polygons_dir_path = os.path.join(dir_path, GT_POLYGONS_DIR_NAME)
    if not os.path.exists(gt_polygons_dir_path):
        os.makedirs(gt_polygons_dir_path)
    images_dir_path = os.path.join(dir_path, IMAGE_DIR_NAME)
    image_filepaths = python_utils.get_filepaths(images_dir_path, IMAGE_EXTENSION)
    for i, image_filepath in enumerate(image_filepaths):
        image_basename = os.path.basename(image_filepath)
        image_name = os.path.splitext(image_basename)[0]
        print("Fetching for image {}. Progress: {}/{}".format(image_name, i + 1, len(image_filepaths)))
        gt_polygons_path = os.path.join(gt_polygons_dir_path, "{}.npy".format(image_name))
        if not os.path.exists(gt_polygons_path):
            gt_polygons = load_gt_polygons(image_filepath)
            if gt_polygons is not None:
                np.save(gt_polygons_path, gt_polygons)
            else:
                print("Fetching did not return any polygons. Skipping this one.")
        else:
            print("GT polygons were already fetched, skipping this one. (Delete the gt_polygons file to re-fetch.)")
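# Usage sketch (illustrative): fetch_from_images_in_directory relies on the
# module-level constants GT_POLYGONS_DIR_NAME, IMAGE_DIR_NAME and IMAGE_EXTENSION,
# plus a load_gt_polygons() helper defined elsewhere in this module, e.g.
# GT_POLYGONS_DIR_NAME = "gt_polygons", IMAGE_DIR_NAME = "images" (assumed values,
# not the repo's actual configuration). The directory path below is hypothetical:
if __name__ == "__main__":
    fetch_from_images_in_directory("data/dataset/train")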
def load_checkpoint(self, checkpoints_dirpath):
    """
    Loads the latest checkpoint found in checkpoints_dirpath.

    :param checkpoints_dirpath: directory containing "checkpoint*.tar" files
    :return: True if a checkpoint was loaded, None otherwise
    """
    try:
        filepaths = python_utils.get_filepaths(checkpoints_dirpath, endswith_str=".tar",
                                               startswith_str="checkpoint")
        if len(filepaths) == 0:
            return None
        filepaths = sorted(filepaths)
        filepath = filepaths[-1]  # Last checkpoint
        checkpoint = torch.load(filepath)
        self.model.load_state_dict(checkpoint['model_state_dict'])
        return True
    except NotADirectoryError:
        return None
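# Usage sketch, hedged: load_checkpoint is written as a method, so it needs an owner
# object with a .model attribute. The Checkpointer class below is hypothetical; only
# the file layout ("checkpoint*.tar" archives holding a 'model_state_dict' key) comes
# from the method itself.
import torch.nn as nn

class Checkpointer:
    def __init__(self, model):
        self.model = model

    load_checkpoint = load_checkpoint  # reuse the function above as a method

if Checkpointer(nn.Linear(2, 1)).load_checkpoint("runs/my_run/checkpoints") is None:
    print("No checkpoint found, starting from scratch.")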
def main():
    plt.figure(1, figsize=(7, 4))
    handles = []
    for source_params in SOURCE_PARAMS_LIST:
        if "plot_dashes" in source_params:
            plot_dashes = source_params["plot_dashes"]
        else:
            plot_dashes = (None, None)

        threshold_accuracies_filepath_list = python_utils.get_filepaths(
            source_params["path"], ACCURACIES_FILENAME_EXTENSION)
        threshold_accuracies_list = []
        for threshold_accuracies_filepath in threshold_accuracies_filepath_list:
            # The .npy files hold pickled dicts, so allow_pickle is needed on recent NumPy
            threshold_accuracies = np.load(threshold_accuracies_filepath, allow_pickle=True).item()
            threshold_accuracies_list.append(threshold_accuracies)

        # Plot main, min and max curves
        accuracies_list = []
        for threshold_accuracies in threshold_accuracies_list:
            accuracies_list.append(threshold_accuracies["accuracies"])
        accuracies_table = np.stack(accuracies_list, axis=0)
        accuracies_min = np.min(accuracies_table, axis=0)
        accuracies_average = np.mean(accuracies_table, axis=0)
        accuracies_max = np.max(accuracies_table, axis=0)
        if PLOT_AVERAGE:
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_average,
                     color=source_params["plot_color"], dashes=plot_dashes,
                     alpha=ALPHA_MAIN, label=source_params["name"])
        if PLOT_MIN_MAX:
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_min,
                     color=source_params["plot_color"], dashes=(6, 1),
                     alpha=ALPHA_MIN_MAX, label=source_params["name"])
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_max,
                     color=source_params["plot_color"], dashes=(6, 1),
                     alpha=ALPHA_MIN_MAX, label=source_params["name"])
        if PLOT_ALL:
            # Plot all individual curves:
            for threshold_accuracies in threshold_accuracies_list:
                plt.plot(threshold_accuracies["thresholds"], threshold_accuracies["accuracies"],
                         color=source_params["plot_color"], dashes=plot_dashes,
                         alpha=ALPHA_INDIVIDUAL, label=source_params["name"])

        # Legend
        handles.append(plt.Line2D([0], [0], color=source_params["plot_color"], dashes=plot_dashes))

    plt.grid(True)
    axes = plt.gca()
    axes.set_xlim([0, X_LIM])
    axes.set_ylim([0.0, 1.0])
    # plt.title("Fraction of vertices whose ground truth point distance is less than the threshold (higher is better)")
    plt.xlabel('Threshold (in pixels)')
    plt.ylabel('Fraction of vertices')

    # Add legend in top-left
    labels = [source_params["name"] for source_params in SOURCE_PARAMS_LIST]
    plt.legend(handles, labels)

    # Plot
    plt.tight_layout()
    plt.savefig(FILEPATH)
    plt.show()
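# The accuracies files read above are NumPy archives that store a Python dict
# (hence the .item() call to unwrap the 0-d object array). A minimal round-trip
# sketch of that pattern, with made-up values and the same keys as read above:
import numpy as np

threshold_accuracies = {
    "thresholds": np.linspace(0, 8, 17),       # assumed example values
    "accuracies": np.linspace(0.2, 0.95, 17),  # assumed example values
}
np.save("example.accuracies.npy", threshold_accuracies)  # dict is saved via pickle
loaded = np.load("example.accuracies.npy", allow_pickle=True).item()
assert set(loaded.keys()) == {"thresholds", "accuracies"}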
def main():
    plt.figure(1, figsize=(4, 4))
    for source_params in SOURCE_PARAMS_LIST:
        thresholds_ious_filepath_list = python_utils.get_filepaths(
            source_params["path"], IOUS_FILENAME_EXTENSION)
        thresholds_ious_list = []
        for thresholds_ious_filepath in thresholds_ious_filepath_list:
            # The .npy files hold pickled dicts, so allow_pickle is needed on recent NumPy
            thresholds_ious = np.load(thresholds_ious_filepath, allow_pickle=True).item()
            thresholds_ious_list.append(thresholds_ious)

        # Plot the average curve and print summary statistics
        ious_list = []
        for thresholds_ious in thresholds_ious_list:
            ious_list.append(thresholds_ious["ious"])
        ious_table = np.stack(ious_list, axis=0)
        ious_average = np.mean(ious_table, axis=0)
        ious_average_area = np.trapz(ious_average, thresholds_ious_list[0]["thresholds"])
        ious_average_max = np.max(ious_average)
        ious_average_midpoint = ious_average[ious_average.shape[0] // 2]
        print("ious_average_area = {}".format(ious_average_area))
        print("ious_average_max = {}".format(ious_average_max))
        print("ious_average_midpoint = {}".format(ious_average_midpoint))
        plt.plot(thresholds_ious_list[0]["thresholds"], ious_average,
                 color=source_params["plot_color"], alpha=ALPHA_MAIN,
                 label=source_params["name"])

        # Plot all individual curves:
        for thresholds_ious in thresholds_ious_list:
            plt.plot(thresholds_ious["thresholds"], thresholds_ious["ious"],
                     color=source_params["plot_color"], alpha=ALPHA_INDIVIDUAL,
                     label=source_params["name"])

    plt.grid(True)
    axes = plt.gca()
    axes.set_xlim([0.0, 1.0])
    axes.set_ylim([-0.01, 1.0])
    # plt.title("IoU relative to the mask threshold")
    plt.xlabel('Mask threshold')
    plt.ylabel('IoU')

    # Add legend in top-left
    handles = [plt.Line2D([0], [0], color=source_params["plot_color"])
               for source_params in SOURCE_PARAMS_LIST]
    labels = [source_params["name"] for source_params in SOURCE_PARAMS_LIST]
    plt.legend(handles, labels)

    # Plot
    plt.tight_layout()
    plt.savefig(FILEPATH)
    plt.show()
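# The three summary numbers printed above reduce each average IoU curve to a scalar.
# A small sketch with made-up values showing what each one measures:
import numpy as np

thresholds = np.linspace(0.0, 1.0, 5)
ious_average = np.array([0.55, 0.70, 0.72, 0.65, 0.40])  # assumed example curve
area = np.trapz(ious_average, thresholds)  # area under the curve: robustness across thresholds
best = np.max(ious_average)                # best achievable IoU over all mask thresholds
midpoint = ious_average[ious_average.shape[0] // 2]  # IoU at the middle (0.5) threshold
print(area, best, midpoint)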
def similarity_stats_1d(config, run_name, dataset_params, split_name, stats_params):
    working_dir = os.path.dirname(os.path.abspath(__file__))

    # Find data_dir
    data_dirpath = python_utils.choose_first_existing_path(config["data_dir_candidates"])
    if data_dirpath is None:
        print_utils.print_error("ERROR: Data directory not found!")
        exit()
    root_dir = os.path.join(data_dirpath, config["data_root_partial_dirpath"])

    # Setup run directory
    runs_dir = os.path.join(working_dir, config["runs_dirpath"])
    run_dirpath = None
    try:
        run_dirpath = run_utils.setup_run_dir(runs_dir, run_name)
    except ValueError:
        print_utils.print_error("Run name {} was not found. Aborting...".format(run_name))
        exit()

    # Instantiate dataset
    # ds = Synthetic1DDataset(root_dir=root_dir, params=dataset_params, split_name="test",
    #                         distribution="triangular")
    ds = Synthetic1DDataset(root_dir=root_dir, params=dataset_params,
                            split_name=split_name, transform=None)
    sample_count = len(ds)

    # Load grads and pred
    grads_dirpath = os.path.join(run_dirpath, "grads")
    grads_filepath_list = python_utils.get_filepaths(grads_dirpath, endswith_str=".npy",
                                                     startswith_str="grads.")
    grads_list = [np.load(grads_filepath)
                  for grads_filepath in tqdm(grads_filepath_list, desc="Loading grads")]
    pred_filepath_list = python_utils.get_filepaths(grads_dirpath, endswith_str=".npy",
                                                    startswith_str="pred.")
    pred_list = [np.load(pred_filepath)
                 for pred_filepath in tqdm(pred_filepath_list, desc="Loading pred")]

    # Create stats dir
    stats_dirpath = os.path.join(run_dirpath, "stats_1d")
    os.makedirs(stats_dirpath, exist_ok=True)

    # Soft estimate of the neighbor count
    neighbor_count, neighbor_count_no_normalization = \
        netsimilarity_utils.compute_soft_neighbor_count(grads_list)
    neighbors_filepath = os.path.join(stats_dirpath, "neighbors_soft.npy")
    np.save(neighbors_filepath, neighbor_count)
    neighbors_filepath = os.path.join(stats_dirpath, "neighbors_soft_no_normalization.npy")
    np.save(neighbors_filepath, neighbor_count_no_normalization)

    if not COMPUTE_ONLY_NEIGHBORS_SOFT:
        # Compute similarity matrix
        similarity_mat = netsimilarity_utils.compute_similarity_mat_1d(grads_list)

        # Compute neighbor counts with hard thresholding:
        for t in stats_params["neighbors_t"]:
            neighbor_count = netsimilarity_utils.compute_neighbor_count(similarity_mat, "hard", t=t)
            neighbors_filepath = os.path.join(stats_dirpath, "neighbors_hard_t_{}.npy".format(t))
            np.save(neighbors_filepath, neighbor_count)

        # Mix between hard and soft:
        for n in stats_params["neighbors_n"]:
            neighbor_count = netsimilarity_utils.compute_neighbor_count(similarity_mat, "less_soft", n=n)
            neighbors_filepath = os.path.join(stats_dirpath, "neighbors_less_soft_n_{}.npy".format(n))
            np.save(neighbors_filepath, neighbor_count)

    # Save inputs
    for key in ["alpha", "x", "density", "gt", "noise", "curvature"]:
        filepath = os.path.join(stats_dirpath, "{}.npy".format(key))
        values = [sample[key] for sample in ds]
        np.save(filepath, values)

    # Save outputs (renamed to avoid shadowing the comprehension variable)
    pred_filepath = os.path.join(stats_dirpath, "pred.npy")
    pred_values = [pred[0] for pred in pred_list]
    np.save(pred_filepath, pred_values)

    # Error
    error_filepath = os.path.join(stats_dirpath, "error.npy")
    error = [np.abs(sample["gt"] - pred[0])
             for sample, pred in zip(ds, pred_list)]
    np.save(error_filepath, error)

    # Losses
    logs_dirpath = os.path.join(run_dirpath, "logs")
    final_losses = python_utils.load_json(os.path.join(logs_dirpath, "final_losses.json"))
    train_loss_filepath = os.path.join(stats_dirpath, "train_loss.npy")
    np.save(train_loss_filepath, final_losses["train_loss"])
    val_loss_filepath = os.path.join(stats_dirpath, "val_loss.npy")
    np.save(val_loss_filepath, final_losses["val_loss"])
    loss_ratio_filepath = os.path.join(stats_dirpath, "loss_ratio.npy")
    np.save(loss_ratio_filepath, final_losses["val_loss"] / final_losses["train_loss"])
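# Sketch of the idea behind compute_soft_neighbor_count. This illustrates one
# plausible definition, not netsimilarity_utils' actual implementation: with one
# gradient vector per sample, pairwise cosine similarities form a similarity
# matrix, and summing each row yields a soft count of how many samples a given
# sample is "similar" to.
import numpy as np

def soft_neighbor_count_sketch(grads_list):
    grads = np.stack([g.ravel() for g in grads_list], axis=0)  # (n_samples, n_params)
    norms = np.linalg.norm(grads, axis=1, keepdims=True)
    normalized = grads / np.maximum(norms, 1e-12)              # unit gradient vectors
    similarity_mat = normalized @ normalized.T                 # cosine similarities
    return similarity_mat.sum(axis=1)                          # soft count per sample

counts = soft_neighbor_count_sketch([np.random.randn(10) for _ in range(5)])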
def main():
    plt.figure(1, figsize=(7, 4))
    handles = []
    for source_params in SOURCE_PARAMS_LIST:
        print("# --- {} --- #".format(source_params["name"]))

        if "plot_dashes" in source_params:
            plot_dashes = source_params["plot_dashes"]
        else:
            plot_dashes = (None, None)
        if "linewidth" in source_params:
            linewidth = source_params["linewidth"]
        else:
            linewidth = 1.5
        if "marker" in source_params:
            marker = source_params["marker"]
        else:
            marker = None

        threshold_accuracies_filepath_list = python_utils.get_filepaths(
            source_params["path"], ACCURACIES_FILENAME_EXTENSION)
        threshold_accuracies_list = []
        for threshold_accuracies_filepath in threshold_accuracies_filepath_list:
            # The .npy files hold pickled dicts, so allow_pickle is needed on recent NumPy
            threshold_accuracies = np.load(threshold_accuracies_filepath, allow_pickle=True).item()
            threshold_accuracies_list.append(threshold_accuracies)

        # Plot main, min and max curves
        accuracies_list = []
        for threshold_accuracies in threshold_accuracies_list:
            accuracies_list.append(threshold_accuracies["accuracies"])
        accuracies_table = np.stack(accuracies_list, axis=0)
        accuracies_min = np.min(accuracies_table, axis=0)
        accuracies_average = np.mean(accuracies_table, axis=0)
        accuracies_max = np.max(accuracies_table, axis=0)
        accuracies_std = np.std(accuracies_table, axis=0)
        accuracies_average_area = np.trapz(accuracies_average,
                                           threshold_accuracies_list[0]["thresholds"])
        if PLOT_AVERAGE:
            markers_on = list(range(0, len(accuracies_average), 4))  # mark every 4th point
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_average,
                     color=source_params["plot_color"], linewidth=linewidth,
                     marker=marker, markevery=markers_on, dashes=plot_dashes,
                     alpha=ALPHA_MAIN, label=source_params["name"])
            print("Area under average curve = {}".format(accuracies_average_area))
        if PLOT_MIN_MAX:
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_min,
                     color=source_params["plot_color"], dashes=(6, 1),
                     alpha=ALPHA_MIN_MAX, label=source_params["name"])
            plt.plot(threshold_accuracies_list[0]["thresholds"], accuracies_max,
                     color=source_params["plot_color"], dashes=(6, 1),
                     alpha=ALPHA_MIN_MAX, label=source_params["name"])
        if PLOT_STD:
            plt.fill_between(threshold_accuracies_list[0]["thresholds"],
                             accuracies_average - accuracies_std,
                             accuracies_average + accuracies_std,
                             color=source_params["plot_color"], alpha=ALPHA_STD,
                             label=source_params["name"])
        if PLOT_ALL:
            # Plot all individual curves:
            for threshold_accuracies in threshold_accuracies_list:
                plt.plot(threshold_accuracies["thresholds"], threshold_accuracies["accuracies"],
                         color=source_params["plot_color"], dashes=plot_dashes,
                         alpha=ALPHA_INDIVIDUAL, label=source_params["name"])

        # Legend
        handles.append(plt.Line2D([0], [0], color=source_params["plot_color"],
                                  linewidth=linewidth, marker=marker, dashes=plot_dashes))

    plt.grid(True)
    axes = plt.gca()
    axes.set_xlim([0, X_LIM])
    axes.set_ylim([0.0, 1.0])
    # plt.title("Fraction of vertices whose ground truth point distance is less than the threshold (higher is better)")
    plt.xlabel('Threshold $\\tau$ (in pixels)')
    plt.ylabel('Fraction of vertices')

    # Add legend in top-left
    labels = [source_params["name"] for source_params in SOURCE_PARAMS_LIST]
    plt.legend(handles, labels, numpoints=None)

    # Plot
    plt.tight_layout()
    plt.savefig(FILEPATH, dpi=300)
    plt.show()
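# These plotting scripts are driven by module-level configuration
# (SOURCE_PARAMS_LIST, FILEPATH, the PLOT_* flags, ...) and expose a main();
# presumably each one ends with the standard entry-point guard:
if __name__ == "__main__":
    main()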