def find_scoped_addons_path(addons_dir: os.PathLike):
    """Find all addons paths within a directory; the first-level sub-directories
    are expected to be DBUUIDs."""
    paths_map = {}
    for dbuuid in addons_dir.iterdir():
        if not dbuuid.is_dir():
            continue
        paths_map[dbuuid] = find_addons_path(dbuuid)
    return paths_map

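# Hypothetical usage sketch (not from the original source): build the per-database
# addons path mapping for a filestore-style layout. The root path below is an
# assumption for illustration only; `find_addons_path` is the helper referenced by
# the function above and must be available in the same module.
from pathlib import Path

addons_root = Path("/srv/filestore/addons")          # assumed <dbuuid>/<addon>/... layout
scoped_paths = find_scoped_addons_path(addons_root)  # {Path(<dbuuid>): find_addons_path(...)}
for dbuuid, addons_path in scoped_paths.items():
    print(dbuuid.name, addons_path)
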
def repository_root(path: PathLike = None) -> Path:
    if path is None:
        path = __file__
    if not isinstance(path, Path):
        path = Path(path)
    if path.is_file():
        path = path.parent
    if '.git' in (child.name for child in path.iterdir()) or path == path.parent:
        return path
    else:
        return repository_root(path.parent)

def check_reference_directory(
    test_directory: PathLike,
    reference_directory: PathLike,
    skip_lines: Dict[str, List[int]] = None,
):
    if not isinstance(test_directory, Path):
        test_directory = Path(test_directory)
    if not isinstance(reference_directory, Path):
        reference_directory = Path(reference_directory)
    if skip_lines is None:
        skip_lines = {}

    for reference_filename in reference_directory.iterdir():
        if reference_filename.is_dir():
            check_reference_directory(
                test_directory / reference_filename.name, reference_filename, skip_lines
            )
        else:
            test_filename = test_directory / reference_filename.name

            if reference_filename.suffix in [".h5", ".nc"]:
                reference_filesize = Path(reference_filename).stat().st_size
                test_filesize = Path(test_filename).stat().st_size

                diff = test_filesize - reference_filesize
                message = f'"{test_filesize}" != "{reference_filesize}"\n{diff}'
                assert reference_filesize == test_filesize, message
                continue

            with open(test_filename) as test_file, open(reference_filename) as reference_file:
                test_lines = list(test_file.readlines())
                reference_lines = list(reference_file.readlines())

            lines_to_skip = set()
            for file_mask, line_indices in skip_lines.items():
                if (
                    file_mask in str(test_filename)
                    or re.match(file_mask, str(test_filename))
                    and len(test_lines) > 0
                ):
                    try:
                        lines_to_skip.update(
                            line_index % len(test_lines) for line_index in line_indices
                        )
                    except ZeroDivisionError:
                        continue

            for line_index in sorted(lines_to_skip, reverse=True):
                del test_lines[line_index], reference_lines[line_index]

            cwd = Path.cwd()
            assert "\n".join(test_lines) == "\n".join(reference_lines), (
                f'"{os.path.relpath(test_filename, cwd)}" != '
                f'"{os.path.relpath(reference_filename, cwd)}"'
            )

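# Hypothetical usage sketch (not from the original source): compare a freshly
# generated output directory against a checked-in reference directory, skipping
# volatile lines such as timestamps. The directory names and skip patterns are
# assumptions for illustration only.
check_reference_directory(
    test_directory="tests/output/run_1",
    reference_directory="tests/reference/run_1",
    skip_lines={
        "config.nml": [0],   # assumed: first line carries a generation timestamp
        r".*\.log": [-1],    # negative indices wrap via the modulo applied above
    },
)
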
def evaluate_box_dir(
    pred_dir: PathLike,
    gt_dir: PathLike,
    classes: Sequence[str],
    save_dir: Optional[Path] = None,
) -> Tuple[Dict, Dict]:
    """
    Run box evaluation inside a directory

    Args:
        pred_dir: path to dir with predictions
        gt_dir: path to dir with ground truth data
        classes: classes present in dataset
        save_dir: optional path to save plots

    Returns:
        Dict[str, float]: dictionary with scalar values for evaluation
        Dict[str, np.ndarray]: dictionary with arrays, e.g. for visualization of graphs

    See Also:
        :class:`nndet.evaluator.registry.BoxEvaluator`
    """
    pred_dir = Path(pred_dir)
    gt_dir = Path(gt_dir)
    if save_dir is not None:
        save_dir.mkdir(parents=True, exist_ok=True)

    case_ids = [
        p.stem.rsplit('_boxes', 1)[0]
        for p in pred_dir.iterdir()
        if p.is_file() and p.stem.endswith("_boxes")
    ]
    logger.info(f"Found {len(case_ids)} cases for box evaluation in {pred_dir}")

    evaluator = BoxEvaluator.create(
        classes=classes,
        fast=False,
        verbose=False,
        save_dir=save_dir,
    )
    for case_id in case_ids:
        gt = np.load(str(gt_dir / f"{case_id}_boxes_gt.npz"), allow_pickle=True)
        pred = load_pickle(pred_dir / f"{case_id}_boxes.pkl")
        evaluator.run_online_evaluation(
            pred_boxes=[pred["pred_boxes"]],
            pred_classes=[pred["pred_labels"]],
            pred_scores=[pred["pred_scores"]],
            gt_boxes=[gt["boxes"]],
            gt_classes=[gt["classes"]],
            gt_ignore=None,
        )
    return evaluator.finish_online_evaluation()

def _validate_confd(confd: os.PathLike) -> None:
    if not confd.exists():
        raise NoConfigDirError(f"{confd} does not exist.")
    if not confd.is_dir():
        raise ConfigDirNoDirError(f"{confd} is not a directory.")
    if stat.S_IMODE(confd.lstat().st_mode) > 0o550:
        oct_str = oct(stat.S_IMODE(confd.lstat().st_mode))
        raise ConfigDirOwnershipError(
            f"{confd} ownership is {oct_str}, max allowed is `0o550`")

    for child in confd.iterdir():
        if not child.is_file():
            _log.warning(
                f"Config dir '{confd}' should only contain files, not '{child}'!")
            continue
        if child.suffix != ".json":
            _log.warning(
                f"Config dir '{confd}' should only contain json files, not '{child.name}'!")

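# Hypothetical usage sketch (not from the original source): validate a config
# directory before loading it, converting the custom exceptions raised above into
# a clean exit. The directory path and the exit handling are assumptions.
import sys
from pathlib import Path

try:
    _validate_confd(Path("/etc/myapp/conf.d"))
except (NoConfigDirError, ConfigDirNoDirError, ConfigDirOwnershipError) as exc:
    sys.exit(f"Invalid config directory: {exc}")
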
def evaluate_case_dir(
    pred_dir: PathLike,
    gt_dir: PathLike,
    classes: Sequence[str],
    target_class: Optional[int] = None,
) -> Tuple[Dict, Dict]:
    """
    Run evaluation of case results inside a directory

    Args:
        pred_dir: path to dir with predictions
        gt_dir: path to dir with ground truth data
        classes: classes present in dataset
        target_class: in case of multiple classes, specify a target class
            to evaluate in a target class vs rest setting

    Returns:
        Dict[str, float]: dictionary with scalar values for evaluation
        Dict[str, np.ndarray]: dictionary with arrays, e.g. for visualization of graphs

    See Also:
        :class:`nndet.evaluator.registry.CaseEvaluator`
    """
    pred_dir = Path(pred_dir)
    gt_dir = Path(gt_dir)

    case_ids = [
        p.stem.rsplit('_boxes', 1)[0]
        for p in pred_dir.iterdir()
        if p.is_file() and p.stem.endswith("_boxes")
    ]
    logger.info(f"Found {len(case_ids)} cases for case evaluation in {pred_dir}")

    evaluator = CaseEvaluator.create(
        classes=classes,
        target_class=target_class,
    )
    for case_id in case_ids:
        gt = np.load(str(gt_dir / f"{case_id}_boxes_gt.npz"), allow_pickle=True)
        pred = load_pickle(pred_dir / f"{case_id}_boxes.pkl")
        evaluator.run_online_evaluation(
            pred_classes=[pred["pred_labels"]],
            pred_scores=[pred["pred_scores"]],
            gt_classes=[gt["classes"]],
        )
    return evaluator.finish_online_evaluation()

def repository_root(path: PathLike = None) -> Path:
    """
    get the root directory of the current Git repository

    :param path: query path
    :return: repository root directory
    """
    if path is None:
        path = __file__
    if not isinstance(path, Path):
        path = Path(path)
    if path.is_file():
        path = path.parent
    if '.git' in (child.name for child in path.iterdir()) or path == path.parent:
        return path
    else:
        return repository_root(path.parent)

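# Hypothetical usage sketch (not from the original source): locate the repository
# root relative to the current file and build paths from it. The function walks
# parent directories until a `.git` entry (or the filesystem root) is found.
# The `tests/data` sub-path is assumed for illustration only.
repo_root = repository_root(__file__)
data_directory = repo_root / 'tests' / 'data'
print(repo_root, data_directory)
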
def check_reference_directory(
    test_directory: PathLike, reference_directory: PathLike, skip_lines: {str: [int]} = None
):
    if not isinstance(test_directory, Path):
        test_directory = Path(test_directory)
    if not isinstance(reference_directory, Path):
        reference_directory = Path(reference_directory)
    if skip_lines is None:
        skip_lines = {}

    for reference_filename in reference_directory.iterdir():
        if reference_filename.is_dir():
            check_reference_directory(
                test_directory / reference_filename.name, reference_filename, skip_lines
            )
        else:
            test_filename = test_directory / reference_filename.name

            with open(test_filename) as test_file, open(reference_filename) as reference_file:
                test_lines = list(test_file.readlines())
                reference_lines = list(reference_file.readlines())

            lines_to_skip = set()
            for file_mask, line_indices in skip_lines.items():
                if (
                    file_mask in str(test_filename)
                    or re.match(file_mask, str(test_filename))
                    and len(test_lines) > 0
                ):
                    try:
                        lines_to_skip.update(
                            line_index % len(test_lines) for line_index in line_indices
                        )
                    except ZeroDivisionError:
                        continue

            for line_index in sorted(lines_to_skip, reverse=True):
                del test_lines[line_index], reference_lines[line_index]

            cwd = Path.cwd()
            message = (
                f'"{os.path.relpath(test_filename, cwd)}" != '
                f'"{os.path.relpath(reference_filename, cwd)}"'
            )
            assert '\n'.join(test_lines) == '\n'.join(reference_lines), message

def evaluate_seg_dir(
    pred_dir: PathLike,
    gt_dir: PathLike,
    classes: Sequence[str],
) -> Tuple[Dict, None]:
    """
    Compute dice metric across a directory

    Args:
        pred_dir: path to dir with predictions
        gt_dir: path to dir with ground truth data
        classes: classes present in dataset

    Returns:
        Dict[str, float]: dictionary with scalar values for evaluation
        None

    See Also:
        :class:`nndet.evaluator.registry.PerCaseSegmentationEvaluator`
    """
    pred_dir = Path(pred_dir)
    gt_dir = Path(gt_dir)

    case_ids = [
        p.stem.rsplit('_seg', 1)[0]
        for p in pred_dir.iterdir()
        if p.is_file() and p.stem.endswith("_seg")
    ]
    logger.info(f"Found {len(case_ids)} cases for seg evaluation in {pred_dir}")

    evaluator = PerCaseSegmentationEvaluator.create(classes=classes)
    for case_id in case_ids:
        gt = np.load(str(gt_dir / f"{case_id}_seg_gt.npz"), allow_pickle=True)["seg"]  # 1, dims
        pred = load_pickle(pred_dir / f"{case_id}_seg.pkl")
        evaluator.run_online_evaluation(
            seg=pred[None],
            target=gt,
        )
    return evaluator.finish_online_evaluation()

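# Hypothetical usage sketch (not from the original source): run the three directory
# evaluators above over one prediction/ground-truth pair. The paths and the single
# "lesion" class are assumptions for illustration only.
pred_dir = "models/retina_unet/test_predictions"
gt_dir = "preprocessed/labelsTs"
classes = ["lesion"]

box_scalars, box_curves = evaluate_box_dir(pred_dir, gt_dir, classes, save_dir=None)
case_scalars, _ = evaluate_case_dir(pred_dir, gt_dir, classes, target_class=None)
seg_scalars, _ = evaluate_seg_dir(pred_dir, gt_dir, classes)
print({**box_scalars, **case_scalars, **seg_scalars})
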
def check_reference_directory(
    test_directory: PathLike, reference_directory: PathLike, skip_lines: {str: [int]} = None
):
    if not isinstance(test_directory, Path):
        test_directory = Path(test_directory)
    if not isinstance(reference_directory, Path):
        reference_directory = Path(reference_directory)
    if skip_lines is None:
        skip_lines = {}

    for reference_filename in reference_directory.iterdir():
        if reference_filename.is_dir():
            check_reference_directory(
                test_directory / reference_filename.name, reference_filename, skip_lines
            )
        else:
            test_filename = test_directory / reference_filename.name

            with open(test_filename) as test_file, open(reference_filename) as reference_file:
                test_lines = list(test_file.readlines())
                reference_lines = list(reference_file.readlines())

            diff = '\n'.join(Differ().compare(test_lines, reference_lines))
            message = f'"{test_filename}" != "{reference_filename}"\n{diff}'
            assert len(test_lines) == len(reference_lines), message

            lines_to_skip = set()
            for file_mask, line_indices in skip_lines.items():
                if file_mask in str(test_filename) or re.match(file_mask, str(test_filename)):
                    lines_to_skip.update(
                        line_index % len(test_lines) for line_index in line_indices
                    )

            for line_index in sorted(lines_to_skip, reverse=True):
                del test_lines[line_index], reference_lines[line_index]

            assert '\n'.join(test_lines) == '\n'.join(reference_lines), message