def detect_blobs_blocks(filename_base, image5d, offset, size, channels,
                        verify=False, save_dfs=True, full_roi=False,
                        coloc=False):
    """Detect blobs by block processing of a large image.
    
    All channels are processed in the same blocks.
    
    Args:
        filename_base: Base path to use for file output.
        image5d: Large image to process as a Numpy array of t,z,y,x,[c].
        offset: Sub-image offset given as coordinates in z,y,x.
        size: Sub-image shape given in z,y,x.
        channels (Sequence[int]): Sequence of channels, where None detects
            in all channels.
        verify: True to verify detections against the truth database;
            defaults to False.
        save_dfs: True to save data frames to file; defaults to True.
        full_roi (bool): True to treat ``image5d`` as the full ROI;
            defaults to False.
        coloc (bool): True to perform blob co-localizations; defaults
            to False.
    
    Returns:
        tuple[int, int, int], str, :class:`magmap.cv.detector.Blobs`:
        Accuracy metrics from :class:`magmap.cv.detector.verify_rois`,
        feedback message from this same function, and detected blobs.
    
    """
    time_start = time()
    subimg_path_base = filename_base
    if size is None or offset is None:
        # use the entire stack if no size or offset is specified
        size = image5d.shape[1:4]
        offset = (0, 0, 0)
    else:
        # get base path for sub-image
        subimg_path_base = naming.make_subimage_name(
            filename_base, offset, size)
    filename_blobs = libmag.combine_paths(
        subimg_path_base, config.SUFFIX_BLOBS)
    
    # get ROI for the given region, including all channels
    if full_roi:
        # treat the full image as the ROI
        roi = image5d[0]
    else:
        roi = plot_3d.prepare_subimg(image5d, offset, size)
    num_chls_roi = 1 if len(roi.shape) < 4 else roi.shape[3]
    if num_chls_roi < 2:
        coloc = False
        print("Unable to co-localize as image has only 1 channel")
    
    # prep chunking of ROI into sub-ROIs with size based on segment_size,
    # scaled by physical units to be more independent of resolution; use
    # the profile from the first channel to be processed for block settings
    time_detection_start = time()
    settings = config.get_roi_profile(channels[0])
    print("Profile for block settings:", settings[settings.NAME_KEY])
    sub_roi_slices, sub_rois_offsets, denoise_max_shape, exclude_border, \
        tol, overlap_base, overlap, overlap_padding = setup_blocks(
            settings, roi.shape)
    
    # TODO: option to distribute groups of sub-ROIs to different servers
    # for blob detection
    seg_rois = StackDetector.detect_blobs_sub_rois(
        roi, sub_roi_slices, sub_rois_offsets, denoise_max_shape,
        exclude_border, coloc, channels)
    detection_time = time() - time_detection_start
    print("blob detection time (s):", detection_time)
    
    # prune blobs in overlapping portions of sub-ROIs
    time_pruning_start = time()
    segments_all, df_pruning = StackPruner.prune_blobs_mp(
        roi, seg_rois, overlap, tol, sub_roi_slices, sub_rois_offsets,
        channels, overlap_padding)
    pruning_time = time() - time_pruning_start
    print("blob pruning time (s):", pruning_time)
    #print("maxes:", np.amax(segments_all, axis=0))
    
    # get weighted mean of ratios
    if df_pruning is not None:
        print("\nBlob pruning ratios:")
        path_pruning = "blob_ratios.csv" if save_dfs else None
        df_pruning_all = df_io.data_frames_to_csv(
            df_pruning, path_pruning, show=" ")
        cols = df_pruning_all.columns.tolist()
        blob_pruning_means = {}
        if "blobs" in cols:
            blobs_unpruned = df_pruning_all["blobs"]
            num_blobs_unpruned = np.sum(blobs_unpruned)
            for col in cols[1:]:
                blob_pruning_means["mean_{}".format(col)] = [
                    np.sum(np.multiply(df_pruning_all[col], blobs_unpruned))
                    / num_blobs_unpruned]
            path_pruning_means = "blob_ratios_means.csv" if save_dfs else None
            df_pruning_means = df_io.dict_to_data_frame(
                blob_pruning_means, path_pruning_means, show=" ")
        else:
            print("no blob ratios found")
    
    '''# report any remaining duplicates
    np.set_printoptions(linewidth=500, threshold=10000000)
    print("all blobs (len {}):".format(len(segments_all)))
    sort = np.lexsort(
        (segments_all[:, 2], segments_all[:, 1], segments_all[:, 0]))
    blobs = segments_all[sort]
    print(blobs)
    print("checking for duplicates in all:")
    print(detector.remove_duplicate_blobs(blobs, slice(0, 3)))
    '''
    
    stats_detection = None
    fdbk = None
    colocs = None
    if segments_all is not None:
        # remove the duplicated elements that were used for pruning
        detector.replace_rel_with_abs_blob_coords(segments_all)
        if coloc:
            colocs = segments_all[:, 10:10+num_chls_roi].astype(np.uint8)
        # remove absolute coordinate and any co-localization columns
        segments_all = detector.remove_abs_blob_coords(segments_all)
        
        # compare detected blobs with truth blobs
        # TODO: assumes ground truth is relative to any ROI offset,
        # but should make customizable
        if verify:
            stats_detection, fdbk = verifier.verify_stack(
                filename_base, subimg_path_base, settings, segments_all,
                channels, overlap_base)
    
    if config.save_subimg:
        subimg_base_path = libmag.combine_paths(
            subimg_path_base, config.SUFFIX_SUBIMG)
        if (isinstance(config.image5d, np.memmap) and
                config.image5d.filename == os.path.abspath(subimg_base_path)):
            # file at the sub-image save path may have been opened as a
            # memmap file, in which case saving would fail
            libmag.warn("{} is currently open, cannot save sub-image"
                        .format(subimg_base_path))
        else:
            # write sub-image, which is in ROI (3D) format
            with open(subimg_base_path, "wb") as f:
                np.save(f, roi)
    
    # store blobs in a Blobs instance
    # TODO: consider separating into blobs and blobs metadata archives
    blobs = detector.Blobs(
        segments_all, colocalizations=colocs, path=filename_blobs)
    blobs.resolutions = config.resolutions
    blobs.basename = os.path.basename(config.filename)
    blobs.roi_offset = offset
    blobs.roi_size = size
    
    # whole image benchmarking time
    times = (
        [detection_time],
        [pruning_time],
        time() - time_start)
    times_dict = {}
    for key, val in zip(StackTimes, times):
        times_dict[key] = val
    if segments_all is None:
        print("\nNo blobs detected")
    else:
        print("\nTotal blobs found:", len(segments_all))
        detector.show_blobs_per_channel(segments_all)
    print("\nTotal detection processing times (s):")
    path_times = "stack_detection_times.csv" if save_dfs else None
    df_io.dict_to_data_frame(times_dict, path_times, show=" ")
    
    return stats_detection, fdbk, blobs
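
# A hypothetical usage sketch (not part of the original module): detect blobs
# in a sub-image of an already imported stack. The path and shapes below are
# placeholders, and ``config`` is assumed to have been populated (eg by the
# CLI) with profiles and resolutions before calling.
#
#   img5d = importer.read_file("/path/to/img.npy", series=0)
#   stats, fdbk, blobs = detect_blobs_blocks(
#       "/path/to/img", img5d.img, offset=(0, 0, 0), size=(64, 256, 256),
#       channels=[0], verify=False, save_dfs=True)
#   num = 0 if blobs.blobs is None else len(blobs.blobs)
#   print("detected {} blobs".format(num))

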
def setup_images(
        path: str, series: Optional[int] = None,
        offset: Optional[Sequence[int]] = None,
        size: Optional[Sequence[int]] = None,
        proc_type: Optional["config.ProcessTypes"] = None,
        allow_import: bool = True, fallback_main_img: bool = True):
    """Sets up an image and all associated images and metadata.
    
    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will
            be generated.
        series: Image series number; defaults to None.
        offset: Sub-image offset given in z,y,x; defaults to None.
        size: Sub-image shape given in z,y,x; defaults to None.
        proc_type: Processing type; defaults to None.
        allow_import: True to allow importing the image if it cannot be
            loaded; defaults to True.
        fallback_main_img: True to fall back to loading a registered image
            if available when the main image cannot be loaded; defaults
            to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS: config.meta_dict[
                config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION: config.meta_dict[
                config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM: config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE: config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE: config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val
        
        res = import_md[config.MetaKeys.RESOLUTIONS]
        if res is None:
            # default to 1 for x,y,z since image resolutions are required
            res = [1] * 3
            import_md[config.MetaKeys.RESOLUTIONS] = res
            _logger.warning(
                "No image resolutions found. Defaulting to: %s", res)
    
    # LOAD MAIN IMAGE
    
    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None
    
    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.labels_img_orig = None
    config.borders_img = None
    config.labels_metadata = None
    config.labels_ref = None
    
    # reset blobs
    config.blobs = None
    
    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None
    
    # registered images set to load
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    
    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(
            subimg_base, config.SUFFIX_SUBIMG)
        
        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))
            
            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # it fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))
    
    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS,
            config.ProcessTypes.EXPORT_BLOBS):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fall back to loading from full image blobs and getting
                    # a subset, shifting them relative to the sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(
                        blobs.blobs, offset, size, reverse=False)
                    detector.Blobs.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2
    
    if path and config.image5d is None and not atlas_suffix:
        # load or import the main image stack
        print("Loading main image")
        try:
            path_lower = path.lower()
            import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
            if path_lower.endswith(sitk_io.EXTS_3D):
                # load a format supported by SimpleITK and prepend time axis;
                # if 2D, convert to 3D
                img5d = sitk_io.read_sitk_files(path, make_3d=True)
            elif not import_only and path_lower.endswith((".tif", ".tiff")):
                # load TIF file directly
                img5d, meta = read_tif(path)
                config.resolutions = meta[config.MetaKeys.RESOLUTIONS]
            else:
                # load or import from MagellanMapper Numpy format
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import and (img5d is None or img5d.img is None):
                    # import image; will re-import over any existing image file
                    if os.path.isdir(path) and all(
                            [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single-plane images to a single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls, prefix, import_md, series,
                            channel=config.channel)
            
            if img5d is not None:
                # set loaded main image in config
                config.img5d = img5d
                config.image5d = config.img5d.img
        except FileNotFoundError as e:
            _logger.exception(e)
            _logger.info("Could not load %s", path)
    
    if config.metadatas and config.metadatas[0]:
        # assign metadata from an alternate file if given to supersede
        # settings for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])
    
    # main image is currently required since many parameters depend on it
    if fallback_main_img and atlas_suffix is None and config.image5d is None:
        # fall back to the atlas if the main image is not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        _logger.info(
            "Main image is not set, falling back to registered image with "
            "suffix %s", atlas_suffix)
    
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.img5d = sitk_io.read_sitk_files(
                path, atlas_suffix, make_3d=True)
            config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)
    
    # load metadata related to the labels image
    config.labels_metadata = labels_meta.LabelsMeta(
        f"{path}." if config.prefix else path).load()
    
    # load labels reference file, prioritizing the path given by the user
    # and falling back to any extension matching PATH_LABELS_REF
    path_labels_refs = [config.load_labels]
    labels_path_ref = config.labels_metadata.path_ref
    if labels_path_ref:
        path_labels_refs.append(labels_path_ref)
    labels_ref = None
    for ref in path_labels_refs:
        if not ref:
            continue
        try:
            # load labels reference file
            labels_ref = ontology.LabelsRef(ref).load()
            if labels_ref.ref_lookup is not None:
                config.labels_ref = labels_ref
                _logger.debug("Loaded labels reference file from %s", ref)
                break
        except (FileNotFoundError, KeyError):
            pass
    if path_labels_refs and (
            labels_ref is None or labels_ref.ref_lookup is None):
        # warn if a labels path was given but no reference could be loaded
        _logger.warning(
            "Unable to load labels reference file from '%s', skipping",
            path_labels_refs)
    
    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            img5d, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, annotation_suffix, True, True)
            config.labels_img = img5d.img[0]
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; a
                # colormap can be generated for the original labels
                # loaded below
                config.labels_img = np.zeros(
                    config.image5d.shape[1:4], dtype=int)
                print("Created blank labels image from main image")
    
    if config.image5d is not None and config.labels_img is not None:
        # set up scaling factors by dimension between the intensity and
        # labels images
        config.labels_scaling = importer.calc_scaling(
            config.image5d, config.labels_img)
    
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, borders_suffix, make_3d=True).img[0]
        except FileNotFoundError as e:
            print(e)
    
    if config.atlas_labels[config.AtlasLabels.ORIG_COLORS]:
        labels_orig_ids = config.labels_metadata.region_ids_orig
        if labels_orig_ids is None:
            if config.load_labels is not None:
                # load original labels image from the same directory as the
                # ontology file for consistent ID-color mapping, even if
                # labels are missing
                try:
                    config.labels_img_orig = sitk_io.load_registered_img(
                        config.load_labels, config.RegNames.IMG_LABELS.value)
                except FileNotFoundError as e:
                    print(e)
            if config.labels_img is not None and config.labels_img_orig is None:
                _logger.warning(
                    "Could not load original labels image IDs; colors may "
                    "differ from the original image")
    
    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image by the specified number of 90-degree turns after
        # loading, eg to reorient images output by a deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))
    
    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to the bounds of the sub-image
        config.image5d = plot_3d.prepare_subimg(
            config.image5d, offset, size)[None]
        config.image5d_is_roi = True
    
    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
    
    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)
    
    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to the main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        # scale radius by the mean of the other dimensions' scaling
        scaling = np.append(scaling, np.mean(scaling))
        if not np.all(scaling == 1):
            _logger.debug("Scaling blobs to main image by factor: %s", scaling)
            blobs.blobs[:, :4] = ontology.scale_coords(
                blobs.blobs[:, :4], scaling)
        blobs.scaling = scaling
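
# A hypothetical usage sketch (placeholder path; assumes the CLI or caller
# has already populated ``config`` with profiles and any registered-image
# suffixes): load a sub-image and its blobs archive for viewing.
#
#   setup_images(
#       "/path/to/img.npy", series=0, offset=(10, 50, 50),
#       size=(30, 200, 200), proc_type=config.ProcessTypes.LOAD)
#   print(config.image5d.shape, config.img5d.img_io)

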
def process_file(
        path: str, proc_type: Enum, proc_val: Optional[Any] = None,
        series: Optional[int] = None,
        subimg_offset: Optional[List[int]] = None,
        subimg_size: Optional[List[int]] = None,
        roi_offset: Optional[List[int]] = None,
        roi_size: Optional[List[int]] = None
) -> Tuple[Optional[Any], Optional[str]]:
    """Processes a single image file non-interactively.
    
    Assumes that the image has already been set up.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will
            be generated.
        proc_type: Processing type, which should be one of
            :class:`config.ProcessTypes`.
        proc_val: Processing value associated with ``proc_type``; defaults
            to None.
        series: Image series number; defaults to None.
        subimg_offset: Sub-image offset as ``(z, y, x)`` to load; defaults
            to None.
        subimg_size: Sub-image size as ``(z, y, x)`` to load; defaults
            to None.
        roi_offset: Region of interest offset as ``(x, y, z)`` to process;
            defaults to None.
        roi_size: Region of interest size as ``(x, y, z)`` to process;
            defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and text
        feedback from the processing, or None if no feedback.
    
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    
    print("{}\n".format("-" * 80))
    
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None
    
    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported, so nothing left to do
        print("imported {}, will exit".format(path))
    
    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give a smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_path = naming.make_subimage_name(
            filename_base, subimg_offset, subimg_size)
        export_rois.export_rois(
            db, config.image5d, config.channel, export_path,
            config.plot_labels[config.PlotLabels.PADDING],
            config.unit_factor, config.truth_db_mode,
            os.path.basename(export_path))
    
    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path, series, plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)
    
    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        export_stack.stack_to_img(
            config.filenames, roi_offset, roi_size, series, subimg_offset,
            subimg_size, proc_type is config.ProcessTypes.ANIMATED,
            config.suffix)
    
    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)
    
    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, with or without co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)
    
    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # co-localize blobs in separate channels by matching blobs
            shape = subimg_size
            if shape is None:
                # get shape from loaded image, falling back to its metadata
                if config.image5d is not None:
                    shape = config.image5d.shape[1:]
                else:
                    shape = config.img5d.meta[config.MetaKeys.SHAPE][1:]
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")
    
    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)
    
    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)
    
    elif proc_type is config.ProcessTypes.EXPORT_TIF:
        # export the main image as a TIF file for each channel
        np_io.write_tif(config.image5d, config.filename)
    
    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(
            config.image5d, proc_val, config.channel, out_path)
    
    return stats, fdbk
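
# A hypothetical non-interactive workflow sketch (placeholder path and
# offsets): set up the image, then dispatch a task through ``process_file``.
#
#   setup_images("/path/to/img.npy", proc_type=config.ProcessTypes.DETECT)
#   stats, fdbk = process_file(
#       "/path/to/img.npy", config.ProcessTypes.DETECT,
#       subimg_offset=(0, 0, 0), subimg_size=(50, 500, 500))

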
def setup_images(path=None, series=None, offset=None, size=None,
                 proc_mode=None, allow_import=True):
    """Sets up an image and all associated images and metadata.
    
    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths
            will be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults
            to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive; defaults
            to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS: config.meta_dict[
                config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION: config.meta_dict[
                config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM: config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE: config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE: config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val
    
    # LOAD MAIN IMAGE
    
    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None
    
    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.borders_img = None
    
    # reset blobs
    config.blobs = None
    
    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None
    
    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(
            subimg_base, config.SUFFIX_SUBIMG)
        
        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}"
                  .format(filename_subimg, config.image5d.shape))
            
            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # it fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load"
                  .format(filename_subimg))
    
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS, config.ProcessTypes.EXPORT_BLOBS,
            config.ProcessTypes.DETECT):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fall back to loading from full image blobs and getting
                    # a subset, shifting them relative to the sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(
                        blobs.blobs, offset, size, reverse=False)
                    detector.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (
                    config.ProcessTypes.LOAD,
                    config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2
    
    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # load a format supported by SimpleITK and prepend time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.img5d.img = config.image5d
                config.img5d.path_img = path
                config.img5d.img_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                            [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single-plane images to a single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or img5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls, prefix, import_md, series,
                            channel=config.channel)
                
                if img5d is not None:
                    # set loaded main image in config
                    config.img5d = img5d
                    config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))
    
    if config.metadatas and config.metadatas[0]:
        # assign metadata from an alternate file if given to supersede
        # settings for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])
    
    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fall back to the atlas if the main image is not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print("main image is not set, falling back to registered "
              "image with suffix", atlas_suffix)
    
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.img5d.img = config.image5d
            config.img5d.img_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)
    
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            config.labels_img, config.labels_img_sitk = \
                sitk_io.read_sitk_files(
                    path, reg_names=annotation_suffix, return_sitk=True)
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; a
                # colormap can be generated for the original labels
                # loaded below
                config.labels_img = np.zeros(
                    config.image5d.shape[1:4], dtype=int)
                print("Created blank labels image from main image")
    
    if config.image5d is not None and config.labels_img is not None:
        # set up scaling factors by dimension between the intensity and
        # labels images
        config.labels_scaling = importer.calc_scaling(
            config.image5d, config.labels_img)
    
    try:
        if config.load_labels is not None:
            # load labels reference file
            labels_ref = ontology.load_labels_ref(config.load_labels)
            if isinstance(labels_ref, pd.DataFrame):
                # parse CSV files loaded into data frame
                config.labels_ref_lookup = ontology.create_lookup_pd(
                    labels_ref)
            else:
                # parse dict from ABA JSON file
                config.labels_ref_lookup = (
                    ontology.create_aba_reverse_lookup(labels_ref))
    except FileNotFoundError as e:
        print(e)
    
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)
    
    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from the same directory as the ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ "
                "from it")
    
    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image by the specified number of 90-degree turns after
        # loading, eg to reorient images output by a deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))
    
    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to the bounds of the sub-image
        config.image5d = plot_3d.prepare_subimg(
            config.image5d, offset, size)[None]
        config.image5d_is_roi = True
    
    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
    
    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)
    
    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None):
        # scale blob coordinates to the main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        if not np.all(scaling == 1):
            print("Scaling blobs to main image by factor:", scaling)
            blobs.blobs[:, :3] = ontology.scale_coords(
                blobs.blobs[:, :3], scaling)