def make_labels_level_img(img_path, level, prefix=None, show=False):
    """Replace labels in an image with their parents at the given level.

    Labels that do not fall within a parent at that level will remain
    in place.

    Args:
        img_path: Path to the base image from which the corresponding
            registered labels image will be found.
        level: Ontological level at which to group child labels.
        prefix: Start of path for output image; defaults to None to use
            ``img_path`` instead.
        show: True to show the images after generating them; defaults
            to False.
    """
    # load the registered labels image and build a reverse ontology lookup
    labels_sitk = sitk_io.load_registered_img(
        img_path, config.RegNames.IMG_LABELS.value, get_sitk=True)
    labels_arr = sitk.GetArrayFromImage(labels_sitk)
    lookup = ontology.create_aba_reverse_lookup(
        ontology.load_labels_ref(config.load_labels))

    for label_id in list(lookup.keys()):
        # check both signed versions of the ID, where negative IDs are
        # stored in the lookup under their absolute value
        for region in (label_id, -1 * label_id):
            if region == 0:
                continue
            # get the ontological node and skip regions not at the
            # requested level
            node = lookup[abs(region)]
            if node[ontology.NODE][config.ABAKeys.LEVEL.value] != level:
                continue
            # gather the parent (first) and all of its children at the
            # given level and flatten them into the parent label
            child_ids = ontology.get_children_from_id(lookup, region)
            mask = np.isin(labels_arr, child_ids)
            print("replacing labels within", region)
            labels_arr[mask] = region
    labels_level_sitk = sitk_io.replace_sitk_with_numpy(
        labels_sitk, labels_arr)

    # generate a corresponding edge image at this level
    edges_sitk = sitk_io.replace_sitk_with_numpy(
        labels_sitk, vols.make_labels_edge(labels_arr))

    # write the grouped labels and edge images, optionally displaying them
    imgs_write = {
        config.RegNames.IMG_LABELS_LEVEL.value.format(level):
            labels_level_sitk,
        config.RegNames.IMG_LABELS_EDGE_LEVEL.value.format(level):
            edges_sitk,
    }
    sitk_io.write_reg_images(imgs_write, prefix if prefix else img_path)
    if show:
        for img in imgs_write.values():
            if img:
                sitk.Show(img)
def setup_images(path=None, series=None, offset=None, size=None,
                 proc_mode=None, allow_import=True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.

    Args:
        path (str): Path to image from which MagellanMapper-style paths
            will be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults
            to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to
            None.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive; defaults to
            None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.

    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS: config.meta_dict[
                config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION: config.meta_dict[
                config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM: config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE: config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE: config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.borders_img = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = stack_detect.make_subimage_name(
            filename_base, offset, size)
        filename_subimg = libmag.combine_paths(
            subimg_base, config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.image5d_io = config.LoadIO.NP
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type in (config.ProcessTypes.LOAD,
                     config.ProcessTypes.EXPORT_ROIS,
                     config.ProcessTypes.EXPORT_BLOBS,
                     config.ProcessTypes.DETECT):
        # load a blobs archive
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = load_blobs(subimg_base)
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = load_blobs(filename_base)
                    config.blobs, _ = detector.get_blobs_in_roi(
                        config.blobs, offset, size, reverse=False)
                    detector.shift_blob_rel_coords(
                        config.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = load_blobs(filename_base)
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # attempt to load a format supported by SimpleITK and
                # prepend a time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.image5d_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                if not import_only:
                    # load previously imported image
                    config.image5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                            [r is None
                             for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        config.image5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or config.image5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix \
                            else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        config.image5d = importer.import_multiplane_images(
                            chls, prefix, import_md, series,
                            channel=config.channel)
                config.image5d_io = config.LoadIO.NP
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print("main image is not set, falling back to registered "
              "image with suffix", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.image5d_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)

    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        # load labels image, set up scaling, and load labels file
        try:
            # TODO: need to support multichannel labels images
            config.labels_img = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix)
            if config.image5d is not None:
                config.labels_scaling = importer.calc_scaling(
                    config.image5d, config.labels_img)
            if config.load_labels is not None:
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse dict from ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)

    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)

    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from same directory as ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            # fixed warning text: the implicit string concatenation
            # previously produced "colors may differdiffer from it"
            libmag.warn(
                "could not load original labels image; colors may "
                "differ from it")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate main image specified num of times x90deg after loading since
        # need to rotate images output by deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(
            config.image5d, size, offset)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(
        config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
def plot_region_development(metric, size=None, show=True):
    """Plot regions across development for the given metric.

    Reads the volume stats file from :attr:`config.filename` and a region
    ID file from ``config.filenames[1]``, then plots the metric per region
    over age, both raw and normalized to whole brain tissue.

    Args:
        metric (str): Column name of metric to track.
        size (List[int]): Sequence of ``width, height`` to size the
            figure; defaults to None.
        show (bool): True to display the image; defaults to True.
    """
    # set up access to data frame columns
    id_cols = ["Age", "Condition"]
    extra_cols = ["RegionName"]
    cond_col = "Region"

    # assume that vol stats file is given first, then region IDs;
    # merge in region names and levels
    df_regions = pd.read_csv(config.filenames[1])
    df = pd.read_csv(config.filename).merge(
        df_regions[["Region", "RegionName", "Level"]], on="Region",
        how="left")

    # convert sample names to ages
    ages = ontology.rel_to_abs_ages(df["Sample"].unique())
    df["Age"] = df["Sample"].map(ages)

    # get large super-structures for normalization to brain tissue, where
    # "non-brain" are spinal cord and ventricles, which are variably labeled;
    # 15564 appears to be the whole-organism ID and 17651/126651558 the
    # non-brain super-structure IDs -- TODO confirm against the ontology
    df_base = df[df["Region"] == 15564]
    ids_nonbr_large = (17651, 126651558)
    dfs_nonbr_large = [df[df["Region"] == n] for n in ids_nonbr_large]

    # get data frame with region IDs of all non-brain structures removed
    labels_ref_lookup = ontology.create_aba_reverse_lookup(
        ontology.load_labels_ref(config.load_labels))
    ids_nonbr = []
    for n in ids_nonbr_large:
        ids_nonbr.extend(
            ontology.get_children_from_id(labels_ref_lookup, n))

    label_id = config.atlas_labels[config.AtlasLabels.ID]
    if label_id is not None:
        # show only selected region and its children
        ids = ontology.get_children_from_id(labels_ref_lookup, label_id)
        df = df[np.isin(df["Region"], ids)]
    df_brain = df.loc[~df["Region"].isin(ids_nonbr)]

    levels = np.sort(df["Level"].unique())
    conds = df["Condition"].unique()

    # get aggregated whole brain tissue for normalization
    cols_show = (*id_cols, cond_col, *extra_cols, metric)
    if dfs_nonbr_large:
        # add all large non-brain structures
        df_nonbr = dfs_nonbr_large[0]
        for df_out in dfs_nonbr_large[1:]:
            df_nonbr = df_io.normalize_df(
                df_nonbr, id_cols, cond_col, None, [metric], extra_cols,
                df_out, df_io.df_add)
        # subtract them from whole organism to get brain tissue alone,
        # updating given metric in db_base
        df_base = df_io.normalize_df(
            df_base, id_cols, cond_col, None, [metric], extra_cols,
            df_nonbr, df_io.df_subtract)
    df_base.loc[:, "RegionName"] = "Brain tissue"
    print("Brain {}:".format(metric))
    df_io.print_data_frame(df_base.loc[:, cols_show], "\t")
    df_base_piv, regions = df_io.pivot_with_conditions(
        df_base, id_cols, "RegionName", metric)

    # plot lines with separate styles for each condition and colors for
    # each region name
    linestyles = ("--", "-.", ":", "-")
    num_conds = len(conds)
    # repeat the style cycle enough times to cover all conditions
    linestyles = linestyles * (num_conds // (len(linestyles) + 1) + 1)
    if num_conds < len(linestyles):
        # trim to the number of conditions while keeping the first style
        # dashed and forcing the last style to be the solid one
        linestyles = (*linestyles[:num_conds - 1], linestyles[-1])
    lines_params = {
        "labels": (metric, "Post-Conceptional Age"),
        "linestyles": linestyles,
        "size": size,
        "show": show,
        "ignore_invis": True,
        "groups": conds,
        "marker": ".",
    }
    # normalized plots share the same params but are labeled as fractions
    line_params_norm = lines_params.copy()
    line_params_norm["labels"] = ("Fraction", "Post-Conceptional Age")
    plot_2d.plot_lines(
        config.filename, "Age", regions,
        title="Whole Brain Development ({})".format(metric),
        suffix="_dev_{}_brain".format(metric),
        df=df_base_piv, **lines_params)

    for level in levels:
        # plot raw metric at given level
        df_level = df.loc[df["Level"] == level]
        print("Raw {}:".format(metric))
        df_io.print_data_frame(df_level.loc[:, cols_show], "\t")
        df_level_piv, regions = df_io.pivot_with_conditions(
            df_level, id_cols, "RegionName", metric)
        plot_2d.plot_lines(
            config.filename, "Age", regions,
            title="Structure Development ({}, Level {})".format(
                metric, level),
            suffix="_dev_{}_level{}".format(metric, level),
            df=df_level_piv, **lines_params)

        # plot metric normalized to whole brain tissue; structures
        # above removed regions will still contain them
        df_brain_level = df_brain.loc[df_brain["Level"] == level]
        df_norm = df_io.normalize_df(
            df_brain_level, id_cols, cond_col, None, [metric], extra_cols,
            df_base)
        print("{} normalized to whole brain:".format(metric))
        df_io.print_data_frame(df_norm.loc[:, cols_show], "\t")
        df_norm_piv, regions = df_io.pivot_with_conditions(
            df_norm, id_cols, "RegionName", metric)
        plot_2d.plot_lines(
            config.filename, "Age", regions,
            units=(None, config.plot_labels[config.PlotLabels.X_UNIT]),
            title=("Structure Development Normalized to Whole "
                   "Brain ({}, Level {})".format(metric, level)),
            suffix="_dev_{}_level{}_norm".format(metric, level),
            df=df_norm_piv, **line_params_norm)