Example #1
def reg_out_path(file_path, reg_name, match_ext=False):
    """Generate a path for a file registered to another file.
    
    Args:
        file_path: Full path of file registered to. :attr:`config.series`
            will be appended unless ``file_path`` is a directory.
        reg_name: Suffix for type of registration, e.g. :const:`IMG_LABELS`.
        match_ext: True to change the extension of ``reg_name`` to match 
            that of ``file_path``.
    
    Returns:
        str: Full path with the registered filename including appropriate
        extension at the end.
    """
    if match_ext:
        reg_name = libmag.match_ext(file_path, reg_name)
    if os.path.isdir(file_path):
        return os.path.join(file_path, reg_name)
    else:
        file_path_base = importer.filename_to_base(file_path, config.series)
        return file_path_base + "_" + reg_name
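
A minimal usage sketch for reg_out_path; the paths and suffix are
placeholders, and config.series is assumed to be set already:

# hypothetical call; with match_ext=True the suffix takes on the
# extension of the source image path
labels_path = reg_out_path("/data/brain.czi", "annotation.mhd",
                           match_ext=True)

# for a directory, reg_name is joined directly to the path
labels_dir = reg_out_path("/data/exp_dir", "annotation.mhd")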
Example #2
def _detect_subimgs(
        path: str, series: int, subimg_offsets: List[List[int]],
        subimg_sizes: List[List[int]]
) -> Tuple[Union[np.ndarray, Any], List[str]]:
    """Detect blobs in an image across sub-image offsets.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        series: Image series number.
        subimg_offsets: Nested list of sub-image offset sets
            given as ``[[offset_z1, offset_y1, offset_x1], ...]``.
        subimg_sizes: Nested list of sub-image size sets
            given as ``[[size_z1, size_y1, size_x1], ...]`` and
            corresponding to ``subimg_offsets``.
    
    Returns:
        Summed stats array and list of summaries for each sub-image.
    """
    stat = np.zeros(3)

    # use whole image if sub-image parameters are not set
    if subimg_offsets is None:
        subimg_offsets = [None]
    if subimg_sizes is None:
        subimg_sizes = [None]
    roi_sizes_len = len(subimg_sizes)

    summaries = []
    for i in range(len(subimg_offsets)):
        size = (subimg_sizes[i] if roi_sizes_len > 1 else subimg_sizes[0])
        np_io.setup_images(path, series, subimg_offsets[i], size)
        stat_roi, fdbk, _ = stack_detect.detect_blobs_stack(
            importer.filename_to_base(path, series), subimg_offsets[i], size)
        if stat_roi is not None:
            stat = np.add(stat, stat_roi)
        summaries.append("Offset {}:\n{}".format(subimg_offsets[i], fdbk))
    return stat, summaries
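
A sketch of calling _detect_subimgs over two sub-images; the path and the
z,y,x offset/size triples are placeholders, and the image is assumed to
have been imported already:

# hypothetical call; a single size set is reused for every offset
stats, summaries = _detect_subimgs(
    "/data/brain.czi", 0,
    subimg_offsets=[[0, 0, 0], [30, 0, 0]],
    subimg_sizes=[[30, 512, 512]])
print("summed stats:", stats)
print("\n".join(summaries))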
Example #3
def setup_images(path=None,
                 series=None,
                 offset=None,
                 size=None,
                 proc_mode=None,
                 allow_import=True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in 
            :class:`config.ProcessTypes`, case-insensitive; defaults to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.borders_img = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = stack_detect.make_subimage_name(filename_base, offset,
                                                      size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.image5d_io = config.LoadIO.NP
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type in (config.ProcessTypes.LOAD, config.ProcessTypes.EXPORT_ROIS,
                     config.ProcessTypes.EXPORT_BLOBS,
                     config.ProcessTypes.DETECT):
        # load a blobs archive
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = load_blobs(subimg_base)
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = load_blobs(filename_base)
                    config.blobs, _ = detector.get_blobs_in_roi(config.blobs,
                                                                offset,
                                                                size,
                                                                reverse=False)
                    detector.shift_blob_rel_coords(config.blobs,
                                                   np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = load_blobs(filename_base)
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # load a format supported by SimpleITK and prepend time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.image5d_io = config.LoadIO.SITK
            else:
                # load or import from MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                if not import_only:
                    # load previously imported image
                    config.image5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over existing image or import new image
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        config.image5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or config.image5d is None:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        config.image5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
                config.image5d_io = config.LoadIO.NP
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print(
            "main image is not set, falling back to registered "
            "image with suffix", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.image5d_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)

    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        # load labels image, set up scaling, and load labels file
        try:
            # TODO: need to support multichannel labels images
            config.labels_img = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix)
            if config.image5d is not None:
                config.labels_scaling = importer.calc_scaling(
                    config.image5d, config.labels_img)
            if config.load_labels is not None:
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse dict from ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)

    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)

    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load original labels image from same directory as ontology
        # file for consistent ID-color mapping, even if labels are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ "
                "from it")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate the main image the given number of times by 90 deg after
        # loading, eg for images output by a deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, size,
                                                offset)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
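
A usage sketch for this setup_images variant, loading a z,y,x sub-image in
blob-detection mode; the path and coordinates are placeholders:

# hypothetical call; proc_mode is matched case-insensitively against
# config.ProcessTypes
setup_images("/data/brain.czi", series=0,
             offset=(10, 50, 50), size=(20, 200, 200),
             proc_mode="detect")

# the loaded images are then available through config
print(config.image5d is not None, config.labels_img is not None)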
Example #4
def setup_dbs():
    """Set up databases for the given image file.
    
    Only sets up each database if it has not been set up already.
    
    """
    # prep filename
    filename_base = None
    if config.filename:
        filename_base = importer.filename_to_base(config.filename,
                                                  config.series)

    # get any user-supplied truth database path, falling back to name based
    # on filename or default name
    truth_db_path = config.truth_db_params[config.TruthDB.PATH]
    user_dir = config.user_app_dirs.user_data_dir
    truth_db_name_base = filename_base if filename_base else os.path.join(
        user_dir, sqlite.DB_NAME_BASE)

    if config.truth_db_mode is config.TruthDBModes.VIEW:
        # loads truth DB as a separate database in parallel with the given
        # editable database, with name based on filename by default unless
        # truth DB name explicitly given
        path = truth_db_path if truth_db_path else truth_db_name_base
        try:
            sqlite.load_truth_db(path)
        except FileNotFoundError as e:
            print(e)
            print("Could not load truth DB from current image path")
    elif config.truth_db_mode is config.TruthDBModes.VERIFY:
        if not config.verified_db:
            # creates a new verified DB to store all ROC results
            config.verified_db = sqlite.ClrDB()
            config.verified_db.load_db(
                os.path.join(user_dir, sqlite.DB_NAME_VERIFIED), True)
        if truth_db_path:
            # load truth DB path to verify against if explicitly given
            try:
                sqlite.load_truth_db(truth_db_path)
            except FileNotFoundError as e:
                print(e)
                print("Could not load truth DB from {}".format(truth_db_path))
    elif config.truth_db_mode is config.TruthDBModes.VERIFIED:
        # loads verified DB as the main DB, which includes copies of truth
        # values with flags for whether they were detected
        path = os.path.join(user_dir, sqlite.DB_NAME_VERIFIED)
        if truth_db_path:
            path = truth_db_path
        try:
            config.db = sqlite.ClrDB()
            config.db.load_db(path)
            config.verified_db = config.db
        except FileNotFoundError as e:
            print(e)
            print("Could not load verified DB from {}".format(
                sqlite.DB_NAME_VERIFIED))
    elif config.truth_db_mode is config.TruthDBModes.EDIT:
        # loads truth DB as the main database for editing rather than
        # loading as a truth database
        config.db_path = truth_db_path
        if not config.db_path:
            config.db_path = "{}{}".format(
                os.path.basename(truth_db_name_base), sqlite.DB_SUFFIX_TRUTH)
        print("Editing truth database at {}".format(config.db_path))

    if config.db is None:
        # load the main database
        config.db = sqlite.ClrDB()
        config.db.load_db(None, False)
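
A sketch of preparing databases in truth-viewing mode; the config
assignments are assumptions about how callers set up state beforehand:

# hypothetical setup; setup_dbs reads everything it needs from config
config.filename = "/data/brain.czi"
config.truth_db_mode = config.TruthDBModes.VIEW
setup_dbs()  # loads a truth DB in parallel with the main DB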
Example #5
def process_file(
    path: str,
    proc_type: Enum,
    proc_val: Optional[Any] = None,
    series: Optional[int] = None,
    subimg_offset: Optional[List[int]] = None,
    subimg_size: Optional[List[int]] = None,
    roi_offset: Optional[List[int]] = None,
    roi_size: Optional[List[int]] = None
) -> Tuple[Optional[Any], Optional[str]]:
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_type: Processing type, which should be one of
            :class:`config.ProcessTypes`.
        proc_val: Processing value associated with ``proc_type``; defaults to
            None.
        series: Image series number; defaults to None.
        subimg_offset: Sub-image offset as (z,y,x) to load; defaults to None.
        subimg_size: Sub-image size as (z,y,x) to load; defaults to None.
        roi_offset: Region of interest offset as (x, y, z) to process;
            defaults to None.
        roi_size: Region of interest size of region to process, given as
            ``(x, y, z)``; defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)

    print("{}\n".format("-" * 80))
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give a smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_path = naming.make_subimage_name(filename_base, subimg_offset,
                                                subimg_size)
        export_rois.export_rois(db, config.image5d, config.channel,
                                export_path,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(export_path))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)

    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, +/- co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)

    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # colocalize blobs in separate channels by matching blobs
            shape = subimg_size
            if shape is None:
                # get shape from loaded image, falling back to its metadata
                if config.image5d is not None:
                    shape = config.image5d.shape[1:]
                else:
                    shape = config.img5d.meta[config.MetaKeys.SHAPE][1:]
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")

    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.EXPORT_TIF:
        # export the main image as TIF files, one for each channel
        np_io.write_tif(config.image5d, config.filename)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, proc_val, config.channel,
                                   out_path)

    return stats, fdbk
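
A sketch of a non-interactive detection run with this process_file variant;
the path and z,y,x coordinates are placeholders, and setup_images is
assumed to have been called first:

# hypothetical blob detection over a sub-image
stats, fdbk = process_file(
    "/data/brain.czi", config.ProcessTypes.DETECT,
    series=0, subimg_offset=[10, 50, 50], subimg_size=[20, 200, 200])
if fdbk:
    print(fdbk)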
Example #6
def setup_images(path: str,
                 series: Optional[int] = None,
                 offset: Optional[Sequence[int]] = None,
                 size: Optional[Sequence[int]] = None,
                 proc_type: Optional["config.ProcessTypes"] = None,
                 allow_import: bool = True,
                 fallback_main_img: bool = True):
    """Sets up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        series: Image series number; defaults to None.
        offset: Sub-image offset given in z,y,x; defaults to None.
        size: Sub-image shape given in z,y,x; defaults to None.
        proc_type: Processing type.
        allow_import: True to allow importing the image if it
            cannot be loaded; defaults to True.
        fallback_main_img: True to fall back to loading a registered image
            if possible if the main image could not be loaded; defaults to True.
    
    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS:
            config.meta_dict[config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION:
            config.meta_dict[config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM:
            config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE:
            config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE:
            config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

        res = import_md[config.MetaKeys.RESOLUTIONS]
        if res is None:
            # default to 1 for x,y,z since image resolutions are required
            res = [1] * 3
            import_md[config.MetaKeys.RESOLUTIONS] = res
            _logger.warning("No image resolutions found. Defaulting to: %s", res)

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    config.img5d = Image5d()
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.labels_img_sitk = None
    config.labels_img_orig = None
    config.borders_img = None
    config.labels_meta = None
    config.labels_ref = None

    # reset blobs
    config.blobs = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None
    blobs = None

    # registered images set to load
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = naming.make_subimage_name(filename_base, offset, size)
        filename_subimg = libmag.combine_paths(subimg_base,
                                               config.SUFFIX_SUBIMG)

        try:
            # load sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.img5d.img = config.image5d
            config.img5d.path_img = filename_subimg
            config.img5d.img_io = config.LoadIO.NP
            config.img5d.subimg_offset = offset
            config.img5d.subimg_size = size
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading sub-image, load original image's metadata
            # for essential data such as vmin/vmax; will only warn if
            # fails to load since metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    if config.load_data[config.LoadData.BLOBS] or proc_type in (
            config.ProcessTypes.LOAD, config.ProcessTypes.COLOC_MATCH,
            config.ProcessTypes.EXPORT_ROIS, config.ProcessTypes.EXPORT_BLOBS):
        # load a blobs archive
        blobs = detector.Blobs()
        try:
            if subimg_base:
                try:
                    # load blobs generated from sub-image
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(subimg_base))
                except (FileNotFoundError, KeyError):
                    # fallback to loading from full image blobs and getting
                    # a subset, shifting them relative to sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = blobs.load_blobs(
                        img_to_blobs_path(filename_base))
                    blobs.blobs, _ = detector.get_blobs_in_roi(blobs.blobs,
                                                               offset,
                                                               size,
                                                               reverse=False)
                    detector.Blobs.shift_blob_rel_coords(
                        blobs.blobs, np.multiply(offset, -1))
            else:
                # load full image blobs
                config.blobs = blobs.load_blobs(
                    img_to_blobs_path(filename_base))
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None and not atlas_suffix:
        # load or import the main image stack
        print("Loading main image")
        try:
            path_lower = path.lower()
            import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
            if path_lower.endswith(sitk_io.EXTS_3D):
                # load format supported by SimpleITK and prepend time axis;
                # if 2D, convert to 3D
                img5d = sitk_io.read_sitk_files(path, make_3d=True)
            elif not import_only and path_lower.endswith((".tif", ".tiff")):
                # load TIF file directly
                img5d, meta = read_tif(path)
                config.resolutions = meta[config.MetaKeys.RESOLUTIONS]
            else:
                # load or import from MagellanMapper Numpy format
                img5d = None
                if not import_only:
                    # load previously imported image
                    img5d = importer.read_file(path, series)
                if allow_import and (img5d is None or img5d.img is None):
                    # import image; will re-import over any existing image file
                    if os.path.isdir(path) and all(
                        [r is None for r in config.reg_suffixes.values()]):
                        # import directory of single plane images to single
                        # stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        img5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only:
                        # import multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = config.prefix if config.prefix else import_path
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        img5d = importer.import_multiplane_images(
                            chls,
                            prefix,
                            import_md,
                            series,
                            channel=config.channel)
            if img5d is not None:
                # set loaded main image in config
                config.img5d = img5d
                config.image5d = config.img5d.img
        except FileNotFoundError as e:
            _logger.exception(e)
            _logger.info("Could not load %s", path)

    if config.metadatas and config.metadatas[0]:
        # assign metadata from alternate file if given to supersede settings
        # for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    if fallback_main_img and atlas_suffix is None and config.image5d is None:
        # fallback to atlas if main image not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        _logger.info(
            "Main image is not set, falling back to registered image with "
            "suffix %s", atlas_suffix)
    # use prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.img5d = sitk_io.read_sitk_files(path,
                                                   atlas_suffix,
                                                   make_3d=True)
            config.image5d = config.img5d.img
        except FileNotFoundError as e:
            print(e)

    # load metadata related to the labels image
    config.labels_metadata = labels_meta.LabelsMeta(
        f"{path}." if config.prefix else path).load()

    # load labels reference file, prioritizing path given by user
    # and falling back to any extension matching PATH_LABELS_REF
    path_labels_refs = [config.load_labels]
    labels_path_ref = config.labels_metadata.path_ref
    if labels_path_ref:
        path_labels_refs.append(labels_path_ref)
    labels_ref = None
    for ref in path_labels_refs:
        if not ref:
            continue
        try:
            # load labels reference file
            labels_ref = ontology.LabelsRef(ref).load()
            if labels_ref.ref_lookup is not None:
                config.labels_ref = labels_ref
                _logger.debug("Loaded labels reference file from %s", ref)
                break
        except (FileNotFoundError, KeyError):
            pass
    if path_labels_refs and (labels_ref is None
                             or labels_ref.ref_lookup is None):
        # warn if labels path given but none found
        _logger.warning(
            "Unable to load labels reference file from '%s', skipping",
            path_labels_refs)

    if annotation_suffix is not None:
        try:
            # load labels image
            # TODO: need to support multichannel labels images
            img5d, config.labels_img_sitk = sitk_io.read_sitk_files(
                path, annotation_suffix, True, True)
            config.labels_img = img5d.img[0]
        except FileNotFoundError as e:
            print(e)
            if config.image5d is not None:
                # create a blank labels image for custom annotation; colormap
                # can be generated for the original labels loaded below
                config.labels_img = np.zeros(config.image5d.shape[1:4],
                                             dtype=int)
                print("Created blank labels image from main image")
        if config.image5d is not None and config.labels_img is not None:
            # set up scaling factors by dimension between intensity and
            # labels images
            config.labels_scaling = importer.calc_scaling(
                config.image5d, config.labels_img)

    if borders_suffix is not None:
        # load borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(path,
                                                         borders_suffix,
                                                         make_3d=True).img[0]
        except FileNotFoundError as e:
            print(e)

    if config.atlas_labels[config.AtlasLabels.ORIG_COLORS]:
        labels_orig_ids = config.labels_metadata.region_ids_orig
        if labels_orig_ids is None:
            if config.load_labels is not None:
                # load original labels image from same directory as ontology
                # file for consistent ID-color mapping, even if labels are missing
                try:
                    config.labels_img_orig = sitk_io.load_registered_img(
                        config.load_labels, config.RegNames.IMG_LABELS.value)
                except FileNotFoundError as e:
                    print(e)
            if config.labels_img is not None and config.labels_img_orig is None:
                _logger.warning(
                    "Could not load original labels image IDs; colors may "
                    "differ from the original image")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate the main image the given number of times by 90 deg after
        # loading, eg for images output by a deep learning toolkit
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop full image to bounds of sub-image
        config.image5d = plot_3d.prepare_subimg(config.image5d, offset,
                                                size)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)

    if config.labels_img is not None:
        # make discrete colormap for labels image
        config.cmap_labels = colormaps.setup_labels_cmap(config.labels_img)

    if (blobs is not None and blobs.blobs is not None
            and config.img5d.img is not None and blobs.roi_size is not None):
        # scale blob coordinates to main image if shapes differ
        scaling = np.divide(config.img5d.img.shape[1:4], blobs.roi_size)
        # scale radius by mean of other dimensions' scaling
        scaling = np.append(scaling, np.mean(scaling))
        if not np.all(scaling == 1):
            _logger.debug("Scaling blobs to main image by factor: %s", scaling)
            blobs.blobs[:, :4] = ontology.scale_coords(blobs.blobs[:, :4],
                                                       scaling)
        blobs.scaling = scaling
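
A usage sketch for this setup_images variant, which takes the processing
type as a config.ProcessTypes member rather than a string; the path is a
placeholder:

# hypothetical call to load a previously imported image plus its blobs
setup_images("/data/brain.czi", series=0,
             proc_type=config.ProcessTypes.LOAD, allow_import=False)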
Example #7
def process_file(path,
                 proc_mode,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 roi_offset=None,
                 roi_size=None):
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        roi_offset (List[int]): Region of interest offset as (x, y, z) to
            process; defaults to None.
        roi_size (List[int]): Region of interest size of region to process,
            given as (x, y, z); defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give a smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_rois.export_rois(db, config.image5d, config.channel,
                                filename_base,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(config.filename))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        from magmap.io import export_stack
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs, filename_base)

    elif proc_type is config.ProcessTypes.DETECT:
        # detect blobs in the full image
        stats, fdbk, segments_all = stack_detect.detect_blobs_large_image(
            filename_base, config.image5d, subimg_offset, subimg_size,
            config.truth_db_mode is config.TruthDBModes.VERIFY,
            not config.grid_search_profile, config.image5d_is_roi)

    elif proc_type is config.ProcessTypes.EXPORT_PLANES:
        # export each plane as a separate image file
        from magmap.io import export_stack
        export_stack.export_planes(config.image5d, config.prefix,
                                   config.savefig, config.channel)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        profile = config.get_roi_profile(0)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, profile["preprocess"],
                                   config.channel, out_path)

    return stats, fdbk
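
A sketch for this older process_file signature, which takes the processing
mode as a case-insensitive string key; the path is a placeholder and the
image is assumed to have been set up already:

# hypothetical full-image detection
stats, fdbk = process_file("/data/brain.czi", "detect", series=0)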
Example #8
def main(process_args_only=False, skip_dbs=False):
    """Starts the visualization GUI.
    
    Processes command-line arguments.
    
    Args:
        process_args_only (bool): Processes command-line arguments and
            returns; defaults to False.
        skip_dbs (bool): True to skip loading databases; defaults to False.
    """
    parser = argparse.ArgumentParser(
        description="Setup environment for MagellanMapper")

    # image specification arguments
    parser.add_argument(
        "--img",
        nargs="*",
        help="Main image path(s); after import, the filename is often "
        "given as the original name without its extension")
    parser.add_argument(
        "--meta",
        nargs="*",
        help="Metadata path(s), which can be given as multiple files "
        "corresponding to each image")
    parser.add_argument("--prefix", help="Path prefix")
    parser.add_argument("--suffix", help="Filename suffix")
    parser.add_argument("--channel", nargs="*", type=int, help="Channel index")
    parser.add_argument("--series", help="Series index")
    parser.add_argument("--subimg_offset",
                        nargs="*",
                        help="Sub-image offset in x,y,z")
    parser.add_argument("--subimg_size",
                        nargs="*",
                        help="Sub-image size in x,y,z")
    parser.add_argument("--offset", nargs="*", help="ROI offset in x,y,z")
    parser.add_argument("--size", nargs="*", help="ROI size in x,y,z")
    parser.add_argument("--db", help="Database path")
    parser.add_argument(
        "--cpus",
        help="Maximum number of CPUs/processes to use for multiprocessing "
        "tasks. Use \"none\" or 0 to auto-detect this number (default).")

    # task arguments
    parser.add_argument("--proc",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.ProcessTypes),
                        help="Image processing mode")
    parser.add_argument("--register",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.RegisterTypes),
                        help="Image registration task")
    parser.add_argument("--df",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.DFTasks),
                        help="Data frame task")
    parser.add_argument("--plot_2d",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.Plot2DTypes),
                        help="2D plot task; see config.Plot2DTypes")
    parser.add_argument("--ec2_start",
                        nargs="*",
                        help="AWS EC2 instance start")
    parser.add_argument("--ec2_list", nargs="*", help="AWS EC2 instance list")
    parser.add_argument("--ec2_terminate",
                        nargs="*",
                        help="AWS EC2 instance termination")
    parser.add_argument(
        "--notify",
        nargs="*",
        help="Notification message URL, message, and attachment strings")
    parser.add_argument("--grid_search",
                        help="Grid search hyperparameter tuning profile(s)")

    # profile arguments
    parser.add_argument(
        "--roi_profile",
        nargs="*",
        help="ROI profile, which can be separated by underscores "
        "for multiple profiles and given as paths to custom profiles "
        "in YAML format. Multiple profile groups can be given, which "
        "will each be applied to the corresponding channel. See "
        "docs/settings.md for more details.")
    parser.add_argument(
        "--atlas_profile",
        help="Atlas profile, which can be separated by underscores "
        "for multiple profiles and given as paths to custom profiles "
        "in YAML format. See docs/settings.md for more details.")
    parser.add_argument(
        "--theme",
        nargs="*",
        type=str.lower,
        choices=libmag.enum_names_aslist(config.Themes),
        help="UI theme, which can be given as multiple themes to apply "
        "on top of one another")

    # grouped arguments
    parser.add_argument(
        "--truth_db",
        nargs="*",
        help="Truth database; see config.TruthDB for settings and "
        "config.TruthDBModes for modes")
    parser.add_argument("--labels",
                        nargs="*",
                        help=_get_args_dict_help(
                            "Atlas labels; see config.AtlasLabels.",
                            config.AtlasLabels))
    parser.add_argument("--transform",
                        nargs="*",
                        help=_get_args_dict_help(
                            "Image transformations; see config.Transforms.",
                            config.Transforms))
    parser.add_argument(
        "--reg_suffixes",
        nargs="*",
        help=_get_args_dict_help(
            "Registered image suffixes; see config.RegSuffixes for keys "
            "and config.RegNames for values", config.RegSuffixes))
    parser.add_argument(
        "--plot_labels",
        nargs="*",
        help=_get_args_dict_help(
            "Plot label customizations; see config.PlotLabels ",
            config.PlotLabels))
    parser.add_argument(
        "--set_meta",
        nargs="*",
        help="Set metadata values; see config.MetaKeys for settings")

    # image and figure display arguments
    parser.add_argument("--plane",
                        type=str.lower,
                        choices=config.PLANE,
                        help="Planar orientation")
    parser.add_argument(
        "--show",
        nargs="?",
        const="1",
        help="If applicable, show images after completing the given task")
    parser.add_argument(
        "--alphas",
        help="Alpha opacity levels, which can be comma-delimited for "
        "multichannel images")
    parser.add_argument(
        "--vmin",
        help="Minimum intensity levels, which can be comma-delimited "
        "for multichannel images")
    parser.add_argument(
        "--vmax",
        help="Maximum intensity levels, which can be comma-delimited "
        "for multichannel images")
    parser.add_argument("--seed", help="Random number generator seed")

    # export arguments
    parser.add_argument("--save_subimg",
                        action="store_true",
                        help="Save sub-image as separate file")
    parser.add_argument("--slice", help="Slice given as start,stop,step")
    parser.add_argument("--delay", help="Animation delay in ms")
    parser.add_argument("--savefig", help="Extension for saved figures")
    parser.add_argument("--groups",
                        nargs="*",
                        help="Group values corresponding to each image")
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Verbose output to assist with debugging")
    args = parser.parse_args()

    if args.img is not None:
        # set image file path and convert to basis for additional paths
        config.filenames = args.img
        config.filename = config.filenames[0]
        print("Set filenames to {}, current filename {}".format(
            config.filenames, config.filename))

    if args.meta is not None:
        # set metadata paths
        config.metadata_paths = args.meta
        print("Set metadata paths to", config.metadata_paths)
        config.metadatas = []
        for path in config.metadata_paths:
            # load metadata to dictionary
            md, _ = importer.load_metadata(path, assign=False)
            config.metadatas.append(md)

    if args.channel is not None:
        # set the channels
        config.channel = args.channel
        print("Set channel to {}".format(config.channel))

    series_list = [config.series]  # list of series
    if args.series is not None:
        series_split = args.series.split(",")
        series_list = []
        for ser in series_split:
            ser_split = ser.split("-")
            if len(ser_split) > 1:
                ser_range = np.arange(int(ser_split[0]), int(ser_split[1]) + 1)
                series_list.extend(ser_range.tolist())
            else:
                series_list.append(int(ser_split[0]))
        config.series = series_list[0]
        print("Set to series_list to {}, current series {}".format(
            series_list, config.series))

    if args.savefig is not None:
        # save figure with file type of this extension; remove leading period
        config.savefig = args.savefig.lstrip(".")
        print("Set savefig extension to {}".format(config.savefig))

    if args.verbose:
        # verbose mode, including printing longer Numpy arrays for debugging
        config.verbose = args.verbose
        np.set_printoptions(linewidth=200, threshold=10000)
        print("Set verbose to {}".format(config.verbose))

    # parse sub-image offsets and sizes;
    # expects x,y,z input but stores as z,y,x by convention
    if args.subimg_offset is not None:
        config.subimg_offsets = _parse_coords(args.subimg_offset, True)
        print("Set sub-image offsets to {} (z,y,x)".format(
            config.subimg_offsets))
    if args.subimg_size is not None:
        config.subimg_sizes = _parse_coords(args.subimg_size, True)
        print("Set sub-image sizes to {} (z,y,x)".format(config.subimg_sizes))

    # parse ROI offsets and sizes, which are relative to any sub-image;
    # expects x,y,z input and output
    if args.offset is not None:
        config.roi_offsets = _parse_coords(args.offset)
        if config.roi_offsets:
            config.roi_offset = config.roi_offsets[0]
        print("Set ROI offsets to {}, current offset {} (x,y,z)".format(
            config.roi_offsets, config.roi_offset))
    if args.size is not None:
        config.roi_sizes = _parse_coords(args.size)
        if config.roi_sizes:
            config.roi_size = config.roi_sizes[0]
        print("Set ROI sizes to {}, current size {} (x,y,z)".format(
            config.roi_sizes, config.roi_size))

    if args.cpus is not None:
        # set maximum number of CPUs
        config.cpus = (
            None if args.cpus.lower() in ("none", "0") else int(args.cpus))
        print("Set maximum number of CPUs for multiprocessing tasks to",
              config.cpus)

    # set up main processing mode
    if args.proc is not None:
        config.proc_type = args.proc
        print("processing type set to {}".format(config.proc_type))
    proc_type = libmag.get_enum(config.proc_type, config.ProcessTypes)
    if config.proc_type and proc_type is None:
        libmag.warn("\"{}\" processing type not found".format(
            config.proc_type))

    if args.set_meta is not None:
        # set individual metadata values, currently used for image import
        # TODO: take precedence over loaded metadata archives
        config.meta_dict = args_to_dict(args.set_meta,
                                        config.MetaKeys,
                                        config.meta_dict,
                                        sep_vals="|")
        print("Set metadata values to {}".format(config.meta_dict))
        res = config.meta_dict[config.MetaKeys.RESOLUTIONS]
        if res:
            # set image resolutions, taken as a single set of x,y,z and
            # converting to a nested list of z,y,x
            res_split = res.split(",")
            if len(res_split) >= 3:
                res_float = tuple(float(i) for i in res_split)[::-1]
                config.resolutions = [res_float]
                print("Set resolutions to {}".format(config.resolutions))
            else:
                res_float = None
                print("Resolution ({}) should be given as 3 values (x,y,z)".
                      format(res))
            # store single set of resolutions, similar to input
            config.meta_dict[config.MetaKeys.RESOLUTIONS] = res_float
        mag = config.meta_dict[config.MetaKeys.MAGNIFICATION]
        if mag:
            # set objective magnification
            config.magnification = mag
            print("Set magnification to {}".format(config.magnification))
        zoom = config.meta_dict[config.MetaKeys.ZOOM]
        if zoom:
            # set objective zoom
            config.zoom = zoom
            print("Set zoom to {}".format(config.zoom))
        shape = config.meta_dict[config.MetaKeys.SHAPE]
        if shape:
            # parse shape, storing only in dict
            config.meta_dict[config.MetaKeys.SHAPE] = [
                int(n) for n in shape.split(",")[::-1]
            ]
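        # e.g., a resolutions value of "0.5,0.5,2" (x,y,z) is stored as
        # [(2.0, 0.5, 0.5)] (z,y,x), and a shape of "500,250,100" (x,y,z)
        # becomes [100, 250, 500] (z,y,x)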

    # set up ROI and register profiles
    setup_profiles(args.roi_profile, args.atlas_profile, args.grid_search)

    if args.plane is not None:
        config.plane = args.plane
        print("Set plane to {}".format(config.plane))
    if args.save_subimg:
        config.save_subimg = args.save_subimg
        print("Set to save the sub-image")

    if args.labels:
        # set up atlas labels
        setup_labels(args.labels)

    if args.transform is not None:
        # image transformations such as flipping, rotation
        config.transform = args_to_dict(args.transform, config.Transforms,
                                        config.transform)
        print("Set transformations to {}".format(config.transform))

    if args.register:
        # register type to process in register module
        config.register_type = args.register
        print("Set register type to {}".format(config.register_type))

    if args.df:
        # data frame processing task
        config.df_task = args.df
        print("Set data frame processing task to {}".format(config.df_task))

    if args.plot_2d:
        # 2D plot type to process in plot_2d module
        config.plot_2d_type = args.plot_2d
        print("Set plot_2d type to {}".format(config.plot_2d_type))

    if args.slice:
        # specify a generic slice by command-line, assuming same order
        # of arguments as for slice built-in function and interpreting
        # "none" string as None
        config.slice_vals = args.slice.split(",")
        config.slice_vals = [
            None if val.lower() == "none" else int(val)
            for val in config.slice_vals
        ]
        print("Set slice values to {}".format(config.slice_vals))
    if args.delay:
        config.delay = int(args.delay)
        print("Set delay to {}".format(config.delay))

    if args.show:
        # show images after task is performed, if supported
        config.show = _is_arg_true(args.show)
        print("Set show to {}".format(config.show))

    if args.groups:
        config.groups = args.groups
        print("Set groups to {}".format(config.groups))
    if args.ec2_start is not None:
        # start EC2 instances
        config.ec2_start = args_with_dict(args.ec2_start)
        print("Set ec2 start to {}".format(config.ec2_start))
    if args.ec2_list:
        # list EC2 instances
        config.ec2_list = args_with_dict(args.ec2_list)
        print("Set ec2 list to {}".format(config.ec2_list))
    if args.ec2_terminate:
        config.ec2_terminate = args.ec2_terminate
        print("Set ec2 terminate to {}".format(config.ec2_terminate))
    if args.notify:
        notify_len = len(args.notify)
        if notify_len > 0:
            config.notify_url = args.notify[0]
            print("Set notification URL to {}".format(config.notify_url))
        if notify_len > 1:
            config.notify_msg = args.notify[1]
            print("Set notification message to {}".format(config.notify_msg))
        if notify_len > 2:
            config.notify_attach = args.notify[2]
            print("Set notification attachment path to {}".format(
                config.notify_attach))
    if args.prefix:
        config.prefix = args.prefix
        print("Set path prefix to {}".format(config.prefix))
    if args.suffix:
        config.suffix = args.suffix
        print("Set path suffix to {}".format(config.suffix))

    if args.alphas:
        # specify alpha levels
        config.alphas = [float(val) for val in args.alphas.split(",")]
        print("Set alphas to", config.alphas)

    if args.vmin:
        # specify vmin levels
        config.vmins = [libmag.get_int(val) for val in args.vmin.split(",")]
        print("Set vmins to", config.vmins)

    if args.vmax:
        # specify vmax levels and copy to vmax overview used for plotting
        # and updated for normalization
        config.vmaxs = [libmag.get_int(val) for val in args.vmax.split(",")]
        config.vmax_overview = list(config.vmaxs)
        print("Set vmaxs to", config.vmaxs)

    if args.reg_suffixes is not None:
        # specify suffixes of registered images to load
        config.reg_suffixes = args_to_dict(args.reg_suffixes,
                                           config.RegSuffixes,
                                           config.reg_suffixes)
        print("Set registered image suffixes to {}".format(
            config.reg_suffixes))

    if args.seed:
        # specify random number generator seed
        config.seed = int(args.seed)
        print("Set random number generator seed to", config.seed)

    if args.plot_labels is not None:
        # specify general plot labels
        config.plot_labels = args_to_dict(args.plot_labels, config.PlotLabels,
                                          config.plot_labels)
        print("Set plot labels to {}".format(config.plot_labels))

    if args.theme is not None:
        # specify themes, currently applied to Matplotlib elements
        theme_names = []
        for theme in args.theme:
            # add theme enum if found
            theme_enum = libmag.get_enum(theme, config.Themes)
            if theme_enum:
                config.rc_params.append(theme_enum)
                theme_names.append(theme_enum.name)
        print("Set to use themes to {}".format(theme_names))

    # prep filename
    filename_base = None
    if config.filename:
        filename_base = importer.filename_to_base(config.filename,
                                                  config.series)

    if not skip_dbs:
        setup_dbs(filename_base, args.db, args.truth_db)

    # set multiprocessing start method
    chunking.set_mp_start_method()

    # POST-ARGUMENT PARSING

    if process_args_only:
        return

    # if command-line driven task specified, start task and shut down
    if config.register_type:
        register.main()
    elif config.notify_url:
        notify.main()
    elif config.plot_2d_type:
        plot_2d.main()
    elif config.df_task:
        df_io.main()
    elif config.grid_search_profile:
        _grid_search(series_list)
    elif config.ec2_list or config.ec2_start or config.ec2_terminate:
        # defer importing AWS module to avoid making its dependencies
        # required for MagellanMapper
        from magmap.cloud import aws
        aws.main()
    else:
        # set up image and perform any whole image processing tasks;
        # do not shut down if not a command-line proc task
        _process_files(series_list)
        if proc_type is None or proc_type is config.ProcessTypes.LOAD:
            return
    shutdown()
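
The coordinate and slice parsing conventions above can be illustrated with a
minimal, self-contained sketch. Here, parse_coords_sketch and
parse_slice_sketch are hypothetical stand-ins written only to demonstrate the
x,y,z to z,y,x reversal and the "none"-to-None mapping; they are not the
actual MagellanMapper implementations of _parse_coords or the slice handling.

from typing import List, Optional, Sequence, Tuple


def parse_coords_sketch(
        coords: Sequence[str], rev: bool = False) -> List[Tuple[int, ...]]:
    # split each "x,y,z" string into ints, reversing to z,y,x when rev=True
    parsed = []
    for coord in coords:
        vals = tuple(int(i) for i in coord.split(","))
        parsed.append(vals[::-1] if rev else vals)
    return parsed


def parse_slice_sketch(arg: str) -> List[Optional[int]]:
    # mirror the slice built-in's argument order, mapping "none" to None
    return [None if val.lower() == "none" else int(val)
            for val in arg.split(",")]


print(parse_coords_sketch(["30,50,205"], rev=True))  # [(205, 50, 30)]
print(parse_slice_sketch("2,8,none"))  # [2, 8, None]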