예제 #1
0
def write_tif(image5d: np.ndarray, path: Union[str, pathlib.Path],
              **kwargs: Any):
    """Write a NumPy array to TIF files.
    
    Each channel will be exported to a separate file.
    
    Args:
        image5d: NumPy array in ``t, z, y, x, c`` dimension order.
        path: Base output path. If ``image5d`` has multiple channels, they
            will be exported to files with ``_ch_<n>`` appended just before
            the extension.
        kwargs: Arguments passed to :meth:`tifffile.imwrite`.

    """
    nchls = get_num_channels(image5d)
    for i in range(nchls):
        # export the given channel to a separate file, adding the channel to
        # the filename if multiple channels exist; arrays of <= 4 dimensions
        # are assumed to lack a channel axis and are written as-is
        img_chl = image5d if image5d.ndim <= 4 else image5d[..., i]
        out_path = pathlib.Path(
            libmag.make_out_path(
                f"{path}{f'_ch_{i}' if nchls > 1 else ''}.tif",
                combine_prefix=True)).resolve()
        # create all missing ancestor directories; `parents=True` avoids a
        # FileNotFoundError when more than one level is missing
        out_path.parent.mkdir(parents=True, exist_ok=True)
        libmag.backup_file(out_path)

        if kwargs.get("imagej"):
            # ImageJ format assumes dimension order of TZCYXS; insert a
            # singleton channel axis to convert t,z,y,x -> t,z,c,y,x
            img_chl = img_chl[:, :, np.newaxis]

        # write to TIF
        _logger.info("Exporting image of shape %s to '%s'", img_chl.shape,
                     out_path)
        tifffile.imwrite(out_path, img_chl, photometric="minisblack", **kwargs)
예제 #2
0
def _create_db(path):
    """Create the database, including initial schema insertion.
    
    Any existing file at ``path`` is backed up (not overwritten in place)
    before the new database is created, so no exception is raised for an
    existing file.
    
    Args:
        path (str): Path to the database file to create.
    
    Returns:
        Tuple of the open database connection and its cursor.
    """
    # back up any existing database file before connecting, since
    # sqlite3.connect would otherwise reuse the old file
    if os.path.exists(path):
        libmag.backup_file(path)
    conn = sqlite3.connect(path)
    # allow accessing query results by column name
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    
    # create tables
    _create_table_about(cur)
    _create_table_experiments(cur)
    _create_table_rois(cur)
    _create_table_blobs(cur)
    _create_table_blob_matches(cur)
    
    # store DB version information
    insert_about(conn, cur, DB_VERSION, datetime.datetime.now())
    
    conn.commit()
    print("created db at {}".format(path))
    return conn, cur
예제 #3
0
def data_frames_to_csv(data_frames, path=None, sort_cols=None, show=None):
    """Combine and export multiple data frames to CSV file.
    
    Args:
        data_frames: List of data frames to concatenate, or a single 
            ``DataFrame``.
        path: Output path; defaults to None, in which case the data frame 
            will not be saved.
        sort_cols: Column as a string or list of columns by which to sort; 
            defaults to None for no sorting.
        show: True or " " to print the data frame with a space-separated 
            table, or can provide an alternate separator. Defaults to None 
            to not print the data frame.
    
    Returns:
        The combined data frame.
    """
    ext = ".csv"
    if path:
        if not path.endswith(ext): path += ext
        libmag.backup_file(path)
    combined = data_frames
    if not isinstance(data_frames, pd.DataFrame):
        # concatenate multiple data frames into one
        combined = pd.concat(combined)
    if sort_cols is not None:
        combined = combined.sort_values(sort_cols)
    if path:
        # save only when an output path was given; previously to_csv was
        # called unconditionally, wastefully building an unused CSV string
        # when path was None
        combined.to_csv(path, index=False, na_rep="NaN")
    if show is not None:
        print_data_frame(combined, show)
    if path:
        print("exported volume data per sample to CSV file: \"{}\""
              .format(path))
    return combined
예제 #4
0
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image display in ms. If None, the delay
            will default to 100ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    # validate input before any filesystem side effects; previously the
    # output file was backed up even when there was nothing to animate
    if not (plotted_imgs and len(plotted_imgs[0]) > 0):
        libmag.warn("No images available to animate")
        return
    fig = plotted_imgs[0][0].figure

    # set up animation output path and time interval
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100

    # WORKAROUND: FFMpeg may give a "height not divisible by 2" error, fixed
    # by padding with a pixel
    # TODO: check if needed for width
    # TODO: account for difference in FFMpeg height and fig height
    for fn, size in {
            # fig.set_figwidth: fig.get_figwidth(),
            fig.set_figheight:
            fig.get_figheight()
    }.items():
        if size * fig.dpi % 2 != 0:
            fn(size + 1. / fig.dpi)
            print("Padded size with", fn, fig.get_figwidth(), "to new size of",
                  fig.get_figheight())

    # generate and save animation
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
예제 #5
0
def save_fig(path, ext=None, modifier="", fig=None):
    """Save figure with support for backup and alternative file formats.
    
    Dots per inch is set by :attr:`config.plot_labels[config.PlotLabels.DPI]`.
    Backs up any existing file before saving. If the found extension is
    not for a supported format for the figure's backend, the figure is not
    saved.

    Args:
        path (str): Base path to use.
        ext (str): File format extension for saving, without period. Defaults
            to None to use the extension in ``path`` if available, or ``png``
            if ``path`` does not have an extension. If extension is in
            :const:`config.FORMATS_3D`, the figure will not be saved.
        modifier (str): Modifier string to append before the extension;
            defaults to an empty string.
        fig (:obj:`matplotlib.figure.Figure`): Figure; defaults to None
            to use the current figure.
    
    Returns:
        str: The output path, or None if the file was not saved.
    
    """
    if fig is None:
        # default to using the current figure
        fig = plt.gcf()
    
    if ext in config.FORMATS_3D:
        # `Logger.warn` is a deprecated alias; use `warning`
        _logger.warning(
            f"Extension '{ext}' is a 3D type, will skip saving 2D figure")
        return None
    
    # set up output path and backup any existing file
    if ext is None:
        # extract extension from path if not given directly, defaulting to PNG
        ext = os.path.splitext(path)[1]
        ext = ext[1:] if ext else config.DEFAULT_SAVEFIG
    if ext not in fig.canvas.get_supported_filetypes().keys():
        # avoid saving if the figure backend does not support the output format
        _logger.warning(
            f"Figure for '{path}' not saved as '{ext}' is not a recognized "
            f"save extension")
        return None
    
    # backup any existing file
    plot_path = "{}{}.{}".format(os.path.splitext(path)[0], modifier, ext)
    libmag.backup_file(plot_path)
    
    # save the current or given figure with config DPI
    dpi = config.plot_labels[config.PlotLabels.DPI]
    fig.savefig(plot_path, dpi=dpi)
    _logger.info(f"Exported figure to {plot_path}")
    return plot_path
예제 #6
0
def data_frames_to_csv(data_frames: List[pd.DataFrame],
                       path: str = None,
                       sort_cols: Optional[Union[str, List[str]]] = None,
                       show: Optional[Union[str, bool]] = None,
                       index: bool = False):
    """Concatenate data frames and optionally export them to a CSV file.
    
    Args:
        data_frames: Single ``DataFrame`` or list of data frames to combine.
        path: Output path; defaults to None to skip saving.
        sort_cols: Column(s) by which to sort; defaults to None for no sorting.
        show: True or " " to print the data frame as a space-separated table,
            or an alternate separator string. Defaults to None to not print.
        index: True to include the index; defaults to False.
    
    Returns:
        The combined data frame.
    """
    if path:
        # normalize extension, make parent directories, and back up any
        # existing output file
        if not path.endswith(".csv"):
            path += ".csv"
        parent = os.path.dirname(path)
        if parent and not os.path.exists(parent):
            os.makedirs(parent)
        libmag.backup_file(path)
    
    if isinstance(data_frames, pd.DataFrame):
        combined = data_frames
    else:
        # concatenate the given sequence of data frames
        combined = pd.concat(data_frames)
    if sort_cols is not None:
        combined = combined.sort_values(sort_cols)
    
    if path:
        # write the combined frame to file
        combined.to_csv(path, index=index, na_rep="NaN")
    if show is not None:
        # print to console
        print_data_frame(combined, show)
    if path:
        # show the exported data path
        _logger.info("Exported volume data per sample to CSV file: \"%s\"",
                     path)
    return combined
예제 #7
0
def merge_excels(paths, out_path, names=None):
    """Merge Excel files into separate sheets of a single Excel output file.

    Args:
        paths (List[str]): Sequence of paths to Excel files to load.
        out_path (str): Path to output file.
        names (List[str]): Sequence of sheet names corresponding to ``paths``.
            If None, the filenames without extensions in ``paths`` will be
            used.
    """
    # default sheet names to the input filenames without extensions
    sheet_names = names if names else [
        libmag.get_filename_without_ext(p) for p in paths]
    libmag.backup_file(out_path)
    with pd.ExcelWriter(out_path) as writer:
        for in_path, sheet in zip(paths, sheet_names):
            # TODO: styling appears to be lost during the read step
            frame = pd.read_excel(in_path, index_col=0, engine="openpyxl")
            frame.to_excel(writer, sheet_name=sheet, index=False)
예제 #8
0
def animate_imgs(base_path, plotted_imgs, delay, ext=None, suffix=None):
    """Export to an animated image.
    
    Defaults to an animated GIF unless ``ext`` specifies otherwise.
    Requires ``FFMpeg`` for MP4 file format exports and ``ImageMagick`` for
    all other types of exports.
    
    Args:
        base_path (str): String from which an output path will be constructed.
        plotted_imgs (List[:obj:`matplotlib.image.AxesImage`]): Sequence of
            images to include in the animation.
        delay (int): Delay between image display in ms. If None, the delay
            will default to 100ms.
        ext (str): Extension to use when saving, without the period. Defaults
            to None, in which case "gif" will be used.
        suffix (str): String to append to output path before extension;
            defaults to None to ignore.

    """
    # validate input before any filesystem side effects; previously the
    # output file was backed up even when there was nothing to animate
    if not (plotted_imgs and len(plotted_imgs[0]) > 0):
        libmag.warn("No images available to animate")
        return
    fig = plotted_imgs[0][0].figure

    # set up animation output path and time interval
    if ext is None: ext = "gif"
    out_path = libmag.combine_paths(base_path, "animated", ext=ext)
    if suffix: out_path = libmag.insert_before_ext(out_path, suffix, "_")
    libmag.backup_file(out_path)
    if delay is None:
        delay = 100

    # generate and save animation
    anim = animation.ArtistAnimation(fig,
                                     plotted_imgs,
                                     interval=delay,
                                     repeat_delay=0,
                                     blit=False)
    try:
        writer = "ffmpeg" if ext == "mp4" else "imagemagick"
        anim.save(out_path, writer=writer)
        print("saved animation file to {}".format(out_path))
    except ValueError as e:
        print(e)
        libmag.warn("No animation writer available for Matplotlib")
예제 #9
0
def save_fig(path, ext=None, modifier="", fig=None):
    """Save figure, swapping in the given extension for the extension
    in the given path.
    
    Dots per inch is set by :attr:`config.plot_labels[config.PlotLabels.DPI]`.
    Backs up any existing file before saving.

    Args:
        path (str): Base path to use.
        ext (str): File format extension for saving, without period. Defaults
            to None to use the extension in ``path`` if available, or ``png``
            ``path`` does not have an extension. If extension is in
            :const:`config.FORMATS_3D`, the figure will not be saved.
        modifier (str): Modifier string to append before the extension;
            defaults to an empty string.
        fig (:obj:`matplotlib.figure.Figure`): Figure; defaults to None
            to use the current figure.
    
    """
    # skip 3D formats, which cannot be saved as 2D figures
    if ext in config.FORMATS_3D:
        print(
            "Extension \"{}\" is a 3D type, will skip saving 2D figure".format(
                ext))
        return

    if ext is None:
        # fall back to the path's own extension, then to PNG
        path_ext = os.path.splitext(path)[1]
        ext = path_ext[1:] if path_ext else "png"
    # build the output path and back up any existing file there
    plot_path = "{}{}.{}".format(os.path.splitext(path)[0], modifier, ext)
    libmag.backup_file(plot_path)

    # save via pyplot for the current figure, or the given figure directly,
    # using the configured DPI
    dpi = config.plot_labels[config.PlotLabels.DPI]
    if fig is None:
        plt.savefig(plot_path, dpi=dpi)
    else:
        fig.savefig(plot_path, dpi=dpi)
    print("exported figure to", plot_path)
예제 #10
0
def process_cli_args():
    """Parse command-line arguments.
    
    Typically stores values as :mod:`magmap.settings.config` attributes.
    
    """
    parser = argparse.ArgumentParser(
        description="Setup environment for MagellanMapper")
    parser.add_argument("--version",
                        action="store_true",
                        help="Show version information and exit")

    # image specification arguments

    # image path(s) specified as an optional argument; takes precedence
    # over positional argument
    parser.add_argument(
        "--img",
        nargs="*",
        default=None,
        help="Main image path(s); after import, the filename is often "
        "given as the original name without its extension")
    # alternatively specified as the first and only positional parameter
    # with as many arguments as desired
    parser.add_argument(
        "img_paths",
        nargs="*",
        default=None,
        help="Main image path(s); can also be given as --img, which takes "
        "precedence over this argument")

    parser.add_argument(
        "--meta",
        nargs="*",
        help="Metadata path(s), which can be given as multiple files "
        "corresponding to each image")
    parser.add_argument(
        "--prefix",
        nargs="*",
        type=str,
        help="Path prefix(es), typically used as the base path for file output"
    )
    parser.add_argument(
        "--prefix_out",
        nargs="*",
        type=str,
        help="Path prefix(es), typically used as the base path for file output "
        "when --prefix modifies the input path")
    parser.add_argument(
        "--suffix",
        nargs="*",
        type=str,
        help="Path suffix(es), typically inserted just before the extension")
    parser.add_argument("--channel", nargs="*", type=int, help="Channel index")
    parser.add_argument("--series", help="Series index")
    parser.add_argument("--subimg_offset",
                        nargs="*",
                        help="Sub-image offset in x,y,z")
    parser.add_argument("--subimg_size",
                        nargs="*",
                        help="Sub-image size in x,y,z")
    parser.add_argument("--offset", nargs="*", help="ROI offset in x,y,z")
    parser.add_argument("--size", nargs="*", help="ROI size in x,y,z")
    parser.add_argument("--db", help="Database path")
    parser.add_argument(
        "--cpus",
        help="Maximum number of CPUs/processes to use for multiprocessing "
        "tasks. Use \"none\" or 0 to auto-detect this number (default).")
    parser.add_argument(
        "--load",
        nargs="*",
        help="Load associated data files; see config.LoadData for settings")

    # task arguments
    parser.add_argument(
        "--proc",
        nargs="*",
        help=_get_args_dict_help(
            "Image processing mode; see config.ProcessTypes for keys "
            "and config.PreProcessKeys for PREPROCESS values",
            config.ProcessTypes))
    parser.add_argument("--register",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.RegisterTypes),
                        help="Image registration task")
    parser.add_argument("--df",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.DFTasks),
                        help="Data frame task")
    parser.add_argument("--plot_2d",
                        type=str.lower,
                        choices=libmag.enum_names_aslist(config.Plot2DTypes),
                        help="2D plot task; see config.Plot2DTypes")
    parser.add_argument("--ec2_start",
                        nargs="*",
                        help="AWS EC2 instance start")
    parser.add_argument("--ec2_list", nargs="*", help="AWS EC2 instance list")
    parser.add_argument("--ec2_terminate",
                        nargs="*",
                        help="AWS EC2 instance termination")
    parser.add_argument(
        "--notify",
        nargs="*",
        help="Notification message URL, message, and attachment strings")

    # profile arguments
    parser.add_argument(
        "--roi_profile",
        nargs="*",
        help="ROI profile, which can be separated by underscores "
        "for multiple profiles and given as paths to custom profiles "
        "in YAML format. Multiple profile groups can be given, which "
        "will each be applied to the corresponding channel. See "
        "docs/settings.md for more details.")
    parser.add_argument(
        "--atlas_profile",
        help="Atlas profile, which can be separated by underscores "
        "for multiple profiles and given as paths to custom profiles "
        "in YAML format. See docs/settings.md for more details.")
    parser.add_argument(
        "--grid_search",
        help="Grid search hyperparameter tuning profile(s), which can be "
        "separated by underscores for multiple profiles and given as "
        "paths to custom profiles in YAML format. See docs/settings.md "
        "for more details.")
    parser.add_argument(
        "--theme",
        nargs="*",
        type=str.lower,
        choices=libmag.enum_names_aslist(config.Themes),
        help="UI theme, which can be given as multiple themes to apply "
        "on top of one another")

    # grouped arguments
    parser.add_argument(
        "--truth_db",
        nargs="*",
        help="Truth database; see config.TruthDB for settings and "
        "config.TruthDBModes for modes")
    parser.add_argument("--labels",
                        nargs="*",
                        help=_get_args_dict_help(
                            "Atlas labels; see config.AtlasLabels.",
                            config.AtlasLabels))
    parser.add_argument("--transform",
                        nargs="*",
                        help=_get_args_dict_help(
                            "Image transformations; see config.Transforms.",
                            config.Transforms))
    parser.add_argument(
        "--reg_suffixes",
        nargs="*",
        help=_get_args_dict_help(
            "Registered image suffixes; see config.RegSuffixes for keys "
            "and config.RegNames for values", config.RegSuffixes))
    parser.add_argument(
        "--plot_labels",
        nargs="*",
        help=_get_args_dict_help(
            "Plot label customizations; see config.PlotLabels ",
            config.PlotLabels))
    parser.add_argument(
        "--set_meta",
        nargs="*",
        help="Set metadata values; see config.MetaKeys for settings")

    # image and figure display arguments
    parser.add_argument("--plane",
                        type=str.lower,
                        choices=config.PLANE,
                        help="Planar orientation")
    parser.add_argument(
        "--show",
        nargs="?",
        const="1",
        help="If applicable, show images after completing the given task")
    parser.add_argument(
        "--alphas",
        help="Alpha opacity levels, which can be comma-delimited for "
        "multichannel images")
    parser.add_argument(
        "--vmin",
        help="Minimum intensity levels, which can be comma-delimited "
        "for multichannel images")
    parser.add_argument(
        "--vmax",
        help="Maximum intensity levels, which can be comma-delimited "
        "for multichannel images")
    parser.add_argument("--seed", help="Random number generator seed")

    # export arguments
    parser.add_argument("--save_subimg",
                        action="store_true",
                        help="Save sub-image as separate file")
    parser.add_argument("--slice", help="Slice given as start,stop,step")
    parser.add_argument("--delay", help="Animation delay in ms")
    parser.add_argument("--savefig", help="Extension for saved figures")
    parser.add_argument("--groups",
                        nargs="*",
                        help="Group values corresponding to each image")
    parser.add_argument(
        "-v",
        "--verbose",
        nargs="*",
        help=_get_args_dict_help(
            "Verbose output to assist with debugging; see config.Verbosity.",
            config.Verbosity))

    # only parse recognized arguments to avoid error for unrecognized ones
    args, args_unknown = parser.parse_known_args()

    # set up application directories
    user_dir = config.user_app_dirs.user_data_dir
    if not os.path.isdir(user_dir):
        # make application data directory
        if os.path.exists(user_dir):
            # backup any non-directory file
            libmag.backup_file(user_dir)
        os.makedirs(user_dir)

    if args.verbose is not None:
        # verbose mode and logging setup
        config.verbose = True
        config.verbosity = args_to_dict(args.verbose, config.Verbosity,
                                        config.verbosity)
        if config.verbosity[config.Verbosity.LEVEL] is None:
            # default to debug mode if any verbose flag is set without level
            config.verbosity[config.Verbosity.LEVEL] = logging.DEBUG
        logs.update_log_level(config.logger,
                              config.verbosity[config.Verbosity.LEVEL])

        # print longer Numpy arrays for debugging
        np.set_printoptions(linewidth=200, threshold=10000)
        _logger.info("Set verbose to %s", config.verbosity)

    # set up logging to given file unless explicitly given an empty string
    log_path = config.verbosity[config.Verbosity.LOG_PATH]
    if log_path != "":
        if log_path is None:
            log_path = os.path.join(config.user_app_dirs.user_data_dir,
                                    "out.log")
        # log to file
        config.log_path = logs.add_file_handler(config.logger, log_path)

    # redirect standard out/error to logging
    sys.stdout = logs.LogWriter(config.logger.info)
    sys.stderr = logs.LogWriter(config.logger.error)

    # load preferences file
    config.prefs = prefs_prof.PrefsProfile()
    config.prefs.add_profiles(str(config.PREFS_PATH))

    if args.version:
        # print version info and exit
        _logger.info(f"{config.APP_NAME}-{libmag.get_version(True)}")
        shutdown()

    # log the app launch path
    path_launch = (sys._MEIPASS if getattr(sys, "frozen", False)
                   and hasattr(sys, "_MEIPASS") else sys.argv[0])
    _logger.info(f"Launched MagellanMapper from {path_launch}")

    if args.img is not None or args.img_paths:
        # set image file path and convert to basis for additional paths
        config.filenames = args.img if args.img else args.img_paths
        config.filename = config.filenames[0]
        print("Set filenames to {}, current filename {}".format(
            config.filenames, config.filename))

    if args.meta is not None:
        # set metadata paths
        config.metadata_paths = args.meta
        print("Set metadata paths to", config.metadata_paths)
        config.metadatas = []
        for path in config.metadata_paths:
            # load metadata to dictionary
            md, _ = importer.load_metadata(path, assign=False)
            config.metadatas.append(md)

    if args.channel is not None:
        # set the channels
        config.channel = args.channel
        print("Set channel to {}".format(config.channel))

    config.series_list = [config.series]  # list of series
    if args.series is not None:
        series_split = args.series.split(",")
        config.series_list = []
        for ser in series_split:
            ser_split = ser.split("-")
            if len(ser_split) > 1:
                ser_range = np.arange(int(ser_split[0]), int(ser_split[1]) + 1)
                config.series_list.extend(ser_range.tolist())
            else:
                config.series_list.append(int(ser_split[0]))
        config.series = config.series_list[0]
        print("Set to series_list to {}, current series {}".format(
            config.series_list, config.series))

    if args.savefig is not None:
        # save figure with file type of this extension; remove leading period
        config.savefig = _parse_none(args.savefig.lstrip("."))
        print("Set savefig extension to {}".format(config.savefig))

    # parse sub-image offsets and sizes;
    # expects x,y,z input but stores as z,y,x by convention
    if args.subimg_offset is not None:
        config.subimg_offsets = _parse_coords(args.subimg_offset, True)
        print("Set sub-image offsets to {} (z,y,x)".format(
            config.subimg_offsets))
    if args.subimg_size is not None:
        config.subimg_sizes = _parse_coords(args.subimg_size, True)
        print("Set sub-image sizes to {} (z,y,x)".format(config.subimg_sizes))

    # parse ROI offsets and sizes, which are relative to any sub-image;
    # expects x,y,z input and output
    if args.offset is not None:
        config.roi_offsets = _parse_coords(args.offset)
        if config.roi_offsets:
            config.roi_offset = config.roi_offsets[0]
        print("Set ROI offsets to {}, current offset {} (x,y,z)".format(
            config.roi_offsets, config.roi_offset))
    if args.size is not None:
        config.roi_sizes = _parse_coords(args.size)
        if config.roi_sizes:
            config.roi_size = config.roi_sizes[0]
        print("Set ROI sizes to {}, current size {} (x,y,z)".format(
            config.roi_sizes, config.roi_size))

    if args.cpus is not None:
        # set maximum number of CPUs
        config.cpus = _parse_none(args.cpus.lower(), int)
        print("Set maximum number of CPUs for multiprocessing tasks to",
              config.cpus)

    if args.load is not None:
        # flag loading data sources with default sub-arg indicating that the
        # data should be loaded from a default path; otherwise, load from
        # path given by the sub-arg; change delimiter to allow paths with ","
        config.load_data = args_to_dict(args.load,
                                        config.LoadData,
                                        config.load_data,
                                        sep_vals="|",
                                        default=True)
        print("Set to load the data types: {}".format(config.load_data))

    # set up main processing mode
    if args.proc is not None:
        config.proc_type = args_to_dict(args.proc,
                                        config.ProcessTypes,
                                        config.proc_type,
                                        default=True)
        print("Set main processing tasks to:", config.proc_type)

    if args.set_meta is not None:
        # set individual metadata values, currently used for image import
        # TODO: take precedence over loaded metadata archives
        config.meta_dict = args_to_dict(args.set_meta,
                                        config.MetaKeys,
                                        config.meta_dict,
                                        sep_vals="|")
        print("Set metadata values to {}".format(config.meta_dict))
        res = config.meta_dict[config.MetaKeys.RESOLUTIONS]
        if res:
            # set image resolutions, taken as a single set of x,y,z and
            # converting to a nested list of z,y,x
            res_split = res.split(",")
            if len(res_split) >= 3:
                res_float = tuple(float(i) for i in res_split)[::-1]
                config.resolutions = [res_float]
                print("Set resolutions to {}".format(config.resolutions))
            else:
                res_float = None
                print("Resolution ({}) should be given as 3 values (x,y,z)".
                      format(res))
            # store single set of resolutions, similar to input
            config.meta_dict[config.MetaKeys.RESOLUTIONS] = res_float
        mag = config.meta_dict[config.MetaKeys.MAGNIFICATION]
        if mag:
            # set objective magnification
            config.magnification = mag
            print("Set magnification to {}".format(config.magnification))
        zoom = config.meta_dict[config.MetaKeys.ZOOM]
        if zoom:
            # set objective zoom
            config.zoom = zoom
            print("Set zoom to {}".format(config.zoom))
        shape = config.meta_dict[config.MetaKeys.SHAPE]
        if shape:
            # parse shape, storing only in dict
            config.meta_dict[config.MetaKeys.SHAPE] = [
                int(n) for n in shape.split(",")[::-1]
            ]

    # set up ROI and register profiles
    setup_roi_profiles(args.roi_profile)
    setup_atlas_profiles(args.atlas_profile)
    setup_grid_search_profiles(args.grid_search)

    if args.plane is not None:
        config.plane = args.plane
        print("Set plane to {}".format(config.plane))
    if args.save_subimg:
        config.save_subimg = args.save_subimg
        print("Set to save the sub-image")

    if args.labels:
        # set up atlas labels
        setup_labels(args.labels)

    if args.transform is not None:
        # image transformations such as flipping, rotation
        config.transform = args_to_dict(args.transform, config.Transforms,
                                        config.transform)
        print("Set transformations to {}".format(config.transform))

    if args.register:
        # register type to process in register module
        config.register_type = args.register
        print("Set register type to {}".format(config.register_type))

    if args.df:
        # data frame processing task
        config.df_task = args.df
        print("Set data frame processing task to {}".format(config.df_task))

    if args.plot_2d:
        # 2D plot type to process in plot_2d module
        config.plot_2d_type = args.plot_2d
        print("Set plot_2d type to {}".format(config.plot_2d_type))

    if args.slice:
        # specify a generic slice by command-line, assuming same order
        # of arguments as for slice built-in function and interpreting
        # "none" string as None
        config.slice_vals = args.slice.split(",")
        config.slice_vals = [
            _parse_none(val.lower(), int) for val in config.slice_vals
        ]
        print("Set slice values to {}".format(config.slice_vals))
    if args.delay:
        config.delay = int(args.delay)
        print("Set delay to {}".format(config.delay))

    if args.show:
        # show images after task is performed, if supported
        config.show = _is_arg_true(args.show)
        print("Set show to {}".format(config.show))

    if args.groups:
        config.groups = args.groups
        print("Set groups to {}".format(config.groups))
    if args.ec2_start is not None:
        # start EC2 instances
        config.ec2_start = args_with_dict(args.ec2_start)
        print("Set ec2 start to {}".format(config.ec2_start))
    if args.ec2_list:
        # list EC2 instances
        config.ec2_list = args_with_dict(args.ec2_list)
        print("Set ec2 list to {}".format(config.ec2_list))
    if args.ec2_terminate:
        config.ec2_terminate = args.ec2_terminate
        print("Set ec2 terminate to {}".format(config.ec2_terminate))
    if args.notify:
        notify_len = len(args.notify)
        if notify_len > 0:
            config.notify_url = args.notify[0]
            print("Set notification URL to {}".format(config.notify_url))
        if notify_len > 1:
            config.notify_msg = args.notify[1]
            print("Set notification message to {}".format(config.notify_msg))
        if notify_len > 2:
            config.notify_attach = args.notify[2]
            print("Set notification attachment path to {}".format(
                config.notify_attach))

    if args.prefix is not None:
        # path input/output prefixes
        config.prefixes = args.prefix
        config.prefix = config.prefixes[0]
        print("Set path prefixes to {}".format(config.prefixes))

    if args.prefix_out is not None:
        # path output prefixes
        config.prefixes_out = args.prefix_out
        config.prefix_out = config.prefixes_out[0]
        print("Set path prefixes to {}".format(config.prefixes_out))

    if args.suffix is not None:
        # path suffixes
        config.suffixes = args.suffix
        config.suffix = config.suffixes[0]
        print("Set path suffixes to {}".format(config.suffixes))

    if args.alphas:
        # specify alpha levels
        config.alphas = [float(val) for val in args.alphas.split(",")]
        print("Set alphas to", config.alphas)

    if args.vmin:
        # specify vmin levels
        config.vmins = [libmag.get_int(val) for val in args.vmin.split(",")]
        print("Set vmins to", config.vmins)

    if args.vmax:
        # specify vmax levels and copy to vmax overview used for plotting
        # and updated for normalization
        config.vmaxs = [libmag.get_int(val) for val in args.vmax.split(",")]
        config.vmax_overview = list(config.vmaxs)
        print("Set vmaxs to", config.vmaxs)

    if args.reg_suffixes is not None:
        # specify suffixes of registered images to load
        config.reg_suffixes = args_to_dict(args.reg_suffixes,
                                           config.RegSuffixes,
                                           config.reg_suffixes)
        print("Set registered image suffixes to {}".format(
            config.reg_suffixes))

    if args.seed:
        # specify random number generator seed
        config.seed = int(args.seed)
        print("Set random number generator seed to", config.seed)

    if args.plot_labels is not None:
        # specify general plot labels
        config.plot_labels = args_to_dict(args.plot_labels, config.PlotLabels,
                                          config.plot_labels)
        print("Set plot labels to {}".format(config.plot_labels))

    if args.theme is not None:
        # specify themes, currently applied to Matplotlib elements
        theme_names = []
        for theme in args.theme:
            # add theme enum if found
            theme_enum = libmag.get_enum(theme, config.Themes)
            if theme_enum:
                config.rc_params.append(theme_enum)
                theme_names.append(theme_enum.name)
        print("Set to use themes to {}".format(theme_names))
    # set up Matplotlib styles/themes
    plot_2d.setup_style()

    if args.db:
        # set main database path to user arg
        config.db_path = args.db
        print("Set database name to {}".format(config.db_path))
    else:
        # set default path
        config.db_path = os.path.join(user_dir, config.db_path)

    if args.truth_db:
        # set settings for separate database of "truth blobs"
        config.truth_db_params = args_to_dict(args.truth_db,
                                              config.TruthDB,
                                              config.truth_db_params,
                                              sep_vals="|")
        mode = config.truth_db_params[config.TruthDB.MODE]
        config.truth_db_mode = libmag.get_enum(mode, config.TruthDBModes)
        libmag.printv(config.truth_db_params)
        print("Mapped \"{}\" truth_db mode to {}".format(
            mode, config.truth_db_mode))

    # notify user of full args list, including unrecognized args
    _logger.debug(f"All command-line arguments: {sys.argv}")
    if args_unknown:
        _logger.info(
            f"The following command-line arguments were unrecognized and "
            f"ignored: {args_unknown}")
예제 #11
0
def process_file(
    path: str,
    proc_type: Enum,
    proc_val: Optional[Any] = None,
    series: Optional[int] = None,
    subimg_offset: Optional[List[int]] = None,
    subimg_size: Optional[List[int]] = None,
    roi_offset: Optional[List[int]] = None,
    roi_size: Optional[List[int]] = None
) -> Tuple[Optional[Any], Optional[str]]:
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path: Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_type: Processing type, which should be a one of
            :class:`config.ProcessTypes`.
        proc_val: Processing value associated with ``proc_type``; defaults to
            None.
        series: Image series number; defaults to None.
        subimg_offset: Sub-image offset as (z,y,x) to load; defaults to None.
        subimg_size: Sub-image size as (z,y,x) to load; defaults to None.
        roi_offset: Region of interest offset as (x, y, z) to process;
            defaults to None.
        roi_size: Region of interest size of region to process, given as
            ``(x, y, z)``; defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)

    # visual separator before task output
    print("{}\n".format("-" * 80))
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing; BUGFIX: this branch previously
        # duplicated the LOAD check above and was unreachable since that
        # branch returns
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_path = naming.make_subimage_name(filename_base, subimg_offset,
                                                subimg_size)
        export_rois.export_rois(db, config.image5d, config.channel,
                                export_path,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(export_path))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)

    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, +/- co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)

    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # colocalize blobs in separate channels by matching blobs
            shape = subimg_size
            if shape is None:
                # get shape from loaded image, falling back to its metadata
                if config.image5d is not None:
                    shape = config.image5d.shape[1:]
                else:
                    shape = config.img5d.meta[config.MetaKeys.SHAPE][1:]
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")

    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.EXPORT_TIF:
        # export the main image as a TIF files for each channel
        np_io.write_tif(config.image5d, config.filename)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, proc_val, config.channel,
                                   out_path)

    return stats, fdbk
예제 #12
0
def process_file(path,
                 proc_mode,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 roi_offset=None,
                 roi_size=None):
    """Processes a single image file non-interactively.

    Assumes that the image has already been set up.
    
    Args:
        path (str): Path to image from which MagellanMapper-style paths will 
            be generated.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        roi_offset (List[int]): Region of interest offset as (x, y, z) to
            process; defaults to None.
        roi_size (List[int]): Region of interest size of region to process,
            given as (x, y, z); defaults to None.
    
    Returns:
        Tuple of stats from processing, or None if no stats, and 
        text feedback from the processing, or None if no feedback.
    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    # map the case-insensitive mode string to its ProcessTypes enum member
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported so does nothing; BUGFIX: this branch previously
        # duplicated the LOAD check above and was unreachable since that
        # branch returns
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give smaller region from which smaller ROIs from the truth DB
        # will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_rois.export_rois(db, config.image5d, config.channel,
                                filename_base,
                                config.plot_labels[config.PlotLabels.PADDING],
                                config.unit_factor, config.truth_db_mode,
                                os.path.basename(config.filename))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize whole large image
        transformer.transpose_img(
            path,
            series,
            plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate animated GIF or extract single plane
        from magmap.io import export_stack
        export_stack.stack_to_img(config.filenames, roi_offset, roi_size,
                                  series, subimg_offset, subimg_size,
                                  proc_type is config.ProcessTypes.ANIMATED,
                                  config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs, filename_base)

    elif proc_type is config.ProcessTypes.DETECT:
        # detect blobs in the full image
        stats, fdbk, segments_all = stack_detect.detect_blobs_large_image(
            filename_base, config.image5d, subimg_offset, subimg_size,
            config.truth_db_mode is config.TruthDBModes.VERIFY,
            not config.grid_search_profile, config.image5d_is_roi)

    elif proc_type is config.ProcessTypes.EXPORT_PLANES:
        # export each plane as a separate image file
        from magmap.io import export_stack
        export_stack.export_planes(config.image5d, config.prefix,
                                   config.savefig, config.channel)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider chunking option for larger images
        profile = config.get_roi_profile(0)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(config.image5d, profile["preprocess"],
                                   config.channel, out_path)

    return stats, fdbk