def process_tasks():
    """Process command-line tasks.

    Perform tasks set by the ``--proc`` parameter or any other entry point,
    such as ``--register`` tasks. Only the first identified task will be
    performed.

    """
    # if a command-line driven task is specified, start the task and shut down
    if config.register_type:
        register.main()
    elif config.notify_url:
        notify.main()
    elif config.plot_2d_type:
        plot_2d.main()
    elif config.df_task:
        df_io.main()
    elif config.grid_search_profile:
        _grid_search(config.series_list)
    elif config.ec2_list or config.ec2_start or config.ec2_terminate:
        # defer importing the AWS module to avoid making its dependencies
        # required for MagellanMapper
        from magmap.cloud import aws
        aws.main()
    else:
        if config.filename:
            for series in config.series_list:
                # process files for each series, typically a tile within a
                # microscopy image set or a single whole image
                filename, offset, size, reg_suffixes = \
                    importer.deconstruct_img_name(config.filename)
                set_subimg, _ = importer.parse_deconstructed_name(
                    filename, offset, size, reg_suffixes)
                if not set_subimg:
                    # sub-image parameters set in the filename take precedence
                    # for the loaded image, but fall back to user-supplied args
                    offset = (config.subimg_offsets[0]
                              if config.subimg_offsets else None)
                    size = (config.subimg_sizes[0]
                            if config.subimg_sizes else None)
                np_io.setup_images(
                    filename, series, offset, size, config.proc_type)
                process_file(
                    filename, config.proc_type, series, offset, size,
                    config.roi_offsets[0] if config.roi_offsets else None,
                    config.roi_sizes[0] if config.roi_sizes else None)
        else:
            print("No image filename set for processing files, skipping")
        proc_type = libmag.get_enum(config.proc_type, config.ProcessTypes)
        if proc_type is None or proc_type is config.ProcessTypes.LOAD:
            # do not shut down since this is not a command-line task, or
            # since files are only being loaded
            return
    shutdown()
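
# The dispatch above is strictly first-match-wins. Below is a standalone
# sketch (not MagellanMapper code; the parameter names are hypothetical
# stand-ins for the config attributes checked above) of the same pattern,
# showing that an earlier task such as --register shadows any later ones.
def _demo_task_dispatch(register_type=None, notify_url=None, df_task=None):
    if register_type:
        return "register"
    elif notify_url:
        return "notify"
    elif df_task:
        return "df"
    return "no command-line task"

# even with a df task set, the register task wins because it is checked first
assert _demo_task_dispatch(register_type="atlas", df_task="merge_csvs") == \
    "register"
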
def preprocess_img(image5d, preprocs, channel, out_path):
    """Pre-process an image in 3D.

    Args:
        image5d (:obj:`np.ndarray`): 5D array in t,z,y,x[,c].
        preprocs (Union[str, list[str]]): Pre-processing tasks that will be
            converted to enums in :class:`config.PreProcessKeys` to perform
            in the order given.
        channel (int): Channel to preprocess, or None for all channels.
        out_path (str): Output base path.

    Returns:
        :obj:`np.ndarray`: The pre-processed image array.

    """
    if preprocs is None:
        print("No preprocessing tasks to perform, skipping")
        return
    if not libmag.is_seq(preprocs):
        preprocs = [preprocs]

    roi = image5d[0]
    for preproc in preprocs:
        # perform global pre-processing task
        task = libmag.get_enum(preproc, config.PreProcessKeys)
        _logger.info("Pre-processing task: %s", task)
        if task is config.PreProcessKeys.SATURATE:
            roi = plot_3d.saturate_roi(roi, channel=channel)
        elif task is config.PreProcessKeys.DENOISE:
            roi = plot_3d.denoise_roi(roi, channel)
        elif task is config.PreProcessKeys.REMAP:
            roi = plot_3d.remap_intensity(roi, channel)
        elif task is config.PreProcessKeys.ROTATE:
            roi = rotate_img(roi)
        else:
            _logger.warning("No preprocessing task found for: %s", preproc)

    # save the pre-processed image to a new file
    image5d = importer.roi_to_image5d(roi)
    importer.save_np_image(image5d, out_path)
    return image5d
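
# Hedged usage sketch for preprocess_img: the array below is synthetic, the
# task strings assume that libmag.get_enum matches PreProcessKeys members
# case-insensitively, and "img_preproc" is a hypothetical output base path.
def _demo_preprocess_img():
    import numpy as np
    # minimal t,z,y,x stack purely for illustration
    image5d = np.random.rand(1, 8, 64, 64).astype(np.float32)
    return preprocess_img(image5d, ["saturate", "denoise"], None,
                          "img_preproc")
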
def setup_images(path=None, series=None, offset=None, size=None,
                 proc_mode=None, allow_import=True):
    """Set up an image and all associated images and metadata.

    Paths for related files such as registered images will generally be
    constructed from ``path``. If :attr:`config.prefix` is set, it will
    be used in place of ``path`` for registered labels.

    Args:
        path (str): Path to image from which MagellanMapper-style paths will
            be generated.
        series (int): Image series number; defaults to None.
        offset (List[int]): Sub-image offset given in z,y,x; defaults to None.
        size (List[int]): Sub-image shape given in z,y,x; defaults to None.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive; defaults to None.
        allow_import (bool): True to allow importing the image if it
            cannot be loaded; defaults to True.

    """
    def add_metadata():
        # override metadata set from command-line metadata args if available
        md = {
            config.MetaKeys.RESOLUTIONS: config.meta_dict[
                config.MetaKeys.RESOLUTIONS],
            config.MetaKeys.MAGNIFICATION: config.meta_dict[
                config.MetaKeys.MAGNIFICATION],
            config.MetaKeys.ZOOM: config.meta_dict[config.MetaKeys.ZOOM],
            config.MetaKeys.SHAPE: config.meta_dict[config.MetaKeys.SHAPE],
            config.MetaKeys.DTYPE: config.meta_dict[config.MetaKeys.DTYPE],
        }
        for key, val in md.items():
            if val is not None:
                # explicitly set metadata takes precedence over extracted vals
                import_md[key] = val

    # LOAD MAIN IMAGE

    # reset image5d
    config.image5d = None
    config.image5d_is_roi = False
    load_subimage = offset is not None and size is not None
    config.resolutions = None

    # reset label images
    config.labels_img = None
    config.borders_img = None

    filename_base = importer.filename_to_base(path, series)
    subimg_base = None

    if load_subimage and not config.save_subimg:
        # load a saved sub-image file if available and not set to save one
        subimg_base = stack_detect.make_subimage_name(
            filename_base, offset, size)
        filename_subimg = libmag.combine_paths(
            subimg_base, config.SUFFIX_SUBIMG)

        try:
            # load the sub-image if available
            config.image5d = np.load(filename_subimg, mmap_mode="r")
            config.image5d = importer.roi_to_image5d(config.image5d)
            config.image5d_is_roi = True
            config.image5d_io = config.LoadIO.NP
            print("Loaded sub-image from {} with shape {}".format(
                filename_subimg, config.image5d.shape))

            # after loading the sub-image, load the original image's metadata
            # for essential data such as vmin/vmax; will only warn if it
            # fails to load since the metadata could be specified elsewhere
            _, orig_info = importer.make_filenames(path, series)
            print("load original image metadata from:", orig_info)
            importer.load_metadata(orig_info)
        except IOError:
            print("Ignored sub-image file from {} as unable to load".format(
                filename_subimg))

    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)
    if proc_type in (config.ProcessTypes.LOAD,
                     config.ProcessTypes.EXPORT_ROIS,
                     config.ProcessTypes.EXPORT_BLOBS,
                     config.ProcessTypes.DETECT):
        # load a blobs archive
        try:
            if subimg_base:
                try:
                    # load blobs generated from the sub-image
                    config.blobs = load_blobs(subimg_base)
                except (FileNotFoundError, KeyError):
                    # fall back to loading the full image blobs and getting
                    # a subset, shifting them relative to the sub-image offset
                    print("Unable to load blobs file based on {}, will try "
                          "from {}".format(subimg_base, filename_base))
                    config.blobs = load_blobs(filename_base)
                    config.blobs, _ = detector.get_blobs_in_roi(
                        config.blobs, offset, size, reverse=False)
                    detector.shift_blob_rel_coords(
                        config.blobs, np.multiply(offset, -1))
            else:
                # load the full image blobs
                config.blobs = load_blobs(filename_base)
        except (FileNotFoundError, KeyError) as e2:
            print("Unable to load blobs file")
            if proc_type in (config.ProcessTypes.LOAD,
                             config.ProcessTypes.EXPORT_BLOBS):
                # blobs expected but not found
                raise e2

    if path and config.image5d is None:
        # load or import the main image stack
        print("Loading main image")
        try:
            if path.endswith(sitk_io.EXTS_3D):
                # attempt to load a format supported by SimpleITK,
                # prepending a time axis
                config.image5d = sitk_io.read_sitk_files(path)[None]
                config.image5d_io = config.LoadIO.SITK
            else:
                # load or import from the MagellanMapper Numpy format
                import_only = proc_type is config.ProcessTypes.IMPORT_ONLY
                if not import_only:
                    # load a previously imported image
                    config.image5d = importer.read_file(path, series)
                if allow_import:
                    # re-import over an existing image or import a new image
                    if os.path.isdir(path) and all(
                            [r is None
                             for r in config.reg_suffixes.values()]):
                        # import a directory of single-plane images to a
                        # single stack if no register suffixes are set
                        chls, import_md = importer.setup_import_dir(path)
                        add_metadata()
                        prefix = config.prefix
                        if not prefix:
                            prefix = os.path.join(
                                os.path.dirname(path),
                                importer.DEFAULT_IMG_STACK_NAME)
                        config.image5d = importer.import_planes_to_stack(
                            chls, prefix, import_md)
                    elif import_only or config.image5d is None:
                        # import a multi-plane image
                        chls, import_path = importer.setup_import_multipage(
                            path)
                        prefix = (config.prefix if config.prefix
                                  else import_path)
                        import_md = importer.setup_import_metadata(
                            chls, config.channel, series)
                        add_metadata()
                        config.image5d = importer.import_multiplane_images(
                            chls, prefix, import_md, series,
                            channel=config.channel)
                config.image5d_io = config.LoadIO.NP
        except FileNotFoundError as e:
            print(e)
            print("Could not load {}, will fall back to any associated "
                  "registered image".format(path))

    if config.metadatas and config.metadatas[0]:
        # assign metadata from an alternate file if given to supersede
        # settings for any loaded image5d
        # TODO: access metadata directly from given image5d's dict to allow
        # loading multiple image5d images simultaneously
        importer.assign_metadata(config.metadatas[0])

    # main image is currently required since many parameters depend on it
    atlas_suffix = config.reg_suffixes[config.RegSuffixes.ATLAS]
    if atlas_suffix is None and config.image5d is None:
        # fall back to the atlas if the main image is not already loaded
        atlas_suffix = config.RegNames.IMG_ATLAS.value
        print("main image is not set, falling back to registered "
              "image with suffix", atlas_suffix)
    # use the prefix to get images registered to a different image, eg a
    # downsampled version, or a different version of registered images
    path = config.prefix if config.prefix else path
    if path and atlas_suffix is not None:
        try:
            # will take the place of any previously loaded image5d
            config.image5d = sitk_io.read_sitk_files(
                path, reg_names=atlas_suffix)[None]
            config.image5d_io = config.LoadIO.SITK
        except FileNotFoundError as e:
            print(e)

    annotation_suffix = config.reg_suffixes[config.RegSuffixes.ANNOTATION]
    if annotation_suffix is not None:
        # load the labels image, set up scaling, and load the labels file
        try:
            # TODO: need to support multichannel labels images
            config.labels_img = sitk_io.read_sitk_files(
                path, reg_names=annotation_suffix)
            if config.image5d is not None:
                config.labels_scaling = importer.calc_scaling(
                    config.image5d, config.labels_img)
            if config.load_labels is not None:
                labels_ref = ontology.load_labels_ref(config.load_labels)
                if isinstance(labels_ref, pd.DataFrame):
                    # parse CSV files loaded into a data frame
                    config.labels_ref_lookup = ontology.create_lookup_pd(
                        labels_ref)
                else:
                    # parse a dict from an ABA JSON file
                    config.labels_ref_lookup = (
                        ontology.create_aba_reverse_lookup(labels_ref))
        except FileNotFoundError as e:
            print(e)

    borders_suffix = config.reg_suffixes[config.RegSuffixes.BORDERS]
    if borders_suffix is not None:
        # load the borders image, which can also be another labels image
        try:
            config.borders_img = sitk_io.read_sitk_files(
                path, reg_names=borders_suffix)
        except FileNotFoundError as e:
            print(e)

    if (config.atlas_labels[config.AtlasLabels.ORIG_COLORS]
            and config.load_labels is not None):
        # load the original labels image from the same directory as the
        # ontology file for a consistent ID-color mapping, even if labels
        # are missing
        try:
            config.labels_img_orig = sitk_io.load_registered_img(
                config.load_labels, config.RegNames.IMG_LABELS.value)
        except FileNotFoundError as e:
            print(e)
            libmag.warn(
                "could not load original labels image; colors may differ "
                "from it")

    load_rot90 = config.roi_profile["load_rot90"]
    if load_rot90 and config.image5d is not None:
        # rotate the main image by the specified number of x90deg turns
        # after loading, since images output by the deep learning toolkit
        # need to be rotated
        config.image5d = np.rot90(config.image5d, load_rot90, (2, 3))

    if (config.image5d is not None and load_subimage
            and not config.image5d_is_roi):
        # crop the full image to the bounds of the sub-image
        config.image5d = plot_3d.prepare_subimg(
            config.image5d, size, offset)[None]
        config.image5d_is_roi = True

    # add any additional image5d thresholds for multichannel images, such
    # as those loaded without metadata for these settings
    colormaps.setup_cmaps()
    num_channels = get_num_channels(config.image5d)
    config.near_max = libmag.pad_seq(config.near_max, num_channels, -1)
    config.near_min = libmag.pad_seq(config.near_min, num_channels, 0)
    config.vmax_overview = libmag.pad_seq(
        config.vmax_overview, num_channels)
    colormaps.setup_colormaps(num_channels)
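
# Hedged usage sketch for setup_images: "img.czi" is a hypothetical path to
# a previously imported image, the offset/size are z,y,x per the docstring,
# and proc_mode "load" assumes the ProcessTypes.LOAD key.
def _demo_setup_images():
    setup_images("img.czi", series=0, offset=(0, 10, 10),
                 size=(5, 50, 50), proc_mode="load")
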
def main():
    """Process data frame tasks based on the command-line mode."""
    # process stats based on the command-line argument
    df_task = libmag.get_enum(config.df_task, config.DFTasks)
    id_col = config.plot_labels[config.PlotLabels.ID_COL]
    x_col = config.plot_labels[config.PlotLabels.X_COL]
    y_col = config.plot_labels[config.PlotLabels.Y_COL]
    group_col = config.plot_labels[config.PlotLabels.GROUP_COL]

    if df_task is config.DFTasks.MERGE_CSVS:
        # merge multiple CSV files into a single CSV file
        prefix = config.prefix
        if not prefix:
            # fall back to a default filename based on the first path
            prefix = f"{os.path.splitext(config.filename)[0]}_merged"
        merge_csvs(config.filenames, prefix)

    elif df_task is config.DFTasks.MERGE_CSVS_COLS:
        # join multiple CSV files based on a given index column into a
        # single CSV file
        dfs = [pd.read_csv(f) for f in config.filenames]
        df = join_dfs(
            dfs, id_col, config.plot_labels[config.PlotLabels.DROP_DUPS])
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_joined" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.APPEND_CSVS_COLS:
        # concatenate multiple CSV files into a single CSV file by appending
        # selected columns from the given files
        dfs = [pd.read_csv(f) for f in config.filenames]
        labels = libmag.to_seq(
            config.plot_labels[config.PlotLabels.X_LABEL])
        extra_cols = libmag.to_seq(x_col)
        data_cols = libmag.to_seq(y_col)
        df = append_cols(
            dfs, labels, extra_cols=extra_cols, data_cols=data_cols)
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_appended" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.EXPS_BY_REGION:
        # convert a volume stats data frame to experiments by region
        exps_by_regions(config.filename)

    elif df_task is config.DFTasks.EXTRACT_FROM_CSV:
        # extract rows from a CSV file based on matching rows in a given
        # column, where
        # "X_COL" = name of the column on which to filter, and
        # "Y_COL" = values in this column for which rows should be kept
        df = pd.read_csv(config.filename)
        df_filt, _ = filter_dfs_on_vals([df], None, [(x_col, y_col)])
        data_frames_to_csv(df_filt, libmag.make_out_path())

    elif df_task is config.DFTasks.ADD_CSV_COLS:
        # add columns with corresponding values for all rows, where
        # "X_COL" = name(s) of the column(s) to add, and
        # "Y_COL" = value(s) for the corresponding columns
        df = pd.read_csv(config.filename)
        cols = {k: v for k, v in zip(
            libmag.to_seq(x_col), libmag.to_seq(y_col))}
        df = add_cols_df(df, cols)
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_appended" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.NORMALIZE:
        # normalize values in each group to those of a base group, where
        # "ID_COL" = ID column(s),
        # "X_COL" = condition column,
        # "Y_COL" = base condition to which values will be normalized,
        # "GROUP_COL" = metric columns to normalize, and
        # "WT_COL" = extra columns to keep
        df = pd.read_csv(config.filename)
        df = normalize_df(
            df, id_col, x_col, y_col, group_col,
            config.plot_labels[config.PlotLabels.WT_COL])
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_norm" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.MERGE_EXCELS:
        # merge multiple Excel files into a single Excel file, with each
        # original Excel file as a separate sheet in the combined file
        merge_excels(
            config.filenames, config.prefix,
            config.plot_labels[config.PlotLabels.LEGEND_NAMES])

    elif df_task in _ARITHMETIC_TASKS:
        # perform arithmetic operations on pairs of columns in a data frame
        df = pd.read_csv(config.filename)
        fn = _ARITHMETIC_TASKS[df_task]
        for col_x, col_y, col_id in zip(
                libmag.to_seq(x_col), libmag.to_seq(y_col),
                libmag.to_seq(id_col)):
            # perform the arithmetic operation specified by the task on the
            # pair of columns, inserting the results in a new column
            # specified by the ID
            func_to_paired_cols(df, col_x, col_y, fn, col_id)

        # output the modified data frame to a CSV file
        data_frames_to_csv(df, libmag.make_out_path())

    elif df_task is config.DFTasks.REPLACE_VALS:
        # replace values in a CSV file, where
        # "X_COL" = values to replace,
        # "Y_COL" = replacement values, and
        # "GROUP_COL" = columns in which to replace values
        df = pd.read_csv(config.filename)
        df = replace_vals(df, x_col, y_col, group_col)
        data_frames_to_csv(df, libmag.make_out_path())
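
# Standalone pandas sketch (not the join_dfs implementation) of what
# MERGE_CSVS_COLS does conceptually: join tables on a shared ID column so
# that each sample's measurements line up in a single row.
def _demo_join_on_id():
    df1 = pd.DataFrame({"Sample": ["a", "b"], "Vol": [1.0, 2.0]})
    df2 = pd.DataFrame({"Sample": ["a", "b"], "Count": [10, 20]})
    # one row per sample with both Vol and Count columns
    return df1.set_index("Sample").join(df2.set_index("Sample"))
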
def process_cli_args():
    """Parse command-line arguments.

    Typically stores values as :mod:`magmap.settings.config` attributes.

    """
    parser = argparse.ArgumentParser(
        description="Setup environment for MagellanMapper")
    parser.add_argument("--version", action="store_true",
                        help="Show version information and exit")

    # image specification arguments

    # image path(s) specified as an optional argument; takes precedence
    # over the positional argument
    parser.add_argument(
        "--img", nargs="*", default=None,
        help="Main image path(s); after import, the filename is often "
             "given as the original name without its extension")
    # alternatively specified as the first and only positional parameter
    # with as many arguments as desired
    parser.add_argument(
        "img_paths", nargs="*", default=None,
        help="Main image path(s); can also be given as --img, which takes "
             "precedence over this argument")

    parser.add_argument(
        "--meta", nargs="*",
        help="Metadata path(s), which can be given as multiple files "
             "corresponding to each image")
    parser.add_argument(
        "--prefix", nargs="*", type=str,
        help="Path prefix(es), typically used as the base path for file "
             "output")
    parser.add_argument(
        "--prefix_out", nargs="*", type=str,
        help="Path prefix(es), typically used as the base path for file "
             "output when --prefix modifies the input path")
    parser.add_argument(
        "--suffix", nargs="*", type=str,
        help="Path suffix(es), typically inserted just before the extension")
    parser.add_argument("--channel", nargs="*", type=int,
                        help="Channel index")
    parser.add_argument("--series", help="Series index")
    parser.add_argument("--subimg_offset", nargs="*",
                        help="Sub-image offset in x,y,z")
    parser.add_argument("--subimg_size", nargs="*",
                        help="Sub-image size in x,y,z")
    parser.add_argument("--offset", nargs="*", help="ROI offset in x,y,z")
    parser.add_argument("--size", nargs="*", help="ROI size in x,y,z")
    parser.add_argument("--db", help="Database path")
    parser.add_argument(
        "--cpus",
        help="Maximum number of CPUs/processes to use for multiprocessing "
             "tasks. Use \"none\" or 0 to auto-detect this number (default).")
    parser.add_argument(
        "--load", nargs="*",
        help="Load associated data files; see config.LoadData for settings")

    # task arguments
    parser.add_argument(
        "--proc", nargs="*",
        help=_get_args_dict_help(
            "Image processing mode; see config.ProcessTypes for keys "
            "and config.PreProcessKeys for PREPROCESS values",
            config.ProcessTypes))
    parser.add_argument(
        "--register", type=str.lower,
        choices=libmag.enum_names_aslist(config.RegisterTypes),
        help="Image registration task")
    parser.add_argument(
        "--df", type=str.lower,
        choices=libmag.enum_names_aslist(config.DFTasks),
        help="Data frame task")
    parser.add_argument(
        "--plot_2d", type=str.lower,
        choices=libmag.enum_names_aslist(config.Plot2DTypes),
        help="2D plot task; see config.Plot2DTypes")
    parser.add_argument("--ec2_start", nargs="*",
                        help="AWS EC2 instance start")
    parser.add_argument("--ec2_list", nargs="*",
                        help="AWS EC2 instance list")
    parser.add_argument("--ec2_terminate", nargs="*",
                        help="AWS EC2 instance termination")
    parser.add_argument(
        "--notify", nargs="*",
        help="Notification message URL, message, and attachment strings")

    # profile arguments
    parser.add_argument(
        "--roi_profile", nargs="*",
        help="ROI profile, which can be separated by underscores "
             "for multiple profiles and given as paths to custom profiles "
             "in YAML format. Multiple profile groups can be given, which "
             "will each be applied to the corresponding channel. See "
             "docs/settings.md for more details.")
    parser.add_argument(
        "--atlas_profile",
        help="Atlas profile, which can be separated by underscores "
             "for multiple profiles and given as paths to custom profiles "
             "in YAML format. See docs/settings.md for more details.")
    parser.add_argument(
        "--grid_search",
        help="Grid search hyperparameter tuning profile(s), which can be "
             "separated by underscores for multiple profiles and given as "
             "paths to custom profiles in YAML format. See docs/settings.md "
             "for more details.")
    parser.add_argument(
        "--theme", nargs="*", type=str.lower,
        choices=libmag.enum_names_aslist(config.Themes),
        help="UI theme, which can be given as multiple themes to apply "
             "on top of one another")

    # grouped arguments
    parser.add_argument(
        "--truth_db", nargs="*",
        help="Truth database; see config.TruthDB for settings and "
             "config.TruthDBModes for modes")
    parser.add_argument(
        "--labels", nargs="*",
        help=_get_args_dict_help(
            "Atlas labels; see config.AtlasLabels.", config.AtlasLabels))
    parser.add_argument(
        "--transform", nargs="*",
        help=_get_args_dict_help(
            "Image transformations; see config.Transforms.",
            config.Transforms))
    parser.add_argument(
        "--reg_suffixes", nargs="*",
        help=_get_args_dict_help(
            "Registered image suffixes; see config.RegSuffixes for keys "
            "and config.RegNames for values", config.RegSuffixes))
    parser.add_argument(
        "--plot_labels", nargs="*",
        help=_get_args_dict_help(
            "Plot label customizations; see config.PlotLabels.",
            config.PlotLabels))
    parser.add_argument(
        "--set_meta", nargs="*",
        help="Set metadata values; see config.MetaKeys for settings")

    # image and figure display arguments
    parser.add_argument("--plane", type=str.lower, choices=config.PLANE,
                        help="Planar orientation")
    parser.add_argument(
        "--show", nargs="?", const="1",
        help="If applicable, show images after completing the given task")
    parser.add_argument(
        "--alphas",
        help="Alpha opacity levels, which can be comma-delimited for "
             "multichannel images")
    parser.add_argument(
        "--vmin",
        help="Minimum intensity levels, which can be comma-delimited "
             "for multichannel images")
    parser.add_argument(
        "--vmax",
        help="Maximum intensity levels, which can be comma-delimited "
             "for multichannel images")
    parser.add_argument("--seed", help="Random number generator seed")

    # export arguments
    parser.add_argument("--save_subimg", action="store_true",
                        help="Save sub-image as separate file")
    parser.add_argument("--slice", help="Slice given as start,stop,step")
    parser.add_argument("--delay", help="Animation delay in ms")
    parser.add_argument("--savefig", help="Extension for saved figures")
    parser.add_argument("--groups", nargs="*",
                        help="Group values corresponding to each image")
    parser.add_argument(
        "-v", "--verbose", nargs="*",
        help=_get_args_dict_help(
            "Verbose output to assist with debugging; see config.Verbosity.",
            config.Verbosity))

    # only parse recognized arguments to avoid errors for unrecognized ones
    args, args_unknown = parser.parse_known_args()

    # set up application directories
    user_dir = config.user_app_dirs.user_data_dir
    if not os.path.isdir(user_dir):
        # make the application data directory
        if os.path.exists(user_dir):
            # back up any non-directory file
            libmag.backup_file(user_dir)
        os.makedirs(user_dir)

    if args.verbose is not None:
        # verbose mode and logging setup
        config.verbose = True
        config.verbosity = args_to_dict(
            args.verbose, config.Verbosity, config.verbosity)
        if config.verbosity[config.Verbosity.LEVEL] is None:
            # default to debug mode if any verbose flag is set without level
            config.verbosity[config.Verbosity.LEVEL] = logging.DEBUG
        logs.update_log_level(
            config.logger, config.verbosity[config.Verbosity.LEVEL])

        # print longer Numpy arrays for debugging
        np.set_printoptions(linewidth=200, threshold=10000)
        _logger.info("Set verbose to %s", config.verbosity)

    # set up logging to the given file unless explicitly given an empty string
    log_path = config.verbosity[config.Verbosity.LOG_PATH]
    if log_path != "":
        if log_path is None:
            log_path = os.path.join(
                config.user_app_dirs.user_data_dir, "out.log")
        # log to file
        config.log_path = logs.add_file_handler(config.logger, log_path)

    # redirect standard out/error to logging
    sys.stdout = logs.LogWriter(config.logger.info)
    sys.stderr = logs.LogWriter(config.logger.error)

    # load the preferences file
    config.prefs = prefs_prof.PrefsProfile()
    config.prefs.add_profiles(str(config.PREFS_PATH))

    if args.version:
        # print version info and exit
        _logger.info(f"{config.APP_NAME}-{libmag.get_version(True)}")
        shutdown()

    # log the app launch path
    path_launch = (
        sys._MEIPASS if getattr(sys, "frozen", False)
        and hasattr(sys, "_MEIPASS") else sys.argv[0])
    _logger.info(f"Launched MagellanMapper from {path_launch}")

    if args.img is not None or args.img_paths:
        # set the image file path and convert it to the basis for
        # additional paths
        config.filenames = args.img if args.img else args.img_paths
        config.filename = config.filenames[0]
        print("Set filenames to {}, current filename {}".format(
            config.filenames, config.filename))

    if args.meta is not None:
        # set metadata paths
        config.metadata_paths = args.meta
        print("Set metadata paths to", config.metadata_paths)
        config.metadatas = []
        for path in config.metadata_paths:
            # load each metadata file into a dictionary
            md, _ = importer.load_metadata(path, assign=False)
            config.metadatas.append(md)

    if args.channel is not None:
        # set the channels
        config.channel = args.channel
        print("Set channel to {}".format(config.channel))

    config.series_list = [config.series]  # list of series
    if args.series is not None:
        series_split = args.series.split(",")
        config.series_list = []
        for ser in series_split:
            ser_split = ser.split("-")
            if len(ser_split) > 1:
                ser_range = np.arange(
                    int(ser_split[0]), int(ser_split[1]) + 1)
                config.series_list.extend(ser_range.tolist())
            else:
                config.series_list.append(int(ser_split[0]))
        config.series = config.series_list[0]
        print("Set series_list to {}, current series {}".format(
            config.series_list, config.series))

    if args.savefig is not None:
        # save figures with this extension as the file type; remove any
        # leading period
        config.savefig = _parse_none(args.savefig.lstrip("."))
        print("Set savefig extension to {}".format(config.savefig))

    # parse sub-image offsets and sizes;
    # expects x,y,z input but stores as z,y,x by convention
    if args.subimg_offset is not None:
        config.subimg_offsets = _parse_coords(args.subimg_offset, True)
        print("Set sub-image offsets to {} (z,y,x)".format(
            config.subimg_offsets))
    if args.subimg_size is not None:
        config.subimg_sizes = _parse_coords(args.subimg_size, True)
        print("Set sub-image sizes to {} (z,y,x)".format(
            config.subimg_sizes))

    # parse ROI offsets and sizes, which are relative to any sub-image;
    # expects x,y,z input and output
    if args.offset is not None:
        config.roi_offsets = _parse_coords(args.offset)
        if config.roi_offsets:
            config.roi_offset = config.roi_offsets[0]
        print("Set ROI offsets to {}, current offset {} (x,y,z)".format(
            config.roi_offsets, config.roi_offset))
    if args.size is not None:
        config.roi_sizes = _parse_coords(args.size)
        if config.roi_sizes:
            config.roi_size = config.roi_sizes[0]
        print("Set ROI sizes to {}, current size {} (x,y,z)".format(
            config.roi_sizes, config.roi_size))

    if args.cpus is not None:
        # set the maximum number of CPUs
        config.cpus = _parse_none(args.cpus.lower(), int)
        print("Set maximum number of CPUs for multiprocessing tasks to",
              config.cpus)

    if args.load is not None:
        # flag loading data sources with a default sub-arg indicating that
        # the data should be loaded from a default path; otherwise, load
        # from the path given by the sub-arg; change the delimiter to allow
        # paths with ","
        config.load_data = args_to_dict(
            args.load, config.LoadData, config.load_data, sep_vals="|",
            default=True)
        print("Set to load the data types: {}".format(config.load_data))

    # set up the main processing mode
    if args.proc is not None:
        config.proc_type = args_to_dict(
            args.proc, config.ProcessTypes, config.proc_type, default=True)
        print("Set main processing tasks to:", config.proc_type)

    if args.set_meta is not None:
        # set individual metadata values, currently used for image import
        # TODO: take precedence over loaded metadata archives
        config.meta_dict = args_to_dict(
            args.set_meta, config.MetaKeys, config.meta_dict, sep_vals="|")
        print("Set metadata values to {}".format(config.meta_dict))

    res = config.meta_dict[config.MetaKeys.RESOLUTIONS]
    if res:
        # set image resolutions, taken as a single set of x,y,z and
        # converted to a nested list of z,y,x
        res_split = res.split(",")
        if len(res_split) >= 3:
            res_float = tuple(float(i) for i in res_split)[::-1]
            config.resolutions = [res_float]
            print("Set resolutions to {}".format(config.resolutions))
        else:
            res_float = None
            print("Resolution ({}) should be given as 3 values (x,y,z)"
                  .format(res))
        # store the single set of resolutions, similar to the input
        config.meta_dict[config.MetaKeys.RESOLUTIONS] = res_float

    mag = config.meta_dict[config.MetaKeys.MAGNIFICATION]
    if mag:
        # set the objective magnification
        config.magnification = mag
        print("Set magnification to {}".format(config.magnification))

    zoom = config.meta_dict[config.MetaKeys.ZOOM]
    if zoom:
        # set the objective zoom
        config.zoom = zoom
        print("Set zoom to {}".format(config.zoom))

    shape = config.meta_dict[config.MetaKeys.SHAPE]
    if shape:
        # parse the shape, storing it only in the dict
        config.meta_dict[config.MetaKeys.SHAPE] = [
            int(n) for n in shape.split(",")[::-1]]

    # set up ROI and register profiles
    setup_roi_profiles(args.roi_profile)
    setup_atlas_profiles(args.atlas_profile)
    setup_grid_search_profiles(args.grid_search)

    if args.plane is not None:
        config.plane = args.plane
        print("Set plane to {}".format(config.plane))

    if args.save_subimg:
        config.save_subimg = args.save_subimg
        print("Set to save the sub-image")

    if args.labels:
        # set up atlas labels
        setup_labels(args.labels)

    if args.transform is not None:
        # image transformations such as flipping, rotation
        config.transform = args_to_dict(
            args.transform, config.Transforms, config.transform)
        print("Set transformations to {}".format(config.transform))

    if args.register:
        # register type to process in the register module
        config.register_type = args.register
        print("Set register type to {}".format(config.register_type))

    if args.df:
        # data frame processing task
        config.df_task = args.df
        print("Set data frame processing task to {}".format(config.df_task))

    if args.plot_2d:
        # 2D plot type to process in the plot_2d module
        config.plot_2d_type = args.plot_2d
        print("Set plot_2d type to {}".format(config.plot_2d_type))

    if args.slice:
        # specify a generic slice by command-line, assuming the same order
        # of arguments as for the slice built-in function and interpreting
        # the "none" string as None
        config.slice_vals = args.slice.split(",")
        config.slice_vals = [
            _parse_none(val.lower(), int) for val in config.slice_vals]
        print("Set slice values to {}".format(config.slice_vals))

    if args.delay:
        config.delay = int(args.delay)
        print("Set delay to {}".format(config.delay))

    if args.show:
        # show images after the task is performed, if supported
        config.show = _is_arg_true(args.show)
        print("Set show to {}".format(config.show))

    if args.groups:
        config.groups = args.groups
        print("Set groups to {}".format(config.groups))

    if args.ec2_start is not None:
        # start EC2 instances
        config.ec2_start = args_with_dict(args.ec2_start)
        print("Set ec2 start to {}".format(config.ec2_start))

    if args.ec2_list:
        # list EC2 instances
        config.ec2_list = args_with_dict(args.ec2_list)
        print("Set ec2 list to {}".format(config.ec2_list))

    if args.ec2_terminate:
        config.ec2_terminate = args.ec2_terminate
        print("Set ec2 terminate to {}".format(config.ec2_terminate))

    if args.notify:
        notify_len = len(args.notify)
        if notify_len > 0:
            config.notify_url = args.notify[0]
            print("Set notification URL to {}".format(config.notify_url))
        if notify_len > 1:
            config.notify_msg = args.notify[1]
            print("Set notification message to {}".format(
                config.notify_msg))
        if notify_len > 2:
            config.notify_attach = args.notify[2]
            print("Set notification attachment path to {}".format(
                config.notify_attach))

    if args.prefix is not None:
        # path input/output prefixes
        config.prefixes = args.prefix
        config.prefix = config.prefixes[0]
        print("Set path prefixes to {}".format(config.prefixes))

    if args.prefix_out is not None:
        # path output prefixes
        config.prefixes_out = args.prefix_out
        config.prefix_out = config.prefixes_out[0]
        print("Set path output prefixes to {}".format(config.prefixes_out))

    if args.suffix is not None:
        # path suffixes
        config.suffixes = args.suffix
        config.suffix = config.suffixes[0]
        print("Set path suffixes to {}".format(config.suffixes))

    if args.alphas:
        # specify alpha levels
        config.alphas = [float(val) for val in args.alphas.split(",")]
        print("Set alphas to", config.alphas)

    if args.vmin:
        # specify vmin levels
        config.vmins = [
            libmag.get_int(val) for val in args.vmin.split(",")]
        print("Set vmins to", config.vmins)

    if args.vmax:
        # specify vmax levels and copy to the vmax overview used for
        # plotting and updated for normalization
        config.vmaxs = [
            libmag.get_int(val) for val in args.vmax.split(",")]
        config.vmax_overview = list(config.vmaxs)
        print("Set vmaxs to", config.vmaxs)

    if args.reg_suffixes is not None:
        # specify suffixes of registered images to load
        config.reg_suffixes = args_to_dict(
            args.reg_suffixes, config.RegSuffixes, config.reg_suffixes)
        print("Set registered image suffixes to {}".format(
            config.reg_suffixes))

    if args.seed:
        # specify the random number generator seed
        config.seed = int(args.seed)
        print("Set random number generator seed to", config.seed)

    if args.plot_labels is not None:
        # specify general plot labels
        config.plot_labels = args_to_dict(
            args.plot_labels, config.PlotLabels, config.plot_labels)
        print("Set plot labels to {}".format(config.plot_labels))

    if args.theme is not None:
        # specify themes, currently applied to Matplotlib elements
        theme_names = []
        for theme in args.theme:
            # add the theme enum if found
            theme_enum = libmag.get_enum(theme, config.Themes)
            if theme_enum:
                config.rc_params.append(theme_enum)
                theme_names.append(theme_enum.name)
        print("Set themes to {}".format(theme_names))

    # set up Matplotlib styles/themes
    plot_2d.setup_style()

    if args.db:
        # set the main database path to the user arg
        config.db_path = args.db
        print("Set database name to {}".format(config.db_path))
    else:
        # set the default path
        config.db_path = os.path.join(user_dir, config.db_path)

    if args.truth_db:
        # set settings for a separate database of "truth blobs"
        config.truth_db_params = args_to_dict(
            args.truth_db, config.TruthDB, config.truth_db_params,
            sep_vals="|")
        mode = config.truth_db_params[config.TruthDB.MODE]
        config.truth_db_mode = libmag.get_enum(mode, config.TruthDBModes)
        libmag.printv(config.truth_db_params)
        print("Mapped \"{}\" truth_db mode to {}".format(
            mode, config.truth_db_mode))

    # notify the user of the full args list, including unrecognized args
    _logger.debug(f"All command-line arguments: {sys.argv}")
    if args_unknown:
        _logger.info(
            f"The following command-line arguments were unrecognized and "
            f"ignored: {args_unknown}")
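
# Standalone sketch of the --series parsing above: comma-delimited values
# with optional dash-delimited ranges, e.g. "0,2-4" -> [0, 2, 3, 4].
def _demo_parse_series(arg):
    series = []
    for ser in arg.split(","):
        bounds = ser.split("-")
        if len(bounds) > 1:
            # inclusive range, matching the np.arange(start, stop + 1) above
            series.extend(range(int(bounds[0]), int(bounds[1]) + 1))
        else:
            series.append(int(bounds[0]))
    return series

assert _demo_parse_series("0,2-4") == [0, 2, 3, 4]
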
def process_file(path, proc_mode, series=None, subimg_offset=None,
                 subimg_size=None, roi_offset=None, roi_size=None):
    """Process a single image file non-interactively.

    Assumes that the image has already been set up.

    Args:
        path (str): Path to image from which MagellanMapper-style paths will
            be generated.
        proc_mode (str): Processing mode, which should be a key in
            :class:`config.ProcessTypes`, case-insensitive.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        roi_offset (List[int]): Region of interest offset as (x, y, z) to
            process; defaults to None.
        roi_size (List[int]): Region of interest size of the region to
            process, given as (x, y, z); defaults to None.

    Returns:
        Tuple of stats from processing, or None if no stats, and text
        feedback from the processing, or None if no feedback.

    """
    # PROCESS BY TYPE
    stats = None
    fdbk = None
    filename_base = importer.filename_to_base(path, series)
    proc_type = libmag.get_enum(proc_mode, config.ProcessTypes)

    print("{}\n".format("-" * 80))

    if proc_type is config.ProcessTypes.LOAD:
        # loading completed
        return None, None

    elif proc_type is config.ProcessTypes.IMPORT_ONLY:
        # already imported, so nothing further to do
        print("imported {}, will exit".format(path))

    elif proc_type is config.ProcessTypes.EXPORT_ROIS:
        # export ROIs; assumes that info_proc was already loaded to
        # give a smaller region from which the smaller ROIs from the
        # truth DB will be extracted
        from magmap.io import export_rois
        db = config.db if config.truth_db is None else config.truth_db
        export_rois.export_rois(
            db, config.image5d, config.channel, filename_base,
            config.plot_labels[config.PlotLabels.PADDING],
            config.unit_factor, config.truth_db_mode,
            os.path.basename(config.filename))

    elif proc_type is config.ProcessTypes.TRANSFORM:
        # transpose, rescale, and/or resize the whole large image
        transformer.transpose_img(
            path, series, plane=config.plane,
            rescale=config.transform[config.Transforms.RESCALE],
            target_size=config.roi_size)

    elif proc_type in (config.ProcessTypes.EXTRACT,
                       config.ProcessTypes.ANIMATED):
        # generate an animated GIF or extract a single plane
        export_stack.stack_to_img(
            config.filenames, roi_offset, roi_size, series, subimg_offset,
            subimg_size, proc_type is config.ProcessTypes.ANIMATED,
            config.suffix)

    elif proc_type is config.ProcessTypes.EXPORT_BLOBS:
        # export blobs to a CSV file
        from magmap.io import export_rois
        export_rois.blobs_to_csv(config.blobs.blobs, filename_base)

    elif proc_type in (config.ProcessTypes.DETECT,
                       config.ProcessTypes.DETECT_COLOC):
        # detect blobs in the full image, with or without co-localization
        coloc = proc_type is config.ProcessTypes.DETECT_COLOC
        stats, fdbk, _ = stack_detect.detect_blobs_stack(
            filename_base, subimg_offset, subimg_size, coloc)

    elif proc_type is config.ProcessTypes.COLOC_MATCH:
        if config.blobs is not None and config.blobs.blobs is not None:
            # co-localize blobs in separate channels by matching blobs
            shape = (config.image5d.shape[1:] if subimg_size is None
                     else subimg_size)
            matches = colocalizer.StackColocalizer.colocalize_stack(
                shape, config.blobs.blobs)
            # insert matches into the database
            colocalizer.insert_matches(config.db, matches)
        else:
            print("No blobs loaded to colocalize, skipping")

    elif proc_type in (config.ProcessTypes.EXPORT_PLANES,
                       config.ProcessTypes.EXPORT_PLANES_CHANNELS):
        # export each plane as a separate image file
        export_stack.export_planes(
            config.image5d, config.savefig, config.channel,
            proc_type is config.ProcessTypes.EXPORT_PLANES_CHANNELS)

    elif proc_type is config.ProcessTypes.EXPORT_RAW:
        # export the main image as a raw data file
        out_path = libmag.combine_paths(config.filename, ".raw", sep="")
        libmag.backup_file(out_path)
        np_io.write_raw_file(config.image5d, out_path)

    elif proc_type is config.ProcessTypes.PREPROCESS:
        # pre-process a whole image and save to file
        # TODO: consider a chunking option for larger images
        profile = config.get_roi_profile(0)
        out_path = config.prefix
        if not out_path:
            out_path = libmag.insert_before_ext(config.filename, "_preproc")
        transformer.preprocess_img(
            config.image5d, profile["preprocess"], config.channel, out_path)

    return stats, fdbk
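
# Hedged usage sketch for process_file: assumes setup_images has already
# loaded the hypothetical "img.czi"; "detect" assumes the
# ProcessTypes.DETECT key, and the sub-image coordinates are z,y,x.
def _demo_process_file():
    stats, fdbk = process_file(
        "img.czi", "detect", series=0,
        subimg_offset=(0, 0, 0), subimg_size=(10, 100, 100))
    return stats, fdbk
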
def setup_dbs(filename_base, db_path=None, truth_db_config=None):
    """Set up databases for the given image file if the given database
    has not been set up already.

    Args:
        filename_base (str): Image base path.
        db_path (str): Main database path; defaults to None to use a
            default path.
        truth_db_config (List[str]): Sequence of truth database
            configuration settings; defaults to None to not load
            truth-related databases.

    """
    if db_path:
        config.db_name = db_path
        print("Set database name to {}".format(config.db_name))

    # load "truth blobs" from a separate database for viewing
    if truth_db_config is not None:
        # set the truth database mode
        config.truth_db_params = args_to_dict(
            truth_db_config, config.TruthDB, config.truth_db_params,
            sep_vals="|")
        mode = config.truth_db_params[config.TruthDB.MODE]
        config.truth_db_mode = libmag.get_enum(mode, config.TruthDBModes)
        libmag.printv(config.truth_db_params)
        print("Mapped \"{}\" truth_db mode to {}".format(
            mode, config.truth_db_mode))
    truth_db_path = config.truth_db_params[config.TruthDB.PATH]
    truth_db_name_base = (
        filename_base if filename_base else sqlite.DB_NAME_BASE)

    if config.truth_db_mode is config.TruthDBModes.VIEW:
        # load the truth DB as a separate database in parallel with the
        # given editable database, with a name based on the filename by
        # default unless a truth DB name is explicitly given
        path = truth_db_path if truth_db_path else truth_db_name_base
        try:
            sqlite.load_truth_db(path)
        except FileNotFoundError as e:
            print(e)
            print("Could not load truth DB from current image path")
    elif config.truth_db_mode is config.TruthDBModes.VERIFY:
        if not config.verified_db:
            # create a new verified DB to store all ROC results
            config.verified_db = sqlite.ClrDB()
            config.verified_db.load_db(sqlite.DB_NAME_VERIFIED, True)
        if truth_db_path:
            # load the truth DB path to verify against if explicitly given
            try:
                sqlite.load_truth_db(truth_db_path)
            except FileNotFoundError as e:
                print(e)
                print("Could not load truth DB from {}".format(
                    truth_db_path))
    elif config.truth_db_mode is config.TruthDBModes.VERIFIED:
        # load the verified DB as the main DB, which includes copies of
        # truth values with flags for whether they were detected
        path = sqlite.DB_NAME_VERIFIED
        if truth_db_path:
            path = truth_db_path
        try:
            config.db = sqlite.ClrDB()
            config.db.load_db(path)
            config.verified_db = config.db
        except FileNotFoundError as e:
            print(e)
            print("Could not load verified DB from {}".format(
                sqlite.DB_NAME_VERIFIED))
    elif config.truth_db_mode is config.TruthDBModes.EDIT:
        # load the truth DB as the main database for editing rather than
        # loading it as a truth database
        config.db_name = truth_db_path
        if not config.db_name:
            config.db_name = "{}{}".format(
                os.path.basename(truth_db_name_base),
                sqlite.DB_SUFFIX_TRUTH)
        print("Editing truth database at {}".format(config.db_name))

    if config.db is None:
        config.db = sqlite.ClrDB()
        config.db.load_db(None, False)
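
# Standalone sketch (stdlib only) of the case-insensitive name lookup that
# libmag.get_enum presumably performs when mapping a mode string such as
# "verify" to a TruthDBModes member; _DemoModes is a hypothetical stand-in.
from enum import Enum

class _DemoModes(Enum):
    VIEW = "view"
    VERIFY = "verify"

def _demo_get_enum(name, enum_cls):
    # return the member matching the name case-insensitively, else None
    return enum_cls.__members__.get(name.upper()) if name else None

assert _demo_get_enum("verify", _DemoModes) is _DemoModes.VERIFY
assert _demo_get_enum("missing", _DemoModes) is None
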
def main(process_args_only=False, skip_dbs=False):
    """Start the visualization GUI after processing command-line arguments.

    Args:
        process_args_only (bool): Process command-line arguments and
            return; defaults to False.
        skip_dbs (bool): True to skip loading databases; defaults to False.

    """
    parser = argparse.ArgumentParser(
        description="Setup environment for MagellanMapper")

    # image specification arguments
    parser.add_argument(
        "--img", nargs="*",
        help="Main image path(s); after import, the filename is often "
             "given as the original name without its extension")
    parser.add_argument(
        "--meta", nargs="*",
        help="Metadata path(s), which can be given as multiple files "
             "corresponding to each image")
    parser.add_argument("--prefix", help="Path prefix")
    parser.add_argument("--suffix", help="Filename suffix")
    parser.add_argument("--channel", nargs="*", type=int,
                        help="Channel index")
    parser.add_argument("--series", help="Series index")
    parser.add_argument("--subimg_offset", nargs="*",
                        help="Sub-image offset in x,y,z")
    parser.add_argument("--subimg_size", nargs="*",
                        help="Sub-image size in x,y,z")
    parser.add_argument("--offset", nargs="*", help="ROI offset in x,y,z")
    parser.add_argument("--size", nargs="*", help="ROI size in x,y,z")
    parser.add_argument("--db", help="Database path")
    parser.add_argument(
        "--cpus",
        help="Maximum number of CPUs/processes to use for multiprocessing "
             "tasks. Use \"none\" or 0 to auto-detect this number (default).")

    # task arguments
    parser.add_argument(
        "--proc", type=str.lower,
        choices=libmag.enum_names_aslist(config.ProcessTypes),
        help="Image processing mode")
    parser.add_argument(
        "--register", type=str.lower,
        choices=libmag.enum_names_aslist(config.RegisterTypes),
        help="Image registration task")
    parser.add_argument(
        "--df", type=str.lower,
        choices=libmag.enum_names_aslist(config.DFTasks),
        help="Data frame task")
    parser.add_argument(
        "--plot_2d", type=str.lower,
        choices=libmag.enum_names_aslist(config.Plot2DTypes),
        help="2D plot task; see config.Plot2DTypes")
    parser.add_argument("--ec2_start", nargs="*",
                        help="AWS EC2 instance start")
    parser.add_argument("--ec2_list", nargs="*",
                        help="AWS EC2 instance list")
    parser.add_argument("--ec2_terminate", nargs="*",
                        help="AWS EC2 instance termination")
    parser.add_argument(
        "--notify", nargs="*",
        help="Notification message URL, message, and attachment strings")
    parser.add_argument("--grid_search",
                        help="Grid search hyperparameter tuning profile(s)")

    # profile arguments
    parser.add_argument(
        "--roi_profile", nargs="*",
        help="ROI profile, which can be separated by underscores "
             "for multiple profiles and given as paths to custom profiles "
             "in YAML format. Multiple profile groups can be given, which "
             "will each be applied to the corresponding channel. See "
             "docs/settings.md for more details.")
    parser.add_argument(
        "--atlas_profile",
        help="Atlas profile, which can be separated by underscores "
             "for multiple profiles and given as paths to custom profiles "
             "in YAML format. See docs/settings.md for more details.")
    parser.add_argument(
        "--theme", nargs="*", type=str.lower,
        choices=libmag.enum_names_aslist(config.Themes),
        help="UI theme, which can be given as multiple themes to apply "
             "on top of one another")

    # grouped arguments
    parser.add_argument(
        "--truth_db", nargs="*",
        help="Truth database; see config.TruthDB for settings and "
             "config.TruthDBModes for modes")
    parser.add_argument(
        "--labels", nargs="*",
        help=_get_args_dict_help(
            "Atlas labels; see config.AtlasLabels.", config.AtlasLabels))
    parser.add_argument(
        "--transform", nargs="*",
        help=_get_args_dict_help(
            "Image transformations; see config.Transforms.",
            config.Transforms))
    parser.add_argument(
        "--reg_suffixes", nargs="*",
        help=_get_args_dict_help(
            "Registered image suffixes; see config.RegSuffixes for keys "
            "and config.RegNames for values", config.RegSuffixes))
    parser.add_argument(
        "--plot_labels", nargs="*",
        help=_get_args_dict_help(
            "Plot label customizations; see config.PlotLabels.",
            config.PlotLabels))
    parser.add_argument(
        "--set_meta", nargs="*",
        help="Set metadata values; see config.MetaKeys for settings")

    # image and figure display arguments
    parser.add_argument("--plane", type=str.lower, choices=config.PLANE,
                        help="Planar orientation")
    parser.add_argument(
        "--show", nargs="?", const="1",
        help="If applicable, show images after completing the given task")
    parser.add_argument(
        "--alphas",
        help="Alpha opacity levels, which can be comma-delimited for "
             "multichannel images")
    parser.add_argument(
        "--vmin",
        help="Minimum intensity levels, which can be comma-delimited "
             "for multichannel images")
    parser.add_argument(
        "--vmax",
        help="Maximum intensity levels, which can be comma-delimited "
             "for multichannel images")
    parser.add_argument("--seed", help="Random number generator seed")

    # export arguments
    parser.add_argument("--save_subimg", action="store_true",
                        help="Save sub-image as separate file")
    parser.add_argument("--slice", help="Slice given as start,stop,step")
    parser.add_argument("--delay", help="Animation delay in ms")
    parser.add_argument("--savefig", help="Extension for saved figures")
    parser.add_argument("--groups", nargs="*",
                        help="Group values corresponding to each image")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Verbose output to assist with debugging")
    args = parser.parse_args()

    if args.img is not None:
        # set the image file path and convert it to the basis for
        # additional paths
        config.filenames = args.img
        config.filename = config.filenames[0]
        print("Set filenames to {}, current filename {}".format(
            config.filenames, config.filename))

    if args.meta is not None:
        # set metadata paths
        config.metadata_paths = args.meta
        print("Set metadata paths to", config.metadata_paths)
        config.metadatas = []
        for path in config.metadata_paths:
            # load each metadata file into a dictionary
            md, _ = importer.load_metadata(path, assign=False)
            config.metadatas.append(md)

    if args.channel is not None:
        # set the channels
        config.channel = args.channel
        print("Set channel to {}".format(config.channel))

    series_list = [config.series]  # list of series
    if args.series is not None:
        series_split = args.series.split(",")
        series_list = []
        for ser in series_split:
            ser_split = ser.split("-")
            if len(ser_split) > 1:
                ser_range = np.arange(
                    int(ser_split[0]), int(ser_split[1]) + 1)
                series_list.extend(ser_range.tolist())
            else:
                series_list.append(int(ser_split[0]))
        config.series = series_list[0]
        print("Set series_list to {}, current series {}".format(
            series_list, config.series))

    if args.savefig is not None:
        # save figures with this extension as the file type; remove any
        # leading period
        config.savefig = args.savefig.lstrip(".")
        print("Set savefig extension to {}".format(config.savefig))

    if args.verbose:
        # verbose mode, including printing longer Numpy arrays for debugging
        config.verbose = args.verbose
        np.set_printoptions(linewidth=200, threshold=10000)
        print("Set verbose to {}".format(config.verbose))

    # parse sub-image offsets and sizes;
    # expects x,y,z input but stores as z,y,x by convention
    if args.subimg_offset is not None:
        config.subimg_offsets = _parse_coords(args.subimg_offset, True)
        print("Set sub-image offsets to {} (z,y,x)".format(
            config.subimg_offsets))
    if args.subimg_size is not None:
        config.subimg_sizes = _parse_coords(args.subimg_size, True)
        print("Set sub-image sizes to {} (z,y,x)".format(
            config.subimg_sizes))

    # parse ROI offsets and sizes, which are relative to any sub-image;
    # expects x,y,z input and output
    if args.offset is not None:
        config.roi_offsets = _parse_coords(args.offset)
        if config.roi_offsets:
            config.roi_offset = config.roi_offsets[0]
        print("Set ROI offsets to {}, current offset {} (x,y,z)".format(
            config.roi_offsets, config.roi_offset))
    if args.size is not None:
        config.roi_sizes = _parse_coords(args.size)
        if config.roi_sizes:
            config.roi_size = config.roi_sizes[0]
        print("Set ROI sizes to {}, current size {} (x,y,z)".format(
            config.roi_sizes, config.roi_size))

    if args.cpus is not None:
        # set the maximum number of CPUs
        config.cpus = (
            None if args.cpus.lower() in ("none", "0") else int(args.cpus))
        print("Set maximum number of CPUs for multiprocessing tasks to",
              config.cpus)

    # set up the main processing mode
    if args.proc is not None:
        config.proc_type = args.proc
        print("processing type set to {}".format(config.proc_type))
    proc_type = libmag.get_enum(config.proc_type, config.ProcessTypes)
    if config.proc_type and proc_type is None:
        libmag.warn("\"{}\" processing type not found".format(
            config.proc_type))

    if args.set_meta is not None:
        # set individual metadata values, currently used for image import
        # TODO: take precedence over loaded metadata archives
        config.meta_dict = args_to_dict(
            args.set_meta, config.MetaKeys, config.meta_dict, sep_vals="|")
        print("Set metadata values to {}".format(config.meta_dict))

    res = config.meta_dict[config.MetaKeys.RESOLUTIONS]
    if res:
        # set image resolutions, taken as a single set of x,y,z and
        # converted to a nested list of z,y,x
        res_split = res.split(",")
        if len(res_split) >= 3:
            res_float = tuple(float(i) for i in res_split)[::-1]
            config.resolutions = [res_float]
            print("Set resolutions to {}".format(config.resolutions))
        else:
            res_float = None
            print("Resolution ({}) should be given as 3 values (x,y,z)"
                  .format(res))
        # store the single set of resolutions, similar to the input
        config.meta_dict[config.MetaKeys.RESOLUTIONS] = res_float

    mag = config.meta_dict[config.MetaKeys.MAGNIFICATION]
    if mag:
        # set the objective magnification
        config.magnification = mag
        print("Set magnification to {}".format(config.magnification))

    zoom = config.meta_dict[config.MetaKeys.ZOOM]
    if zoom:
        # set the objective zoom
        config.zoom = zoom
        print("Set zoom to {}".format(config.zoom))

    shape = config.meta_dict[config.MetaKeys.SHAPE]
    if shape:
        # parse the shape, storing it only in the dict
        config.meta_dict[config.MetaKeys.SHAPE] = [
            int(n) for n in shape.split(",")[::-1]]

    # set up ROI and register profiles
    setup_profiles(args.roi_profile, args.atlas_profile, args.grid_search)

    if args.plane is not None:
        config.plane = args.plane
        print("Set plane to {}".format(config.plane))

    if args.save_subimg:
        config.save_subimg = args.save_subimg
        print("Set to save the sub-image")

    if args.labels:
        # set up atlas labels
        setup_labels(args.labels)

    if args.transform is not None:
        # image transformations such as flipping, rotation
        config.transform = args_to_dict(
            args.transform, config.Transforms, config.transform)
        print("Set transformations to {}".format(config.transform))

    if args.register:
        # register type to process in the register module
        config.register_type = args.register
        print("Set register type to {}".format(config.register_type))

    if args.df:
        # data frame processing task
        config.df_task = args.df
        print("Set data frame processing task to {}".format(config.df_task))

    if args.plot_2d:
        # 2D plot type to process in the plot_2d module
        config.plot_2d_type = args.plot_2d
        print("Set plot_2d type to {}".format(config.plot_2d_type))

    if args.slice:
        # specify a generic slice by command-line, assuming the same order
        # of arguments as for the slice built-in function and interpreting
        # the "none" string as None
        config.slice_vals = args.slice.split(",")
        config.slice_vals = [
            None if val.lower() == "none" else int(val)
            for val in config.slice_vals]
        print("Set slice values to {}".format(config.slice_vals))

    if args.delay:
        config.delay = int(args.delay)
        print("Set delay to {}".format(config.delay))

    if args.show:
        # show images after the task is performed, if supported
        config.show = _is_arg_true(args.show)
        print("Set show to {}".format(config.show))

    if args.groups:
        config.groups = args.groups
        print("Set groups to {}".format(config.groups))

    if args.ec2_start is not None:
        # start EC2 instances
        config.ec2_start = args_with_dict(args.ec2_start)
        print("Set ec2 start to {}".format(config.ec2_start))

    if args.ec2_list:
        # list EC2 instances
        config.ec2_list = args_with_dict(args.ec2_list)
        print("Set ec2 list to {}".format(config.ec2_list))

    if args.ec2_terminate:
        config.ec2_terminate = args.ec2_terminate
        print("Set ec2 terminate to {}".format(config.ec2_terminate))

    if args.notify:
        notify_len = len(args.notify)
        if notify_len > 0:
            config.notify_url = args.notify[0]
            print("Set notification URL to {}".format(config.notify_url))
        if notify_len > 1:
            config.notify_msg = args.notify[1]
            print("Set notification message to {}".format(
                config.notify_msg))
        if notify_len > 2:
            config.notify_attach = args.notify[2]
            print("Set notification attachment path to {}".format(
                config.notify_attach))

    if args.prefix:
        config.prefix = args.prefix
        print("Set path prefix to {}".format(config.prefix))

    if args.suffix:
        config.suffix = args.suffix
        print("Set path suffix to {}".format(config.suffix))

    if args.alphas:
        # specify alpha levels
        config.alphas = [float(val) for val in args.alphas.split(",")]
        print("Set alphas to", config.alphas)

    if args.vmin:
        # specify vmin levels
        config.vmins = [
            libmag.get_int(val) for val in args.vmin.split(",")]
        print("Set vmins to", config.vmins)

    if args.vmax:
        # specify vmax levels and copy to the vmax overview used for
        # plotting and updated for normalization
        config.vmaxs = [
            libmag.get_int(val) for val in args.vmax.split(",")]
        config.vmax_overview = list(config.vmaxs)
        print("Set vmaxs to", config.vmaxs)

    if args.reg_suffixes is not None:
        # specify suffixes of registered images to load
        config.reg_suffixes = args_to_dict(
            args.reg_suffixes, config.RegSuffixes, config.reg_suffixes)
        print("Set registered image suffixes to {}".format(
            config.reg_suffixes))

    if args.seed:
        # specify the random number generator seed
        config.seed = int(args.seed)
        print("Set random number generator seed to", config.seed)

    if args.plot_labels is not None:
        # specify general plot labels
        config.plot_labels = args_to_dict(
            args.plot_labels, config.PlotLabels, config.plot_labels)
        print("Set plot labels to {}".format(config.plot_labels))

    if args.theme is not None:
        # specify themes, currently applied to Matplotlib elements
        theme_names = []
        for theme in args.theme:
            # add the theme enum if found
            theme_enum = libmag.get_enum(theme, config.Themes)
            if theme_enum:
                config.rc_params.append(theme_enum)
                theme_names.append(theme_enum.name)
        print("Set themes to {}".format(theme_names))

    # prep the filename
    filename_base = None
    if config.filename:
        filename_base = importer.filename_to_base(
            config.filename, config.series)

    if not skip_dbs:
        setup_dbs(filename_base, args.db, args.truth_db)

    # set the multiprocessing start method
    chunking.set_mp_start_method()

    # POST-ARGUMENT PARSING

    if process_args_only:
        return

    # if a command-line driven task is specified, start the task and shut down
    if config.register_type:
        register.main()
    elif config.notify_url:
        notify.main()
    elif config.plot_2d_type:
        plot_2d.main()
    elif config.df_task:
        df_io.main()
    elif config.grid_search_profile:
        _grid_search(series_list)
    elif config.ec2_list or config.ec2_start or config.ec2_terminate:
        # defer importing the AWS module to avoid making its dependencies
        # required for MagellanMapper
        from magmap.cloud import aws
        aws.main()
    else:
        # set up the image and perform any whole image processing tasks;
        # do not shut down if this is not a command-line proc task
        _process_files(series_list)
        if proc_type is None or proc_type is config.ProcessTypes.LOAD:
            return
    shutdown()
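
# Hedged usage sketch: parse arguments into config without launching any
# task, which can be useful for scripting against the same setup path; the
# argv values here are hypothetical.
def _demo_parse_only():
    import sys
    sys.argv = ["magellanmapper", "--img", "img.czi", "--channel", "0"]
    main(process_args_only=True)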