Code example #1
def write_tif(image5d: np.ndarray, path: Union[str, pathlib.Path],
              **kwargs: Any):
    """Write a NumPy array to TIF files.
    
    Each channel will be exported to a separate file.
    
    Args:
        image5d: NumPy array in ``t, z, y, x, c`` dimension order.
        path: Base output path. If ``image5d`` has multiple channels, they
            will be exported to files with ``_ch_<n>`` appended just before
            the extension.
        kwargs: Arguments passed to :func:`tifffile.imwrite`.

    """
    nchls = get_num_channels(image5d)
    for i in range(nchls):
        # export the given channel to a separate file, adding the channel to
        # the filename if multiple channels exist
        img_chl = image5d if image5d.ndim <= 4 else image5d[..., i]
        out_path = pathlib.Path(
            libmag.make_out_path(
                f"{path}{f'_ch_{i}' if nchls > 1 else ''}.tif",
                combine_prefix=True)).resolve()
        out_path.parent.mkdir(exist_ok=True)
        libmag.backup_file(out_path)

        if "imagej" in kwargs and kwargs["imagej"]:
            # ImageJ format assumes dimension order of TZCYXS
            img_chl = img_chl[:, :, np.newaxis]

        # write to TIF
        _logger.info("Exporting image of shape %s to '%s'", img_chl.shape,
                     out_path)
        tifffile.imwrite(out_path, img_chl, photometric="minisblack", **kwargs)
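
A minimal usage sketch for the exporter above; the array shape and output base path are illustrative, and make_out_path may still prepend any configured prefix:

import numpy as np

# one time point, 4 z-planes, 2 channels in t, z, y, x, c order
img = np.random.rand(1, 4, 64, 64, 2).astype(np.float32)

# with 2 channels, the files become out/sample_ch_0.tif and
# out/sample_ch_1.tif
write_tif(img, "out/sample", imagej=True)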
Code example #2
File: verifier.py Project: sanderslab/magellanmapper
def verify_stack(filename_base, subimg_path_base, settings, segments_all,
                 channels, overlap_base):
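    """Verify detected blobs against a truth database of blobs.

    Loads truth ROIs either from a previously loaded truth database or
    from one named after the sub-image, verifies the detected blobs in
    each ROI, and stores the results in the verified database.

    Args:
        filename_base: Base path of the full image.
        subimg_path_base: Base path of the sub-image.
        settings: Profile settings with a ``verify_tol_factor`` entry.
        segments_all: Detected blobs to verify.
        channels: Channels to include in verification.
        overlap_base: Base overlap per dimension, scaled by
            ``verify_tol_factor`` to give the verification tolerance.

    Returns:
        Tuple of detection stats and verification feedback, both None
        if verification was skipped.

    """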
    db_path_base = os.path.basename(subimg_path_base)
    stats_detection = None
    fdbk = None
    try:
        # Truth databases are any database stored with manually
        # verified blobs and loaded at command-line with the
        # `--truth_db` flag or loaded here. While all experiments
        # can be stored in a single database, this verification also
        # supports experiments saved to separate databases in the
        # software root directory and named as a sub-image but with
        # the `sqlite.DB_SUFFIX_TRUTH` suffix. Experiments in the
        # database are also assumed to be named based on the full
        # image or the sub-image filename, without any directories.

        # load ROIs from previously loaded truth database or one loaded
        # based on sub-image filename
        exp_name, rois = _get_truth_db_rois(
            subimg_path_base, filename_base,
            db_path_base if config.truth_db is None else None)
        if rois is None:
            # load alternate truth database based on sub-image filename
            print("Loading truth ROIs from experiment:", exp_name)
            exp_name, rois = _get_truth_db_rois(subimg_path_base,
                                                filename_base, db_path_base)
        if config.truth_db is None:
            raise LookupError(
                "No truth database found for experiment {}, will "
                "skip detection verification".format(exp_name))
        if rois is None:
            raise LookupError(
                "No truth set ROIs found for experiment {}, will "
                "skip detection verification".format(exp_name))

        # verify each ROI and store results in a separate database
        exp_id = sqlite.insert_experiment(config.verified_db.conn,
                                          config.verified_db.cur, exp_name,
                                          None)
        verify_tol = np.multiply(overlap_base, settings["verify_tol_factor"])
        stats_detection, fdbk, df_verify = verify_rois(
            rois, segments_all, config.truth_db.blobs_truth, verify_tol,
            config.verified_db, exp_id, exp_name, channels)
        df_io.data_frames_to_csv(
            df_verify,
            libmag.make_out_path(libmag.combine_paths(exp_name, "verify.csv")))
    except FileNotFoundError:
        libmag.warn("Could not load truth DB from {}; "
                    "will not verify ROIs".format(db_path_base))
    except LookupError as e:
        libmag.warn(str(e))
    return stats_detection, fdbk
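
A hypothetical call sketch for the function above; it assumes config.truth_db and config.verified_db were initialized beforehand, and all argument values here are illustrative:

settings = {"verify_tol_factor": (1, 2, 2)}
stats, fdbk = verify_stack(
    "exp/sample", "exp/sample_subimg", settings,
    segments_all=detected_blobs,  # detection output (hypothetical name)
    channels=[0],
    overlap_base=(5, 10, 10))     # z,y,x overlap, scaled by the factor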
Code example #3
def export_planes(image5d, ext, channel=None, separate_chls=False):
    """Export each plane and channel combination into separate 2D image files

    Args:
        image5d (:obj:`np.ndarray`): Image in ``t,z,y,x[,c]`` format.
        ext (str): Save format given as an extension without period.
        channel (int): Channel to save; defaults to None for all channels.
        separate_chls (bool): True to export each channel of each plane
            to a separate image; defaults to False.

    """
    suffix = "_export" if config.suffix is None else config.suffix
    out_path = libmag.make_out_path(suffix=suffix)
    output_dir = os.path.dirname(out_path)
    basename = os.path.splitext(os.path.basename(out_path))[0]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    roi = image5d[0]
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    num_digits = len(str(len(roi)))
    for i, plane in enumerate(roi):
        path = os.path.join(output_dir,
                            "{}_{:0{}d}".format(basename, i, num_digits))
        if separate_chls and multichannel:
            for chl in channels:
                # save each channel as separate file
                plane_chl = plane[..., chl]
                path_chl = "{}{}{}.{}".format(path, importer.CHANNEL_SEPARATOR,
                                              chl, ext)
                print("Saving image plane {} to {}".format(i, path_chl))
                io.imsave(path_chl, plane_chl)
        else:
            # save single channel plane
            path = "{}.{}".format(path, ext)
            print("Saving image plane {} to {}".format(i, path))
            io.imsave(path, plane)
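
The legacy exporter above names files by zero-padding the plane index to the stack length; a quick sketch of the pattern with made-up values:

basename, i, num_digits = "img", 7, 3
print("{}_{:0{}d}".format(basename, i, num_digits))  # img_007
# with separate_chls, importer.CHANNEL_SEPARATOR and the channel index
# are appended before the extension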
Code example #4
def export_planes(image5d: np.ndarray,
                  ext: str,
                  channel: Optional[int] = None,
                  separate_chls: bool = False):
    """Export all planes of a 3D+ image into separate 2D image files.
    
    Unlike :func:`stack_to_img`, this function exports raw planes, and
    optionally each channel, into separate files without processing
    through Matplotlib.
    Supports image rotation set in :attr:`magmap.settings.config.transform`.
    
    By default, all z-planes are exported; a subset of plane indices can
    be selected through :attr:`config.slice_vals`. Alternatively, a
    region of interest can be specified by :attr:`config.roi_offset` and
    :attr:`config.roi_size`.
    The planar orientation can be configured through :attr:`config.plane`.

    Args:
        image5d: Image in ``t,z,y,x[,c]`` format.
        ext: Save format given as an extension without period.
        channel: Channel to save; defaults to None for all channels.
        separate_chls: True to export each channel of each plane to a
            separate image; defaults to False.

    """
    # set up output path
    suffix = "_export" if config.suffix is None else config.suffix
    out_path = libmag.make_out_path(suffix=suffix)
    output_dir = os.path.dirname(out_path)
    basename = os.path.splitext(os.path.basename(out_path))[0]
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)

    # set up image and apply any rotation
    roi = image5d[0]
    multichannel, channels = plot_3d.setup_channels(roi, channel, 3)
    rotate = config.transform[config.Transforms.ROTATE]
    roi = cv_nd.rotate90(roi, rotate, multichannel=multichannel)
    stacker = setup_stack(roi[np.newaxis, :],
                          offset=config.roi_offset,
                          roi_size=config.roi_size,
                          slice_vals=config.slice_vals,
                          rescale=config.transform[config.Transforms.RESCALE])
    roi = stacker.images[0]

    num_planes = len(roi)
    img_sl = stacker.img_slice
    for i, plane in enumerate(roi):
        # add plane to output path
        out_name = f"{basename}_plane_" \
                   f"{plot_support.get_plane_axis(config.plane)}" \
                   f"{img_sl.start + img_sl.step * i}"
        path = os.path.join(output_dir, out_name)
        if separate_chls and multichannel:
            for chl in channels:
                # save each channel as separate file
                plane_chl = plane[..., chl]
                path_chl = "{}{}{}.{}".format(path, importer.CHANNEL_SEPARATOR,
                                              chl, ext)
                print("Saving image plane {} to {}".format(i, path_chl))
                io.imsave(path_chl, plane_chl)
        else:
            # save single channel plane
            path = "{}.{}".format(path, ext)
            print("Saving image plane {} to {}".format(i, path))
            io.imsave(path, plane)
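
A usage sketch for the updated exporter; it assumes the relevant config attributes (filename, transform, slice_vals, etc.) have been populated, typically from the command line, and the array here is illustrative:

import numpy as np

img = np.zeros((1, 8, 128, 128, 2), dtype=np.uint8)  # t, z, y, x, c
# export every z-plane of both channels as PNGs, one file per channel
export_planes(img, "png", separate_chls=True)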
Code example #5
def stack_to_img(paths,
                 roi_offset,
                 roi_size,
                 series=None,
                 subimg_offset=None,
                 subimg_size=None,
                 animated=False,
                 suffix=None):
    """Build an image file from a stack of images in a directory or an 
    array, exporting as an animated GIF or movie for multiple planes or 
    extracting a single plane to a standard image file format.
    
    Writes the file to the parent directory of path.
    
    Args:
        paths (List[str]): Image paths, which can each be either an image 
            directory or a base path to a single image, including 
            volumetric images.
        roi_offset (Sequence[int]): Tuple of offset given in user order
            ``x,y,z``; defaults to None. Requires ``roi_size`` to not be None.
        roi_size (Sequence[int]): Size of the region of interest in user order 
            ``x,y,z``; defaults to None. Requires ``roi_offset`` to not be None.
        series (int): Image series number; defaults to None.
        subimg_offset (List[int]): Sub-image offset as (z,y,x) to load;
            defaults to None.
        subimg_size (List[int]): Sub-image size as (z,y,x) to load;
            defaults to None.
        animated (bool): True to export as an animated image; defaults to False.
        suffix (str): String to append to output path before extension; 
            defaults to None to ignore.

    """
    # set up figure layout for collages
    size = config.plot_labels[config.PlotLabels.LAYOUT]
    ncols, nrows = size if size else (1, 1)
    num_paths = len(paths)
    collage = num_paths > 1
    figs = {}

    for i in range(nrows):
        for j in range(ncols):
            n = i * ncols + j
            if n >= num_paths: break

            # load an image and set up its image stacker
            path_sub = paths[n]
            axs = []
            # TODO: test directory of images
            # TODO: consider not reloading first image
            np_io.setup_images(path_sub, series, subimg_offset, subimg_size)
            stacker = setup_stack(
                config.image5d,
                path_sub,
                offset=roi_offset,
                roi_size=roi_size,
                slice_vals=config.slice_vals,
                rescale=config.transform[config.Transforms.RESCALE],
                labels_imgs=(config.labels_img, config.borders_img))

            # add sub-plot title unless groups given as empty string
            title = None
            if config.groups:
                title = libmag.get_if_within(config.groups, n)
            elif num_paths > 1:
                title = os.path.basename(path_sub)

            if not stacker.images: continue
            ax = None
            for k in range(len(stacker.images[0])):
                # create or retrieve fig; animation has only 1 fig
                planei = 0 if animated else (stacker.img_slice.start +
                                             k * stacker.img_slice.step)
                fig_dict = figs.get(planei)
                if not fig_dict:
                    # set up new fig
                    fig, gs = plot_support.setup_fig(
                        nrows, ncols,
                        config.plot_labels[config.PlotLabels.SIZE])
                    fig_dict = {"fig": fig, "gs": gs, "imgs": []}
                    figs[planei] = fig_dict
                if ax is None:
                    # generate new axes for the gridspec position
                    ax = fig_dict["fig"].add_subplot(fig_dict["gs"][i, j])
                if title:
                    ax.title.set_text(title)
                axs.append(ax)

            # export planes
            plotted_imgs = stacker.build_stack(
                axs, config.plot_labels[config.PlotLabels.SCALE_BAR],
                size is None or ncols * nrows == 1)

            if animated:
                # store all plotted images in single fig
                fig_dict = figs.get(0)
                if fig_dict:
                    fig_dict["imgs"] = plotted_imgs
            else:
                # store one plotted image per fig; not used currently
                for fig_dict, img in zip(figs.values(), plotted_imgs):
                    fig_dict["imgs"].append(img)

    path_base = paths[0]
    for planei, fig_dict in figs.items():
        if animated:
            # generate animated image (eg animated GIF or movie file)
            animate_imgs(path_base, fig_dict["imgs"], config.delay,
                         config.savefig, suffix)
        else:
            # generate single figure with axis and plane index in filename
            if collage:
                # output filename as a collage of images
                if not os.path.isdir(path_base):
                    path_base = os.path.dirname(path_base)
                path_base = os.path.join(path_base, "collage")

            # insert mod as suffix, then add any additional suffix;
            # can use config.prefix_out for make_out_path prefix
            mod = "_plane_{}{}".format(
                plot_support.get_plane_axis(config.plane), planei)
            out_path = libmag.make_out_path(path_base, suffix=mod)
            if suffix:
                out_path = libmag.insert_before_ext(out_path, suffix)
            plot_support.save_fig(out_path,
                                  config.savefig,
                                  fig=fig_dict["fig"])
Code example #6
def parse_grid_stats(
    stats: OrderedDict[str, Tuple[Sequence, Sequence, str, OrderedDict]]
) -> Tuple[Dict[str, Tuple[Sequence, Sequence, Sequence]], pd.DataFrame]:
    """Parse stats from a grid search.
    
    Args:
        stats: Dictionary where key is a string with the parameters
            up to the last parameter group, and each value is a tuple of 
            the raw stats as (pos, true_pos, false_pos); the array of
            values for the last parameter; the last parameter key; and an 
            ``OrderedDict`` of the parent parameters and their values for 
            the given set of stats.
    
    Returns:
        Tuple of ``group_stats`` and ``df``:
        - ``group_stats`` is a dictionary of stats, where keys
          correspond to ``stats`` keys, and values are tuples of the
          false discovery rate, sensitivity, and last parameter group value,
          each as sequences
        - ``df`` is a data frame summarizing the stats
    
    """

    # parse a grid search
    stats_for_df = {}
    headers = None
    group_dict = {}
    param_keys = []
    for key, value in stats.items():
        # parse stats from a set of parameters
        grid_stats = np.array(value[0])  # raw stats
        # last parameter is given separately since it is actively varying
        last_param_vals, last_param_key, parent_params = value[1:]
        if not headers:
            # set up headers for each stat and insert parameter headers
            # at the start
            headers = [
                GridSearchStats.PARAM.value,
                GridSearchStats.PPV,
                GridSearchStats.SENS,
                GridSearchStats.POS,
                GridSearchStats.TP,
                GridSearchStats.FP,
                GridSearchStats.FDR,
            ]
            headers[0] = "_".join((headers[0], last_param_key))
            for i, parent in enumerate(parent_params.keys()):
                headers.insert(i, "_".join(
                    (GridSearchStats.PARAM.value, parent)))
                param_keys.append(parent)
            param_keys.append(last_param_key)
        # false discovery rate, the complement of PPV (1 - PPV), since
        # true negatives are unavailable
        fdr = np.subtract(
            1,
            np.divide(grid_stats[:, 1],
                      np.add(grid_stats[:, 1], grid_stats[:, 2])))
        sens = np.divide(grid_stats[:, 1], grid_stats[:, 0])
        for i, n in enumerate(last_param_vals):
            stat_list = []
            for parent_val in parent_params.values():
                stat_list.append(parent_val)
            stat_list.extend((n, 1 - fdr[i], sens[i],
                              *grid_stats[i].astype(int), fdr[i]))
            for header, stat in zip(headers, stat_list):
                stats_for_df.setdefault(header, []).append(stat)
        group_dict[key] = (fdr, sens, last_param_vals)
    print()

    # generate a data frame to summarize stats and save to file
    path_df = libmag.make_out_path("gridsearch_{}.csv".format(
        "_".join(param_keys)))
    df = df_io.dict_to_data_frame(stats_for_df, path_df, show=" ")
    return group_dict, df
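
Because the expected stats structure is easy to get wrong, here is a minimal input sketch matching the docstring above; the numbers and parameter names are made up:

from collections import OrderedDict

stats = OrderedDict()
stats["scaling 0.5"] = (
    [(10, 8, 3), (10, 9, 5)],  # (pos, true_pos, false_pos) per value
    [0.1, 0.2],                # values of the last parameter
    "isotropic",               # last parameter key
    OrderedDict(scaling=0.5),  # parent parameters for this group
)
# writes gridsearch_scaling_isotropic.csv and returns the parsed stats
group_stats, df = parse_grid_stats(stats)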
Code example #7
File: df_io.py Project: sanderslab/magellanmapper
def main():
    """Process stats based on command-line mode."""

    df_task = libmag.get_enum(config.df_task, config.DFTasks)
    id_col = config.plot_labels[config.PlotLabels.ID_COL]
    x_col = config.plot_labels[config.PlotLabels.X_COL]
    y_col = config.plot_labels[config.PlotLabels.Y_COL]
    group_col = config.plot_labels[config.PlotLabels.GROUP_COL]

    if df_task is config.DFTasks.MERGE_CSVS:
        # merge multiple CSV files into single CSV file
        prefix = config.prefix
        if not prefix:
            # fallback to default filename based on first path
            prefix = f"{os.path.splitext(config.filename)[0]}_merged"
        merge_csvs(config.filenames, prefix)

    elif df_task is config.DFTasks.MERGE_CSVS_COLS:
        # join multiple CSV files based on a given index column into single
        # CSV file
        dfs = [pd.read_csv(f) for f in config.filenames]
        df = join_dfs(dfs, id_col,
                      config.plot_labels[config.PlotLabels.DROP_DUPS])
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_joined" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.APPEND_CSVS_COLS:
        # concatenate multiple CSV files into single CSV file by appending
        # selected columns from the given files
        dfs = [pd.read_csv(f) for f in config.filenames]
        labels = libmag.to_seq(config.plot_labels[config.PlotLabels.X_LABEL])
        extra_cols = libmag.to_seq(x_col)
        data_cols = libmag.to_seq(y_col)
        df = append_cols(dfs,
                         labels,
                         extra_cols=extra_cols,
                         data_cols=data_cols)
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_appended" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.EXPS_BY_REGION:
        # convert volume stats data frame to experiments by region
        exps_by_regions(config.filename)

    elif df_task is config.DFTasks.EXTRACT_FROM_CSV:
        # extract rows from CSV file based on matching rows in given col, where
        # "X_COL" = name of column on which to filter, and
        # "Y_COL" = values in this column for which rows should be kept
        df = pd.read_csv(config.filename)
        df_filt, _ = filter_dfs_on_vals([df], None, [(x_col, y_col)])
        data_frames_to_csv(df_filt, libmag.make_out_path())

    elif df_task is config.DFTasks.ADD_CSV_COLS:
        # add columns with corresponding values for all rows, where
        # "X_COL" = name of column(s) to add, and
        # "Y_COL" = value(s) for corresponding cols
        df = pd.read_csv(config.filename)
        cols = dict(zip(libmag.to_seq(x_col), libmag.to_seq(y_col)))
        df = add_cols_df(df, cols)
        out_path = libmag.make_out_path(
            config.filename,
            suffix="_appended" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.NORMALIZE:
        # normalize values in each group to that of a base group, where
        # "ID_COL" = ID column(s),
        # "X_COL" = condition column
        # "Y_COL" = base condition to which values will be normalized,
        # "GROUP_COL" = metric columns to normalize,
        # "WT_COL" = extra columns to keep
        df = pd.read_csv(config.filename)
        df = normalize_df(df, id_col, x_col, y_col, group_col,
                          config.plot_labels[config.PlotLabels.WT_COL])
        out_path = libmag.make_out_path(
            config.filename, suffix="_norm" if config.suffix is None else None)
        data_frames_to_csv(df, out_path)

    elif df_task is config.DFTasks.MERGE_EXCELS:
        # merge multiple Excel files into single Excel file, with each
        # original Excel file as a separate sheet in the combined file
        merge_excels(config.filenames, config.prefix,
                     config.plot_labels[config.PlotLabels.LEGEND_NAMES])

    elif df_task in _ARITHMETIC_TASKS:
        # perform arithmetic operations on pairs of columns in a data frame
        df = pd.read_csv(config.filename)
        fn = _ARITHMETIC_TASKS[df_task]
        for col_x, col_y, col_id in zip(libmag.to_seq(x_col),
                                        libmag.to_seq(y_col),
                                        libmag.to_seq(id_col)):
            # perform the arithmetic operation specified by the specific
            # task on the pair of columns, inserting the results in a new
            # column specified by ID
            func_to_paired_cols(df, col_x, col_y, fn, col_id)

        # output modified data frame to CSV file
        data_frames_to_csv(df, libmag.make_out_path())

    elif df_task is config.DFTasks.REPLACE_VALS:
        # replace values in a CSV file
        # X_COL: replace from these values
        # Y_COL: replace to these values
        # GROUP_COL: columns to replace
        df = pd.read_csv(config.filename)
        df = replace_vals(df, x_col, y_col, group_col)
        data_frames_to_csv(df, libmag.make_out_path())
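
A hypothetical driver for one of the tasks above; the attribute names mirror the code, but the task string accepted by libmag.get_enum and the column values are assumptions:

# keep only rows whose "Region" column matches one of the given values
config.filename = "stats.csv"
config.df_task = "extract_from_csv"
config.plot_labels[config.PlotLabels.X_COL] = "Region"
config.plot_labels[config.PlotLabels.Y_COL] = ["HPF", "TH"]
main()  # writes the filtered frame via data_frames_to_csv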