Example #1
def _run(radmat, radmat_filter_mask, dyemat, dyepeps, n_channels):
    with c_nn_v2.context(
            train_dyemat=dyemat,
            train_dyepeps=dyepeps,
            radmat=radmat,
            radmat_filter_mask=radmat_filter_mask,
            priors=params.priors,
            n_channels=n_channels,
            n_neighbors=params.n_neighbors,
            run_row_k_fit=params.run_row_k_fit,
            run_against_all_dyetracks=params.run_against_all_dyetracks,
            scoring_verbose=params.scoring_verbose,
            scoring_verbose_cc=params.scoring_verbose_cc,
            row_k_score_factor=params.row_k_score_factor,
    ) as nn_v2_context:
        # _nn_v2.c chokes if a batch is larger than 1024*16
        batches = zap.make_batch_slices(n_rows=radmat.shape[0],
                                        _batch_size=_batch_size)
        with zap.Context(mode="thread",
                         trap_exceptions=False,
                         progress=progress):
            # This must be thread mode because it operates on the context in shared memory.
            zap.work_orders([
                dict(
                    fn=c_nn_v2.do_classify_radrows,
                    radrow_start_i=batch[0],
                    n_radrows=batch[1] - batch[0],
                    nn_v2_context=nn_v2_context,
                ) for batch in batches
            ])
        return nn_v2_context
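
Note: the thread-mode dispatch above behaves roughly like mapping the worker
over batch slices with a thread pool, so every call sees the same in-memory
nn_v2_context. A standard-library approximation (zap's internals are not
shown, so this is only a sketch):

from concurrent.futures import ThreadPoolExecutor

def _toy_worker(start, stop, shared_ctx):
    # Threads (not processes) let every call mutate the same context object.
    shared_ctx["done"] = shared_ctx.get("done", 0) + (stop - start)

ctx = {}
batches = [(0, 16384), (16384, 30000)]
with ThreadPoolExecutor() as pool:
    list(pool.map(lambda b: _toy_worker(b[0], b[1], ctx), batches))
assert ctx["done"] == 30000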
Example #2
def radiometry_cy_ims(cy_ims, locs, reg_psf_samples, peak_mea):
    """
    Compute radiometry on the stack of cycle images for one field on one channel

    Returns:
        output_radmat: ndarray(n_peaks, n_cycles, (sig, noi, bg_med, bg_std))
    """
    with context(cy_ims=cy_ims,
                 locs=locs,
                 reg_psf_samples=reg_psf_samples,
                 peak_mea=peak_mea) as ctx:
        check.array_t(locs, ndim=2, dtype=np.float64)
        n_peaks = locs.shape[0]
        if n_peaks > 0:
            batches = zap.make_batch_slices(n_rows=locs.shape[0],
                                            _batch_size=100)
            with zap.Context(trap_exceptions=False, mode="thread"):
                zap.work_orders([
                    dict(
                        fn=_do_radiometry_field_stack_peak_batch,
                        ctx=ctx,
                        peak_start_i=batch[0],
                        peak_stop_i=batch[1],
                    ) for batch in batches
                ])

    return ctx._out_radiometry
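
Note: zap.make_batch_slices is called here with _batch_size=100 and its output
is indexed as batch[0]/batch[1], so it appears to return (start, stop) row
slices. A plain-Python equivalent under that assumption:

def make_batch_slices(n_rows, _batch_size):
    # (start, stop) pairs covering [0, n_rows) in chunks of _batch_size
    return [(i, min(i + _batch_size, n_rows)) for i in range(0, n_rows, _batch_size)]

assert make_batch_slices(250, 100) == [(0, 100), (100, 200), (200, 250)]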
Example #3
    def classify(self, X, progress=None):
        check.array_t(X, ndim=2)

        n_rows = X.shape[0]

        if n_rows < 100:
            winner_y, winner_scores, runnerup_y, runnerup_scores = _do_predict(
                classifier=self.classifier, X=X)
        else:
            n_work_orders = n_rows // 100

            with zap.Context(progress=progress, trap_exceptions=False):
                results = zap.work_orders([
                    Munch(classifier=self.classifier, X=X_chunk, fn=_do_predict)
                    for X_chunk in np.array_split(X, n_work_orders, axis=0)
                ])
            winner_y = utils.listi(results, 0)
            winner_scores = utils.listi(results, 1)
            runnerup_y = utils.listi(results, 2)
            runnerup_scores = utils.listi(results, 3)

            winner_y = np.concatenate(winner_y)
            winner_scores = np.concatenate(winner_scores)
            runnerup_y = np.concatenate(runnerup_y)
            runnerup_scores = np.concatenate(runnerup_scores)

        return winner_y, winner_scores, runnerup_y, runnerup_scores
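
Note: utils.listi(results, k) is used above to gather the k-th element of each
worker's return tuple; assuming that reading, it is equivalent to a one-line
comprehension:

def listi(results, i):
    # pick the i-th element out of each result tuple
    return [r[i] for r in results]

assert listi([("y0", "s0"), ("y1", "s1")], 0) == ["y0", "y1"]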
Example #4
    def peps_above_thresholds(self, precision=0.0, recall=0.0):
        with zap.Context(mode="thread"):
            df = zap.df_groups(
                _do_peps_above_thresholds,
                self.pr_curve_by_pep().groupby("pep_i"),
                precision=precision,
                recall=recall,
            )
        df = df.reset_index().sort_index().rename(columns={0: "passes"})
        return np.argwhere(df.passes.values).flatten()
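
Note: _do_peps_above_thresholds is not shown. Given that pr_curve_by_pep()
(see Example #10) yields prec/recall columns per pep_i and the result is
renamed to a boolean "passes" column, a plausible per-group worker would be
(hypothetical, not the confirmed implementation):

import pandas as pd

def _do_peps_above_thresholds(group_df, precision=0.0, recall=0.0):
    # True if any point on this pep's P/R curve clears both thresholds
    return bool(((group_df.prec >= precision) & (group_df.recall >= recall)).any())

df = pd.DataFrame(dict(prec=[0.9, 0.5], recall=[0.1, 0.8]))
assert _do_peps_above_thresholds(df, precision=0.8, recall=0.1)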
Example #5
def psf_fields_one_channel(ims_import_result,
                           sigproc_v2_params,
                           field_iz,
                           channel_i,
                           progress=None) -> priors.RegPSFPrior:
    """
    Build up a regional PSF for one channel on the RAW images.

    Implemented as a parallel zap over every field; the per-field results are
    then combined into a single RegPSF which stores: (divs, divs, peak_mea, peak_mea)
    """

    if ims_import_result.n_fields == 0:
        return None

    with zap.Context(progress=progress):
        region_to_psf_per_field = zap.arrays(
            _do_psf_one_field_one_channel,
            dict(field_i=field_iz),
            _stack=True,
            peak_mea=sigproc_v2_params.peak_mea,
            divs=sigproc_v2_params.divs,
            bandpass_kwargs=dict(
                low_inflection=sigproc_v2_params.low_inflection,
                low_sharpness=sigproc_v2_params.low_sharpness,
                high_inflection=sigproc_v2_params.high_inflection,
                high_sharpness=sigproc_v2_params.high_sharpness,
            ),
            ims_import_result=ims_import_result,
            channel_i=channel_i,
            n_cycles_limit=sigproc_v2_params.n_cycles_limit,
        )

    # SUM over fields
    psf_ims = np.sum(region_to_psf_per_field, axis=0)
    psf_ims = psf_normalize(psf_ims)

    # At this point psf_ims is a pixel image of the PSF at each regional div,
    # i.e. 4-dimensional: (divs_y, divs_x, n_pixels_h, n_pixels_w).
    # Now convert it to Gaussian parameters by fitting so we don't have
    # to store the pixels anymore: just the 3 critical shape parameters:
    # sigma_x, sigma_y, and rho.
    # Use one frame of ims_import_result to sort out dimensions.
    im = ims_import_result.ims[0, 0, 0]
    check.array_t(im, is_square=True)
    reg_psf = priors.RegPSFPrior.from_psf_ims(im.shape[-1], psf_ims)
    return reg_psf
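
Note: zap.arrays with _stack=True is used so the per-field PSF stacks can be
summed along axis 0; inferred from that usage, the call is a parallel map over
field_iz whose results are stacked. A toy serial equivalent (names and shapes
here are illustrative only):

import numpy as np

def _toy_psf_for_field(field_i, divs=5, peak_mea=11):
    return np.zeros((divs, divs, peak_mea, peak_mea)) + field_i

region_to_psf_per_field = np.stack(
    [_toy_psf_for_field(field_i=f) for f in [0, 1, 2]], axis=0)
psf_ims = np.sum(region_to_psf_per_field, axis=0)  # SUM over fields, as above
assert psf_ims.shape == (5, 5, 11, 11)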
Example #6
def sigproc(sigproc_params, ims_import_result, progress=None):
    # CACHE n_channels, n_cycles, dim into sigproc_params by loading one field
    ims = ims_import_result.field_chcy_ims(field_i=0)
    n_inchannels, n_cycles, h, w = ims.shape
    assert h == w
    n_outchannels = sigproc_params.n_output_channels
    sigproc_params._outchannels_inchannels_cycles_dim = (
        n_outchannels,
        n_inchannels,
        n_cycles,
        h,
    )

    if not sigproc_params.channel_indices_for_alignment:
        sigproc_params.channel_indices_for_alignment = list(
            range(n_inchannels))

    sigproc_result = SigprocV1Result(
        params=sigproc_params,
        n_input_channels=n_inchannels,
        n_channels=n_outchannels,
        n_cycles=n_cycles,
    )

    n_fields = ims_import_result.n_fields
    n_fields_limit = sigproc_params.n_fields_limit
    if n_fields_limit is not None and n_fields_limit < n_fields:
        n_fields = n_fields_limit

    # TASK: I think this would be nicer with the parallel array map
    with zap.Context(trap_exceptions=False, progress=progress):
        results = zap.work_orders([
            Munch(
                fn=_do_field,
                field_i=field_i,
                sigproc_params=sigproc_params,
                ims_import_result=ims_import_result,
                sigproc_result=sigproc_result,
            ) for field_i in range(n_fields)
        ])

    # SET the result n_channels (possibly different from the input n_channels);
    # each _do_field work order returns the channel count for its field.
    per_field_n_channels = np.array(results)
    assert np.all(per_field_n_channels == per_field_n_channels[0])
    sigproc_result.n_channels = int(per_field_n_channels[0])

    return sigproc_result
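
Note: Munch (from the munch package) is a dict subclass with attribute access,
which is why the work orders above can be built as keyword bundles and read
back with dot syntax:

from munch import Munch

wo = Munch(fn=print, field_i=3)
assert wo.field_i == wo["field_i"] == 3  # attribute and key access are interchangeable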
Example #7
def _run_sim(sim_params, pep_seqs_df, name, n_peps, n_samples, progress):
    if sim_params.get("random_seed") is not None:
        # Increment so that train and test will be different
        sim_params.random_seed += 1

    np.random.seed(sim_params.random_seed)

    dyemat = ArrayResult(
        f"{name}_dyemat",
        shape=(n_peps, n_samples, sim_params.n_channels, sim_params.n_cycles),
        dtype=DyeType,
        mode="w+",
    )
    radmat = ArrayResult(
        f"{name}_radmat",
        shape=(n_peps, n_samples, sim_params.n_channels, sim_params.n_cycles),
        dtype=RadType,
        mode="w+",
    )
    recall = ArrayResult(
        f"{name}_recall",
        shape=(n_peps, ),
        dtype=RecallType,
        mode="w+",
    )

    with zap.Context(trap_exceptions=False, progress=progress):
        flus__remainders = zap.df_groups(
            _do_pep_sim,
            pep_seqs_df.groupby("pep_i"),
            sim_params=sim_params,
            n_samples=n_samples,
            output_dyemat=dyemat,
            output_radmat=radmat,
            output_recall=recall,
        )

    flus = np.array(utils.listi(flus__remainders, 0))
    flu_remainders = np.array(utils.listi(flus__remainders, 1))

    true_pep_iz = np.repeat(np.arange(n_peps), n_samples)

    return dyemat, radmat, recall, flus, flu_remainders, true_pep_iz
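
Note: the true_pep_iz construction labels every simulated row with its peptide
index; since each pep contributes n_samples consecutive rows, np.repeat
produces exactly that layout:

import numpy as np

# n_peps=3, n_samples=2 -> one pep_i label per row, in row order
assert np.repeat(np.arange(3), 2).tolist() == [0, 0, 1, 1, 2, 2]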
Example #8
    def all_dfs(self, fn, parallel=False):
        """
        Run fn on every run, assert that each returns a DataFrame,
        and then pd.concat all the results into one, adding run_i
        and run_name columns to that DataFrame.

        Example:
            df = job.all_dfs(lambda run: run.prep.pros())
        """
        df_list = []
        if parallel:

            def wrap_fn(run, run_i):
                res_df = fn(run)
                assert isinstance(res_df, pd.DataFrame)
                res_df["run_i"] = run_i
                res_df["run_name"] = run.manifest.run_name
                return res_df

            work_orders = [
                {"fn": wrap_fn, "args": [run, run_i]}
                for run_i, run in enumerate(self._run_results.values())
            ]

            # TODO: it would be nice to integrate this progress stuff into zap as an optional argument
            progress = tqdm(total=len(work_orders))

            def progress_callback(i, j, retry):
                if not retry:
                    progress.update()

            with zap.Context(trap_exceptions=False, progress=progress_callback):
                df_list = zap.work_orders(work_orders)

            progress.close()
        else:
            for run_i, run in enumerate(self._run_results.values()):
                res_df = fn(run)
                assert isinstance(res_df, pd.DataFrame)
                res_df["run_i"] = run_i
                res_df["run_name"] = run.manifest.run_name
                df_list += [res_df]
        return pd.concat(df_list).reset_index(drop=True)
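
Note: stripped of zap, the manual tqdm wiring above is just this pattern (the
callback signature (i, j, retry) is taken from the usage above):

from tqdm import tqdm

progress = tqdm(total=3)

def progress_callback(i, j, retry):
    # advance the bar once per completed (non-retried) work order
    if not retry:
        progress.update()

for _ in range(3):
    progress_callback(0, 0, False)
progress.close()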
Example #9
    def main(self, job_folder=None):
        switches = utils.plumbum_switches(self)

        if job_folder is None:
            job_folder = self.job
        if job_folder is None:
            log.error("No job_folder was specified")
            return 1

        job_folder = assets.validate_job_folder(job_folder)
        # At this point job_folder is a plumbum path

        # Add a new handler so we get PER-RUN log files into the job folder
        per_run_log_path = job_folder / f"{int(time.time()):06x}.log"
        formatter = zlog.ColorfulFormatter(
            "%(name)s %(asctime)s %(levelname)s %(message)s %(filename)s %(lineno)d"
        )
        handler = logging.StreamHandler(open(per_run_log_path, "w"))
        handler.setFormatter(formatter)
        zlog.add_handler(handler)

        tell(f"Trapping run logs into {per_run_log_path}")

        if job_folder is None:
            log.error(f"No job_folder was specified")
            return 1

        tell(
            f"Plaster run {job_folder} limit={self.limit} started at {arrow.utcnow().format()}"
        )

        if not job_folder.exists():
            log.error(f"Unable to find the path {job_folder}")
            return 1

        # Load the job_uuid if available; eventually this will be nice for logging
        job_uuid = None
        job_yaml = job_folder / "job_manifest.yaml"
        if job_yaml.exists():
            job_manifest = utils.yaml_load_munch(job_yaml)
            job_uuid = job_manifest.uuid

        # Find all the plaster_run.yaml files. They might be in run subfolders
        found = list(
            job_folder.walk(filter=lambda p: p.name == "plaster_run.yaml"))
        run_dirs = [p.dirname for p in found]

        if len(run_dirs) == 0:
            log.error(
                "Plaster: Nothing to do because no run_dirs have plaster_run.yaml files"
            )
            return 1

        def run_reports():
            report_paths = [job_folder / "report.ipynb"] + (
                job_folder / "_reports" // "*.ipynb")

            for report_src_path in report_paths:
                report_dst_path = report_src_path.with_suffix(".html")
                if report_src_path.exists() and (self.force or out_of_date(
                        report_src_path, report_dst_path)):
                    tell(f"Running report {report_src_path}")
                    self.run_ipynb(report_src_path)

        if self.reports_only:
            run_reports()
            return 0

        # A normal run where all happens in this process
        failure_count = 0
        for run_dir_i, run_dir in enumerate(sorted(run_dirs)):
            zlog.metrics(
                f"Starting run subdirectory {run_dir}. {run_dir_i + 1} of {len(run_dirs)}",
                log=log,
                _type="plaster_start",
                run_dir=run_dir,
                run_dir_i=run_dir_i,
                run_dir_n=len(run_dirs),
                **switches,
            )

            try:
                with zap.Context(
                        cpu_limit=self.cpu_limit,
                        mode="debug" if self.debug_mode else None,
                        allow_inner_parallelism=True,
                ):
                    # allow_inner_parallelism must be True so that each task,
                    # such as sigproc_v2, can allocate parallel jobs per field.

                    run = RunExecutor(run_dir).load()
                    if "_erisyon" in run.config:
                        zlog.metrics(
                            "run metrics",
                            log=log,
                            _type="erisyon_block",
                            **run.config._erisyon,
                        )

                    failure_count += run.execute(
                        force=self.force,
                        limit=self.limit.split(",") if self.limit else None,
                        clean=self.clean,
                        n_fields_limit=self.n_fields_limit,
                        no_progress=self.no_progress,
                    )
            except Exception as e:
                failure_count += 1
                if not self.continue_on_error:
                    raise e

        if failure_count == 0 and self.limit is None and not self.clean:
            # WRITE job_info.yaml with metadata used by the indexer
            n_runs = len(run_dirs)
            job_info = Munch(n_runs=n_runs, job_uuid=job_uuid)
            if n_runs == 1:
                job = JobResult(job_folder=job_folder)
                tsv_data = {}
                try:
                    tsv_data = job.runs[0].ims_import.tsv_data
                except Exception:
                    pass

                nd2_metadata = {}
                try:
                    nd2_metadata = job.runs[0].ims_import._nd2_metadata
                except Exception:
                    pass

                job_info.update(tsv_data=tsv_data, nd2_metadata=nd2_metadata)

            utils.yaml_save(job_folder / "job_info.yaml", job_info)

            # RUN reports if not skipped
            if not self.skip_reports:
                run_reports()

        return failure_count
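
Note: out_of_date is called in run_reports() but not defined here; a plausible
mtime-based sketch consistent with that usage (hypothetical, the real helper
may differ):

import os

def out_of_date(src_path, dst_path):
    # rebuild when the output is missing or older than the source
    if not os.path.exists(dst_path):
        return True
    return os.path.getmtime(src_path) > os.path.getmtime(dst_path)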
Example #10
    def pr_curve_by_pep(self,
                        return_auc=False,
                        pep_iz=None,
                        force_compute=False,
                        progress=None):
        """
        Obtain pr_curves for every peptide.

        If all params are default, may returned cached information computed
        during the run.

        Returns:
            A (potentially HUGE) df of every P/R for every peptide
            A smaller df with just the pep_i and the Area-Under-Curve

        This uses the work_order system (as opposed to the
        higher-level array_split_map()) because the _do_pep_pr_curve
        returns 3 identical returns AND one scalar; array_split_map() doesn't
        like that.
        """

        # The PR for all peptides is computed during the run (no auc).
        if not return_auc and not force_compute and self._cached_pr is not None:
            df = self._cached_pr
            if pep_iz is not None:
                df = df[df.pep_i.isin(pep_iz)]
            return df.copy()

        if pep_iz is None:
            pep_iz = self._prep_result.peps().pep_i.values
        if isinstance(pep_iz, np.ndarray):
            pep_iz = pep_iz.tolist()
        check.list_t(pep_iz, int)

        with zap.Context(mode="thread",
                         trap_exceptions=False,
                         progress=progress):
            results = zap.work_orders([
                Munch(
                    fn=_do_pep_pr_curve,
                    pep_i=pep_i,
                    bag=self,
                ) for pep_i in pep_iz
            ])

        df_per_pep = [
            pd.DataFrame(
                dict(
                    pep_i=np.repeat(np.array([pep_i]), prec.shape[0]),
                    prec=prec,
                    recall=recall,
                    score=score,
                )) for pep_i, (prec, recall, score, _) in results
        ]

        if len(df_per_pep) > 0:
            pr_df = pd.concat(df_per_pep, axis=0)
        else:
            pr_df = None

        auc_df = pd.DataFrame(
            [(pep_i, auc) for pep_i, (_, _, _, auc) in results],
            columns=["pep_i", "auc"],
        )

        if return_auc:
            return pr_df, auc_df
        else:
            return pr_df
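
Note: _do_pep_pr_curve itself is not shown; its (prec, recall, score, auc)
return suggests the AUC is an area under the per-pep P/R curve, which could be
computed like this (an assumption, not the confirmed implementation):

import numpy as np

recall = np.array([0.0, 0.4, 0.8, 1.0])
prec = np.array([1.0, 0.9, 0.6, 0.3])
auc = float(np.trapz(prec, recall))  # integrate precision over recall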
Example #11
def ims_import(src_dir: Path,
               ims_import_params: ImsImportParams,
               progress=None,
               pipeline=None):
    reference_nd2_file_for_metadata = None

    scan_result = _scan_files(src_dir)
    if len(scan_result.nd2_paths) > 0:
        reference_nd2_file_for_metadata = scan_result.nd2_paths[0]

    target_mea = max(scan_result.dim[0], scan_result.dim[1])

    if not utils.is_power_of_2(target_mea):
        new_dim = utils.next_power_of_2(target_mea)
        _convert_message(target_mea, new_dim)
        target_mea = new_dim

    def clamp_fields(n_fields_true: int) -> Tuple[int, int]:
        n_fields = n_fields_true
        n_fields_limit = ims_import_params.get("n_fields_limit")
        if n_fields_limit is not None:
            n_fields = n_fields_limit

        start_field = ims_import_params.get("start_field", 0)
        if start_field + n_fields > n_fields_true:
            n_fields = n_fields_true - start_field

        return start_field, n_fields

    def clamp_cycles(n_cycles_true: int) -> Tuple[int, int]:
        n_cycles = n_cycles_true
        n_cycles_limit = ims_import_params.get("n_cycles_limit")
        if n_cycles_limit is not None:
            n_cycles = n_cycles_limit

        start_cycle = ims_import_params.get("start_cycle", 0)
        if start_cycle is None:
            start_cycle = 0
        if start_cycle + n_cycles > n_cycles_true:
            n_cycles = n_cycles_true - start_cycle

        return start_cycle, n_cycles

    tsv_data = tsv.load_tsv_for_folder(src_dir)

    # ALLOCATE the ImsImportResult
    ims_import_result = ImsImportResult(params=ims_import_params,
                                        tsv_data=Munch(tsv_data))

    dst_ch_i_to_src_ch_i = ims_import_params.dst_ch_i_to_src_ch_i
    if dst_ch_i_to_src_ch_i is None:
        dst_ch_i_to_src_ch_i = list(range(scan_result.n_channels))

    n_out_channels = len(dst_ch_i_to_src_ch_i)

    # Sanity check that we didn't end up with any src_channels outside of the channel range
    assert all([
        0 <= src_ch_i < scan_result.n_channels
        for src_ch_i in dst_ch_i_to_src_ch_i
    ])

    if ims_import_params.is_z_stack_single_file:
        field_iz, n_cycles_found = _z_stack_import(
            scan_result.nd2_paths[0],
            target_mea,
            ims_import_result,
            dst_ch_i_to_src_ch_i,
            ims_import_params.z_stack_n_slices_per_field,
        )
        n_cycles = ims_import_params.z_stack_n_slices_per_field

    elif ims_import_params.is_movie:
        if scan_result.mode == ScanFileMode.nd2:
            # "Movie mode" means that there aren't any chemical cycles, but rather we are using "cycles" to represent different images in a zstack
            start_field, n_fields = clamp_fields(len(scan_result.nd2_paths))

            # In movie mode, the n_fields from the .nd2 file becomes n_cycles
            scan_result.n_cycles = scan_result.n_fields
            start_cycle, n_cycles = clamp_cycles(scan_result.n_cycles)

            with zap.Context(progress=progress):
                field_iz, n_cycles_found = zap.arrays(
                    _do_movie_import_nd2,
                    dict(
                        input_field_i=list(
                            range(start_field, start_field + n_fields)),
                        output_field_i=list(range(n_fields)),
                    ),
                    _stack=True,
                    scan_result=scan_result,
                    start_cycle=start_cycle,
                    n_cycles=n_cycles,
                    target_mea=target_mea,
                    import_result=ims_import_result,
                    dst_ch_i_to_src_ch_i=dst_ch_i_to_src_ch_i,
                )
        elif scan_result.mode == ScanFileMode.npy:
            start_field, n_fields = clamp_fields(scan_result.n_fields)
            start_cycle, n_cycles = clamp_cycles(scan_result.n_cycles)

            with zap.Context(progress=progress):
                field_iz, n_cycles_found = zap.arrays(
                    _do_movie_import_npy,
                    dict(
                        input_field_i=list(
                            range(start_field, start_field + n_fields)),
                        output_field_i=list(range(n_fields)),
                    ),
                    _stack=True,
                    scan_result=scan_result,
                    start_cycle=start_cycle,
                    n_cycles=n_cycles,
                    target_mea=target_mea,
                    import_result=ims_import_result,
                    dst_ch_i_to_src_ch_i=dst_ch_i_to_src_ch_i,
                )
        else:
            raise NotImplementedError()

    else:
        start_field, n_fields = clamp_fields(scan_result.n_fields)

        if pipeline:
            pipeline.set_phase(0, 2)

        if scan_result.mode == ScanFileMode.nd2:
            scan_result.n_cycles = len(scan_result.nd2_paths)

            # SCATTER
            with zap.Context(mode="thread", progress=progress):
                zap.arrays(
                    _do_nd2_scatter,
                    dict(
                        cycle_i=list(range(len(scan_result.nd2_paths))),
                        src_path=scan_result.nd2_paths,
                    ),
                    _stack=True,
                    start_field=start_field,
                    n_fields=n_fields,
                    n_channels=scan_result.n_channels,
                    target_mea=target_mea,
                )

        elif scan_result.mode == ScanFileMode.tif:
            # SCATTER
            work_orders = [
                Munch(field_i=k[0], channel_i=k[1], cycle_i=k[2], path=path)
                for k, path in
                scan_result.tif_paths_by_field_channel_cycle.items()
            ]
            with zap.Context(trap_exceptions=False):
                results = zap.work_orders(_do_tif_scatter, work_orders)

            # CHECK that every file exists
            for f in range(n_fields):
                for ch in range(scan_result.n_channels):
                    for cy in range(scan_result.n_cycles):
                        expected = f"__{f:03d}-{ch:02d}-{cy:02d}.npy"
                        if expected not in results:
                            raise FileNotFoundError(
                                f"File is missing in tif pattern: {expected}")

        elif scan_result.mode == ScanFileMode.npy:
            # In npy mode there's no scatter as the files are already fully scattered
            pass

        else:
            raise ValueError(f"Unknown im import mode {scan_result.mode}")

        if pipeline:
            pipeline.set_phase(1, 2)

        # GATHER
        start_cycle, n_cycles = clamp_cycles(scan_result.n_cycles)

        with zap.Context(progress=progress):
            field_iz = zap.arrays(
                _do_gather,
                dict(
                    input_field_i=list(
                        range(start_field, start_field + n_fields)),
                    output_field_i=list(range(0, n_fields)),
                ),
                _stack=True,
                start_cycle=start_cycle,
                n_cycles=n_cycles,
                dim=target_mea,
                import_result=ims_import_result,
                mode=scan_result.mode,
                npy_paths_by_field_channel_cycle=scan_result.npy_paths_by_field_channel_cycle,
                dst_ch_i_to_src_ch_i=dst_ch_i_to_src_ch_i,
            )

    if reference_nd2_file_for_metadata:
        with _nd2(reference_nd2_file_for_metadata) as nd2:
            if hasattr(nd2, "metadata"):
                full = Munch(
                    metadata=nd2.metadata,
                    metadata_seq=nd2.metadata_seq,
                )
                ims_import_result._nd2_metadata_full = full

                def me(block_name, default=None):
                    return utils.block_search(full.metadata.SLxExperiment,
                                              block_name, default)

                def mp(block_name, default=None):
                    return utils.block_search(
                        full.metadata_seq.SLxPictureMetadata, block_name,
                        default)

                n_channels = mp("sPicturePlanes.uiSampleCount", 1)

                ims_import_result._nd2_metadata = Munch(
                    calibrated_pixel_size=mp("dCalibration"),
                    experiment_type="movie" if me("eType") == 1 else "edman",
                    n_cycles=me("uLoopPars.uiCount"),
                    cmd_before=me("wsCommandBeforeCapture"),
                    cmd_after=me("wsCommandAfterCapture"),
                    n_channels=n_channels,
                )

                per_channel = []
                for ch_i in range(n_channels):
                    laser_wavelength = None
                    laser_power = None
                    n_lasers = mp(
                        f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_uiMultiLaserLines0",
                        0,
                    )
                    for i in range(n_lasers):
                        is_used = mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_bMultiLaserLineUsed0-{i:02d}",
                            0,
                        )
                        if is_used == 1:
                            laser_wavelength = mp(
                                f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_uiMultiLaserLineWavelength0-{i:02d}",
                                0,
                            )
                            laser_power = mp(
                                f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_dMultiLaserLinePower0-{i:02d}",
                                0,
                            )

                    ch_munch = Munch(
                        laser_wavelength=laser_wavelength,
                        laser_power=laser_power,
                        camera_name=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.CameraUniqueName"
                        ),
                        sensor_pixels_x=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.sizeSensorPixels.cx"
                        ),
                        sensor_pixels_y=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.sizeSensorPixels.cy"
                        ),
                        sensor_microns_x=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.sizeSensorMicrons.cx"
                        ),
                        sensor_microns_y=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.sizeSensorMicrons.cy"
                        ),
                        bin_x=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.dBinningX"
                        ),
                        bin_y=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.dBinningY"
                        ),
                        format=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.fmtDesc.wszFormatDesc"
                        ),
                        roi_l=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.rectSensorUser.left"
                        ),
                        roi_r=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.rectSensorUser.right"
                        ),
                        roi_t=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.rectSensorUser.top"
                        ),
                        roi_b=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.FormatQuality.rectSensorUser.bottom"
                        ),
                        averaging=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.PropertiesQuality.Average"
                        ),
                        integration=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.PropertiesQuality.Integrate"
                        ),
                        name=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pCameraSetting.Metadata.Channels.Channel_0.Name"
                        ),
                        dichroic_filter=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_sFilterName0"
                        ),
                        emission_filter=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_sFilterName1"
                        ),
                        optivar=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_dZoomPosition"
                        ),
                        tirf_focus=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_dTIRFPositionFocus"
                        ),
                        tirf_align_x=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_dTIRFPositionX"
                        ),
                        tirf_align_y=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pDeviceSetting.m_dTIRFPositionY"
                        ),
                        objective_mag=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pObjectiveSetting.dObjectiveMag"
                        ),
                        objective_na=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pObjectiveSetting.dObjectiveNA"
                        ),
                        objective_refractive_index=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.pObjectiveSetting.dRefractIndex"
                        ),
                        settings_name=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.sOpticalConfigs.\x02.sOpticalConfigName"
                        ),
                        readout_mode=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.sSpecSettings.Readout Mode"
                        ),
                        readout_rate=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.sSpecSettings.Readout Rate"
                        ),
                        noise_filter=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.sSpecSettings.Noise Filter"
                        ),
                        temperature=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.sSpecSettings.Temperature"
                        ),
                        exposure=mp(
                            f"sPicturePlanes.sSampleSetting.a{ch_i}.dExposureTime"
                        ),
                    )
                    per_channel += [ch_munch]

                ims_import_result._nd2_metadata.update(**Munch(
                    per_channel=per_channel))

                if me("eType") == 1:
                    # Movie mode
                    ims_import_result._nd2_metadata.update(**Munch(
                        movie_start=me("dStart"),
                        movie_period=me("dPeriod"),
                        movie_duration=me("dDuration"),
                        movie_duration_pref=me("bDurationPref"),
                        movie_max_period_diff=me("dMaxPeriodDiff"),
                        movie_min_period_diff=me("dMinPeriodDiff"),
                        movie_avg_period_diff=me("dAvgPeriodDiff"),
                    ))

    ims_import_result.n_fields = len(field_iz)
    ims_import_result.n_channels = n_out_channels
    ims_import_result.n_cycles = n_cycles
    ims_import_result.dim = target_mea
    ims_import_result.dtype = np.dtype(OUTPUT_NP_TYPE).name
    ims_import_result.src_dir = src_dir

    # CLEAN
    for file in local.cwd // "__*":
        file.delete()

    return ims_import_result
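
Note: utils.is_power_of_2 and utils.next_power_of_2 are used above to round
target_mea up to a power of two; the standard bit-twiddling definitions they
presumably follow (hypothetical sketches):

def is_power_of_2(n: int) -> bool:
    return n > 0 and (n & (n - 1)) == 0

def next_power_of_2(n: int) -> int:
    return 1 << (n - 1).bit_length()

assert not is_power_of_2(1000) and next_power_of_2(1000) == 1024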
Example #12
    def main(self, job_folder=None):
        switches = utils.plumbum_switches(self)

        if job_folder is None:
            error(f"No job_folder was specified")
            return 1

        important(
            f"Plaster run {job_folder} limit={self.limit} started at {arrow.utcnow().format()}"
        )

        job_folder = assets.validate_job_folder_return_path(
            job_folder, allow_run_folders=True)
        if not job_folder.exists():
            error(f"Unable to find the path {job_folder}")
            return 1

        # Find all the plaster_run.yaml files. They might be in run subfolders
        found = list(
            job_folder.walk(filter=lambda p: p.name == "plaster_run.yaml"))
        run_dirs = [p.dirname for p in found]

        if len(run_dirs) == 0:
            error(
                "Plaster: Nothing to do because no run_dirs have plaster_run.yaml files"
            )
            return 1

        # A normal run where all happens in this process
        failure_count = 0
        for run_dir_i, run_dir in enumerate(sorted(run_dirs)):

            metrics(
                _type="plaster_start",
                run_dir=run_dir,
                run_dir_i=run_dir_i,
                run_dir_n=len(run_dirs),
                **switches,
            )
            important(
                f"Starting run subdirectory {run_dir}. {run_dir_i + 1} of {len(run_dirs)}"
            )

            try:
                with zap.Context(cpu_limit=self.cpu_limit,
                                 debug_mode=self.debug_mode):
                    run = RunExecutor(run_dir).load()
                    if "_erisyon" in run.config:
                        metrics(_type="erisyon_block", **run.config._erisyon)

                    failure_count += run.execute(
                        force=self.force,
                        limit=self.limit.split(",") if self.limit else None,
                        clean=self.clean,
                        n_fields_limit=self.n_fields_limit,
                        skip_s3=self.skip_s3,
                    )
            except Exception as e:
                failure_count += 1
                if not self.continue_on_error:
                    raise e

        if (failure_count == 0 and self.limit is None and not self.clean
                and not self.skip_reports):
            # RUN reports
            report_src_path = job_folder / "report.ipynb"
            report_dst_path = job_folder / "report.html"
            if report_src_path.exists() and (
                    self.force or utils.out_of_date(report_src_path, report_dst_path)):
                self.run_ipynb(report_src_path)
            return 0

        return failure_count