def main(config_fpath):
    """Segment every stack in the input dataset and write results to a new dataset.

    Builds one processing spec per stack tuple in the image dataset, runs
    process_image_and_series on each, and collects outputs plus a README in a
    dtoolcore-created output dataset.

    Args:
        config_fpath: Path to the configuration file understood by Config.
    """
    logging.basicConfig(level=logging.INFO)

    config = Config(config_fpath)
    ids = ImageDataSet(config.ids_uri)

    specs = [
        stack_tuple_to_spec(tp)
        for tp in ids.all_possible_stack_tuples()
    ]

    with dtoolcore.DataSetCreator(
        config.output_name, config.output_base_uri
    ) as output_ds:
        logging.info(f"Monitor at: {output_ds.staging_directory}")
        for spec in specs:
            try:
                logging.info(f"Processing {spec}")
                process_image_and_series(config, spec, output_ds)
            except KeyError:
                # Best-effort: one failed stack shouldn't abort the run.
                # logging.exception records the traceback, which
                # logging.error did not.
                logging.exception(f"Failed on {spec}")

        readme_str = config.as_readme_format()
        output_ds.put_readme(readme_str)
def process_image_and_series(config, spec, output_ds):
    """Segment one image/series stack and stage wall + segmentation TIFFs.

    Loads the wall channel for the given spec, rescales it so voxel scales
    match, runs a watershed segmentation at the configured level, and stages
    both the rescaled wall stack and the segmentation in output_ds.

    Args:
        config: Run configuration (provides ids_uri, wall_channel, params).
        spec: Object with image_name / series_name attributes.
        output_ds: dtoolcore dataset creator used to stage the outputs.
    """
    imageds = ImageDataSet(config.ids_uri)

    wall_stack = imageds.get_stack(
        spec.image_name, spec.series_name, 0, config.wall_channel
    )
    # Use the logging module consistently (the original mixed logger.info
    # with logging.info in the same function).
    logging.info(f"Loaded {spec.series_name} with shape {wall_stack.shape}")
    zoomed_wall_stack = zoom_to_match_scales(wall_stack)

    # Sanitise the series name for use in output filenames.
    snfmt = spec.series_name.replace(" ", "_").replace("#", "")

    level = config.params['level']
    logging.info(f"Segmenting with level {level}")
    # Reuse the already-bound level rather than reading config.params again.
    segmentation = sitk_watershed_segmentation(zoomed_wall_stack, level=level)

    wall_output_relpath = f"{spec.image_name}_{snfmt}_cell_wall.tif"
    seg_output_relpath = f"{spec.image_name}_{snfmt}_segmentation_L{level}.tif"

    wall_output_abspath = output_ds.prepare_staging_abspath_promise(wall_output_relpath)
    seg_output_abspath = output_ds.prepare_staging_abspath_promise(seg_output_relpath)

    segmentation.save(seg_output_abspath)
    zoomed_wall_stack.save(wall_output_abspath)
def annotated_specs_from_config(config):
    """Generate candidate item specs and annotate each with dataset details."""
    candidate_specs = generate_candidate_item_specs(config)
    ids = ImageDataSet(config.ids_uri)

    # Config may not define specifier_tuple_index; fall back to the first
    # tuple element. (NOTE: attribute access here raises KeyError, not
    # AttributeError -- presumably Config implements __getattr__ that way.)
    try:
        key_index = config.specifier_tuple_index
    except KeyError:
        key_index = 0

    tuple_lookup = {}
    for stack_tuple in ids.all_possible_stack_tuples():
        tuple_lookup[stack_tuple[key_index]] = stack_tuple

    def annotate_spec(spec):
        # Resolve the spec against the dataset's stack tuples, then attach
        # output filenames derived from the configured templates.
        spec_key = spec.template_repr(config.segmentation_spec_template)
        spec.image_name, spec.series_name, spec.sidx = tuple_lookup[spec_key]
        spec.fname = spec.template_repr(config.segmentation_fname_template)
        spec.regions_fname = spec.template_repr(config.regions_fname_template)
        return spec

    return [annotate_spec(spec) for spec in candidate_specs]
def main(ids_uri, output_dirpath):
    """Project and save every image/series pair found in the dataset."""
    logging.basicConfig(level=logging.INFO)

    ids = ImageDataSet(ids_uri)

    output_dirpath = Path(output_dirpath)
    output_dirpath.mkdir(parents=True, exist_ok=True)

    for image_name, series_name in ids.get_image_series_name_pairs():
        project_and_save(ids, image_name, series_name, output_dirpath)
class DataLoader(object):
    """Load segmentations, matching image stacks and region/file metadata.

    Pairs an ImageDataSet with a directory of per-root segmentation outputs.
    """

    def __init__(self, dataset_uri, segmentations_dirpath):
        self.ids = ImageDataSet(dataset_uri)
        self.segmentations_dirpath = segmentations_dirpath
        # Maps the first element of each (image, series) pair to the second.
        self.name_lookup = dict(self.ids.get_image_series_name_pairs())

    @property
    def root_names(self):
        # The lookup keys double as the "root" names used by load_by_name.
        return list(self.name_lookup.keys())

    def get_stack_by_series_name(self, series_name):
        # NOTE(review): despite the parameter name, the value passed in is a
        # lookup KEY (same namespace as root_names), and the paired name is
        # what the lookup returns. The original bound these to swapped local
        # names and contained a no-op `series_name = series_name`; both are
        # cleaned up here without changing the get_stack argument order.
        paired_name = self.name_lookup[series_name]
        return self.ids.get_stack(series_name, paired_name)

    def load_file_data(self, file_fpath):
        """Read a rid->file CSV and invert it into a file-id -> [rids] map.

        Rows whose file id is 0 are treated as unassigned and skipped.
        """
        df = pd.read_csv(file_fpath, names=['rid', 'file'])
        file_lookup = pd.Series(df.file.values, index=df.rid).to_dict()

        fids_to_rids = defaultdict(list)
        for rid, fid in file_lookup.items():
            if fid != 0:
                fids_to_rids[fid].append(rid)

        return fids_to_rids

    def load_by_name(self, name):
        """Load segmentation, zoomed measure stack and file map for one root.

        Returns:
            Tuple of (segmentation, measure_stack, fids_to_rids).
        """
        base_dirpath = os.path.join(self.segmentations_dirpath, name)
        file_info_fname = "Root_segments.tif.csv"
        segmentation_name = "Root_segments.tif.tif"

        segmentation_fpath = os.path.join(base_dirpath, segmentation_name)
        volume = volread(segmentation_fpath)
        # Reorder axes so the stack's plane index becomes the last axis --
        # assumed (z, r, c) on disk to (r, c, z) in memory; confirm against
        # the segmentation writer.
        transposed = np.transpose(volume, axes=(1, 2, 0))
        segmentation = transposed.view(Segmentation3D)

        raw_stack = self.get_stack_by_series_name(name)
        measure_stack = zoom_to_match_scales(raw_stack)

        file_info_fpath = os.path.join(base_dirpath, file_info_fname)
        fids_to_rids = self.load_file_data(file_info_fpath)

        return segmentation, measure_stack, fids_to_rids
def main(ids_uri, nuclear_channel_first, probe_channel):
    """Extract a template from the third fca1 series and save it as a PNG."""
    logging.basicConfig(level=logging.INFO)

    ids = ImageDataSet(ids_uri)

    all_pairs = ids.get_image_series_name_pairs()
    fca1_pairs = [pair for pair in all_pairs if pair[0].startswith("fca1")]

    # Hard-coded choice of the third matching pair.
    image_name, series_name = fca1_pairs[2]
    logger.info(f"Processing {image_name}/{series_name}")

    template = extract_template(
        ids, image_name, series_name, nuclear_channel_first, probe_channel - 1
    )
    template.view(Image).save("template.png")
def __init__(self, config, spec, use_deconv=True):
    """Load FISH image, deconvolved stack and masks for one spec.

    Args:
        config: Run configuration providing name templates, paths, ids_uri.
        spec: Mapping used to fill the configured name templates.
        use_deconv: When True the deconvolved stack is used as the probe
            stack; otherwise the first raw probe channel is used.
    """
    self.config = config
    self.ids = ImageDataSet(self.config.ids_uri)

    image_name = self.config.image_name_template.format(**spec)
    series_name = self.config.series_name_template.format(**spec)
    self.fishimage = FISHImage.from_ids_im_sn(
        self.ids, image_name, series_name, True  # nuclear channel first
    )

    deconv_fname = self.config.deconv_fname_template.format(**spec)
    deconv_fpath = os.path.join(self.config.deconv_dirpath, deconv_fname)
    # Clip to suppress extreme deconvolution values.
    self.deconv_stack = np.clip(Image3D.from_file(deconv_fpath), 0, 10000)

    nuclei_shape = self.fishimage.nuclei.shape
    if self.deconv_stack.shape != nuclei_shape:
        logger.warning("Deconv stack doesn't match shape, trimming")
        rdim, cdim, zdim = nuclei_shape
        self.deconv_stack = self.deconv_stack[:rdim, :cdim, :zdim]

    sl = get_slice(config, spec)
    self.good_mask = mask_from_template_and_spec(
        config.good_template, config, spec, sl)
    self.bad_mask = mask_from_template_and_spec(
        config.bad_template, config, spec, sl)
    self.nuc_mask = mask_from_template_and_spec(
        config.nuc_template, config, spec, sl)

    if use_deconv:
        self.probe_stack = self.deconv_stack
    else:
        self.probe_stack = self.fishimage.probes[0]
def get_stack_by_imname_sname(ids_uri, imname, sname, channel=0):
    """Fetch one stack by image/series name, rescaled to matching voxel scales."""
    dataset = ImageDataSet.from_uri(ids_uri)
    return zoom_to_match_scales(dataset.get_stack(imname, sname, channel=channel))
def main(config_fpath):
    """Measure all regions for every not-yet-completed annotated spec.

    Builds annotated specs from the config, subtracts those already present
    in the working directory, and runs the region measurements on the rest.

    Args:
        config_fpath: Path to the configuration file read by Config.
    """
    logging.basicConfig(level=logging.INFO)

    config = Config.from_fpath(config_fpath)

    specs = generate_candidate_item_specs(config)
    ids = ImageDataSet(config.ids_uri)

    # Config may not define specifier_tuple_index; default to the first
    # tuple element. (Attribute access raises KeyError here.)
    try:
        sidx = config.specifier_tuple_index
    except KeyError:
        sidx = 0

    tuple_lookup = {st[sidx]: st for st in ids.all_possible_stack_tuples()}

    def annotate_spec(spec):
        # Resolve the spec against the dataset and attach output filenames.
        spec_key = spec.template_repr(config.segmentation_spec_template)
        spec.image_name, spec.series_name, spec.sidx = tuple_lookup[spec_key]
        spec.fname = spec.template_repr(config.segmentation_fname_template)
        spec.regions_fname = spec.template_repr(config.regions_fname_template)
        return spec

    annotated_specs = [annotate_spec(spec) for spec in specs]

    completed_specs = get_all_specs(config.working_dirpath)
    remaining_specs = set(annotated_specs) - set(completed_specs)
    logging.info(
        f"Completed {len(completed_specs)}, {len(remaining_specs)} left")

    def measure_from_spec(spec):
        sms = InitialsSMS.from_config_and_spec(config, spec)
        measures = measure_all_regions(sms)
        # Specs without an explicit root number default to root 1.
        try:
            measures['root_number'] = spec.n
        except AttributeError:
            measures['root_number'] = 1
        return measures

    # The original bound unused names (selected_specs, all_measures);
    # run_process is invoked for its side effects.
    run_process(remaining_specs, measure_from_spec, config)
def main(dataset_uri):
    """Save the zoomed wall stack for one hard-coded image/series pair."""
    logging.basicConfig(level=logging.INFO)

    imageds = ImageDataSet(dataset_uri)
    print(imageds.all_possible_stack_tuples())

    # Hard-coded target pair and channel.
    image_name = '20200309_lhp1_W10_T14'
    series_name = 'SDB995-5_01'
    wall_channel = 1

    logging.info("Loading wall stack")
    wall_stack = imageds.get_stack(image_name, series_name, 0, wall_channel)

    logging.info("Adjusting scales")
    zoomed_wall_stack = zoom_to_match_scales(wall_stack)

    zoomed_wall_stack.save(f'{image_name}_{series_name}_wall.tif')
def main(dataset_uri):
    """Measure all items; fall back to the URI on data.yml's first line."""
    if dataset_uri is None:
        # First line of data.yml holds the default dataset URI.
        with open('data.yml') as fh:
            dataset_uri = fh.readline().strip()

    measure_all_items(ImageDataSet(dataset_uri))
def main(dataset_uri, output_dirpath):
    """Segment every series of every image in the dataset into output_dirpath.

    Args:
        dataset_uri: URI of the ImageDataSet to process.
        output_dirpath: Directory that receives one TIFF per series.
    """
    logging.basicConfig(level=logging.INFO)

    imageds = ImageDataSet(dataset_uri)
    output_dirpath = pathlib.Path(output_dirpath)

    # Segmentation parameters applied uniformly to all series (hoisted out
    # of the loop -- they are invariant).
    level = 0.3
    nsegments = 5000
    wall_channel = 1

    for image_name in imageds.get_image_names():
        for series_name in imageds.get_series_names(image_name):
            logging.info(f"Processing {image_name} {series_name}")

            output_filename = f'{image_name}_{series_name}_L{level}.tif'
            output_fpath = output_dirpath / output_filename

            # BUG FIX: the original computed output_fpath but then passed
            # the bare output_filename, so results landed in the CWD
            # instead of output_dirpath.
            segment_image_from_dataset(imageds, image_name, series_name,
                                       wall_channel, output_fpath, level,
                                       nsegments)
def get_stack_by_name(ids_uri, root_name, channel=0):
    """Load the stack for a root name and rescale to matching voxel scales."""
    ids = ImageDataSet.from_uri(ids_uri)
    # Pairs map the root (image) name to its series name.
    lookup = dict(ids.get_image_series_name_pairs())
    raw = ids.get_stack(root_name, lookup[root_name], channel=channel)
    return zoom_to_match_scales(raw)
def main(ids_uri, nuclear_channel_first, probe_channel, template_path):
    """Process every Ler-prefixed image/series pair, optionally with a template."""
    logging.basicConfig(level=logging.INFO)

    ids = ImageDataSet(ids_uri)

    # NOTE: renamed from the original's fca3_pairs -- the filter is "Ler".
    ler_pairs = [
        (im, sn)
        for im, sn in ids.get_image_series_name_pairs()
        if im.startswith("Ler")
    ]

    template = Image.from_file(template_path) if template_path is not None else None

    for image_name, series_name in ler_pairs:
        logger.info(f"Processing {image_name}/{series_name}")
        process_pair(ids, image_name, series_name, nuclear_channel_first,
                     probe_channel, template)
def main(ids_uri, imname):
    """Dump a YAML processing config for every series of one image to stdout."""
    output_dirpath = "results/merges"
    seg_dirpath = "local-data/segmentations"

    ids = ImageDataSet(ids_uri)

    # OrderedDict keeps the emitted YAML keys in insertion order.
    config_dict = collections.OrderedDict()
    config_dict["ids_uri"] = ids_uri
    config_dict["output_dirpath"] = output_dirpath
    config_dict["seg_dirpath"] = seg_dirpath
    config_dict[
        "output_fname_template"] = "{imname}-{sname}-thresh-and-merge.png"
    config_dict["to_process"] = [
        {"imname": imname, "sname": sname}
        for sname in ids.get_series_names(imname)
    ]

    ruamel.yaml.YAML().dump(config_dict, sys.stdout)
def main(dataset_uri):
    """Run a segmentation for one hard-coded image/series pair."""
    logging.basicConfig(level=logging.INFO)

    imageds = ImageDataSet(dataset_uri)
    print(imageds.all_possible_stack_tuples())

    # Hard-coded target pair and parameters.
    image_name = '20200309_lhp1_W10_T14'
    series_name = 'SDB995-5_03'
    wall_channel = 1
    level = 0.3

    output_filename = f'{image_name}_{series_name}_L{level}.tif'

    segment_image_from_dataset(
        imageds,
        image_name,
        series_name,
        wall_channel,
        output_filename,
        level=level,
        nsegments=5000,
    )
def __init__(self, config):
    """Wrap a config mapping and open its image dataset.

    Args:
        config: Mapping with at least an "ids_uri" key; exposed for
            attribute-style access via SimpleNamespace.
    """
    self.config = SimpleNamespace(**config)
    self.ids = ImageDataSet(self.config.ids_uri)
def __init__(self, dataset_uri, segmentations_dirpath):
    """Open the image dataset and index its image/series name pairs.

    Args:
        dataset_uri: URI passed to ImageDataSet.
        segmentations_dirpath: Directory holding segmentation outputs.
    """
    self.ids = ImageDataSet(dataset_uri)
    self.segmentations_dirpath = segmentations_dirpath
    # Maps the first element of each (image, series) pair to the second.
    self.name_lookup = dict(self.ids.get_image_series_name_pairs())
# Module-level cache keyed by the full argument tuple, so repeated lookups
# for the same stack don't re-open the dataset or re-read the data.
_stack_cache = {}


def get_stack_cached(ids_uri, image_name, series_name, channel):
    """Return the stack for (image, series, channel), caching results.

    NOTE(review): the original, despite its name, performed no caching and
    re-created the ImageDataSet on every call. If caching was applied by an
    external decorator (e.g. joblib memory) elsewhere -- confirm -- this
    in-module cache remains compatible.
    """
    key = (ids_uri, image_name, series_name, channel)
    if key not in _stack_cache:
        image_ds = ImageDataSet(ids_uri)
        _stack_cache[key] = image_ds.get_stack(
            image_name, series_name, 0, channel=channel)
    return _stack_cache[key]