def test_obsep_deconv_read(self, data_test_dir, tmp_path):
    """Check that .obsep reading picks up deconvolution channels by file suffix.

    A ``*_deconv`` companion TIFF adds a channel; an unrelated suffix
    (``_decon2``) must be ignored.
    """
    # glob() already yields full paths (including the data_test_dir prefix),
    # so copy them directly.  The previous re-join of data_test_dir with the
    # glob result duplicated the prefix whenever data_test_dir was relative.
    for el in glob(os.path.join(data_test_dir, "obsep", "*")):
        shutil.copy(el, tmp_path)
    image = GenericImageReader.read_image(tmp_path / "test.obsep")
    assert image.channels == 2
    assert np.allclose(image.spacing, (500 * 10**-9, 64 * 10**-9, 64 * 10**-9))
    assert image.channel_names == ["channel 1", "channel 2"]
    # A file whose suffix is not exactly "_deconv" must not add a channel.
    shutil.copy(tmp_path / "Cy5.TIF", tmp_path / "Cy5_decon2.TIF")
    image = GenericImageReader.read_image(tmp_path / "test.obsep")
    assert image.channels == 2
    # A proper "_deconv" companion adds a third channel.
    shutil.copy(tmp_path / "Cy5.TIF", tmp_path / "Cy5_deconv.TIF")
    image = GenericImageReader.read_image(tmp_path / "test.obsep")
    assert image.channels == 3
def load(
    cls,
    load_locations: typing.List[typing.Union[str, BytesIO, Path]],
    range_changed: typing.Callable[[int, int], typing.Any] = None,
    step_changed: typing.Callable[[int], typing.Any] = None,
    metadata: typing.Optional[dict] = None,
):
    """Load an image together with its mask.

    If only one location is given, the mask path is derived by inserting a
    ``_mask`` suffix before the file extension; the derived file must exist.

    :param load_locations: image path, optionally followed by the mask path
    :param range_changed: progress callback (overall range) — may be ``None``
    :param step_changed: progress callback (current step) — may be ``None``
    :param metadata: optional dict; ``default_spacing`` is used when the file
        carries no spacing information
    :raises ValueError: when the mask file cannot be determined
    """
    if metadata is None:
        metadata = {"default_spacing": (10**-6, 10**-6, 10**-6)}
    if len(load_locations) == 1:
        new_path, ext = os.path.splitext(load_locations[0])
        new_path += "_mask" + ext
        if not os.path.exists(new_path):
            raise ValueError("Cannot determine mask file. It need to have '_mask' suffix.")
        # FIX: append the derived mask *path*; the original appended the list
        # itself, making load_locations[1] a list instead of a file path.
        load_locations.append(new_path)
    image = GenericImageReader.read_image(
        load_locations[0],
        load_locations[1],
        callback_function=partial(proxy_callback, range_changed, step_changed),
        default_spacing=tuple(metadata["default_spacing"]),
    )
    return ProjectTuple(load_locations[0], image, mask=image.mask)
def load(
    cls,
    load_locations: typing.List[typing.Union[str, BytesIO, Path]],
    range_changed: typing.Callable[[int, int], typing.Any] = None,
    step_changed: typing.Callable[[int], typing.Any] = None,
    metadata: typing.Optional[dict] = None,
):
    """Load an image, re-reading once more if every channel range is degenerate.

    A degenerate range (min == max for all channels) suggests a partially
    written file; the read is retried up to ``recursion_limit`` times.

    :param load_locations: list whose first element is the image location
    :param range_changed: progress callback (overall range) — may be ``None``
    :param step_changed: progress callback (current step) — may be ``None``
    :param metadata: optional dict with ``default_spacing`` and
        ``recursion_limit`` entries
    """
    if metadata is None:
        metadata = {"default_spacing": tuple(1 / UNIT_SCALE[Units.nm.value] for _ in range(3))}
    if "recursion_limit" not in metadata:
        # Copy before mutating: the caller's dict must stay untouched.
        metadata = copy(metadata)
        metadata["recursion_limit"] = 3
    image = GenericImageReader.read_image(
        load_locations[0],
        callback_function=partial(proxy_callback, range_changed, step_changed),
        default_spacing=tuple(metadata["default_spacing"]),
    )
    # Retry only when *every* channel range collapsed to a single value.
    re_read = all(el[0] == el[1] for el in image.get_ranges())
    if re_read and metadata["recursion_limit"] > 0:
        metadata["recursion_limit"] -= 1
        # FIX: return the retried result; the original discarded the
        # recursive call and always returned the first (suspect) read.
        return cls.load(load_locations, range_changed, step_changed, metadata)
    return ProjectTuple(load_locations[0], image)
def load(
    cls,
    load_locations: typing.List[typing.Union[str, BytesIO, Path]],
    range_changed: typing.Callable[[int, int], typing.Any] = None,
    step_changed: typing.Callable[[int], typing.Any] = None,
    metadata: typing.Optional[dict] = None,
) -> MaskProjectTuple:
    """Read a single image and wrap it in a MaskProjectTuple with no selection.

    :param load_locations: list whose first element is the image location
    :param range_changed: progress callback (overall range) — may be ``None``
    :param step_changed: progress callback (current step) — may be ``None``
    :param metadata: optional dict providing ``default_spacing``
    """
    if metadata is None:
        metadata = {"default_spacing": (10**-6,) * 3}
    progress_callback = partial(proxy_callback, range_changed, step_changed)
    image = GenericImageReader.read_image(
        load_locations[0],
        callback_function=progress_callback,
        default_spacing=metadata["default_spacing"],
    )
    return MaskProjectTuple(image.file_path, image, selected_components=[])
def load(
    cls,
    load_locations: typing.List[typing.Union[str, BytesIO, Path]],
    range_changed: typing.Callable[[int, int], typing.Any] = None,
    step_changed: typing.Callable[[int], typing.Any] = None,
    metadata: typing.Optional[dict] = None,
) -> SegmentationTuple:
    """Load a segmentation and attach its (re-read) base image.

    The base image path is either given explicitly as the second load
    location, or taken from the segmentation file itself; relative paths are
    resolved against the segmentation file's directory.

    :raises IOError: when the base file is undefined, cannot be resolved, or
        does not exist
    """
    seg_project = LoadSegmentation.load(load_locations)
    base_file = load_locations[1] if len(load_locations) > 1 else seg_project.image
    if base_file is None:
        raise IOError("base file for segmentation not defined")
    if os.path.isabs(base_file):
        file_path = base_file
    elif isinstance(load_locations[0], str):
        # Resolve relative base paths against the segmentation file location.
        file_path = os.path.join(os.path.dirname(load_locations[0]), base_file)
    else:
        raise IOError(f"Cannot use relative path {base_file} for non path argument")
    if not os.path.exists(file_path):
        raise IOError(f"Base file for segmentation do not exists: {base_file} -> {file_path}")
    if metadata is None:
        metadata = {"default_spacing": (10**-6, 10**-6, 10**-6)}
    image = GenericImageReader.read_image(
        file_path,
        callback_function=partial(proxy_callback, range_changed, step_changed),
        default_spacing=metadata["default_spacing"],
    )
    # noinspection PyProtectedMember
    # image.file_path = load_locations[0]
    return dataclasses.replace(
        seg_project,
        file_path=image.file_path,
        image=image,
        segmentation=image.fit_array_to_image(seg_project.segmentation),
    )
def load_project(
    file: typing.Union[str, Path, tarfile.TarFile, TextIOBase, BufferedIOBase, RawIOBase, IOBase]
) -> ProjectTuple:
    """Load project from archive.

    Reads image, segmentation, optional mask, algorithm parameters and history
    from a PartSeg analysis tar archive.  Archives written by a newer PartSeg
    than this build are still loaded, but the returned tuple carries a warning
    in ``errors``.

    :param file: path to the archive, or an already-open tar/stream
    :raises WrongFileTypeException: when the archive is not an analysis project
    """
    tar_file, file_path = open_tar_file(file)
    try:
        if check_segmentation_type(tar_file) != SegmentationType.analysis:
            raise WrongFileTypeException()
        # Image is always stored as "image.tif"; read it through a memory buffer.
        image_buffer = BytesIO()
        image_tar = tar_file.extractfile(tar_file.getmember("image.tif"))
        image_buffer.write(image_tar.read())
        image_buffer.seek(0)
        reader = GenericImageReader()
        image = reader.read(image_buffer, ext=".tif")
        image.file_path = file_path
        algorithm_str = tar_file.extractfile("algorithm.json").read()
        algorithm_dict = load_metadata(algorithm_str)
        algorithm_dict = update_algorithm_dict(algorithm_dict)
        # Archives older than the "project_version_info" key are treated as 1.0.
        try:
            version = parse_version(
                json.loads(tar_file.extractfile("metadata.json").read())["project_version_info"]
            )
        except KeyError:
            version = Version("1.0")
        if version == Version("1.0"):
            # Legacy layout: segmentation (and optional mask) inside one npz.
            seg_dict = np.load(tar_to_buff(tar_file, "segmentation.npz"))
            mask = seg_dict["mask"] if "mask" in seg_dict else None
            segmentation = seg_dict["segmentation"]
        else:
            segmentation = tifffile.imread(tar_to_buff(tar_file, "segmentation.tif"))
            if "mask.tif" in tar_file.getnames():
                mask = tifffile.imread(tar_to_buff(tar_file, "mask.tif"))
                if np.max(mask) == 1:
                    # FIX: np.bool was removed in NumPy 1.24; builtin bool is
                    # the documented replacement for astype.
                    mask = mask.astype(bool)
            else:
                mask = None
        history = []
        try:
            history_buff = tar_file.extractfile(tar_file.getmember("history/history.json")).read()
            history_json = load_metadata(history_buff)
            for el in history_json:
                history_buffer = BytesIO()
                history_buffer.write(tar_file.extractfile(f"history/arrays_{el['index']}.npz").read())
                history_buffer.seek(0)
                el = update_algorithm_dict(el)
                segmentation_parameters = {
                    "algorithm_name": el["algorithm_name"],
                    "values": el["values"],
                }
                history.append(
                    HistoryElement(
                        segmentation_parameters=segmentation_parameters,
                        mask_property=el["mask_property"],
                        arrays=history_buffer,
                    )
                )
        except KeyError:
            # No history in the archive — that is a valid, empty-history project.
            pass
    finally:
        # Only close archives we opened ourselves (caller-provided tar objects
        # remain the caller's responsibility).
        if isinstance(file, (str, Path)):
            tar_file.close()
    image.set_mask(mask)
    if version <= project_version_info:
        return ProjectTuple(
            file_path=file_path,
            image=image,
            roi=segmentation,
            mask=mask,
            history=history,
            algorithm_parameters=algorithm_dict,
        )
    print(
        "This project is from new version of PartSeg:",
        version,
        project_version_info,
        file=sys.stderr,
    )
    return ProjectTuple(
        file_path=file_path,
        image=image,
        roi=segmentation,
        mask=mask,
        history=history,
        algorithm_parameters=algorithm_dict,
        errors="This project is from new version of PartSeg. It may load incorrect.",
    )
def test_generic_reader(self, data_test_dir):
    """Smoke test: GenericImageReader opens every supported sample format."""
    # Image + explicit mask pair.
    GenericImageReader.read_image(
        os.path.join(data_test_dir, "stack1_components", "stack1_component1.tif"),
        os.path.join(data_test_dir, "stack1_components", "stack1_component1_mask.tif"),
    )
    # Single-file formats, read one after another.
    for sample in (
        "test_czi.czi",
        "test_lsm2.tif",
        "test_lsm.tif",
        "Image0003_01.oif",
        "N2A_H2BGFP_dapi_falloidin_cycling1.oib",
    ):
        GenericImageReader.read_image(os.path.join(data_test_dir, sample))
# Ad-hoc debug script: run the "Only Threshold" preview algorithm on a local
# .obsep image using a serialized segmentation profile.
from PartSegCore.io_utils import load_metadata_base
from PartSegCore.segmentation.segmentation_algorithm import ThresholdPreview
from PartSegImage import GenericImageReader

# NOTE(review): hard-coded developer-local path — this script only runs on
# that machine; parameterize before reuse.
file_path = "/home/czaki/Dokumenty/smFish/smFISH_7_001_with_points/test.obsep"
# Serialized SegmentationProfile: "Only Threshold" on channel 1, Gauss noise
# filtering (radius 1.0), threshold 300.  Kept as a literal JSON string so it
# round-trips through load_metadata_base exactly as saved by PartSeg.
profile_str = '{"__SegmentationProfile__": true, "name": "", "algorithm": "Only Threshold", "values": {"channel": 1, "noise_filtering": {"name": "Gauss", "values": {"dimension_type": {"__Enum__": true, "__subtype__": "PartSegCore.segmentation.noise_filtering.DimensionType", "value": 1}, "radius": 1.0}}, "threshold": 300}}'
profile = load_metadata_base(profile_str)
image = GenericImageReader.read_image(file_path)
algorithm = ThresholdPreview()
algorithm.set_image(image)
algorithm.set_parameters(**profile.values)
# calculation_run takes a progress callback; this one just echoes the first
# argument (presumably the step number — verify against the algorithm API).
res = algorithm.calculation_run(lambda x, y: x)