def save_components(
    image: Image,
    components: list,
    segmentation: np.ndarray,
    dir_path: str,
    segmentation_info: typing.Optional[SegmentationInfo] = None,
    range_changed=None,
    step_changed=None,
):
    """Save each selected segmentation component as a cropped image/mask TIFF pair.

    For every component label ``i`` in *components* the component's bounding box is
    cut from *segmentation* and *image*, the cut image is masked to the component,
    and two files are written into *dir_path*:
    ``<name>_component<i>.tif`` and ``<name>_component<i>_mask.tif``
    (``<name>`` is the basename of ``image.file_path`` without extension).

    :param image: source image; also provides ``file_path`` for output naming
    :param components: component labels to export
    :param segmentation: labeled array, fitted to the image's axes before use
    :param dir_path: output directory (created if missing)
    :param segmentation_info: precomputed bounds; built from *segmentation* if None
    :param range_changed: progress callback ``(min, max)``; defaults to a no-op
    :param step_changed: progress callback ``(step)``; defaults to a no-op
    """
    if range_changed is None:
        range_changed = empty_fun
    if step_changed is None:
        step_changed = empty_fun
    segmentation = image.fit_array_to_image(segmentation)
    if segmentation_info is None:
        segmentation_info = SegmentationInfo(segmentation)
    os.makedirs(dir_path, exist_ok=True)
    file_name = os.path.splitext(os.path.basename(image.file_path))[0]
    range_changed(0, 2 * len(components))
    for i in components:
        slices = segmentation_info.bound_info[i].get_slices()
        cut_segmentation = segmentation[tuple(slices)]
        cut_image = image.cut_image(slices)
        # replace_mask=True: the component membership mask becomes the image mask
        im = cut_image.cut_image(cut_segmentation == i, replace_mask=True)
        ImageWriter.save(im, os.path.join(dir_path, f"{file_name}_component{i}.tif"))
        # NOTE(review): progress steps use the component *label* i, not an
        # enumeration index — assumes labels are 1..len(components); confirm.
        step_changed(2 * i + 1)
        ImageWriter.save_mask(im, os.path.join(dir_path, f"{file_name}_component{i}_mask.tif"))
        step_changed(2 * i + 2)
def test_load_files(self, part_settings, qtbot, monkeypatch, tmp_path):
    """Exercise both load paths of MultipleFileWidget: direct execute_load_files
    and the dialog-driven load_files, verifying the recent-files bookkeeping
    survives a settings dump/load round-trip."""
    mf_widget = MultipleFileWidget(part_settings, {LoadStackImage.get_name(): LoadStackImage})
    qtbot.add_widget(mf_widget)
    image_paths = [tmp_path / f"img_{i}.tif" for i in range(5)]
    for image_path in image_paths:
        sample = Image(np.random.random((10, 10)), image_spacing=(1, 1), axes_order="XY")
        ImageWriter.save(sample, image_path)
    file_list = [[[str(image_path)], LoadStackImage.get_name()] for image_path in image_paths]
    load_property = LoadProperty(
        [str(image_path) for image_path in image_paths],
        LoadStackImage.get_name(),
        LoadStackImage,
    )
    # First path: call the loading routine directly.
    with qtbot.waitSignal(mf_widget._add_state, check_params_cb=self.check_load_files):
        mf_widget.execute_load_files(load_property, lambda x, y: True, lambda x: True)
    assert mf_widget.file_view.topLevelItemCount() == 5
    assert part_settings.get_last_files_multiple() == file_list
    # Reset widget state before exercising the dialog path.
    mf_widget.file_view.clear()
    mf_widget.state_dict.clear()
    mf_widget.file_list.clear()
    # Second path: stub the dialog so load_files() gets our prepared result.
    monkeypatch.setattr(MultipleLoadDialog, "exec_", lambda x: True)
    monkeypatch.setattr(MultipleLoadDialog, "get_result", lambda x: load_property)
    with qtbot.waitSignal(mf_widget._add_state, check_params_cb=self.check_load_files):
        mf_widget.load_files()
    assert mf_widget.file_view.topLevelItemCount() == 5
    assert part_settings.get_last_files_multiple() == file_list
    # Recent-files list must persist across settings serialization.
    part_settings.dump()
    part_settings.load()
    assert part_settings.get_last_files_multiple() == file_list
def test_load_recent(self, part_settings, qtbot, monkeypatch, tmp_path):
    """Exercise recent-file reloading: the direct load_recent_fun call and the
    LoadRecentFiles-dialog-driven load_recent path."""
    mf_widget = MultipleFileWidget(part_settings, {LoadStackImage.get_name(): LoadStackImage})
    qtbot.add_widget(mf_widget)
    for idx in range(5):
        sample = Image(np.random.random((10, 10)), image_spacing=(1, 1), axes_order="XY")
        ImageWriter.save(sample, tmp_path / f"img_{idx}.tif")
    # Note: entries hold Path objects (not str) — matches what load_recent expects.
    file_list = [
        [
            [
                tmp_path / f"img_{idx}.tif",
            ],
            LoadStackImage.get_name(),
        ]
        for idx in range(5)
    ]
    # First path: feed the recent-file list straight to load_recent_fun.
    with qtbot.waitSignal(mf_widget._add_state, check_params_cb=self.check_load_files):
        mf_widget.load_recent_fun(file_list, lambda x, y: True, lambda x: True)
    assert part_settings.get_last_files_multiple() == file_list
    assert mf_widget.file_view.topLevelItemCount() == 5
    # Reset widget state before the dialog-driven path.
    mf_widget.file_view.clear()
    mf_widget.state_dict.clear()
    mf_widget.file_list.clear()
    # Second path: stub the dialog so load_recent() receives our file list.
    monkeypatch.setattr(LoadRecentFiles, "exec_", lambda x: True)
    monkeypatch.setattr(LoadRecentFiles, "get_files", lambda x: file_list)
    with qtbot.waitSignal(mf_widget._add_state, check_params_cb=self.check_load_files):
        mf_widget.load_recent()
    assert part_settings.get_last_files_multiple() == file_list
    assert mf_widget.file_view.topLevelItemCount() == 5
def napari_write_image(path: str, data: Any, meta: dict) -> Optional[str]:
    """napari writer hook: persist an image layer as a TIFF via ImageWriter.

    Returns the written *path* on success, or None when *data* is not a numpy
    array or the extension is not a recognized TIFF variant (declining the write).
    """
    extension = os.path.splitext(path)[1]
    tiff_extensions = {".tiff", ".tif", ".TIFF", ".TIF"}
    if not isinstance(data, numpy.ndarray):
        return
    if extension not in tiff_extensions:
        return
    scale_shift = min(data.ndim, 3)
    axes = "TZXY"
    channel_names = [meta["name"]]
    # A small trailing dimension is treated as a channel axis.
    if data.shape[-1] < 6:
        axes += "C"
        scale_shift -= 1
        base_name = meta["name"]
        channel_names = [f"{base_name} {i}" for i in range(1, data.shape[-1] + 1)]
    spatial_scale = numpy.divide(meta["scale"], DEFAULT_SCALE_FACTOR)[-scale_shift:]
    spatial_shift = numpy.divide(meta["translate"], DEFAULT_SCALE_FACTOR)[-scale_shift:]
    image = Image(
        data,
        spatial_scale,
        axes_order=axes[-data.ndim:],
        channel_names=channel_names,
        shift=spatial_shift,
        name="Image",
    )
    ImageWriter.save(image, path)
    return path
def save(
    cls,
    save_location: typing.Union[str, BytesIO, Path],
    project_info,
    parameters: dict = None,
    range_changed=None,
    step_changed=None,
):
    """Write the project's image to *save_location*.

    Only the raw image is persisted; *parameters* and the progress callbacks
    are accepted for interface compatibility but unused here.
    """
    image_to_store = project_info.image
    ImageWriter.save(image_to_store, save_location)
def create_test_data(tmpdir):
    """Write eight synthetic image/mask TIFF pairs into *tmpdir* and return the
    list of image file paths (masks are saved alongside but not returned)."""
    # for future use
    spacing = tuple(x / UNIT_SCALE[Units.nm.value] for x in (210, 70, 70))
    base_dir = str(tmpdir)
    paths = []
    for idx in range(8):
        # Mask: a solid inner box; each file is slightly wider than the last.
        mask_arr = np.zeros((10, 20, 20 + idx), dtype=np.uint8)
        mask_arr[1:-1, 2:-2, 2:-2] = 1
        # Two-channel data: background fill plus two bright side stripes.
        channel_data = np.zeros(mask_arr.shape + (2,), dtype=np.uint16)
        channel_data[1:-1, 2:-2, 2:-2] = 15000
        channel_data[2:-2, 3:-3, 3:7] = 33000
        channel_data[2:-2, 3:-3, -7:-3] = 33000
        img = Image(channel_data, spacing, "", mask=mask_arr, axes_order="ZYXC")
        img_path = os.path.join(base_dir, f"file_{idx}.tif")
        ImageWriter.save(img, img_path)
        paths.append(img_path)
        ImageWriter.save_mask(img, os.path.join(base_dir, f"file_{idx}_mask.tif"))
    return paths
def napari_write_labels(path: str, data: Any, meta: dict) -> Optional[str]:
    """napari writer hook: persist a labels layer as a TIFF via ImageWriter.

    Returns the written *path* on success, or None when *data* is not a numpy
    array or the extension is not a recognized TIFF variant (declining the write).
    """
    extension = os.path.splitext(path)[1]
    if not isinstance(data, numpy.ndarray):
        return
    if extension not in {".tiff", ".tif", ".TIFF", ".TIF"}:
        return
    scale_shift = min(data.ndim, 3)
    label_scale = numpy.divide(meta["scale"], DEFAULT_SCALE_FACTOR)[-scale_shift:]
    label_shift = numpy.divide(meta["translate"], DEFAULT_SCALE_FACTOR)[-scale_shift:]
    label_image = Image(
        data,
        label_scale,
        axes_order="TZXY"[-data.ndim:],
        channel_names=[meta["name"]],
        shift=label_shift,
        name="ROI",
    )
    ImageWriter.save(label_image, path)
    return path
def test_save(self, tmp_path):
    """Round-trip an image and its mask through ImageWriter/TiffImageReader and
    check spacing and per-channel value ranges survive."""
    data = np.zeros((1, 10, 20, 30, 3), np.uint8)
    # Fill the first ten X columns of each channel with a distinct value.
    for channel, value in enumerate((2, 20, 9)):
        data[..., :10, channel] = value
    image = self.image_class(data, (10**-6, 10**-6, 10**-6), "", axes_order="TZYXC")
    mask = np.zeros((10, 20, 30), np.uint8)
    mask[..., 2:12] = 1
    image.set_mask(mask, "ZYX")
    image_path = os.path.join(tmp_path, "img.tif")
    mask_path = os.path.join(tmp_path, "img_mask.tif")
    ImageWriter.save(image, image_path)
    ImageWriter.save_mask(image, mask_path)
    read_image: Image = TiffImageReader.read_image(image_path, mask_path)
    assert read_image.get_um_spacing() == (1, 1, 1)
    assert len(read_image.get_ranges()) == 3
    assert read_image.get_ranges() == [(0, 2), (0, 20), (0, 9)]
def save_stack_segmentation(
    file_data: typing.Union[tarfile.TarFile, str, Path, TextIOBase, BufferedIOBase, RawIOBase, IOBase],
    segmentation_info: SegmentationTuple,
    parameters: dict,
    range_changed=None,
    step_changed=None,
):
    """Serialize a stack segmentation into a tar archive.

    The archive contains ``segmentation.tif``, ``metadata.json`` (components,
    parameters, shape, optional base file path), an optional ``mask.tif``, and
    per-history-entry ``history/arrays_<i>.npz`` plus ``history/history.json``.

    :param file_data: open tar file, path, or file-like object to write to
    :param segmentation_info: segmentation, image, mask, components and history
    :param parameters: save options; ``relative_path`` and optional ``spacing``
    :param range_changed: progress callback ``(min, max)``; defaults to a no-op
    :param step_changed: progress callback ``(step)``; defaults to a no-op
    """
    if range_changed is None:
        range_changed = empty_fun
    if step_changed is None:
        step_changed = empty_fun
    range_changed(0, 7)
    tar_file, file_path = open_tar_file(file_data, "w")
    step_changed(1)
    try:
        segmentation_buff = BytesIO()
        # Spacing comes from the attached image when present, else from parameters.
        if segmentation_info.image is not None:
            spacing = segmentation_info.image.spacing
        else:
            spacing = parameters.get("spacing", (10**-6, 10**-6, 10**-6))
        segmentation_image = Image(
            segmentation_info.segmentation, spacing, axes_order=Image.axis_order.replace("C", "")
        )
        try:
            # noinspection PyTypeChecker
            ImageWriter.save(segmentation_image, segmentation_buff)
        except ValueError:
            # Fall back to a plain tifffile dump when ImageWriter rejects the array.
            segmentation_buff.seek(0)
            tifffile.imwrite(segmentation_buff, segmentation_info.segmentation, compress=9)
        segmentation_tar = get_tarinfo("segmentation.tif", segmentation_buff)
        tar_file.addfile(segmentation_tar, fileobj=segmentation_buff)
        step_changed(3)
        metadata = {
            "components": [int(x) for x in segmentation_info.selected_components],
            "parameters": {str(k): v for k, v in segmentation_info.segmentation_parameters.items()},
            "shape": segmentation_info.segmentation.shape,
        }
        if isinstance(segmentation_info.image, Image):
            file_path = segmentation_info.image.file_path
        elif isinstance(segmentation_info.image, str):
            file_path = segmentation_info.image
        else:
            file_path = ""
        if file_path != "":
            if parameters["relative_path"] and isinstance(file_data, str):
                metadata["base_file"] = os.path.relpath(file_path, os.path.dirname(file_data))
            else:
                metadata["base_file"] = file_path
        metadata_buff = BytesIO(json.dumps(metadata, cls=ProfileEncoder).encode("utf-8"))
        metadata_tar = get_tarinfo("metadata.json", metadata_buff)
        tar_file.addfile(metadata_tar, metadata_buff)
        step_changed(4)
        if segmentation_info.mask is not None:
            mask = segmentation_info.mask
            # FIX: np.bool was removed in NumPy 1.24; builtin bool compares
            # identically against a boolean dtype.
            if mask.dtype == bool:
                mask = mask.astype(np.uint8)
            mask_buff = BytesIO()
            tifffile.imwrite(mask_buff, mask, compress=9)
            mask_tar = get_tarinfo("mask.tif", mask_buff)
            tar_file.addfile(mask_tar, fileobj=mask_buff)
            step_changed(5)
        el_info = []
        for i, hist in enumerate(segmentation_info.history):
            el_info.append(
                {
                    "index": i,
                    "mask_property": hist.mask_property,
                    "segmentation_parameters": hist.segmentation_parameters,
                }
            )
            hist.arrays.seek(0)
            hist_info = get_tarinfo(f"history/arrays_{i}.npz", hist.arrays)
            hist.arrays.seek(0)
            tar_file.addfile(hist_info, hist.arrays)
        if len(el_info) > 0:
            hist_str = json.dumps(el_info, cls=ProfileEncoder)
            hist_buff = BytesIO(hist_str.encode("utf-8"))
            tar_algorithm = get_tarinfo("history/history.json", hist_buff)
            tar_file.addfile(tar_algorithm, hist_buff)
        step_changed(6)
    finally:
        # Only close the archive if this function opened it from a path.
        if isinstance(file_data, (str, Path)):
            tar_file.close()
        step_changed(6)
def save_project(
    file_path: str,
    image: Image,
    segmentation: np.ndarray,
    mask: typing.Optional[np.ndarray],
    history: typing.List[HistoryElement],
    algorithm_parameters: dict,
):
    """Save a full analysis project into a compressed tar archive.

    The archive contains ``segmentation.tif``, optional ``mask.tif``,
    ``image.tif``, ``algorithm.json``, ``metadata.json`` (project version) and,
    when *history* is non-empty, ``history/arrays_<i>.npz`` plus
    ``history/history.json``.

    :param file_path: output path; ``.bz2``/``.tbz2`` selects bzip2, else gzip
    :param image: project image, written via ImageWriter
    :param segmentation: labeled segmentation array
    :param mask: optional mask array; boolean masks are stored as uint8
    :param history: prior segmentation steps with their array snapshots
    :param algorithm_parameters: parameters serialized to algorithm.json
    """
    # TODO add support for binary objects
    ext = os.path.splitext(file_path)[1]
    if ext.lower() in [".bz2", ".tbz2"]:
        tar_mod = "w:bz2"
    else:
        tar_mod = "w:gz"
    with tarfile.open(file_path, tar_mod) as tar:
        segmentation_buff = BytesIO()
        # noinspection PyTypeChecker
        tifffile.imwrite(segmentation_buff, segmentation, compress=9)
        segmentation_tar = get_tarinfo("segmentation.tif", segmentation_buff)
        tar.addfile(segmentation_tar, fileobj=segmentation_buff)
        if mask is not None:
            # FIX: np.bool was removed in NumPy 1.24; builtin bool compares
            # identically against a boolean dtype.
            if mask.dtype == bool:
                mask = mask.astype(np.uint8)
            segmentation_buff = BytesIO()
            # noinspection PyTypeChecker
            tifffile.imwrite(segmentation_buff, mask, compress=9)
            segmentation_tar = get_tarinfo("mask.tif", segmentation_buff)
            tar.addfile(segmentation_tar, fileobj=segmentation_buff)
        image_buff = BytesIO()
        ImageWriter.save(image, image_buff)
        tar_image = get_tarinfo("image.tif", image_buff)
        tar.addfile(tarinfo=tar_image, fileobj=image_buff)
        para_str = json.dumps(algorithm_parameters, cls=PartEncoder)
        parameters_buff = BytesIO(para_str.encode("utf-8"))
        tar_algorithm = get_tarinfo("algorithm.json", parameters_buff)
        tar.addfile(tar_algorithm, parameters_buff)
        meta_str = json.dumps({"project_version_info": str(project_version_info)}, cls=PartEncoder)
        meta_buff = BytesIO(meta_str.encode("utf-8"))
        tar_meta = get_tarinfo("metadata.json", meta_buff)
        tar.addfile(tar_meta, meta_buff)
        el_info = []
        for i, el in enumerate(history):
            el_info.append(
                {
                    "index": i,
                    "algorithm_name": el.segmentation_parameters["algorithm_name"],
                    "values": el.segmentation_parameters["values"],
                    "mask_property": el.mask_property,
                }
            )
            el.arrays.seek(0)
            hist_info = get_tarinfo(f"history/arrays_{i}.npz", el.arrays)
            el.arrays.seek(0)
            tar.addfile(hist_info, el.arrays)
        if len(el_info) > 0:
            hist_str = json.dumps(el_info, cls=PartEncoder)
            hist_buff = BytesIO(hist_str.encode("utf-8"))
            tar_algorithm = get_tarinfo("history/history.json", hist_buff)
            tar.addfile(tar_algorithm, hist_buff)