def main():
    """Show raw images in napari with cell / non-cell points overlaid."""
    args = parser().parse_args()
    image_paths = get_sorted_file_paths(args.img_paths, file_extension=".tif")
    cell_points, non_cell_points = get_cell_arrays(args.cells_xml)

    with napari.gui_qt():
        viewer = napari.Viewer(title="Cellfinder cell viewer")
        stack = magic_imread(image_paths, use_dask=True, stack=True)
        viewer.add_image(stack)

        # Appearance options shared by both point layers.
        point_kwargs = dict(
            size=args.marker_size,
            n_dimensional=True,
            opacity=args.opacity,
            symbol=args.symbol,
        )
        viewer.add_points(
            non_cell_points,
            face_color="lightskyblue",
            name="Non-Cells",
            **point_kwargs,
        )
        viewer.add_points(
            cell_points,
            face_color="lightgoldenrodyellow",
            name="Cells",
            **point_kwargs,
        )
def test_zarr():
    """A random volume round-trips through an on-disk zarr array."""
    expected = np.random.random((10, 20, 20))
    with TemporaryDirectory(suffix='.zarr') as fout:
        store = zarr.open(fout, 'a', shape=expected.shape)
        store[:] = expected
        loaded = io.magic_imread([fout])
        # Note: due to lazy loading, the comparison must happen within
        # the context manager. Alternatively, we could convert to NumPy here.
        np.testing.assert_array_equal(expected, loaded)
def test_zarr_nested(tmp_path):
    """magic_imread can address a dataset nested inside a zarr group."""
    expected = np.random.random((10, 20, 20))
    dataset_name = 'my_image'
    root_path = tmp_path / 'dataset.zarr'
    group = zarr.open(str(root_path), mode='a')
    group.create_dataset(dataset_name, data=expected)
    loaded = io.magic_imread([str(root_path / dataset_name)])
    np.testing.assert_array_equal(expected, loaded)
def add_raw_image(viewer, image_path, name):
    """
    Add a raw image (as a virtual stack) to the napari viewer

    :param viewer: Napari viewer object
    :param image_path: Path to the raw data
    :param str name: Name to give the data
    """
    sorted_paths = get_sorted_file_paths(image_path, file_extension=".tif")
    virtual_stack = magic_imread(sorted_paths, use_dask=True, stack=True)
    viewer.add_image(
        virtual_stack, name=name, opacity=0.6, blending="additive"
    )
def test_irregular_images(irregular_images):
    """Images of differing shapes load as a plain list, not a stack."""
    # Ideally, this would work "magically" with dask and irregular images,
    # but there is no foolproof way to do this without reading in all the
    # files. We need to be able to inspect the file shape without reading
    # it in first, then we can automatically turn stacking off when shapes
    # are irregular (and create proper dask arrays)
    loaded = io.magic_imread(irregular_images, use_dask=False, stack=False)
    assert isinstance(loaded, list)
    assert len(loaded) == 2
    shapes = tuple(img.shape for img in loaded)
    assert shapes == ((512, 512), (303, 384))
def load_labels(self, image_path, use_dask=True, stack=True, name=None, opacity=1):
    """Read a label image and add it to the viewer as a labels layer."""
    label_data = magic_imread(image_path, use_dask=use_dask, stack=stack)
    return self.viewer.add_labels(label_data, name=name, opacity=opacity)
def load_data(self, name="", visible=True):
    """Load the sorted .tif stack from ``self.directory`` into the viewer.

    Prints a message and returns ``None`` if the directory cannot be read.
    """
    try:
        paths = get_sorted_file_paths(self.directory, file_extension=".tif")
        stack = magic_imread(paths, use_dask=True, stack=True)
        return self.viewer.add_image(stack)
        # return self.viewer.open(
        #     str(self.directory), name=name, visible=visible
        # )
    except ValueError:
        print(
            f"The directory ({self.directory}) cannot be "
            f"loaded, please try again."
        )
        return
def load_raw_data_directory(self):
    """Prompt for a data directory and load its .tif stack into the viewer.

    Shows a native-free Qt directory picker; if the user cancels (empty
    string returned), nothing is loaded and the status label is reset.
    """
    self.status_label.setText("Loading...")
    options = QFileDialog.Options()
    options |= QFileDialog.DontUseNativeDialog
    directory = QFileDialog.getExistingDirectory(
        self,
        "Select data directory",
        options=options,
    )
    # Deal with existing dialog: an empty string means the user cancelled.
    # BUG FIX: the original used `directory is not ""`, an identity
    # comparison against a string literal — it relies on CPython string
    # interning and emits a SyntaxWarning on Python 3.8+. Use equality.
    if directory != "":
        directory = Path(directory)
        img_paths = get_sorted_file_paths(directory, file_extension=".tif*")
        images = magic_imread(img_paths, use_dask=True, stack=True)
        self.viewer.add_image(images, name=directory.stem)
    self.status_label.setText("Ready")
def test_zarr_multiscale():
    """Every level of a multiscale zarr group round-trips unchanged."""
    levels = [
        np.random.random((20, 20)),
        np.random.random((10, 10)),
        np.random.random((5, 5)),
    ]
    with TemporaryDirectory(suffix='.zarr') as fout:
        root = zarr.open_group(fout, 'a')
        for i, level in enumerate(levels):
            side = 20 // 2 ** i
            dataset = root.create_dataset(str(i), shape=(side,) * 2)
            dataset[:] = level
        loaded = io.magic_imread([fout])
        assert len(levels) == len(loaded)
        # Note: due to lazy loading, the comparisons need to happen within
        # the context manager. Alternatively, we could convert to NumPy here.
        for expected, actual in zip(levels, loaded):
            np.testing.assert_array_equal(expected, actual)
def test_no_files_raises(tmp_path, two_pngs):
    """Reading an empty directory raises a descriptive ValueError."""
    with pytest.raises(ValueError) as excinfo:
        io.magic_imread(tmp_path)
    assert "No files found in" in str(excinfo.value)
def test_multi_png_no_stack(two_pngs):
    """With stack=False, multiple files come back as a list of arrays."""
    loaded = io.magic_imread(two_pngs, stack=False)
    assert isinstance(loaded, list)
    assert len(loaded) == 2
    for array in loaded:
        assert array.shape == (512, 512)
def test_multi_png_no_dask(two_pngs):
    """With use_dask=False, multiple files stack into one NumPy array."""
    loaded = io.magic_imread(two_pngs, use_dask=False)
    assert isinstance(loaded, np.ndarray)
    assert loaded.shape == (2, 512, 512)
def test_multi_png_pathlib(two_pngs):
    """pathlib.Path inputs behave like string paths (dask stack result)."""
    path_objects = [Path(png) for png in two_pngs]
    loaded = io.magic_imread(path_objects)
    assert isinstance(loaded, da.Array)
    assert loaded.shape == (2, 512, 512)
def test_multi_png_defaults(two_pngs):
    """By default, multiple files become a stacked dask array."""
    loaded = io.magic_imread(two_pngs)
    assert isinstance(loaded, da.Array)
    assert loaded.shape == (2, 512, 512)
def test_single_png_pathlib(single_png):
    """A single pathlib.Path loads eagerly as a plain NumPy array."""
    loaded = io.magic_imread(Path(single_png[0]))
    assert isinstance(loaded, np.ndarray)
    assert loaded.shape == (512, 512)
def test_single_png_defaults(single_png):
    """A one-element file list loads eagerly as a plain NumPy array."""
    loaded = io.magic_imread(single_png)
    assert isinstance(loaded, np.ndarray)
    assert loaded.shape == (512, 512)
def test_tiff(single_tiff):
    """A multi-page tiff loads as a uint8 NumPy array with page axis first."""
    loaded = io.magic_imread(single_tiff)
    assert isinstance(loaded, np.ndarray)
    assert loaded.shape == (2, 15, 10)
    assert loaded.dtype == np.uint8
def test_single_filename(single_tiff):
    """A bare filename (not wrapped in a list) is also accepted."""
    loaded = io.magic_imread(single_tiff[0])
    assert loaded.shape == (2, 15, 10)
def main():
    """Interactive napari session for curating cellfinder cell candidates.

    Loads the signal image stack plus candidate cell positions, then binds
    keys for curation: 't' toggles cell/non-cell for the selection, 'c'
    confirms it, Ctrl-S saves curated cells to XML, and Alt-E extracts
    training cubes from the saved curation results.
    """
    args = parser().parse_args()
    args = define_pixel_sizes(args)

    if args.output is None:
        # Default the output directory to wherever the cells XML lives.
        output = Path(args.cells_xml)
        output_directory = output.parent
        print(
            f"No output directory given, so setting output "
            f"directory to: {output_directory}"
        )
    else:
        output_directory = args.output

    ensure_directory_exists(output_directory)
    output_filename = output_directory / OUTPUT_NAME

    img_paths = get_sorted_file_paths(
        args.signal_image_paths, file_extension=".tif"
    )
    cells, labels = get_cell_labels_arrays(args.cells_xml)
    properties = {"cell": labels}

    with napari.gui_qt():
        viewer = napari.Viewer(title="Cellfinder cell curation")
        images = magic_imread(img_paths, use_dask=True, stack=True)
        viewer.add_image(images)
        face_color_cycle = ["lightskyblue", "lightgoldenrodyellow"]
        points_layer = viewer.add_points(
            cells,
            properties=properties,
            symbol=args.symbol,
            n_dimensional=True,
            size=args.marker_size,
            face_color="cell",
            face_color_cycle=face_color_cycle,
            name="Cell candidates",
        )

        @viewer.bind_key("t")
        def toggle_point_property(viewer):
            """Toggle point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                selected_properties = viewer.layers[1].properties["cell"][
                    selected_points
                ]
                toggled_properties = np.logical_not(selected_properties)
                viewer.layers[1].properties["cell"][
                    selected_points
                ] = toggled_properties

                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(
                    f"{len(selected_points)} points "
                    f"toggled and added to the list "
                )
                # refresh the properties colour
                viewer.layers[1].refresh_colors(update_color_mapping=False)

        @viewer.bind_key("c")
        def confirm_point_property(viewer):
            """Confirm point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(
                    f"{len(selected_points)} points "
                    f"confirmed and added to the list "
                )

        @viewer.bind_key("Control-S")
        def save_curation(viewer):
            """Save file"""
            if not CURATED_POINTS:
                # BUG FIX: this literal was truncated mid-string in the
                # source ("No cells have been confirmed / or toggled...");
                # reconstructed as a single message.
                print("No cells have been confirmed or toggled, not saving")
            else:
                unique_cells = unique_elements_lists(CURATED_POINTS)
                points = viewer.layers[1].data[unique_cells]
                labels = viewer.layers[1].properties["cell"][unique_cells]
                # Cell type values are 1-based in the saved XML.
                labels = labels.astype("int")
                labels = labels + 1

                cells_to_save = []
                for idx, point in enumerate(points):
                    # Cell expects (x, y, z); point arrays are (z, y, x).
                    cell = Cell([point[2], point[1], point[0]], labels[idx])
                    cells_to_save.append(cell)

                print(f"Saving results to: {output_filename}")
                save_cells(cells_to_save, output_filename)

        @viewer.bind_key("Alt-E")
        def start_cube_extraction(viewer):
            """Extract cubes for training"""
            if not output_filename.exists():
                print(
                    "No curation results have been saved. "
                    "Please save before extracting cubes"
                )
            else:
                print(f"Saving cubes to: {output_directory}")
                run_extraction(
                    output_filename,
                    output_directory,
                    args.signal_image_paths,
                    args.background_image_paths,
                    args.cube_depth,
                    args.cube_width,
                    args.cube_height,
                    args.x_pixel_um,
                    args.y_pixel_um,
                    args.z_pixel_um,
                    args.x_pixel_um_network,
                    args.y_pixel_um_network,
                    args.z_pixel_um_network,
                    args.max_ram,
                    args.n_free_cpus,
                    args.save_empty_cubes,
                )

                print("Saving yaml file to use for training")
                save_yaml_file(output_directory)

                print("Closing window")
                QApplication.closeAllWindows()
                print(
                    "Finished! You may now annotate more "
                    "datasets, or go straight to training"
                )
def test_many_tiffs(single_tiff):
    """Repeating one tiff path three times stacks into a dask array."""
    repeated_paths = single_tiff * 3
    loaded = io.magic_imread(repeated_paths)
    assert isinstance(loaded, da.Array)
    assert loaded.shape == (3, 2, 15, 10)
    assert loaded.dtype == np.uint8
non_cells = df[df["type"] == Cell.UNKNOWN] cells = df[df["type"] == Cell.CELL] cells = cells_df_as_np(cells) non_cells = cells_df_as_np(non_cells) return cells, non_cells # def main(): args = parser().parse_args() img_paths = get_sorted_file_paths(args.img_paths, file_extension=".tif") cells, non_cells = get_cell_arrays(args.cells_xml) with napari.gui_qt(): v = napari.Viewer(title="Cellfinder cell viewer") images = magic_imread(img_paths, use_dask=True, stack=True) v.add_image(images) points_layer_non_cells = v.add_points( non_cells, size=args.marker_size, n_dimensional=True, opacity=args.opacity, symbol=args.symbol, face_color="lightskyblue", name="Non-Cells", ) points_layer_cells = v.add_points( cells, size=args.marker_size, n_dimensional=True,