def main():
    """CLI entry point: transform detected cell positions into standard
    (atlas) space.

    Parses the command-line arguments, wires up the output paths, starts
    file logging, then delegates the actual work to
    ``transform_cells_to_standard_space``. Total runtime is logged on exit.
    """
    start_time = datetime.now()
    args = cells_standard_space_cli_parser().parse_args()

    # Build the Paths object rooted at the requested output directory,
    # then point the standard-space outputs at that same directory.
    args.paths = prep.Paths(args.output_dir)
    args.paths.standard_space_output_folder = args.output_dir
    args.paths.cells_in_standard_space = join(
        args.paths.output_dir, "cells_in_standard_space.xml"
    )

    # NOTE(review): ordering matters here — cli_path_update and
    # make_invert_cell_position_paths mutate args.paths before pixel
    # sizes are resolved; presumably the later steps read the updated
    # paths. Confirm against the helpers before reordering.
    cli_path_update(args.paths, args)
    args.paths.make_invert_cell_position_paths()
    args = define_pixel_sizes(args)

    # TODO: implement a recursive function to remove the need to do this
    # (probably using pathlib)
    # Both directories are created explicitly; with the assignments above
    # they may refer to the same location, in which case the second call
    # is a harmless no-op.
    ensure_directory_exists(args.paths.output_dir)
    ensure_directory_exists(args.paths.standard_space_output_folder)

    # Log to a dedicated file so this run is distinguishable from other
    # pipeline stages writing into the same output directory.
    tools.start_logging(
        args.paths.output_dir,
        args=args,
        verbose=args.debug,
        filename="cells_to_standard_space",
        log_header="CELL TRANSFORMATION TO STANDARD SPACE LOG",
    )
    logging.info("Starting transformation of cell positions")
    transform_cells_to_standard_space(args)
    logging.info("Finished. Total time taken: %s", datetime.now() - start_time)
def main():
    """CLI entry point: summarise detected cells per atlas region.

    Parses arguments, resolves pixel sizes, points the Paths object at
    the user-supplied atlas/classification files, then runs the analysis.
    """
    args = define_pixel_sizes(region_summary_cli_parser().parse_args())

    # Assemble the paths object first, then attach it to args so that
    # analysis_run sees the fully-configured object.
    paths = Paths(args, args.output_dir)
    paths.registered_atlas_path = args.registered_atlas_path
    paths.hemispheres_atlas_path = args.hemispheres_atlas_path
    paths.classification_out_file = args.xml_file_path
    args.paths = paths

    analysis_run(args)
def run():
    """CLI entry point for amap registration.

    Parses and normalises the command-line arguments, starts logging in
    the registration output folder, then runs ``register`` with the full
    set of user-configurable options. Total runtime is logged on exit.
    """
    t_start = datetime.now()

    args = register_cli_parser().parse_args()
    args = define_pixel_sizes(args)
    args, additional_images_downsample = prep_registration(args)
    args = make_paths_absolute(args)

    fancylog.start_logging(
        args.registration_output_folder,
        program_for_log,
        variables=[args],
        verbose=args.debug,
        log_header="AMAP LOG",
        multiprocessing_aware=False,
    )

    logging.info("Starting registration")

    # Gather the keyword options in one place so the call site below
    # stays readable; values are passed through from argparse unchanged.
    registration_options = dict(
        x_pixel_um=args.x_pixel_um,
        y_pixel_um=args.y_pixel_um,
        z_pixel_um=args.z_pixel_um,
        orientation=args.orientation,
        flip_x=args.flip_x,
        flip_y=args.flip_y,
        flip_z=args.flip_z,
        rotation=args.rotation,
        affine_n_steps=args.affine_n_steps,
        affine_use_n_steps=args.affine_use_n_steps,
        freeform_n_steps=args.freeform_n_steps,
        freeform_use_n_steps=args.freeform_use_n_steps,
        bending_energy_weight=args.bending_energy_weight,
        grid_spacing=args.grid_spacing,
        smoothing_sigma_reference=args.smoothing_sigma_reference,
        smoothing_sigma_floating=args.smoothing_sigma_floating,
        histogram_n_bins_floating=args.histogram_n_bins_floating,
        histogram_n_bins_reference=args.histogram_n_bins_reference,
        sort_input_file=args.sort_input_file,
        n_free_cpus=args.n_free_cpus,
        # The CLI exposes negative flags ("--no-..."); invert them here.
        save_downsampled=not args.no_save_downsampled,
        boundaries=not args.no_boundaries,
        additional_images_downsample=additional_images_downsample,
        debug=args.debug,
    )
    register(
        args.registration_config,
        args.image_paths,
        args.registration_output_folder,
        **registration_options,
    )

    logging.info("Finished. Total time taken: %s", datetime.now() - t_start)
def test_define_pixel_sizes():
    """Exercise meta.define_pixel_sizes for every metadata source.

    Covers: all pixel sizes given on the command line, sizes read from
    baking-tray / mesospim / cellfinder metadata files, and the three
    failure modes (no metadata, unsupported metadata, missing fields),
    which must raise CommandLineInputError.
    """
    # All sizes supplied directly on the command line.
    args = Args()
    args.set_all_pixel_sizes()
    args = meta.define_pixel_sizes(args)
    assert isclose(1, args.x_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(2, args.y_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(3, args.z_pixel_um, abs_tol=VOX_DIM_TOLERANCE)

    # baking tray
    args = Args()
    args.set_some_pixel_sizes_w_meta_baking_tray()
    args = meta.define_pixel_sizes(args)
    assert isclose(10, args.x_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(2.14, args.y_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(40, args.z_pixel_um, abs_tol=VOX_DIM_TOLERANCE)

    # mesospim
    args = Args()
    args.set_some_pixel_sizes_w_meta_mesospim()
    args = meta.define_pixel_sizes(args)
    assert isclose(100, args.x_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(8.23, args.y_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(10, args.z_pixel_um, abs_tol=VOX_DIM_TOLERANCE)

    # cellfinder
    args = Args()
    args.set_some_pixel_sizes_w_meta_cellfinder()
    args = meta.define_pixel_sizes(args)
    assert isclose(2, args.x_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(0.2, args.y_pixel_um, abs_tol=VOX_DIM_TOLERANCE)
    assert isclose(40, args.z_pixel_um, abs_tol=VOX_DIM_TOLERANCE)

    # Failure modes: the bare call is enough inside pytest.raises — the
    # original wrapped it in `assert`, which is dead code (the call must
    # raise before the assert is evaluated, and pytest.raises fails the
    # test itself if no exception is raised).

    # no metadata
    args = Args()
    args.set_some_pixel_sizes_no_meta()
    with pytest.raises(CommandLineInputError):
        meta.define_pixel_sizes(args)

    # unsupported metadata
    args = Args()
    args.set_some_pixel_sizes_unsupported_meta()
    with pytest.raises(CommandLineInputError):
        meta.define_pixel_sizes(args)

    # missing metadata
    args = Args()
    args.set_some_pixel_sizes_missing_meta()
    with pytest.raises(CommandLineInputError):
        meta.define_pixel_sizes(args)
def main():
    """CLI entry point for brainreg atlas registration.

    Parses arguments, resolves pixel sizes and paths, logs run metadata,
    then registers the sample to the requested BrainGlobe atlas.
    Total runtime is logged on exit.
    """
    t0 = datetime.now()

    # Build the parser once: it is used both to parse the arguments and
    # to recover the named argument groups afterwards.
    cli_parser = register_cli_parser()
    args = cli_parser.parse_args()
    arg_groups = get_arg_groups(args, cli_parser)

    args = define_pixel_sizes(args)
    args, additional_images_downsample = prep_registration(args)

    paths = Paths(args.brainreg_directory)
    log_metadata(paths.metadata_path, args)

    fancylog.start_logging(
        paths.registration_output_folder,
        program_for_log,
        variables=[args],
        verbose=args.debug,
        log_header="BRAINREG LOG",
        multiprocessing_aware=False,
    )

    logging.info("Starting registration")

    atlas = BrainGlobeAtlas(args.atlas)
    register(
        atlas,
        args.orientation,
        args.image_paths,
        paths,
        arg_groups["NiftyReg registration backend options"],
        x_pixel_um=args.x_pixel_um,
        y_pixel_um=args.y_pixel_um,
        z_pixel_um=args.z_pixel_um,
        sort_input_file=args.sort_input_file,
        n_free_cpus=args.n_free_cpus,
        additional_images_downsample=additional_images_downsample,
        backend=args.backend,
        debug=args.debug,
    )

    logging.info("Finished. Total time taken: %s", datetime.now() - t0)
def prep_cellfinder_general():
    """Parse and validate cellfinder CLI arguments and prepare the run.

    Resolves pixel sizes, checks input files exist, creates the output
    directory, sets up registration paths and logging, and works out
    which pipeline stages need to run.

    Returns:
        tuple: ``(args, what_to_run)`` — the fully-prepared argparse
        namespace and the CalcWhatToRun plan derived from it.
    """
    args = parser.cellfinder_parser().parse_args()
    args = define_pixel_sizes(args)
    check_input_arg_existance(args)

    # exist_ok=True avoids the check-then-create race of the original
    # `if not os.path.exists(...): os.makedirs(...)` pair (the directory
    # could be created by another process between the two calls).
    os.makedirs(args.output_dir, exist_ok=True)

    args.paths = Paths(args.output_dir)
    args.paths.make_reg_paths()

    fancylog.start_logging(
        args.output_dir,
        program_for_log,
        variables=[args, args.paths],
        verbose=args.debug,
        log_header="CELLFINDER LOG",
    )

    what_to_run = CalcWhatToRun(args)
    args.signal_ch_ids, args.background_ch_id = check_and_return_ch_ids(
        args.signal_ch_ids, args.background_ch_id, args.signal_planes_paths
    )

    return args, what_to_run
def main():
    """Interactive napari tool for curating cell-detection candidates.

    Loads the signal images and the candidate cells from ``args.cells_xml``
    into a napari viewer, then installs four key bindings on the viewer:

    * ``t`` — toggle the cell/non-cell label of the selected points
    * ``c`` — confirm the selected points' current labels
    * ``Ctrl-S`` — save all curated points to ``output_filename``
    * ``Alt-E`` — extract training cubes from the saved curation

    Curated point indices are accumulated in the module-level
    ``CURATED_POINTS`` list shared by the key-binding callbacks.
    """
    args = parser().parse_args()
    args = define_pixel_sizes(args)

    # Default the output directory to the folder containing the input
    # cells XML when none is given explicitly.
    if args.output is None:
        output = Path(args.cells_xml)
        output_directory = output.parent
        print(
            f"No output directory given, so setting output "
            f"directory to: {output_directory}"
        )
    else:
        output_directory = args.output

    ensure_directory_exists(output_directory)
    output_filename = output_directory / OUTPUT_NAME

    img_paths = get_sorted_file_paths(
        args.signal_image_paths, file_extension=".tif"
    )
    # labels holds the per-cell class used as the "cell" point property.
    cells, labels = get_cell_labels_arrays(args.cells_xml)
    properties = {"cell": labels}

    with napari.gui_qt():
        viewer = napari.Viewer(title="Cellfinder cell curation")
        # Lazy (dask-backed) stack so large image series load on demand.
        images = magic_imread(img_paths, use_dask=True, stack=True)
        viewer.add_image(images)

        # Two face colours encode the two values of the "cell" property.
        face_color_cycle = ["lightskyblue", "lightgoldenrodyellow"]
        points_layer = viewer.add_points(
            cells,
            properties=properties,
            symbol=args.symbol,
            n_dimensional=True,
            size=args.marker_size,
            face_color="cell",
            face_color_cycle=face_color_cycle,
            name="Cell candidates",
        )

        # NOTE(review): the callbacks below address the points layer as
        # viewer.layers[1] — this assumes the image (0) and points (1)
        # layer order established above never changes at runtime.

        @viewer.bind_key("t")
        def toggle_point_property(viewer):
            """Toggle point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                # Flip the boolean "cell" property of every selected point.
                selected_properties = viewer.layers[1].properties["cell"][
                    selected_points
                ]
                toggled_properties = np.logical_not(selected_properties)
                viewer.layers[1].properties["cell"][
                    selected_points
                ] = toggled_properties

                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(
                    f"{len(selected_points)} points "
                    f"toggled and added to the list "
                )
                # refresh the properties colour
                viewer.layers[1].refresh_colors(update_color_mapping=False)

        @viewer.bind_key("c")
        def confirm_point_property(viewer):
            """Confirm point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(
                    f"{len(selected_points)} points "
                    f"confirmed and added to the list "
                )

        @viewer.bind_key("Control-S")
        def save_curation(viewer):
            """Save file"""
            if not CURATED_POINTS:
                print("No cells have been confirmed or toggled, not saving")
            else:
                # Each point may have been curated more than once; keep
                # only unique indices before exporting.
                unique_cells = unique_elements_lists(CURATED_POINTS)
                points = viewer.layers[1].data[unique_cells]
                labels = viewer.layers[1].properties["cell"][unique_cells]
                labels = labels.astype("int")
                # Shift labels so they are non-zero in the saved XML —
                # presumably matching the Cell type convention; verify.
                labels = labels + 1

                cells_to_save = []
                for idx, point in enumerate(points):
                    # napari points are (z, y, x); Cell expects x, y, z.
                    cell = Cell([point[2], point[1], point[0]], labels[idx])
                    cells_to_save.append(cell)

                print(f"Saving results to: {output_filename}")
                save_cells(cells_to_save, output_filename)

        @viewer.bind_key("Alt-E")
        def start_cube_extraction(viewer):
            """Extract cubes for training"""
            # Extraction reads the saved XML, so curation must be saved
            # (Ctrl-S) first.
            if not output_filename.exists():
                print(
                    "No curation results have been saved. "
                    "Please save before extracting cubes"
                )
            else:
                print(f"Saving cubes to: {output_directory}")
                run_extraction(
                    output_filename,
                    output_directory,
                    args.signal_image_paths,
                    args.background_image_paths,
                    args.cube_depth,
                    args.cube_width,
                    args.cube_height,
                    args.x_pixel_um,
                    args.y_pixel_um,
                    args.z_pixel_um,
                    args.x_pixel_um_network,
                    args.y_pixel_um_network,
                    args.z_pixel_um_network,
                    args.max_ram,
                    args.n_free_cpus,
                    args.save_empty_cubes,
                )

                print("Saving yaml file to use for training")
                save_yaml_file(output_directory)

                print("Closing window")
                QApplication.closeAllWindows()
                print(
                    "Finished! You may now annotate more "
                    "datasets, or go straight to training"
                )