Code example #1
def main(args, max_workers=3):
    signal_paths = args.signal_planes_paths[args.signal_channel]
    background_paths = args.background_planes_path[0]
    signal_images = get_sorted_file_paths(signal_paths, file_extension="tif")
    background_images = get_sorted_file_paths(background_paths,
                                              file_extension="tif")

    # Too many workers don't increase speed, and use huge amounts of RAM
    workers = get_num_processes(min_free_cpu_cores=args.n_free_cpus,
                                n_max_processes=max_workers)

    logging.debug("Initialising cube generator")
    inference_generator = CubeGeneratorFromFile(
        args.paths.detected_points,
        signal_images,
        background_images,
        args.voxel_sizes,
        args.network_voxel_sizes,
        batch_size=args.batch_size,
        cube_width=args.cube_width,
        cube_height=args.cube_height,
        cube_depth=args.cube_depth,
    )

    model = get_model(
        existing_model=args.trained_model,
        model_weights=args.model_weights,
        network_depth=models[args.network_depth],
        inference=True,
    )

    logging.info("Running inference")
    predictions = model.predict(
        inference_generator,
        use_multiprocessing=True,
        workers=workers,
        verbose=True,
    )
    predictions = predictions.round()
    predictions = predictions.astype("uint16")

    predictions = np.argmax(predictions, axis=1)
    cells_list = []

    # only go through the "extractable" cells
    for idx, cell in enumerate(inference_generator.ordered_cells):
        cell.type = predictions[idx] + 1
        cells_list.append(cell)

    logging.info("Saving classified cells")
    save_cells(cells_list,
               args.paths.classified_points,
               save_csv=args.save_csv)
    try:
        get_cells(args.paths.classified_points, cells_only=True)
        return True
    except MissingCellsError:
        return False
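The post-processing above rounds the network outputs, casts them, takes the argmax over the class axis, and adds 1 so the result lines up with the cell type codes used by save_cells (assumed here: 1 = non-cell, 2 = cell). A minimal numpy sketch of that mapping, with made-up prediction values:

import numpy as np

# Made-up network outputs for four candidates, shape (n_cells, n_classes)
predictions = np.array([
    [0.9, 0.1],    # likely non-cell
    [0.2, 0.8],    # likely cell
    [0.6, 0.4],
    [0.05, 0.95],
])

# Same steps as above: round, cast, argmax over classes, then +1
# (assumed mapping: class 0 -> type 1 = non-cell, class 1 -> type 2 = cell)
cell_types = np.argmax(predictions.round().astype("uint16"), axis=1) + 1
print(cell_types)  # [1 2 1 2]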
Code example #2
File: curation.py Project: peq10/cellfinder
def run_extraction(
    output_filename,
    output_directory,
    signal_paths,
    background_paths,
    cube_depth,
    cube_width,
    cube_height,
    x_pixel_um,
    y_pixel_um,
    z_pixel_um,
    x_pixel_um_network,
    y_pixel_um_network,
    z_pixel_um_network,
    max_ram,
    n_free_cpus,
    save_empty_cubes,
):
    planes_paths = {}
    planes_paths[0] = get_sorted_file_paths(signal_paths,
                                            file_extension=".tif")
    planes_paths[1] = get_sorted_file_paths(background_paths,
                                            file_extension=".tif")

    all_candidates = get_cells(str(output_filename))

    cells = [c for c in all_candidates if c.is_cell()]
    non_cells = [c for c in all_candidates if not c.is_cell()]

    to_extract = {"cells": cells, "non_cells": non_cells}

    for cell_type, cell_list in to_extract.items():
        print(f"Extracting type: {cell_type}")
        cell_type_output_directory = output_directory / cell_type
        print(f"Saving to: {cell_type_output_directory}")
        ensure_directory_exists(str(cell_type_output_directory))
        extract_cubes_main(
            cell_list,
            cell_type_output_directory,
            planes_paths,
            cube_depth,
            cube_width,
            cube_height,
            x_pixel_um,
            y_pixel_um,
            z_pixel_um,
            x_pixel_um_network,
            y_pixel_um_network,
            z_pixel_um_network,
            max_ram,
            n_free_cpus,
            save_empty_cubes,
        )
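A hypothetical call, for orientation only; every path and numeric value below is made up (the 50 x 50 x 20 cube and the 1, 1, 5 um network voxel sizes simply mirror common cellfinder defaults):

from pathlib import Path

output_directory = Path("/data/curation")
run_extraction(
    output_filename=output_directory / "curated_cells.xml",
    output_directory=output_directory,
    signal_paths="/data/brain/ch00",
    background_paths="/data/brain/ch01",
    cube_depth=20,
    cube_width=50,
    cube_height=50,
    x_pixel_um=2,
    y_pixel_um=2,
    z_pixel_um=5,
    x_pixel_um_network=1,
    y_pixel_um_network=1,
    z_pixel_um_network=5,
    max_ram=None,
    n_free_cpus=2,
    save_empty_cubes=False,
)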
Code example #3
def main():
    args = parser().parse_args()
    img_paths = get_sorted_file_paths(args.img_paths, file_extension=".tif")
    cells, non_cells = get_cell_arrays(args.cells_xml)

    with napari.gui_qt():
        v = napari.Viewer(title="Cellfinder cell viewer")
        images = magic_imread(img_paths, use_dask=True, stack=True)

        v.add_image(images)
        v.add_points(
            non_cells,
            size=args.marker_size,
            n_dimensional=True,
            opacity=args.opacity,
            symbol=args.symbol,
            face_color="lightskyblue",
            name="Non-Cells",
        )
        v.add_points(
            cells,
            size=args.marker_size,
            n_dimensional=True,
            opacity=args.opacity,
            symbol=args.symbol,
            face_color="lightgoldenrodyellow",
            name="Cells",
        )
Code example #4
def add_raw_image(viewer, image_path, name):
    """
    Add a raw image (as a virtual stack) to the napari viewer
    :param viewer: Napari viewer object
    :param image_path: Path to the raw data
    :param str name: Name to give the data
    """
    paths = get_sorted_file_paths(image_path, file_extension=".tif")
    images = magic_imread(paths, use_dask=True, stack=True)
    viewer.add_image(images, name=name, opacity=0.6, blending="additive")
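A hypothetical usage sketch (the directory path is made up); napari.gui_qt() matches the API used elsewhere in these examples:

import napari

with napari.gui_qt():
    viewer = napari.Viewer()
    add_raw_image(viewer, "/data/brain/ch00", name="raw signal")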
Code example #5
File: curation.py Project: satyakam7/cellfinder
 def load_data(self, name="", visible=True):
     try:
         img_paths = get_sorted_file_paths(self.directory,
                                           file_extension=".tif")
         images = magic_imread(img_paths, use_dask=True, stack=True)
         return self.viewer.add_image(images)
         # return self.viewer.open(
         #     str(self.directory), name=name, visible=visible
         # )
     except ValueError:
         print(f"The directory ({self.directory}) cannot be "
               f"loaded, please try again.")
         return
Code example #6
 def load_raw_data_directory(self):
     self.status_label.setText("Loading...")
     options = QFileDialog.Options()
     options |= QFileDialog.DontUseNativeDialog
     directory = QFileDialog.getExistingDirectory(
         self,
         "Select data directory",
         options=options,
     )
     # handle the user cancelling the dialog (returns an empty string)
     if directory != "":
         directory = Path(directory)
         img_paths = get_sorted_file_paths(directory,
                                           file_extension=".tif*")
         images = magic_imread(img_paths, use_dask=True, stack=True)
         self.viewer.add_image(images, name=directory.stem)
     self.status_label.setText("Ready")
Code example #7
File: brainio.py Project: adamltyson/brainio
def load_from_folder(
    src_folder,
    x_scaling_factor,
    y_scaling_factor,
    z_scaling_factor,
    anti_aliasing=True,
    file_extension="",
    load_parallel=False,
    n_free_cpus=2,
):
    """
    Load a brain from a folder. All tiff files will be read, sorted, and
    assumed to belong to the same sample.
    Optionally, a file_extension string can be supplied, which will then
    have to be present in the file names for them to be considered part
    of the sample.

    :param str src_folder: Folder containing the image planes
    :param float x_scaling_factor: The scaling of the brain along the x
        dimension (applied on loading before return)
    :param float y_scaling_factor: The scaling of the brain along the y
        dimension (applied on loading before return)
    :param float z_scaling_factor: The scaling of the brain along the z
        dimension
    :param bool anti_aliasing: Whether to apply a Gaussian filter to smooth
        the image prior to down-scaling. It is crucial to filter when
        down-sampling the image to avoid aliasing artifacts.
    :param str file_extension: Will have to be present in the file names
        for them to be considered part of the sample
    :param bool load_parallel: Use multiprocessing to speedup image loading
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """
    paths = get_sorted_file_paths(src_folder, file_extension=file_extension)

    return load_image_series(
        paths,
        x_scaling_factor,
        y_scaling_factor,
        z_scaling_factor,
        load_parallel=load_parallel,
        n_free_cpus=n_free_cpus,
        anti_aliasing=anti_aliasing,
    )
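A hypothetical call, downsampling by half in x and y while keeping z as-is (the folder path is made up):

brain = load_from_folder(
    "/data/brain/ch00",
    x_scaling_factor=0.5,
    y_scaling_factor=0.5,
    z_scaling_factor=1,
    file_extension=".tif",
    load_parallel=True,
)
print(brain.shape)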
Code example #8
File: brainio.py Project: adamltyson/brainio
def get_size_image_from_file_paths(file_path, file_extension="tif"):
    """
    Returns the size of an image stored as a series of 2D files, without
    loading the whole image
    :param str file_path: Directory of image files, text file containing
        file paths, or a list of file paths
    :param str file_extension: Optional file extension (if a directory
        is passed)
    :return: Dict of image dimensions with keys "x", "y" and "z"
    """
    file_path = str(file_path)

    img_paths = get_sorted_file_paths(file_path, file_extension=file_extension)
    z_shape = len(img_paths)

    logging.debug(
        "Loading file: {} to check raw image size".format(img_paths[0]))
    image_0 = load_any(img_paths[0])
    y_shape, x_shape = image_0.shape

    image_shape = {"x": x_shape, "y": y_shape, "z": z_shape}
    return image_shape
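A hypothetical call (the directory path and printed sizes are made up):

image_shape = get_size_image_from_file_paths("/data/brain/ch00")
print(image_shape)  # e.g. {"x": 2048, "y": 2048, "z": 1200}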
Code example #9
def test_get_sorted_file_paths():
    # test list
    shuffled = sorted_cubes_dir.copy()
    shuffle(shuffled)
    assert system.get_sorted_file_paths(shuffled) == sorted_cubes_dir

    # test dir
    assert system.get_sorted_file_paths(cubes_dir) == sorted_cubes_dir
    assert (system.get_sorted_file_paths(
        cubes_dir, file_extension=".tif") == sorted_cubes_dir)

    # test text file
    # specifying utf8, as written on linux
    assert system.get_sorted_file_paths(
        jabberwocky, encoding="utf8") == get_text_lines(jabberwocky_sorted,
                                                        encoding="utf8")

    # test unsupported
    with pytest.raises(NotImplementedError):
        system.get_sorted_file_paths(shuffled[0])
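The test pins down the contract of get_sorted_file_paths: a list is returned sorted, a directory is globbed (optionally filtered by extension) and sorted, a text file is read and its lines returned sorted, and anything else raises NotImplementedError. A minimal sketch of a function meeting that contract; this is not the real implementation, and the use of the natsort package for natural sorting is an assumption:

from pathlib import Path
from natsort import natsorted

def get_sorted_file_paths_sketch(file_path, file_extension="", encoding=None):
    # A list of paths: sort and return
    if isinstance(file_path, list):
        return natsorted(file_path)
    path = Path(file_path)
    # A directory: glob by (optional) extension, then sort
    if path.is_dir():
        return natsorted(str(p) for p in path.glob("*" + file_extension))
    # A text file: one path per line, returned sorted
    if path.suffix == ".txt":
        with open(path, encoding=encoding) as f:
            return natsorted(line.strip() for line in f)
    raise NotImplementedError(
        "Cannot determine how to read paths from: {}".format(file_path)
    )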
Code example #10
def test_cube_extraction(tmpdir, depth=20):
    tmpdir = str(tmpdir)
    args = CubeExtractArgs(tmpdir)

    planes_paths = {}
    planes_paths[0] = get_sorted_file_paths(signal_data_dir,
                                            file_extension="tif")
    planes_paths[1] = get_sorted_file_paths(background_data_dir,
                                            file_extension="tif")

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes = load_cubes_in_dir(validate_cubes_dir)
    test_cubes = load_cubes_in_dir(tmpdir)

    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes[idx] == test_cube).all()

    delete_directory_contents(tmpdir)

    # test cube scaling
    args.voxel_sizes = [7.25, 2, 2]
    args.x_pixel_um = 2
    args.y_pixel_um = 2
    args.z_pixel_um = 7.25

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes_scale = load_cubes_in_dir(validate_cubes_scale_dir)
    test_cubes = load_cubes_in_dir(tmpdir)
    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes_scale[idx] == test_cube).all()

    # test edge-of-data errors
    cell = Cell("x0y0z10", 2)
    plane_paths = os.listdir(signal_data_dir)
    first_plane = tifffile.imread(os.path.join(signal_data_dir,
                                               plane_paths[0]))
    stack_shape = first_plane.shape + (depth, )
    stacks = {}
    stacks[0] = np.zeros(stack_shape, dtype=np.uint16)
    stacks[0][:, :, 0] = first_plane

    for plane in range(1, depth):
        im_path = os.path.join(signal_data_dir, plane_paths[plane])
        stacks[0][:, :, plane] = tifffile.imread(im_path)

    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    cell = Cell("x2500y2500z10", 2)
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for a specific cube
    stacks[0] = stacks[0][:, :, 1:]
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for any cube to be extracted at all.
    delete_directory_contents(tmpdir)
    # args.z_pixel_um = 0.1
    args.voxel_sizes[0] = 0.1

    with pytest.raises(extract_cubes.StackSizeError):

        extract_cubes.main(
            get_cells(args.paths.cells_file_path),
            args.paths.tmp__cubes_output_dir,
            planes_paths,
            args.cube_depth,
            args.cube_width,
            args.cube_height,
            args.voxel_sizes,
            args.network_voxel_sizes,
            args.max_ram,
            args.n_free_cpus,
            args.save_empty_cubes,
        )
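The edge-of-data checks above build cells from position strings. A hedged aside: assuming the imlib Cell class, Cell("x0y0z10", 2) parses the string into coordinates x=0, y=0, z=10 with type code 2 (cell):

from imlib.cells.cells import Cell  # assumed import path

cell = Cell("x0y0z10", 2)
print(cell.x, cell.y, cell.z, cell.type)  # 0 0 10 2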
Code example #11
File: detect.py Project: vigji/cellfinder
def main(args):
    n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
    start_time = datetime.now()

    (
        soma_diameter,
        max_cluster_size,
        ball_xy_size,
        ball_z_size,
    ) = calculate_parameters_in_pixels(
        args.x_pixel_um,
        args.y_pixel_um,
        args.z_pixel_um,
        args.soma_diameter,
        args.max_cluster_size,
        args.ball_xy_size,
        args.ball_z_size,
    )

    # file extension only used if a directory is passed
    img_paths = get_sorted_file_paths(args.signal_planes_paths[0],
                                      file_extension="tif")

    if args.end_plane == -1:
        args.end_plane = len(img_paths)
    planes_paths_range = img_paths[args.start_plane:args.end_plane]

    workers_queue = MultiprocessingQueue(maxsize=n_processes)
    # WARNING: needs to be AT LEAST ball_z_size
    mp_3d_filter_queue = MultiprocessingQueue(maxsize=ball_z_size)
    for _ in range(n_processes):
        # placeholder values so the queue has the right size on the first run
        workers_queue.put(None)

    clipping_val, threshold_value, ball_filter, cell_detector = setup(
        img_paths[0],
        soma_diameter,
        ball_xy_size,
        ball_z_size,
        ball_overlap_fraction=args.ball_overlap_fraction,
        z_offset=args.start_plane,
    )

    progress_bar = tqdm(total=len(planes_paths_range),
                        desc="Processing planes")
    mp_3d_filter = Mp3DFilter(
        mp_3d_filter_queue,
        ball_filter,
        cell_detector,
        soma_diameter,
        args.output_dir,
        soma_size_spread_factor=args.soma_spread_factor,
        progress_bar=progress_bar,
        save_planes=args.save_planes,
        plane_directory=args.plane_directory,
        start_plane=args.start_plane,
        max_cluster_size=max_cluster_size,
        outlier_keep=args.outlier_keep,
        artifact_keep=args.artifact_keep,
        save_csv=args.save_csv,
    )

    # start 3D analysis (waits for planes in queue)
    bf_process = multiprocessing.Process(target=mp_3d_filter.process, args=())
    bf_process.start()  # needs to be started before the loop

    mp_tile_processor = MpTileProcessor(workers_queue, mp_3d_filter_queue)
    prev_lock = Lock()
    processes = []

    # start 2D tile filter (output goes into queue for 3D analysis)
    for plane_id, path in enumerate(planes_paths_range):
        workers_queue.get()
        lock = Lock()
        lock.acquire()
        p = multiprocessing.Process(
            target=mp_tile_processor.process,
            args=(
                plane_id,
                path,
                prev_lock,
                lock,
                clipping_val,
                threshold_value,
                soma_diameter,
                args.log_sigma_size,
                args.n_sds_above_mean_thresh,
            ),
        )
        prev_lock = lock
        processes.append(p)
        p.start()

    processes[-1].join()
    mp_3d_filter_queue.put((None, None, None))  # Signal the end
    bf_process.join()

    logging.info(
        "Detection complete - all planes done in: {}".format(
            datetime.now() - start_time))
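The easy-to-miss detail in this example is the lock daisy-chain: each plane worker receives the previous worker's lock plus a freshly acquired one of its own, so results enter the 3D filter queue strictly in plane order even though the 2D filtering runs in parallel. A stripped-down sketch of just that ordering pattern (the worker body is a placeholder):

import multiprocessing

def plane_worker(plane_id, prev_lock, own_lock, queue):
    result = plane_id * plane_id  # placeholder for the real 2D filtering
    prev_lock.acquire()           # block until the previous plane is queued
    queue.put((plane_id, result))
    own_lock.release()            # unblock the next plane's worker

if __name__ == "__main__":
    queue = multiprocessing.Queue()
    prev_lock = multiprocessing.Lock()  # first worker can proceed at once
    processes = []
    for plane_id in range(4):
        lock = multiprocessing.Lock()
        lock.acquire()  # held until this plane's result has been queued
        p = multiprocessing.Process(
            target=plane_worker, args=(plane_id, prev_lock, lock, queue)
        )
        prev_lock = lock
        processes.append(p)
        p.start()
    processes[-1].join()  # as above: the last plane finishing implies all done
    for _ in range(4):
        print(queue.get())  # normally (0, 0), (1, 1), (2, 4), (3, 9), in order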
Code example #12
File: curation.py Project: fossabot/cellfinder
def main():
    args = parser().parse_args()
    args = define_pixel_sizes(args)

    if args.output is None:
        output = Path(args.cells_xml)
        output_directory = output.parent
        print(f"No output directory given, so setting output "
              f"directory to: {output_directory}")
    else:
        output_directory = args.output

    ensure_directory_exists(output_directory)
    output_filename = output_directory / OUTPUT_NAME

    img_paths = get_sorted_file_paths(args.signal_image_paths,
                                      file_extension=".tif")
    cells, labels = get_cell_labels_arrays(args.cells_xml)

    properties = {"cell": labels}

    with napari.gui_qt():
        viewer = napari.Viewer(title="Cellfinder cell curation")
        images = magic_imread(img_paths, use_dask=True, stack=True)
        viewer.add_image(images)
        face_color_cycle = ["lightskyblue", "lightgoldenrodyellow"]
        points_layer = viewer.add_points(
            cells,
            properties=properties,
            symbol=args.symbol,
            n_dimensional=True,
            size=args.marker_size,
            face_color="cell",
            face_color_cycle=face_color_cycle,
            name="Cell candidates",
        )

        @viewer.bind_key("t")
        def toggle_point_property(viewer):
            """Toggle point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                selected_properties = viewer.layers[1].properties["cell"][
                    selected_points]
                toggled_properties = np.logical_not(selected_properties)
                viewer.layers[1].properties["cell"][
                    selected_points] = toggled_properties

                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(f"{len(selected_points)} points "
                      f"toggled and added to the list ")

                # refresh the properties colour
                viewer.layers[1].refresh_colors(update_color_mapping=False)

        @viewer.bind_key("c")
        def confirm_point_property(viewer):
            """Confirm point type"""
            selected_points = viewer.layers[1].selected_data
            if selected_points:
                # Add curated cells to list
                CURATED_POINTS.extend(selected_points)
                print(f"{len(selected_points)} points "
                      f"confirmed and added to the list ")

        @viewer.bind_key("Control-S")
        def save_curation(viewer):
            """Save file"""
            if not CURATED_POINTS:
                print("No cells have been confirmed or toggled, not saving")
            else:
                unique_cells = unique_elements_lists(CURATED_POINTS)
                points = viewer.layers[1].data[unique_cells]
                labels = viewer.layers[1].properties["cell"][unique_cells]
                labels = labels.astype("int")
                labels = labels + 1

                cells_to_save = []
                for idx, point in enumerate(points):
                    cell = Cell([point[2], point[1], point[0]], labels[idx])
                    cells_to_save.append(cell)

                print(f"Saving results to: {output_filename}")
                save_cells(cells_to_save, output_filename)

        @viewer.bind_key("Alt-E")
        def start_cube_extraction(viewer):
            """Extract cubes for training"""

            if not output_filename.exists():
                print("No curation results have been saved. "
                      "Please save before extracting cubes")
            else:
                print(f"Saving cubes to: {output_directory}")
                run_extraction(
                    output_filename,
                    output_directory,
                    args.signal_image_paths,
                    args.background_image_paths,
                    args.cube_depth,
                    args.cube_width,
                    args.cube_height,
                    args.x_pixel_um,
                    args.y_pixel_um,
                    args.z_pixel_um,
                    args.x_pixel_um_network,
                    args.y_pixel_um_network,
                    args.z_pixel_um_network,
                    args.max_ram,
                    args.n_free_cpus,
                    args.save_empty_cubes,
                )

                print("Saving yaml file to use for training")
                save_yaml_file(output_directory)

                print("Closing window")
                QApplication.closeAllWindows()
                print("Finished! You may now annotate more "
                      "datasets, or go straight to training")