Code Example #1
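Classifies detected cell candidates with a trained network, saves the classified cells, and returns whether any classified cells can be read back.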
def main(args, max_workers=3):
    signal_paths = args.signal_planes_paths[args.signal_channel]
    background_paths = args.background_planes_path[0]
    signal_images = get_sorted_file_paths(signal_paths, file_extension="tif")
    background_images = get_sorted_file_paths(background_paths,
                                              file_extension="tif")

    # Using too many workers doesn't increase speed and uses huge amounts of RAM
    workers = get_num_processes(min_free_cpu_cores=args.n_free_cpus,
                                n_max_processes=max_workers)

    logging.debug("Initialising cube generator")
    inference_generator = CubeGeneratorFromFile(
        args.paths.detected_points,
        signal_images,
        background_images,
        args.voxel_sizes,
        args.network_voxel_sizes,
        batch_size=args.batch_size,
        cube_width=args.cube_width,
        cube_height=args.cube_height,
        cube_depth=args.cube_depth,
    )

    model = get_model(
        existing_model=args.trained_model,
        model_weights=args.model_weights,
        network_depth=models[args.network_depth],
        inference=True,
    )

    logging.info("Running inference")
    predictions = model.predict(
        inference_generator,
        use_multiprocessing=True,
        workers=workers,
        verbose=True,
    )
    predictions = predictions.round()
    predictions = predictions.astype("uint16")

    predictions = np.argmax(predictions, axis=1)
    cells_list = []

    # only go through the "extractable" cells
    for idx, cell in enumerate(inference_generator.ordered_cells):
        cell.type = predictions[idx] + 1
        cells_list.append(cell)

    logging.info("Saving classified cells")
    save_cells(cells_list,
               args.paths.classified_points,
               save_csv=args.save_csv)
    try:
        get_cells(args.paths.classified_points, cells_only=True)
        return True
    except MissingCellsError:
        return False
Code Example #2
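Prepares TensorFlow and the model files for classification; if no detected cells are found, flags the downstream steps so they can be skipped.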
def prep_classification(args, what_to_run):
    try:
        get_cells(args.paths.detected_points)
        n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
        prep_tensorflow(n_processes)
        args = prep_models(args)
    except MissingCellsError:
        what_to_run.cells_exist = False
        what_to_run.candidates_exist = False
        what_to_run.update_if_cells_required()
    what_to_run.update_if_candidates_required()

    return args
Code Example #3
File: analyse.py Project: marieisaac/cellfinder
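Loads classified cells as an array of z, y, x positions and passes them, with the registration deformation fields, to run_analysis.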
def run(args, atlas, downsampled_space):
    deformation_field_paths = [
        args.brainreg_paths.deformation_field_0,
        args.brainreg_paths.deformation_field_1,
        args.brainreg_paths.deformation_field_2,
    ]

    cells = get_cells(args.paths.classified_points, cells_only=True)
    cell_list = []
    for cell in cells:
        cell_list.append([cell.z, cell.y, cell.x])
    cells = np.array(cell_list)

    run_analysis(
        cells,
        args.signal_planes_paths[0],
        args.orientation,
        args.voxel_sizes,
        atlas,
        deformation_field_paths,
        downsampled_space,
        args.paths.downsampled_points,
        args.paths.atlas_points,
        args.paths.brainrender_points,
        args.brainreg_paths.volume_csv_path,
        args.paths.all_points_csv,
        args.paths.summary_csv,
    )
Code Example #4
File: points_to_brainrender.py Project: vigji/neuro
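Scales cell positions to micrometres, flips one axis, and saves the result as an HDF5 file for brainrender.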
def run(
    cells_file,
    output_filename,
    pixel_size_x=10,
    pixel_size_y=10,
    pixel_size_z=10,
    max_z=13200,
    key="df",
):
    print(f"Converting file: {cells_file}")
    cells = cells_io.get_cells(cells_file)
    for cell in cells:
        cell.transform(
            x_scale=pixel_size_x,
            y_scale=pixel_size_y,
            z_scale=pixel_size_z,
            integer=True,
        )

    cells = cells_io.cells_to_dataframe(cells)
    cells.columns = ["z", "y", "x", "type"]

    cells["x"] = max_z - cells["x"]

    print("Ensuring output directory exists")
    ensure_directory_exists(output_filename.parent)

    print(f"Saving to: {output_filename}")
    cells.to_hdf(output_filename, key=key, mode="w")

    print("Finished")
Code Example #5
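Loads cells from a file, raising a ValueError if none are found.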
 def __load_cells(self):
     self.cells = get_cells(self.cells_file)
     if not self.cells:
         message = (f"No cells found, exiting. "
                    f"Please check the file: {self.cells_file}")
         logging.error(message)
         raise ValueError(message)
Code Example #6
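A thin wrapper that raises a custom exception when the file contains no cells.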
def get_cells_data(xml_file_path, cells_only=True):
    cells = get_cells(xml_file_path, cells_only=cells_only)
    if not cells:
        raise CellCountMissingCellsException(
            "No cells found in file: {}".format(xml_file_path)
        )
    return cells
Code Example #7
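End-to-end CLI test: runs cellfinder on test data and checks the classified cell and non-cell counts against validation data, within a tolerance.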
def test_detection_full(tmpdir):
    tmpdir = str(tmpdir)

    cellfinder_args = [
        "cellfinder",
        "-s",
        signal_data,
        "-b",
        background_data,
        "-o",
        tmpdir,
        "-v",
        z_pix,
        y_pix,
        x_pix,
        "--orientation",
        "psl",
        "--n-free-cpus",
        "0",
        "--no-register",
        "--save-planes",
    ]
    sys.argv = cellfinder_args
    cellfinder_run()

    cells_test_xml = os.path.join(tmpdir, "points", "cell_classification.xml")

    cells_validation = cell_io.get_cells(cells_validation_xml)
    cells_test = cell_io.get_cells(cells_test_xml)

    num_non_cells_validation = sum(
        [cell.type == 1 for cell in cells_validation]
    )
    num_cells_validation = sum([cell.type == 2 for cell in cells_validation])

    num_non_cells_test = sum([cell.type == 1 for cell in cells_test])
    num_cells_test = sum([cell.type == 2 for cell in cells_test])

    assert isclose(
        num_non_cells_validation,
        num_non_cells_test,
        abs_tol=DETECTION_TOLERANCE,
    )
    assert isclose(
        num_cells_validation, num_cells_test, abs_tol=DETECTION_TOLERANCE
    )
Code Example #8
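Tests get_cells against XML and YAML files and cube directories, and checks that an unsupported format raises NotImplementedError.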
def test_get_cells():
    cells = cell_io.get_cells(xml_path)
    assert len(cells) == 65
    assert Cell([2536, 523, 1286], 1) == cells[64]

    cells = cell_io.get_cells(yml_path)
    assert len(cells) == 250
    assert Cell([9170, 2537, 311], 1) == cells[194]

    cells = cell_io.get_cells(cubes_dir)
    assert len(cells) == 4
    assert natsorted(cubes_cells) == natsorted(cells)

    cells = cell_io.get_cells(roi_sorter_output_dir)
    assert len(cells) == 4
    assert natsorted(roi_sorter_cells) == natsorted(cells)

    with pytest.raises(NotImplementedError):
        cell_io.get_cells("misc_format.abc")
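Taken together, these cases show the pattern the rest of this page follows: load cells from a supported source, optionally filter or transform them, then save. A minimal sketch of that round trip, assuming imlib's cell IO module (imported as cell_io in several snippets here); both file paths are hypothetical:

from imlib.IO.cells import get_cells, save_cells

# Load from any supported source (XML or YAML file, or a cube directory).
cells = get_cells("cells.xml")  # hypothetical path

# Keep only classified cells, as the curation example below does.
cells = [c for c in cells if c.is_cell()]

# Rescale positions, as in the xml_scale example.
for cell in cells:
    cell.transform(x_scale=2, y_scale=2, z_scale=5, integer=True)

save_cells(cells, "cells_rescaled.xml")  # hypothetical path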
Code Example #9
File: xml_scale.py Project: Jun-Lizst/cellfinder-1
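Rescales the cell positions in an XML file and saves the result next to the input file (or to a given output directory).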
def xml_scale(
    xml_file,
    x_scale=1,
    y_scale=1,
    z_scale=1,
    output_directory=None,
    integer=True,
):
    # TODO: add a csv option

    """
    To rescale the cell positions within an XML file. For compatibility with
    other software, or if  data has been scaled after cell detection.
    :param xml_file: Any cellfinder xml file
    :param x_scale: Rescaling factor in the first dimension
    :param y_scale: Rescaling factor in the second dimension
    :param z_scale: Rescaling factor in the third dimension
    :param output_directory: Directory to save the rescaled XML file.
    Defaults to the same directory as the input XML file
    :param integer: Force integer cell positions (default: True)
    :return:
    """
    if x_scale == y_scale == z_scale == 1:
        raise CommandLineInputError(
            "All rescaling factors are 1, " "please check the input."
        )
    else:
        input_file = Path(xml_file)
        start_time = datetime.now()
        cells = cell_io.get_cells(xml_file)

        for cell in cells:
            cell.transform(
                x_scale=x_scale,
                y_scale=y_scale,
                z_scale=z_scale,
                integer=integer,
            )

        if output_directory:
            output_directory = Path(output_directory)
        else:
            output_directory = input_file.parent

        ensure_directory_exists(output_directory)
        output_filename = output_directory / (input_file.stem + "_rescaled")
        output_filename = output_filename.with_suffix(input_file.suffix)

        cell_io.save_cells(cells, output_filename)

        print(
            "Finished. Total time taken: {}".format(
                datetime.now() - start_time
            )
        )
Code Example #10
File: curation.py Project: peq10/cellfinder
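Splits candidates into cells and non-cells, then extracts image cubes for each group for curation.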
def run_extraction(
    output_filename,
    output_directory,
    signal_paths,
    background_paths,
    cube_depth,
    cube_width,
    cube_height,
    x_pixel_um,
    y_pixel_um,
    z_pixel_um,
    x_pixel_um_network,
    y_pixel_um_network,
    z_pixel_um_network,
    max_ram,
    n_free_cpus,
    save_empty_cubes,
):
    planes_paths = {}
    planes_paths[0] = get_sorted_file_paths(signal_paths,
                                            file_extension=".tif")
    planes_paths[1] = get_sorted_file_paths(background_paths,
                                            file_extension=".tif")

    all_candidates = get_cells(str(output_filename))

    cells = [c for c in all_candidates if c.is_cell()]
    non_cells = [c for c in all_candidates if not c.is_cell()]

    to_extract = {"cells": cells, "non_cells": non_cells}

    for cell_type, cell_list in to_extract.items():
        print(f"Extracting type: {cell_type}")
        cell_type_output_directory = output_directory / cell_type
        print(f"Saving to: {cell_type_output_directory}")
        ensure_directory_exists(str(cell_type_output_directory))
        extract_cubes_main(
            cell_list,
            cell_type_output_directory,
            planes_paths,
            cube_depth,
            cube_width,
            cube_height,
            x_pixel_um,
            y_pixel_um,
            z_pixel_um,
            x_pixel_um_network,
            y_pixel_um_network,
            z_pixel_um_network,
            max_ram,
            n_free_cpus,
            save_empty_cubes,
        )
Code Example #11
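Transforms classified cell positions into atlas space, exports them for brainrender, and summarises the cell positions.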
def run(args, atlas, downsampled_space):
    deformation_field_paths = [
        args.brainreg_paths.deformation_field_0,
        args.brainreg_paths.deformation_field_1,
        args.brainreg_paths.deformation_field_2,
    ]

    cells = get_cells(args.paths.classified_points, cells_only=True)
    cell_list = []
    for cell in cells:
        cell_list.append([cell.z, cell.y, cell.x])
    cells = np.array(cell_list)

    source_shape = tuple(
        imio.get_size_image_from_file_paths(
            args.signal_planes_paths[0]).values())
    source_shape = (source_shape[2], source_shape[1], source_shape[0])

    source_space = bgs.AnatomicalSpace(
        args.orientation,
        shape=source_shape,
        resolution=[float(i) for i in args.voxel_sizes],
    )

    transformed_cells = transform_points_to_atlas_space(
        cells,
        source_space,
        atlas,
        deformation_field_paths,
        downsampled_space,
        downsampled_points_path=args.paths.downsampled_points,
        atlas_points_path=args.paths.atlas_points,
    )

    logging.info("Exporting cells to brainrender")
    export_points(
        transformed_cells,
        atlas.resolution[0],
        args.paths.brainrender_points,
    )

    logging.info("Summarising cell positions")
    summarise_points(
        cells,
        transformed_cells,
        atlas,
        args.brainreg_paths.volume_csv_path,
        args.paths.all_points_csv,
        args.paths.summary_csv,
    )
Code Example #12
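Generates a deformation field, transforms cell positions into standard space, and saves them.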
def transform_cells_to_standard_space(args):
    if args.registration_config is None:
        args.registration_config = source_custom_config_cellfinder()

    reg_params = RegistrationParams(
        args.registration_config,
        affine_n_steps=args.affine_n_steps,
        affine_use_n_steps=args.affine_use_n_steps,
        freeform_n_steps=args.freeform_n_steps,
        freeform_use_n_steps=args.freeform_use_n_steps,
        bending_energy_weight=args.bending_energy_weight,
        grid_spacing=args.grid_spacing,
        smoothing_sigma_reference=args.smoothing_sigma_reference,
        smoothing_sigma_floating=args.smoothing_sigma_floating,
        histogram_n_bins_floating=args.histogram_n_bins_floating,
        histogram_n_bins_reference=args.histogram_n_bins_reference,
    )

    generate_deformation_field(args, reg_params)
    cells_only = not args.transform_all
    cells = get_cells(
        args.paths.classification_out_file, cells_only=cells_only
    )

    logging.info("Loading deformation field")
    deformation_field = load_any_image(
        args.paths.tmp__deformation_field, as_numpy=True
    )
    scales = get_scales(args, reg_params)
    field_scales = get_deformation_field_scales(reg_params)

    logging.info("Transforming cell positions")
    transformed_cells = transform_cell_positions(
        cells, deformation_field, field_scales, scales
    )

    logging.info("Saving transformed cell positions")

    save_cells(
        transformed_cells,
        args.paths.cells_in_standard_space,
        save_csv=args.save_csv,
    )

    if not args.debug:
        logging.info("Removing standard space transformation temp files")
        delete_temp(args.paths.standard_space_output_folder, args.paths)
Code Example #13
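Loads a cell file, optionally rescales the positions and filters to cells only, and returns an array of x, y, z positions.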
def get_cell_location_array(
    cell_file,
    cell_position_scaling=[None, None, None],
    cells_only=False,
    type_str="type",
    integer=True,
):
    """
    Loads a cell file, and converts to an array, with 3 columns of x,y,z
    positions
    :param cell_file: Any supported cell file, e.g. xml
    :param cell_position_scaling: list of cell scaling (raw -> final) for
    [x, y, z]
    :param cells_only: Whether only cells (rather than unknown or artifacts)
    should be included
    :param str type_str: Title of the cell type column in the dataframe.
    Used to remove non-cells (artifacts), and then to clean up the
    dataframe before conversion to a numpy array.
    :param integer: Force integer cell positions (default: True)
    :return: Array of cell positions, with x,y,z columns
    """

    logging.debug("Loading cells")
    cells = cell_io.get_cells(cell_file)

    if cell_position_scaling != [None, None, None]:
        for cell in cells:
            cell.transform(
                x_scale=cell_position_scaling[0],
                y_scale=cell_position_scaling[1],
                z_scale=cell_position_scaling[2],
                integer=integer,
            )

    cells = cell_io.cells_to_dataframe(cells)
    num_cells = len(cells[cells[type_str] == Cell.CELL])
    num_non_cells = len(cells[cells[type_str] == Cell.NO_CELL])
    logging.debug("{} cells, and {} non-cells".format(num_cells,
                                                      num_non_cells))
    if cells_only:
        logging.debug("Removing non cells")
        cells = cells[cells[type_str] == Cell.CELL]

    logging.debug("Tidying up dataframe to convert to array")
    cells.drop(type_str, axis=1, inplace=True)
    return cells.to_numpy()
Code Example #14
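Drives the cellfinder_xml_scale CLI and returns the rescaled cells.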
def run_xml_scale(xml_file, x_scale, y_scale, z_scale, output_dir):
    cellfinder_xml_scale_args = [
        "cellfinder_xml_scale",
        xml_file,
        "-x",
        str(x_scale),
        "-y",
        str(y_scale),
        "-z",
        str(z_scale),
        "-o",
        str(output_dir),
    ]

    sys.argv = cellfinder_xml_scale_args
    cellfinder_xml_scale_run()

    scaled_cells = get_cells(os.path.join(output_dir, SCALED_XML_FILE_NAME))
    return scaled_cells
Code Example #15
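Computes the mean cell position (centroid) in micrometres for each XML file in a directory and writes a summary CSV.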
def main(
    pixel_size_x=10,
    pixel_size_y=10,
    pixel_size_z=10,
    max_z=13200,
):
    args = parser().parse_args()
    print(
        f"Calculating centroid positions for cells in: {Path(args.directory).stem}"
    )
    if args.all:
        print("Including all cell positions")
    else:
        print(f"Only including cell positions of type: {Cell.CELL}")
    xml_files = glob(args.directory + "/*.xml")
    results = []
    for xml_file in xml_files:
        print(f"Processing: {Path(xml_file).stem}")
        cells = get_cells(xml_file)
        positions = []
        for cell in tqdm(cells):
            if args.all or (cell.type == Cell.CELL):
                positions.append([cell.x, cell.y, cell.z])
        if positions:  # only if cells included
            positions = np.array(positions)
            means = positions.mean(axis=0)
            results.append([Path(xml_file).stem] + means.tolist())

    df = pd.DataFrame(results)
    df.columns = ["file", "x_center_um", "y_center_um", "z_center_um"]
    df["x_center_um"] = df["x_center_um"] * pixel_size_x
    df["y_center_um"] = df["y_center_um"] * pixel_size_y
    df["z_center_um"] = df["z_center_um"] * pixel_size_z
    # df["z"] = max_z - cells["z"]
    filename = Path(args.directory) / "summary.csv"
    df.to_csv(filename, index=False)
Code Example #16
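Returns whether a points file contains any cells.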
def cells_exist(points_file):
    try:
        get_cells(points_file, cells_only=True)
        return True
    except MissingCellsError:
        return False
Code Example #17
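Round-trip test: cells written to XML are read back unchanged.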
def test_cells_to_xml(tmpdir):
    cells = cell_io.get_cells(xml_path)
    tmp_cells_out_path = os.path.join(str(tmpdir), "cells.xml")
    cell_io.cells_to_xml(cells, tmp_cells_out_path)
    assert cells == cell_io.get_cells(tmp_cells_out_path)
Code Example #18
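Tests cube extraction, including cube scaling, cells at the edge of the data, and stacks with too few z-planes.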
def test_cube_extraction(tmpdir, depth=20):
    tmpdir = str(tmpdir)
    args = CubeExtractArgs(tmpdir)

    planes_paths = {}
    planes_paths[0] = get_sorted_file_paths(signal_data_dir,
                                            file_extension="tif")
    planes_paths[1] = get_sorted_file_paths(background_data_dir,
                                            file_extension="tif")

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes = load_cubes_in_dir(validate_cubes_dir)
    test_cubes = load_cubes_in_dir(tmpdir)

    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes[idx] == test_cube).all()

    delete_directory_contents(tmpdir)

    # test cube scaling
    args.voxel_sizes = [7.25, 2, 2]
    args.x_pixel_um = 2
    args.y_pixel_um = 2
    args.z_pixel_um = 7.25

    extract_cubes.main(
        get_cells(args.paths.cells_file_path),
        args.paths.tmp__cubes_output_dir,
        planes_paths,
        args.cube_depth,
        args.cube_width,
        args.cube_height,
        args.voxel_sizes,
        args.network_voxel_sizes,
        args.max_ram,
        args.n_free_cpus,
        args.save_empty_cubes,
    )

    validation_cubes_scale = load_cubes_in_dir(validate_cubes_scale_dir)
    test_cubes = load_cubes_in_dir(tmpdir)
    for idx, test_cube in enumerate(test_cubes):
        assert (validation_cubes_scale[idx] == test_cube).all()

    #  test edge of data errors
    cell = Cell("x0y0z10", 2)
    plane_paths = os.listdir(signal_data_dir)
    first_plane = tifffile.imread(os.path.join(signal_data_dir,
                                               plane_paths[0]))
    stack_shape = first_plane.shape + (depth, )
    stacks = {}
    stacks[0] = np.zeros(stack_shape, dtype=np.uint16)
    stacks[0][:, :, 0] = first_plane

    for plane in range(1, depth):
        im_path = os.path.join(signal_data_dir, plane_paths[plane])
        stacks[0][:, :, plane] = tifffile.imread(im_path)

    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    cell = Cell("x2500y2500z10", 2)
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for a specific cube
    stacks[0] = stacks[0][:, :, 1:]
    cube = extract_cubes.Cube(cell, 0, stacks)
    assert (cube.data == 0).all()

    # test insufficient z-planes for any cube to be extracted at all.
    delete_directory_contents(tmpdir)
    # args.z_pixel_um = 0.1
    args.voxel_sizes[0] = 0.1

    with pytest.raises(extract_cubes.StackSizeError):
        extract_cubes.main(
            get_cells(args.paths.cells_file_path),
            args.paths.tmp__cubes_output_dir,
            planes_paths,
            args.cube_depth,
            args.cube_width,
            args.cube_height,
            args.voxel_sizes,
            args.network_voxel_sizes,
            args.max_ram,
            args.n_free_cpus,
            args.save_empty_cubes,
        )
Code Example #19
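Tests the XML scaling CLI against pre-computed validation files.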
def test_xml_scale_cli(tmpdir):
    scaled_cells = run_xml_scale(orig_xml_path, 0.5, 0.5, 1, tmpdir)
    assert scaled_cells == get_cells(half_scale_scaled_xml_path)

    scaled_cells = run_xml_scale(orig_xml_path, 10, 100, 1000, tmpdir)
    assert scaled_cells == get_cells(order_magnitude_scaled_xml_path)
Code Example #20
File: test_cells.py Project: vigji/imlib
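Tests that cells are grouped by z-plane, with the expected plane keys and group sizes.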
def test_group_cells_by_z():
    z_planes_validate = [
        1272,
        1273,
        1274,
        1275,
        1276,
        1277,
        1278,
        1279,
        1280,
        1281,
        1282,
        1283,
        1284,
        1285,
        1286,
        1287,
        1288,
        1289,
        1290,
        1291,
        1292,
        1294,
        1295,
        1296,
        1297,
        1298,
    ]

    cell_numbers_in_groups_validate = [
        1,
        3,
        7,
        8,
        3,
        1,
        4,
        3,
        1,
        2,
        2,
        1,
        1,
        2,
        5,
        2,
        2,
        2,
        3,
        1,
        1,
        6,
        1,
        1,
        1,
        1,
    ]

    cell_list = get_cells(xml_path)
    cells_groups = cells.group_cells_by_z(cell_list)
    z_planes_test = list(cells_groups.keys())
    z_planes_test.sort()

    assert z_planes_validate == z_planes_test

    cell_numbers_in_groups_test = [
        len(cells_groups[plane]) for plane in z_planes_test
    ]
    assert cell_numbers_in_groups_validate == cell_numbers_in_groups_test
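The structure under test is a mapping from z-plane to the cells on that plane. A short usage sketch, assuming group_cells_by_z lives in imlib.cells.cells (as the imlib project attribution suggests); the input path is hypothetical:

from imlib.IO.cells import get_cells
from imlib.cells import cells

cell_list = get_cells("cells.xml")  # hypothetical path
cells_groups = cells.group_cells_by_z(cell_list)

# Walk the planes in order, mirroring the sorted keys in the test above.
for z_plane in sorted(cells_groups.keys()):
    print(f"plane {z_plane}: {len(cells_groups[z_plane])} cells")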
Code Example #21
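Top-level pipeline: candidate detection, classification, position analysis, and figure generation, each gated by what_to_run flags.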
def run_all(args, what_to_run, atlas):

    from cellfinder_core.detect import detect
    from cellfinder_core.classify import classify
    from cellfinder_core.tools import prep
    from cellfinder_core.tools.IO import read_with_dask

    from cellfinder.analyse import analyse
    from cellfinder.figures import figures

    from cellfinder.tools.prep import (
        prep_candidate_detection,
        prep_channel_specific_general,
    )

    points = None
    signal_array = None
    args, what_to_run = prep_channel_specific_general(args, what_to_run)

    if what_to_run.detect:
        logging.info("Detecting cell candidates")
        args = prep_candidate_detection(args)
        signal_array = read_with_dask(
            args.signal_planes_paths[args.signal_channel]
        )

        points = detect.main(
            signal_array,
            args.start_plane,
            args.end_plane,
            args.voxel_sizes,
            args.soma_diameter,
            args.max_cluster_size,
            args.ball_xy_size,
            args.ball_z_size,
            args.ball_overlap_fraction,
            args.soma_spread_factor,
            args.n_free_cpus,
            args.log_sigma_size,
            args.n_sds_above_mean_thresh,
        )
        ensure_directory_exists(args.paths.points_directory)

        save_cells(
            points,
            args.paths.detected_points,
            save_csv=args.save_csv,
            artifact_keep=args.artifact_keep,
        )

    else:
        logging.info("Skipping cell detection")
        points = get_cells(args.paths.detected_points)

    if what_to_run.classify:
        model_weights = prep.prep_classification(
            args.trained_model,
            args.model_weights,
            args.install_path,
            args.model,
            args.n_free_cpus,
        )
        # classify only if candidates were actually detected (the original
        # repeated the outer condition, leaving the else branch unreachable)
        if cells_exist(args.paths.detected_points):
            if points is None:
                points = get_cells(args.paths.detected_points)
            if signal_array is None:
                signal_array = read_with_dask(
                    args.signal_planes_paths[args.signal_channel]
                )
            logging.info("Running cell classification")
            background_array = read_with_dask(args.background_planes_path[0])

            points = classify.main(
                points,
                signal_array,
                background_array,
                args.n_free_cpus,
                args.voxel_sizes,
                args.network_voxel_sizes,
                args.batch_size,
                args.cube_height,
                args.cube_width,
                args.cube_depth,
                args.trained_model,
                model_weights,
                args.network_depth,
            )
            save_cells(
                points,
                args.paths.classified_points,
                save_csv=args.save_csv,
            )

            what_to_run.cells_exist = cells_exist(args.paths.classified_points)

        else:
            logging.info("No cells were detected, skipping classification.")

    else:
        logging.info("Skipping cell classification")

    what_to_run.update_if_cells_required()

    if what_to_run.analyse or what_to_run.figures:
        downsampled_space = get_downsampled_space(
            atlas, args.brainreg_paths.boundaries_file_path
        )

    if what_to_run.analyse:
        points = get_cells(args.paths.classified_points, cells_only=True)
        if len(points) == 0:
            logging.info("No cells detected, skipping cell position analysis")
        else:
            logging.info("Analysing cell positions")
            analyse.run(args, points, atlas, downsampled_space)
    else:
        logging.info("Skipping cell position analysis")

    if what_to_run.figures:
        points = get_cells(args.paths.detected_points, cells_only=True)
        if len(points) == 0:
            logging.info("No cells detected, skipping")
        else:
            logging.info("Generating figures")
            figures.run(args, atlas, downsampled_space.shape)
    else:
        logging.info("Skipping figure generation")