Example #1
def setup(config_path):
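    # Use the args from an experiment file if one is passed on the command
    # line; otherwise fall back to the standard CLI parser.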
    if len(sys.argv) == 2 and sys.argv[1] != "-h":
        args = experiment_parser.GetArgs(sys.argv[1])
    else:
        args = cli_parser.get_args()

    ensure_directory_exists(args.output_dir)

    if args.config_path is None:
        args.config_path = config_path

    config = config_parser.GetConfig(args.config_path)
    options = config_parser.GetOptions(args.config_path)
    options.num_processes = get_num_processes(
        min_free_cpu_cores=options.n_free_cpus)

    fancylog.start_logging(
        args.output_dir,
        program_for_log,
        variables=[args],
        verbose=args.verbose,
        log_header="OPENDIRECTION LOG",
    )

    return args, options, config
Example #2
def main(args, max_workers=3):
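    # Classification step: build cubes around the previously detected cells,
    # run the trained network over them, and save the typed cells.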
    signal_paths = args.signal_planes_paths[args.signal_channel]
    background_paths = args.background_planes_path[0]
    signal_images = get_sorted_file_paths(signal_paths, file_extension="tif")
    background_images = get_sorted_file_paths(background_paths,
                                              file_extension="tif")

    # Using too many workers doesn't increase speed, and uses huge amounts of RAM
    workers = get_num_processes(min_free_cpu_cores=args.n_free_cpus,
                                n_max_processes=max_workers)

    logging.debug("Initialising cube generator")
    inference_generator = CubeGeneratorFromFile(
        args.paths.detected_points,
        signal_images,
        background_images,
        args.voxel_sizes,
        args.network_voxel_sizes,
        batch_size=args.batch_size,
        cube_width=args.cube_width,
        cube_height=args.cube_height,
        cube_depth=args.cube_depth,
    )

    model = get_model(
        existing_model=args.trained_model,
        model_weights=args.model_weights,
        network_depth=models[args.network_depth],
        inference=True,
    )

    logging.info("Running inference")
    predictions = model.predict(
        inference_generator,
        use_multiprocessing=True,
        workers=workers,
        verbose=True,
    )
    predictions = predictions.round()
    predictions = predictions.astype("uint16")

    predictions = np.argmax(predictions, axis=1)
    cells_list = []

    # only go through the "extractable" cells
    for idx, cell in enumerate(inference_generator.ordered_cells):
        cell.type = predictions[idx] + 1
        cells_list.append(cell)

    logging.info("Saving classified cells")
    save_cells(cells_list,
               args.paths.classified_points,
               save_csv=args.save_csv)
    try:
        get_cells(args.paths.classified_points, cells_only=True)
        return True
    except MissingCellsError:
        return False
Example #3
def prep_classification(args, what_to_run):
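    # If no detected cells are found, record that in `what_to_run` so the
    # downstream classification/candidate steps can be skipped.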
    try:
        get_cells(args.paths.detected_points)
        n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
        prep_tensorflow(n_processes)
        args = prep_models(args)
    except MissingCellsError:
        what_to_run.cells_exist = False
        what_to_run.candidates_exist = False
        what_to_run.update_if_cells_required()
    what_to_run.update_if_candidates_required()

    return args
Example #4
def threaded_load_from_sequence(
    paths_sequence,
    x_scaling_factor=1.0,
    y_scaling_factor=1.0,
    anti_aliasing=True,
    n_free_cpus=2,
):
    """
    Use multiprocessing to load a brain from a sequence of image paths.

    :param list paths_sequence: The sorted list of the planes paths on the
        filesystem
    :param float x_scaling_factor: The scaling of the brain along the x
        dimension (applied on loading before return)
    :param float y_scaling_factor: The scaling of the brain along the y
        dimension (applied on loading before return)
    :param bool anti_aliasing: Whether to apply a Gaussian filter to smooth
        the image prior to down-scaling. It is crucial to filter when
        down-sampling the image to avoid aliasing artifacts.
    :param int n_free_cpus: Number of cpu cores to leave free.
    :return: The loaded and scaled brain
    :rtype: np.ndarray
    """

    stacks = []
    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)

    # WARNING: will not work with interactive interpreter.
    pool = ProcessPoolExecutor(max_workers=n_processes)
    # FIXME: should detect and switch to other method

    n_paths_per_subsequence = math.ceil(len(paths_sequence) / n_processes)
    for i in range(n_processes):
        start_idx = i * n_paths_per_subsequence
        if start_idx >= len(paths_sequence):
            break
        else:
            end_idx = start_idx + n_paths_per_subsequence
            # Clamp to the sequence length so the final chunk keeps the last
            # plane (slicing to -1 would silently drop it)
            end_idx = min(end_idx, len(paths_sequence))
            sub_paths = paths_sequence[start_idx:end_idx]

        process = pool.submit(
            load_from_paths_sequence,
            sub_paths,
            x_scaling_factor,
            y_scaling_factor,
            anti_aliasing=anti_aliasing,
        )
        stacks.append(process)
    stack = np.dstack([s.result() for s in stacks])
    return stack
Example #5
def mult_exp_setup():
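    # Shared setup for multi-experiment runs: output directory, options,
    # logging, and one parsed config per experiment file found.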
    args = get_args()
    ensure_directory_exists(args.output_dir)
    options = config_parser.GetOptions(args.options)

    num_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
    options.num_processes = num_processes
    fancylog.start_logging(
        args.output_dir,
        program_for_log,
        variables=[args],
        verbose=args.verbose,
        log_header="OPENDIRECTION MULTI EXPERIMENT LOG",
    )

    experiment_files = glob(os.path.join(args.exp_files, "*.txt"))
    logging.info(f"Found {len(experiment_files)} experiment files")
    experiment_config_list = [
        experiment_parser.GetArgs(experiment_file)
        for experiment_file in experiment_files
    ]

    return args, options, experiment_config_list, num_processes
Example #6
def prep_training(args):
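    # Configure TensorFlow for the available cores and run model preparation
    # before training.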
    n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
    prep_tensorflow(n_processes)
    args = prep_models(args)
    return args
Example #7
def prep_classification(args):
    n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
    prep_tensorflow(n_processes)
    args = prep_models(args)
    return args
Example #8
def main(
    registration_config,
    target_brain_path,
    registration_output_folder,
    x_pixel_um=0.02,
    y_pixel_um=0.02,
    z_pixel_um=0.05,
    orientation="coronal",
    flip_x=False,
    flip_y=False,
    flip_z=False,
    rotation="x0y0z0",
    affine_n_steps=6,
    affine_use_n_steps=5,
    freeform_n_steps=6,
    freeform_use_n_steps=4,
    bending_energy_weight=0.95,
    grid_spacing=-10,
    smoothing_sigma_reference=-1.0,
    smoothing_sigma_floating=-1.0,
    histogram_n_bins_floating=128,
    histogram_n_bins_reference=128,
    n_free_cpus=2,
    sort_input_file=False,
    save_downsampled=True,
    additional_images_downsample=None,
    boundaries=True,
    debug=False,
):
    """
    The main function that will perform the library calls and register the
    atlas to the brain given on the CLI.

    :param registration_config:
    :param target_brain_path:
    :param registration_output_folder:
    :param x_pixel_um:
    :param y_pixel_um:
    :param z_pixel_um:
    :param orientation:
    :param flip_x:
    :param flip_y:
    :param flip_z:
    :param n_free_cpus:
    :param sort_input_file:
    :param save_downsampled:
    :param additional_images_downsample: dict of
    {image_name: image_to_be_downsampled}
    :return:
    """
    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
    load_parallel = n_processes > 1
    paths = Paths(registration_output_folder)
    atlas = RegistrationAtlas(registration_config,
                              dest_folder=Path(registration_output_folder))
    run = Run(paths, atlas, boundaries=boundaries, debug=debug)

    if run.preprocess:
        logging.info("Preprocessing data for registration")
        logging.info("Loading data")

        brain = BrainProcessor(
            atlas.pix_sizes,
            target_brain_path,
            registration_output_folder,
            x_pixel_um,
            y_pixel_um,
            z_pixel_um,
            original_orientation=orientation,
            load_parallel=load_parallel,
            sort_input_file=sort_input_file,
            n_free_cpus=n_free_cpus,
        )

        for element in ["atlas", "brain", "hemispheres"]:
            key = f"{element}_name"
            logging.debug(f"Transforming atlas file: {element}")
            nii_img = atlas.get_nii_from_element(key)
            data = np.asanyarray(nii_img.dataobj)

            logging.debug("Reorienting to sample orientation")
            data = np.transpose(data,
                                transpositions[brain.original_orientation])
            data = np.swapaxes(data, 0, 1)

            logging.debug("Reorientating to nifti orientation")
            data = flip_multiple(data, flips[orientation])

            logging.debug("Flipping to nifti orientation")
            data = flip_multiple(data, [flip_x, flip_y, flip_z])

            logging.debug("Rotating to sample orientation")
            data = rotate_multiple(data, rotation)

            new_img = nb.Nifti1Image(data, nii_img.affine, nii_img.header)
            brainio.to_nii(new_img, atlas.get_dest_path(key))

        if save_downsampled:
            brain.target_brain = brain.target_brain.astype(np.uint16,
                                                           copy=False)
            logging.info("Saving downsampled image")
            brain.save(paths.downsampled_brain_path)

        brain.filter()
        logging.info("Saving filtered image")
        brain.save(paths.tmp__downsampled_filtered)

        del brain

    if additional_images_downsample:
        for name, image in additional_images_downsample.items():
            if not check_downsampled(registration_output_folder, name):
                save_downsampled_image(
                    image,
                    name,
                    registration_output_folder,
                    atlas,
                    x_pixel_um=x_pixel_um,
                    y_pixel_um=y_pixel_um,
                    z_pixel_um=z_pixel_um,
                    orientation=orientation,
                    n_free_cpus=n_free_cpus,
                    sort_input_file=sort_input_file,
                    load_parallel=load_parallel,
                )
            else:
                logging.info(f"Image: {name} already downsampled, skipping.")

    if run.register:
        logging.info("Registering")

    if any([
            run.affine,
            run.freeform,
            run.segment,
            run.hemispheres,
            run.inverse_transform,
    ]):
        registration_params = RegistrationParams(
            registration_config,
            affine_n_steps=affine_n_steps,
            affine_use_n_steps=affine_use_n_steps,
            freeform_n_steps=freeform_n_steps,
            freeform_use_n_steps=freeform_use_n_steps,
            bending_energy_weight=bending_energy_weight,
            grid_spacing=grid_spacing,
            smoothing_sigma_reference=smoothing_sigma_reference,
            smoothing_sigma_floating=smoothing_sigma_floating,
            histogram_n_bins_floating=histogram_n_bins_floating,
            histogram_n_bins_reference=histogram_n_bins_reference,
        )
        brain_reg = BrainRegistration(
            registration_config,
            paths,
            registration_params,
            n_processes=n_processes,
        )

    if run.affine:
        logging.info("Starting affine registration")
        brain_reg.register_affine()

    if run.freeform:
        logging.info("Starting freeform registration")
        brain_reg.register_freeform()

    if run.segment:
        logging.info("Starting segmentation")
        brain_reg.segment()

    if run.hemispheres:
        logging.info("Segmenting hemispheres")
        brain_reg.register_hemispheres()

    if run.inverse_transform:
        logging.info("Generating inverse (sample to atlas) transforms")
        brain_reg.generate_inverse_transforms()

    if run.volumes:
        logging.info("Calculating volumes of each brain area")
        calculate_volumes(
            paths.registered_atlas_path,
            paths.hemispheres_atlas_path,
            atlas.get_element_path("structures_name"),
            registration_config,
            paths.volume_csv_path,
            left_hemisphere_value=int(atlas["left_hemisphere_value"]),
            right_hemisphere_value=int(atlas["right_hemisphere_value"]),
        )

    if run.boundaries:
        logging.info("Generating boundary image")
        calc_boundaries(
            paths.registered_atlas_path,
            paths.boundaries_file_path,
            atlas_config=registration_config,
        )

    if run.delete_temp:
        logging.info("Removing registration temp files")
        delete_temp(paths.registration_output_folder, paths)

    logging.info(f"amap completed. Results can be found here: "
                 f"{registration_output_folder}")
Example #9
def main(
    cells,
    cubes_output_dir,
    planes_paths,
    cube_depth,
    cube_width,
    cube_height,
    voxel_sizes,
    network_voxel_sizes,
    max_ram,
    n_free_cpus=4,
    save_empty_cubes=False,
):
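    # Extract image cubes centred on each candidate cell, reading only the
    # planes that are actually needed and splitting the work across processes.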

    start_time = datetime.now()

    if voxel_sizes[0] != network_voxel_sizes[0]:
        plane_scaling_factor = float(network_voxel_sizes[0]) / float(
            voxel_sizes[0]
        )
        num_planes_needed_for_cube = round(cube_depth * plane_scaling_factor)
    else:
        num_planes_needed_for_cube = cube_depth

    if num_planes_needed_for_cube > len(planes_paths[0]):
        raise StackSizeError(
            "The number of planes provided is not sufficient "
            "for any cubes to be extracted. Please check the "
            "input data"
        )

    first_plane = tifffile.imread(list(planes_paths.values())[0][0])

    planes_shape = first_plane.shape
    brain_depth = len(list(planes_paths.values())[0])

    # TODO: use to assert all centre planes processed
    center_planes = sorted(list(set([cell.z for cell in cells])))

    # REFACTOR: rename (clashes with different meaning of planes_to_read below)
    planes_to_read = np.zeros(brain_depth, dtype=bool)

    if is_even(num_planes_needed_for_cube):
        half_nz = num_planes_needed_for_cube // 2
        # WARNING: not centered because even
        for p in center_planes:
            planes_to_read[p - half_nz : p + half_nz] = 1
    else:
        half_nz = num_planes_needed_for_cube // 2
        # centered
        for p in center_planes:
            planes_to_read[p - half_nz : p + half_nz + 1] = 1

    planes_to_read = np.where(planes_to_read)[0]

    if not planes_to_read.size:
        logging.error(
            f"No planes found, you need at the very least "
            f"{num_planes_needed_for_cube} "
            f"planes to proceed (i.e. cube z size)"
            f"Brain z dimension is {brain_depth}.",
            stack_info=True,
        )
        raise ValueError(
            f"No planes found, you need at the very least "
            f"{num_planes_needed_for_cube} "
            f"planes to proceed (i.e. cube z size)"
            f"Brain z dimension is {brain_depth}."
        )
    # TODO: check if needs to flip args.cube_width and args.cube_height
    cells_groups = group_cells_by_z(cells)

    # copies=2 is set because at all times there is a plane queue (deque)
    # and an array passed to `Cube`
    ram_per_process = get_ram_requirement_per_process(
        planes_paths[0][0],
        num_planes_needed_for_cube,
        copies=2,
    )
    n_processes = get_num_processes(
        min_free_cpu_cores=n_free_cpus,
        ram_needed_per_process=ram_per_process,
        n_max_processes=len(planes_to_read),
        fraction_free_ram=0.2,
        max_ram_usage=system.memory_in_bytes(max_ram, "GB"),
    )
    # TODO: don't need to extract cubes from all channels if
    #  n_signal_channels>1
    with ProcessPoolExecutor(max_workers=n_processes) as executor:
        n_planes_per_chunk = len(planes_to_read) // n_processes
        for i in range(n_processes):
            start_idx = i * n_planes_per_chunk
            end_idx = (
                start_idx + n_planes_per_chunk + num_planes_needed_for_cube - 1
            )
            if end_idx > planes_to_read[-1]:
                end_idx = None
            sub_planes_to_read = planes_to_read[start_idx:end_idx]

            executor.submit(
                save_cubes,
                cells_groups,
                planes_paths,
                sub_planes_to_read,
                planes_shape,
                voxel_sizes,
                network_voxel_sizes,
                num_planes_for_cube=num_planes_needed_for_cube,
                cube_width=cube_width,
                cube_height=cube_height,
                cube_depth=cube_depth,
                thread_id=i,
                output_dir=cubes_output_dir,
                save_empty_cubes=save_empty_cubes,
            )

    total_cubes = system.get_number_of_files_in_dir(cubes_output_dir)
    time_taken = datetime.now() - start_time
    logging.info(
        "All cubes ({}) extracted in: {}".format(total_cubes, time_taken)
    )
Example #10
def main(
    atlas,
    data_orientation,
    target_brain_path,
    paths,
    niftyreg_args,
    x_pixel_um=0.02,
    y_pixel_um=0.02,
    z_pixel_um=0.05,
    n_free_cpus=2,
    sort_input_file=False,
    additional_images_downsample=None,
    backend="niftyreg",
    debug=False,
):

    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
    load_parallel = n_processes > 1

    # TODO: check orientation of atlas voxel sizes
    atlas_pixel_sizes = {
        "x": atlas.metadata["resolution"][0],
        "y": atlas.metadata["resolution"][1],
        "z": atlas.metadata["resolution"][2],
    }

    scaling_rounding_decimals = 5

    x_scaling = round(x_pixel_um / atlas_pixel_sizes["x"],
                      scaling_rounding_decimals)
    y_scaling = round(y_pixel_um / atlas_pixel_sizes["y"],
                      scaling_rounding_decimals)
    z_scaling = round(z_pixel_um / atlas_pixel_sizes["z"],
                      scaling_rounding_decimals)

    logging.info("Loading raw image data")

    target_brain = imio.load_any(
        target_brain_path,
        x_scaling,
        y_scaling,
        z_scaling,
        load_parallel=load_parallel,
        sort_input_file=sort_input_file,
        n_free_cpus=n_free_cpus,
    )

    target_brain = bg.map_stack_to(data_orientation,
                                   atlas.metadata["orientation"], target_brain)

    if backend == "niftyreg":
        run_niftyreg(
            paths.registration_output_folder,
            paths,
            atlas,
            atlas_pixel_sizes,
            target_brain,
            n_processes,
            additional_images_downsample,
            data_orientation,
            atlas.metadata["orientation"],
            niftyreg_args,
            x_scaling,
            y_scaling,
            z_scaling,
            load_parallel,
            sort_input_file,
            n_free_cpus,
            debug=debug,
        )

    logging.info("Calculating volumes of each brain area")
    calculate_volumes(
        atlas,
        paths.registered_atlas,
        paths.registered_hemispheres,
        paths.volume_csv_path,
        # for all brainglobe atlases
        left_hemisphere_value=1,
        right_hemisphere_value=2,
    )

    logging.info("Generating boundary image")
    boundaries(
        paths.registered_atlas,
        paths.boundaries_file_path,
    )

    logging.info(f"brainreg completed. Results can be found here: "
                 f"{paths.registration_output_folder}")
Example #11
def main(args):
    n_processes = get_num_processes(min_free_cpu_cores=args.n_free_cpus)
    start_time = datetime.now()

    (
        soma_diameter,
        max_cluster_size,
        ball_xy_size,
        ball_z_size,
    ) = calculate_parameters_in_pixels(
        args.x_pixel_um,
        args.y_pixel_um,
        args.z_pixel_um,
        args.soma_diameter,
        args.max_cluster_size,
        args.ball_xy_size,
        args.ball_z_size,
    )

    # file extension only used if a directory is passed
    img_paths = get_sorted_file_paths(args.signal_planes_paths[0],
                                      file_extension="tif")

    if args.end_plane == -1:
        args.end_plane = len(img_paths)
    planes_paths_range = img_paths[args.start_plane:args.end_plane]

    workers_queue = MultiprocessingQueue(maxsize=n_processes)
    # WARNING: needs to be AT LEAST ball_z_size
    mp_3d_filter_queue = MultiprocessingQueue(maxsize=ball_z_size)
    for plane_id in range(n_processes):
        # placeholder for the queue to have the right size on first run
        workers_queue.put(None)

    clipping_val, threshold_value, ball_filter, cell_detector = setup(
        img_paths[0],
        soma_diameter,
        ball_xy_size,
        ball_z_size,
        ball_overlap_fraction=args.ball_overlap_fraction,
        z_offset=args.start_plane,
    )

    progress_bar = tqdm(total=len(planes_paths_range),
                        desc="Processing planes")
    mp_3d_filter = Mp3DFilter(
        mp_3d_filter_queue,
        ball_filter,
        cell_detector,
        soma_diameter,
        args.output_dir,
        soma_size_spread_factor=args.soma_spread_factor,
        progress_bar=progress_bar,
        save_planes=args.save_planes,
        plane_directory=args.plane_directory,
        start_plane=args.start_plane,
        max_cluster_size=max_cluster_size,
        outlier_keep=args.outlier_keep,
        artifact_keep=args.artifact_keep,
        save_csv=args.save_csv,
    )

    # start 3D analysis (waits for planes in queue)
    bf_process = multiprocessing.Process(target=mp_3d_filter.process, args=())
    bf_process.start()  # needs to be started before the loop

    mp_tile_processor = MpTileProcessor(workers_queue, mp_3d_filter_queue)
    prev_lock = Lock()
    processes = []

    # start 2D tile filter (output goes into queue for 3D analysis)
    for plane_id, path in enumerate(planes_paths_range):
        workers_queue.get()
        lock = Lock()
        lock.acquire()
        p = multiprocessing.Process(
            target=mp_tile_processor.process,
            args=(
                plane_id,
                path,
                prev_lock,
                lock,
                clipping_val,
                threshold_value,
                soma_diameter,
                args.log_sigma_size,
                args.n_sds_above_mean_thresh,
            ),
        )
        prev_lock = lock
        processes.append(p)
        p.start()

    processes[-1].join()
    mp_3d_filter_queue.put((None, None, None))  # Signal the end
    bf_process.join()

    logging.info(
        "Detection complete - all planes done in : {}".format(datetime.now() -
                                                              start_time))
Example #12
def main(max_workers=3):
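    # Train the cell classification network from cube data listed in a YAML
    # file, optionally holding out a test fraction and saving checkpoints.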
    from cellfinder.main import suppress_tf_logging

    suppress_tf_logging(tf_suppress_log_messages)

    from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint

    from cellfinder.tools.prep import prep_training
    from cellfinder.classify.tools import make_lists, get_model
    from cellfinder.classify.cube_generator import CubeGeneratorFromDisk

    start_time = datetime.now()
    args = training_parse()
    output_dir = Path(args.output_dir)
    ensure_directory_exists(output_dir)
    args = prep_training(args)
    yaml_contents = parse_yaml(args.yaml_file)
    tiff_files = get_tiff_files(yaml_contents)

    # Using too many workers doesn't increase speed, and uses huge amounts of RAM
    workers = get_num_processes(
        min_free_cpu_cores=args.n_free_cpus, n_max_processes=max_workers
    )

    model = get_model(
        existing_model=args.trained_model,
        model_weights=args.model_weights,
        network_depth=models[args.network_depth],
        learning_rate=args.learning_rate,
        continue_training=args.continue_training,
    )

    signal_train, background_train, labels_train = make_lists(tiff_files)

    if args.test_fraction > 0:
        (
            signal_train,
            signal_test,
            background_train,
            background_test,
            labels_train,
            labels_test,
        ) = train_test_split(
            signal_train,
            background_train,
            labels_train,
            test_size=args.test_fraction,
        )
        validation_generator = CubeGeneratorFromDisk(
            signal_test,
            background_test,
            labels=labels_test,
            batch_size=args.batch_size,
            train=True,
        )
    else:
        validation_generator = None

    training_generator = CubeGeneratorFromDisk(
        signal_train,
        background_train,
        labels=labels_train,
        batch_size=args.batch_size,
        shuffle=True,
        train=True,
        augment=not args.no_augment,
    )
    callbacks = []

    if args.tensorboard:
        logdir = output_dir / "tensorboard"
        ensure_directory_exists(logdir)
        tensorboard = TensorBoard(
            log_dir=logdir,
            histogram_freq=0,
            write_graph=True,
            update_freq="epoch",
        )
        callbacks.append(tensorboard)

    if args.save_checkpoints:
        if args.save_weights:
            filepath = str(
                output_dir / "weights.{epoch:02d}-{val_loss:.3f}.h5"
            )
        else:
            filepath = str(output_dir / "model.{epoch:02d}-{val_loss:.3f}.h5")

        checkpoints = ModelCheckpoint(
            filepath, save_weights_only=args.save_weights
        )
        callbacks.append(checkpoints)

    model.fit(
        training_generator,
        validation_data=validation_generator,
        use_multiprocessing=True,
        workers=workers,
        epochs=args.epochs,
        callbacks=callbacks,
    )

    if args.save_weights:
        print("Saving model weights")
        model.save_weights(str(output_dir / "model_weights.h5"))
    else:
        print("Saving model")
        model.save(output_dir / "model.h5")

    print(
        "Finished training, total time taken: {}".format(
            datetime.now() - start_time
        )
    )
Example #13
def main(
    registration_config,
    target_brain_path,
    registration_output_folder,
    x_pixel_um=0.02,
    y_pixel_um=0.02,
    z_pixel_um=0.05,
    orientation="coronal",
    flip_x=False,
    flip_y=False,
    flip_z=False,
    affine_n_steps=6,
    affine_use_n_steps=5,
    freeform_n_steps=6,
    freeform_use_n_steps=4,
    bending_energy_weight=0.95,
    grid_spacing=-10,
    smoothing_sigma_reference=-1.0,
    smoothing_sigma_floating=-1.0,
    histogram_n_bins_floating=128,
    histogram_n_bins_reference=128,
    n_free_cpus=2,
    sort_input_file=False,
    save_downsampled=True,
    additional_images_downsample=None,
    boundaries=True,
    debug=False,
):
    """
    The main function that will perform the library calls and register the
    atlas to the brain given on the CLI.

    :param registration_config:
    :param target_brain_path:
    :param registration_output_folder:
    :param x_pixel_um:
    :param y_pixel_um:
    :param z_pixel_um:
    :param orientation:
    :param flip_x:
    :param flip_y:
    :param flip_z:
    :param n_free_cpus:
    :param sort_input_file:
    :param save_downsampled:
    :param additional_images_downsample: dict of
    {image_name: image_to_be_downsampled}
    :return:
    """
    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
    load_parallel = n_processes > 1
    paths = Paths(registration_output_folder)
    atlas = Atlas(registration_config, dest_folder=registration_output_folder)
    run = Run(paths, atlas, boundaries=boundaries, debug=debug)

    if run.preprocess:
        logging.info("Preprocessing data for registration")
        logging.info("Loading data")

        brain = BrainProcessor(
            atlas,
            target_brain_path,
            registration_output_folder,
            x_pixel_um,
            y_pixel_um,
            z_pixel_um,
            original_orientation=orientation,
            load_parallel=load_parallel,
            sort_input_file=sort_input_file,
            n_free_cpus=n_free_cpus,
        )

        # reorients the atlas to the orientation of the sample
        brain.swap_atlas_orientation_to_self()

        # reorients atlas to the nifti (origin is the most ventral, posterior,
        # left voxel) coordinate framework

        flip = flips[orientation]
        brain.flip_atlas(flip)

        # flips if the input data doesn't match the nifti standard
        brain.flip_atlas((flip_x, flip_y, flip_z))

        brain.atlas.save_all()
        if save_downsampled:
            brain.target_brain = brain.target_brain.astype(
                np.uint16, copy=False
            )
            logging.info("Saving downsampled image")
            brain.save(paths.downsampled_brain_path)

        brain.filter()
        logging.info("Saving filtered image")
        brain.save(paths.tmp__downsampled_filtered)

        del brain

    if additional_images_downsample:
        for name, image in additional_images_downsample.items():
            if not check_downsampled(registration_output_folder, name):
                save_downsampled_image(
                    image,
                    name,
                    registration_output_folder,
                    atlas,
                    x_pixel_um=x_pixel_um,
                    y_pixel_um=y_pixel_um,
                    z_pixel_um=z_pixel_um,
                    orientation=orientation,
                    n_free_cpus=n_free_cpus,
                    sort_input_file=sort_input_file,
                    load_parallel=load_parallel,
                )
            else:
                logging.info(f"Image: {name} already downsampled, skipping.")

    if run.register:
        logging.info("Registering")

    if any(
        [
            run.affine,
            run.freeform,
            run.segment,
            run.hemispheres,
            run.inverse_transform,
        ]
    ):
        registration_params = RegistrationParams(
            registration_config,
            affine_n_steps=affine_n_steps,
            affine_use_n_steps=affine_use_n_steps,
            freeform_n_steps=freeform_n_steps,
            freeform_use_n_steps=freeform_use_n_steps,
            bending_energy_weight=bending_energy_weight,
            grid_spacing=grid_spacing,
            smoothing_sigma_reference=smoothing_sigma_reference,
            smoothing_sigma_floating=smoothing_sigma_floating,
            histogram_n_bins_floating=histogram_n_bins_floating,
            histogram_n_bins_reference=histogram_n_bins_reference,
        )
        brain_reg = BrainRegistration(
            registration_config,
            paths,
            registration_params,
            n_processes=n_processes,
        )

    if run.affine:
        logging.info("Starting affine registration")
        brain_reg.register_affine()

    if run.freeform:
        logging.info("Starting freeform registration")
        brain_reg.register_freeform()

    if run.segment:
        logging.info("Starting segmentation")
        brain_reg.segment()

    if run.hemispheres:
        logging.info("Segmenting hemispheres")
        brain_reg.register_hemispheres()

    if run.inverse_transform:
        logging.info("Generating inverse (sample to atlas) transforms")
        brain_reg.generate_inverse_transforms()

    if run.volumes:
        logging.info("Calculating volumes of each brain area")
        calculate_volumes(
            paths.registered_atlas_path,
            paths.hemispheres_atlas_path,
            atlas.get_structures_path(),
            registration_config,
            paths.volume_csv_path,
            left_hemisphere_value=atlas.get_left_hemisphere_value(),
            right_hemisphere_value=atlas.get_right_hemisphere_value(),
        )

    if run.boundaries:
        logging.info("Generating boundary image")
        calc_boundaries(
            paths.registered_atlas_path,
            paths.boundaries_file_path,
            atlas_config=registration_config,
        )

    if run.delete_temp:
        logging.info("Removing registration temp files")
        delete_temp(paths.registration_output_folder, paths)

    logging.info(
        f"amap completed. Results can be found here: "
        f"{registration_output_folder}"
    )
Example #14
def test_max_processes():
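    # With no cores reserved, get_num_processes should be capped by
    # n_max_processes when that is below the available core count.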
    max_proc = 5
    correct_n = min(len(os.sched_getaffinity(0)), max_proc)
    assert correct_n == system.get_num_processes(n_max_processes=max_proc,
                                                 min_free_cpu_cores=0)
Example #15
def test_get_num_processes():
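    # With no cores reserved, every CPU in the process affinity mask should be
    # made available.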
    assert len(os.sched_getaffinity(0)) == system.get_num_processes(
        min_free_cpu_cores=0)
Example #16
def test_get_num_processes():
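    # Same check as above, but using os.cpu_count() as the reference.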
    assert os.cpu_count() == system.get_num_processes(min_free_cpu_cores=0)
Example #17
def test_max_processes():
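    # Same cap check as above, but using os.cpu_count() as the reference.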
    max_proc = 5
    correct_n = min(os.cpu_count(), max_proc)
    assert correct_n == system.get_num_processes(n_max_processes=max_proc,
                                                 min_free_cpu_cores=0)
Example #18
def main(
    atlas,
    data_orientation,
    target_brain_path,
    paths,
    voxel_sizes,
    niftyreg_args,
    n_free_cpus=2,
    sort_input_file=False,
    additional_images_downsample=None,
    backend="niftyreg",
    scaling_rounding_decimals=5,
    debug=False,
):
    atlas = BrainGlobeAtlas(atlas)
    source_space = bg.AnatomicalSpace(data_orientation)

    scaling = []
    for idx, axis in enumerate(atlas.space.axes_order):
        scaling.append(
            round(
                float(voxel_sizes[idx]) /
                atlas.resolution[atlas.space.axes_order.index(
                    source_space.axes_order[idx])],
                scaling_rounding_decimals,
            ))

    n_processes = get_num_processes(min_free_cpu_cores=n_free_cpus)
    load_parallel = n_processes > 1

    logging.info("Loading raw image data")

    target_brain = imio.load_any(
        target_brain_path,
        scaling[1],
        scaling[2],
        scaling[0],
        load_parallel=load_parallel,
        sort_input_file=sort_input_file,
        n_free_cpus=n_free_cpus,
    )

    target_brain = bg.map_stack_to(data_orientation,
                                   atlas.metadata["orientation"], target_brain)

    if backend == "niftyreg":
        run_niftyreg(
            paths.registration_output_folder,
            paths,
            atlas,
            target_brain,
            n_processes,
            additional_images_downsample,
            data_orientation,
            atlas.metadata["orientation"],
            niftyreg_args,
            scaling,
            load_parallel,
            sort_input_file,
            n_free_cpus,
            debug=debug,
        )

    logging.info("Calculating volumes of each brain area")
    calculate_volumes(
        atlas,
        paths.registered_atlas,
        paths.registered_hemispheres,
        paths.volume_csv_path,
        # for all brainglobe atlases
        left_hemisphere_value=1,
        right_hemisphere_value=2,
    )

    logging.info("Generating boundary image")
    boundaries(
        paths.registered_atlas,
        paths.boundaries_file_path,
    )

    logging.info(f"brainreg completed. Results can be found here: "
                 f"{paths.registration_output_folder}")