Example #1
    def load_specimen_into_vpv(self, spec_dir: Path, rev=True, title=None):


        invert_yaml = next(spec_dir.glob('**/inverted_transforms/invert.yaml'))
        with open(invert_yaml, 'r') as fh:
            invert_order = yaml.safe_load(fh)['inversion_order']

        # 080121 Both methods of label propagation now use the rigidly-aligned images as the background onto which labels are overlaid
        vol_dir = next(spec_dir.rglob('**/reg*/*rigid*'))

        if not rev:
            try:
                lab_dir = next(spec_dir.rglob('**/inverted_labels/similarity'))
            except StopIteration:
                lab_dir = next(spec_dir.rglob('**/inverted_labels/affine'))
        else:
            # Labels propagated by reverse registration
            last_dir = invert_order[-1]
            lab_dir = next(spec_dir.rglob(f'**/inverted_labels/{last_dir}'))

        vol = get_file_paths(vol_dir, ignore_folders=SUBFOLDERS_TO_IGNORE)[0]
        lab = get_file_paths(lab_dir, ignore_folders=SUBFOLDERS_TO_IGNORE)[0]

        vpv_ids = self.vpv.load_volumes([vol, lab], 'vol')

        return vpv_ids
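
The method above keys off the 'inversion_order' list in invert.yaml: the last entry names the inverted_labels subfolder holding the propagated labels. A minimal sketch of that lookup, with hypothetical stage names and a placeholder path:

import yaml

# Hypothetical invert.yaml produced by the inversion step:
#   inversion_order:
#     - deformable
#     - affine
#     - rigid
with open('invert.yaml') as fh:                 # placeholder path
    invert_order = yaml.safe_load(fh)['inversion_order']

last_dir = invert_order[-1]                     # 'rigid' in this hypothetical ordering
# labels are then read from <spec_dir>/.../inverted_labels/rigid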
Example #2
def label_sizes(label_dir: Path, outpath: Path, mask_dir=None):
    """
    Given a directory of labelmaps and whole embryo masks, generate a csv file containing organ volumes normalised to
    mask size

    Parameters
    ----------
    label_dir: Path
        directory containing (inverted) label maps - can be in subdirectories
    outpath: Path
        path to save the generated csv
    mask_dir: Path
        directory containing (inverted) masks - can be in subdirectories

    """

    label_df = _get_label_sizes(get_file_paths(label_dir))

    if mask_dir:

        mask_df = _get_label_sizes(get_file_paths(mask_dir))

        label_df = label_df.divide(mask_df[1], axis=0)

    try:
        label_df.to_csv(outpath)
    except PermissionError:
        os.remove(outpath)
        label_df.to_csv(outpath)
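
A hedged usage sketch of label_sizes (paths are placeholders): when mask_dir is given, each specimen's organ voxel counts appear to be divided by label 1 of its mask table, i.e. normalised to whole-embryo size.

from pathlib import Path

# Hypothetical LAMA output locations
label_sizes(label_dir=Path('out/inverted_labels'),
            outpath=Path('out/organ_volumes.csv'),
            mask_dir=Path('out/inverted_stats_masks'))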
Example #3
def main():
    parser = argparse.ArgumentParser(
        "fix specified labels using the active contour tool")
    parser.add_argument('-c',
                        '--config',
                        dest='config_file',
                        help='config file for label_fixer')

    args = parser.parse_args()

    # get configuration by reading the toml file
    config = get_config(args.config_file)

    # Set the rigid img and inverted label paths from the config
    rigid_img_path = config["rigid_img_path"]
    inverted_labels_path = config["inverted_labels_path"]

    # there should be multiple rigid images and labels, therefore get all filepaths
    rigid_paths = common.get_file_paths(Path(rigid_img_path))
    label_paths = common.get_file_paths(Path(inverted_labels_path))

    # for each volume, perform multiple active contours in the order and with the parameters specified within the config
    for i in range(len(rigid_paths)):
        parameters = config['active_contour_params']
        for (lab_of_int, kmean, num_int,
             prop_scaling) in zip(parameters['lab_of_int'],
                                  parameters['kmean'], parameters['num_int'],
                                  parameters['prop_scaling']):
            fix_label(Path(rigid_paths[i]), Path(label_paths[i]), lab_of_int,
                      kmean, num_int, prop_scaling)
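
The inner loop zips four parallel lists from the config, so position i across the lists defines one fix_label() call. A sketch of the structure get_config() is assumed to return here (all values hypothetical):

# Hypothetical parsed TOML for the label fixer
config = {
    'rigid_img_path': 'out/registrations/rigid',
    'inverted_labels_path': 'out/inverted_labels',
    'active_contour_params': {
        # Parallel lists: index i across all four = one active-contour pass
        'lab_of_int':   [3, 7],       # label IDs to fix
        'kmean':        [4, 4],       # per-pass k-means setting
        'num_int':      [50, 100],    # per-pass iteration count
        'prop_scaling': [1.0, 0.8],   # per-pass propagation scaling
    },
}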
Example #4
def convert_16_bit_to_8bit(indir, outdir, clobber: bool):

    paths = common.get_file_paths(Path(indir))

    for inpath in paths:
        arr, header = nrrd.read(str(inpath))

        if arr.dtype not in (np.uint16, np.int16):
            print(("skipping {}. Not 16bit".format(inpath.name)))
            arr_cast = arr

        else:
            if arr.max() <= 255:
                print(("16bit image but with 8 bit intensity range {}".format(inpath.name)))
                arr_cast = arr.astype(np.uint8)

            # Fix the negative values, which can be caused by the registration process. Otherwise we end up with highlights
            # where there should be black
            else:
                if arr.dtype == np.int16:
                    # Shift the signed (int16) values up into the unsigned range
                    print('signed 16-bit image: shifting to unsigned range')
                    negative_range = np.power(2, 16) // 2
                    arr = arr.astype(np.int32) + negative_range
                # Do the cast
                arr2 = arr / 256
                arr_cast = arr2.astype(np.uint8)
                print((arr_cast.min(), arr_cast.max()))

        if clobber:
            outpath = inpath
        else:
            outpath = Path(outdir) / inpath.name
        nrrd.write(str(outpath), arr_cast, header=header)
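
A small numpy sketch of the arithmetic above: signed 16-bit values are shifted up by 2**16 / 2 = 32768 into the unsigned range, then divided by 256 to land in 0-255 (int32 is used here so the shift cannot overflow a 16-bit array).

import numpy as np

arr = np.array([-32768, 0, 32767], dtype=np.int32)  # extremes of the int16 range
arr += 2 ** 16 // 2                                  # -> [0, 32768, 65535]
arr_8bit = (arr / 256).astype(np.uint8)              # -> [0, 128, 255]
print(arr_8bit)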
Example #5
def move_intemediate_volumes(reg_outdir: Path):
    """
    If using elastix multi-resolution registration and writing an image at each resolution, move the intermediate files
    into a separate folder
    """
    imgs = common.get_file_paths(reg_outdir)
    intermediate_imgs = [x for x in imgs if basename(x).startswith('result.')]
    if len(intermediate_imgs) > 0:
        int_dir = join(reg_outdir, RESOLUTION_IMG_FOLDER)
        common.mkdir_force(int_dir)
        for int_img in intermediate_imgs:
            shutil.move(str(int_img), str(int_dir))
Example #6
    def make_average(self, out_path):
        """
        Create an average of the input embryo volumes.
        This will search subfolders for all the registered volumes within them
        """
        vols = common.get_file_paths(self.stagedir,
                                     ignore_folder=RESOLUTION_IMG_FOLDER)
        #logging.info("making average from following volumes\n {}".format('\n'.join(vols)))

        average = common.average(vols)

        sitk.WriteImage(average, out_path, True)
Example #7
    def run(self):

        if self.movdir.is_file():
            moving_imgs = [self.movdir]
        else:
            moving_imgs = common.get_file_paths(
                self.movdir, ignore_folder=RESOLUTION_IMG_FOLDER
            )  # This breaks if not run from the config dir

        if len(moving_imgs) < 1:
            raise common.LamaDataException("No volumes in {}".format(
                self.movdir))

        for mov in moving_imgs:
            mov_basename = mov.stem
            outdir = self.stagedir / mov_basename
            outdir.mkdir(parents=True)

            cmd = {
                'mov': str(mov),
                'fixed': str(self.fixed),
                'outdir': str(outdir),
                'elxparam_file': str(self.elxparam_file),
                'threads': self.threads
            }
            if self.fixed_mask is not None:
                cmd['fixed_mask'] = str(self.fixed_mask)

            run_elastix(cmd)

            # Rename the registered output.
            elx_outfile = outdir / f'result.0.{self.filetype}'
            new_out_name = outdir / f'{mov_basename}.{self.filetype}'

            try:
                shutil.move(elx_outfile, new_out_name)
            except IOError:
                logging.error(
                    'Cannot find elastix output. Ensure the following is not set: (WriteResultImage  "false")'
                )
                raise

            move_intemediate_volumes(outdir)

            # add registration metadata
            reg_metadata_path = outdir / common.INDV_REG_METADATA
            fixed_vol_relative = relpath(self.fixed, outdir)
            reg_metadata = {'fixed_vol': fixed_vol_relative}

            with open(reg_metadata_path, 'w') as fh:
                fh.write(yaml.dump(reg_metadata, default_flow_style=False))
Example #8
def get_pairs(inputs_dir):
    """
    Given the input directory, return the pairwise combinations as a dict mapping 'name1_name2' (extensions stripped) to the (id1, id2) tuple

    """
    specimen_ids = [Path(x).stem for x in common.get_file_paths(inputs_dir)]
    perms = list(permutations(specimen_ids, r=2))
    d = {}
    for p in perms:
        k = f'{p[0]}_{p[1]}'
        d[k] = p
    # k = {(f'{x[0]}_{x[1]}'): x for x in perms}
    return d
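
A hedged usage sketch: for an inputs directory containing, say, spec_a.nrrd and spec_b.nrrd (hypothetical names), get_pairs returns both orderings keyed by the joined stems.

from pathlib import Path

# Hypothetical inputs dir holding spec_a.nrrd and spec_b.nrrd
pairs = get_pairs(Path('inputs'))
# {'spec_a_spec_b': ('spec_a', 'spec_b'),
#  'spec_b_spec_a': ('spec_b', 'spec_a')}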
Example #9
def get_heatmaps(dir, s):
    heatmap_list = []
    spec_name_list = []
    map_paths = [spec_path for spec_path in common.get_file_paths(dir) if 'log_jac' in str(spec_path)]
    for heatmap_path in map_paths:
        heatmap, map_h = nrrd.read(heatmap_path)
        # only get heatmap vals inside of the mask

        spec_name_list.append(os.path.splitext(heatmap_path.name)[0].replace("log_jac_",""))
        heatmap = heatmap[s[0].start:s[0].stop,
                  s[1].start:s[1].stop,
                  s[2].start:s[2].stop]
        heatmap_list.append(heatmap)
    return heatmap_list, spec_name_list
Example #10
def get_largest_dimensions(indirs: Iterable[Path]) -> Tuple[int]:
    max_dims = None

    for dir_ in indirs:

        volpaths = common.get_file_paths(dir_)

        for path in volpaths:
            im = sitk.ReadImage(str(path))
            dims = im.GetSize()

            if not max_dims:
                max_dims = dims
            else:
                max_dims = [max(d[0], d[1]) for d in zip(dims, max_dims)]

    return max_dims
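
The running maximum is taken per axis, so the result is the smallest size that contains every volume. A tiny sketch of that element-wise reduction:

dims_a = (300, 200, 400)   # hypothetical image sizes (x, y, z)
dims_b = (280, 250, 380)
max_dims = [max(pair) for pair in zip(dims_a, dims_b)]
print(max_dims)            # [300, 250, 400]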
Example #11
def overlay_labels(first_stage_reg_dir: Path,
                   inverted_labeldir: Path,
                   out_dir_labels: Path):
    """
    Overlay the first registered image (rigid) with the corresponding inverted labels.
    It depends on the registered volumes and inverted label maps being named identically

    TODO: Add axial and coronal views.
    """

    for vol_path in common.get_file_paths(first_stage_reg_dir, ignore_folder=IGNORE_FOLDER):

        vol_reader = common.LoadImage(vol_path)

        if not vol_reader:
            logging.error(f'cannot create qc image from {vol_path}')
            return

        label_path = inverted_labeldir / vol_path.stem / vol_path.name

        if label_path.is_file():
            label_reader = common.LoadImage(label_path)

            if not label_reader:
                logging.error(f'cannot create qc image from label file {label_path}')
                return

            cast_img = sitk.Cast(sitk.RescaleIntensity(vol_reader.img), sitk.sitkUInt8)
            arr = sitk.GetArrayFromImage(cast_img)
            slice_ = np.flipud(arr[:, :, arr.shape[2] // 2])
            l_arr = label_reader.array
            l_slice_ = np.flipud(l_arr[:, :, l_arr.shape[2] // 2])

            base = splitext(basename(label_reader.img_path))[0]
            out_path = join(out_dir_labels, base + '.png')
            blend_8bit(slice_, l_slice_, out_path)
        else:
            logging.info('No inverted label found. Skipping creation of inverted label-image overlay')
Example #12
def do_mean_transforms(pairs, stage_status_dir, reg_stage_dir, mean_dir, previous_mean_dir, avg_out):

    mean_started_dir = stage_status_dir / 'mean_started'
    mean_finished_dir = stage_status_dir / 'mean_finished'

    moving_ids = []
    for moving_vol_dir in reg_stage_dir.iterdir():
        if not moving_vol_dir.is_dir():
            continue
        moving_ids.append(moving_vol_dir.name)

        try:
            with open(mean_started_dir / moving_vol_dir.name, 'x'):
                mean_transform(moving_vol_dir, previous_mean_dir, mean_dir)
                mean_finished_file = mean_finished_dir / moving_vol_dir.name
                open(mean_finished_file, 'x').close()
        except FileExistsError:
            continue

    while True:
        # Wait for mean transforms to finish
        means_finished = [x.name for x in mean_finished_dir.iterdir()]
        means_not_finished = set(moving_ids).difference(means_finished)

        if len(means_not_finished) > 0:
            print('waiting for mean transforms')
            time.sleep(2)
        else:
            break
    # Make the average image
    avg_started_file = str(avg_out) + 'started'
    try:
        with open(avg_started_file, 'x'):
            img_paths = common.get_file_paths(mean_dir)
            avg = common.average(img_paths)
            sitk.WriteImage(avg, str(avg_out))
    except FileExistsError:
        return  # We don't have to wait for the average to finish as it's not required for the next stage
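
The coordination above leans on open(path, 'x'), which raises FileExistsError if the file already exists, so whichever process creates the marker first owns that job. A minimal sketch of the claim-or-skip pattern (paths are placeholders):

from pathlib import Path

def claim(marker: Path) -> bool:
    """Return True if this process created the marker file and so owns the job."""
    try:
        with open(marker, 'x'):
            return True
    except FileExistsError:
        return False

if claim(Path('mean_started/spec_001')):
    print('this process runs the mean transform for spec_001')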
Example #13
def invert_isosurfaces(self):
    """
    Invert a bunch of isosurfaces that were previously generated from the target labelmap.
    For this to work, the target needs to be the largest of all volumes used in registration, as padding the target
    would throw the mesh and target coordinates out of sync
    :return:
    """
    if not self.config.get('isosurface_dir'):
        logging.info('isosurface directory not in config file')
        return

    # TODO: put a check for target being the largest volume ?

    mesh_dir = self.paths['isosurface_dir']
    if not os.path.isdir(mesh_dir):
        logging.info('Mesh directory: {} not found'.format(mesh_dir))
        return

    iso_out = self.paths.make('inverted_isosurfaces')

    logging.info('mesh inversion started')
    for mesh_path in common.get_file_paths(mesh_dir):
        im = InvertMeshes(self.invert_config, mesh_path, iso_out)
        im.run()
Example #14
    def run(self):

        if self.movdir.is_file():
            moving_imgs = [self.movdir]
        else:
            moving_imgs = common.get_file_paths(
                self.movdir,
                ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR
                                ])  # This breaks if not run from the config dir

        if len(moving_imgs) < 1:
            raise common.LamaDataException("No volumes in {}".format(
                self.movdir))

        for mov in moving_imgs:
            mov_basename = mov.stem
            outdir = self.stagedir / mov_basename
            outdir.mkdir(parents=True)

            cmd = {
                'mov': str(mov),
                'fixed': str(self.fixed),
                'outdir': str(outdir),
                'elxparam_file': str(self.elxparam_file),
                'threads': self.threads
            }
            if self.fixed_mask is not None:
                cmd['fixed_mask'] = str(self.fixed_mask)

            run_elastix(cmd)

            # Rename the registered output.
            if self.rename_output:
                elx_outfile = outdir / f'result.0.{self.filetype}'
                new_out_name = outdir / f'{mov_basename}.{self.filetype}'

                try:
                    shutil.move(elx_outfile, new_out_name)
                except IOError:
                    logging.error(
                        'Cannot find elastix output. Ensure the following is not set: (WriteResultImage  "false")'
                    )
                    raise

                move_intemediate_volumes(outdir)

            # add registration metadata
            reg_metadata_path = outdir / common.INDV_REG_METADATA
            fixed_vol_relative = relpath(self.fixed, outdir)
            reg_metadata = {'fixed_vol': fixed_vol_relative}

            with open(reg_metadata_path, 'w') as fh:
                fh.write(yaml.dump(reg_metadata, default_flow_style=False))

            if self.fix_folding:
                # Remove any folds in the B-splines, overwrite in place
                tform_param_file = outdir / ELX_TRANSFORM_NAME
                unfold_bsplines(tform_param_file, tform_param_file)

                # Retransform the moving image with corrected tform file
                cmd = [
                    'transformix', '-in',
                    str(mov), '-out',
                    str(outdir), '-tp', tform_param_file
                ]
                subprocess.call(cmd)
                unfolded_moving_img = outdir / 'result.nrrd'
                new_out_name.unlink()
                shutil.move(unfolded_moving_img, new_out_name)
Example #15
def load(line_dir: Path, rev=False, title=None):
    app = QtGui.QApplication([])
    ex = Vpv()

    try:
        invert_yaml = next(
            line_dir.glob('**/inverted_transforms/propagate.yaml'))
    except StopIteration:

        try:  # Old version name of config file
            invert_yaml = next(
                line_dir.glob('**/inverted_transforms/invert.yaml'))
        except StopIteration:
            raise FileNotFoundError(
                "Cannot find 'inverted_transforms/propagate.yaml' in LAMA output directory"
            )

    with open(invert_yaml, 'r') as fh:
        invert_cfg = yaml.safe_load(fh)
        try:
            invert_order = invert_cfg['label_propagation_order']
        except KeyError:
            # Fall back to the old key name
            invert_order = invert_cfg['inversion_order']

    if not rev:
        vol_dir = next(line_dir.rglob('**/inputs'))
    else:
        vol_dir = next(line_dir.rglob('**/reg*/*rigid*'))
    lab_dir = next(line_dir.glob('**/inverted_labels'))

    vol = get_file_paths(vol_dir, ignore_folders=IGNORE)[0]

    lab = get_file_paths(lab_dir, ignore_folders=IGNORE)[0]

    ex.load_volumes([vol, lab], 'vol')

    # Vpv deals with images with the same name by appending parenthetical digits. We need to know the ids it will assign
    # if we are to get a handle once loaded
    img_ids = ex.img_ids()

    num_top_views = 3

    # Set the top row of views
    for i in range(num_top_views):

        vol_id = img_ids[0]
        # label_id = top_labels[i].stem
        label_id = img_ids[1]
        # if label_id == vol_id:
        #     label_id = f'{label_id}(1)'
        ex.views[i].layers[Layers.vol1].set_volume(vol_id)
        ex.views[i].layers[Layers.vol2].set_volume(label_id)

    if not title:
        title = line_dir.name
    ex.mainwindow.setWindowTitle(title)

    print('Finished loading')

    # Show two rows
    ex.data_manager.show2Rows(False)

    # Set orientation
    # ex.data_manager.on_orientation('sagittal')

    # Set colormap
    ex.data_manager.on_vol2_lut_changed('anatomy_labels')

    # opacity
    ex.data_manager.modify_layer(Layers.vol2, 'set_opacity', OPACITY)

    sys.exit(app.exec_())
Example #16
def job_runner(config_path: Path) -> Path:
    """
    Run the registrations specified in the config file

    Returns
    -------
    The path to the final registered images
    """

    config = LamaConfig(config_path)
    print(common.git_log())

    avg_dir = config.options['average_folder']
    avg_dir.mkdir(exist_ok=True, parents=True)

    elastix_stage_parameters = generate_elx_parameters(
        config, do_pairwise=config['pairwise_registration'])

    # Set the fixed volume up for the first stage. This will change each stage if doing a population average
    fixed_vol = config['fixed_volume']

    # Get list of specimens
    inputs_dir = config.options['inputs']
    spec_ids = [Path(x).stem for x in common.get_file_paths(inputs_dir)]

    for i, reg_stage in enumerate(config['registration_stage_params']):

        stage_id = reg_stage['stage_id']
        logging.info(stage_id)
        stage_dir = Path(config.stage_dirs[stage_id])

        # Make stage dir if not made by another instance of the script
        stage_dir.mkdir(exist_ok=True, parents=True)

        starting_avg = stage_dir / 'avg_started'
        average_done = stage_dir / "avg_done"

        while True:  # Pick up unstarted specimens. Only break when registration and average are complete

            # Check if any specimens left (It's possible the avg is being made but all specimens are registered)
            spec_stage_dirs = [
                x.name for x in stage_dir.iterdir() if x.is_dir()
            ]
            not_started = set(spec_ids).difference(spec_stage_dirs)

            next_stage = False  # No breaking out yet

            if len(not_started) > 0:
                next_spec_id = list(not_started)[
                    0]  # Some specimens left. Pick up spec_id and process

            else:  # All specimens are being processed
                next_stage = True

                #  This block controls what happens if we have all specimens registered
                while True:
                    if not check_stage_done(stage_dir):
                        print('waiting for stage to finish')
                        time.sleep(5)
                        continue

                    print('stage finished')

                    if average_done.is_file():
                        print('found average done file')
                        break  # Next stage
                    else:
                        if starting_avg.is_file():
                            print('found starting average file')
                            time.sleep(5)
                            continue
                        else:
                            try:
                                open(starting_avg, 'x')
                            except FileExistsError:
                                time.sleep(5)
                                print('cannot write avg starting file')
                                continue
                            else:
                                average_path = avg_dir / f'{stage_id}.nrrd'
                                make_avg(stage_dir, average_path,
                                         avg_dir / f'{stage_id}.log')
                                open(average_done, 'x').close()
                                print('making average')
                                break

            if next_stage:
                print('breaking stage')
                break

            # Get the input for this specimen
            if i == 0:  # The first stage
                moving = inputs_dir / f'{next_spec_id}.nrrd'
            else:
                moving = list(config.stage_dirs.values())[
                    i - 1] / next_spec_id / f'{next_spec_id}.nrrd'
                fixed_vol = avg_dir / f'{list(config.stage_dirs.keys())[i-1]}.nrrd'
            reg_method = TargetBasedRegistration

            # Make the elastix parameter file for this stage
            elxparam = elastix_stage_parameters[stage_id]
            elxparam_path = stage_dir / f'{ELX_PARAM_PREFIX}{stage_id}.txt'

            if not elxparam_path.is_file():
                with open(elxparam_path, 'w') as fh:
                    if elxparam:
                        fh.write(elxparam)

            fixed_mask = None

            logging.info(moving)

            # Do the registrations
            registrator = reg_method(elxparam_path, moving, stage_dir,
                                     config['filetype'], config['threads'],
                                     fixed_mask)

            registrator.set_target(fixed_vol)

            try:
                registrator.run()  # Do the registrations for a single stage
            except FileExistsError as e:
                # 040620: Bodge as some specimens are picked up twice.
                # Need a better way to make sure each specimen is picked up only once
                continue

            spec_done = stage_dir / next_spec_id / 'spec_done'  # The directory gets created in .run()
            open(spec_done, 'x').close()
Example #17
def batch_invert_transform_parameters(config: Union[str, LamaConfig],
                                      clobber=True,
                                      new_log: bool = False):
    """
    Create new elastix TransformParameter files that can then be used by transformix to invert labelmaps, stats etc

    Parameters
    ----------
    config
        path to original reg pipeline config file

    clobber
        if True, overwrite any inverted parameters already present

    new_log:
        Whether to create a new log file. If called from another module, logging may happen there
    """
    common.test_installation('elastix')

    if isinstance(config, (str, Path)):
        config = LamaConfig(config)

    threads = str(config['threads'])

    if new_log:
        common.init_logging(config / 'invert_transforms.log')

    reg_dirs = get_reg_dirs(config)

    # Get the image basenames from the first stage registration folder (usually rigid)
    # ignore images in non-relevant folders that may be present
    volume_names = [
        x.stem for x in common.get_file_paths(reg_dirs[0],
                                              ignore_folder=IGNORE_FOLDER)
    ]

    inv_outdir = config.mkdir('inverted_transforms')

    stages_to_invert = defaultdict(list)

    jobs: List[Dict] = []

    reg_stage_dir: Path

    for i, vol_id in enumerate(volume_names):

        label_replacements = {
            'FinalBSplineInterpolationOrder': '0',
            'FixedInternalImagePixelType': 'short',
            'MovingInternalImagePixelType': 'short',
            'ResultImagePixelType': 'unsigned char',
            'WriteTransformParametersEachResolution': 'false',
            'WriteResultImageAfterEachResolution': 'false'
        }

        image_replacements = {
            'FinalBSplineInterpolationOrder': '3',
            'FixedInternalImagePixelType': 'float',
            'MovingInternalImagePixelType': 'float',
            'ResultImagePixelType': 'float',
            'WriteTransformParametersEachResolution': 'false',
            'WriteResultImageAfterEachResolution': 'false'
        }

        for reg_stage_dir in reg_dirs:

            if not reg_stage_dir.is_dir():
                logging.error('cannot find {}'.format(reg_stage_dir))
                raise FileNotFoundError(
                    f'Cannot find registration dir {reg_stage_dir}')

            inv_stage_dir = inv_outdir / reg_stage_dir.name

            specimen_stage_reg_dir = reg_stage_dir / vol_id
            specimen_stage_inversion_dir = inv_stage_dir / vol_id

            transform_file = common.getfile_startswith(specimen_stage_reg_dir,
                                                       ELX_TRANSFORM_PREFIX)
            parameter_file = common.getfile_startswith(reg_stage_dir,
                                                       ELX_PARAM_PREFIX)

            # Create the folder to put the specimen inversion parameter files in.
            inv_stage_dir.mkdir(exist_ok=True)

            # Add the stage to the inversion order config (in reverse order), if not already.
            if reg_stage_dir.name not in stages_to_invert['inversion_order']:
                stages_to_invert['inversion_order'].insert(
                    0, reg_stage_dir.name)

            if clobber:
                common.mkdir_force(
                    specimen_stage_inversion_dir
                )  # Overwrite any inversion file that exist for a single specimen

            # Each registration directory contains a metadata file, which contains the relative path to the fixed volume
            reg_metadata = cfg_load(specimen_stage_reg_dir /
                                    common.INDV_REG_METADATA)
            fixed_volume = (specimen_stage_reg_dir /
                            reg_metadata['fixed_vol']).resolve()

            # Invert the Transform parameters with options for normal image inversion

            job = {
                'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
                'parameter_file': abspath(parameter_file),
                'transform_file': transform_file,
                'fixed_volume': fixed_volume,
                'param_file_output_name': 'inversion_parameters.txt',
                'image_replacements': image_replacements,
                'label_replacements': label_replacements,
                'image_transform_file': IMAGE_INVERTED_TRANSFORM,
                'label_transform_file': LABEL_INVERTED_TRANFORM,
                'clobber': clobber,
                'threads': threads
            }

            jobs.append(job)

    # Run the inversion jobs. Currently using only one thread as it seems that elastix now uses multiple threads on the
    # inversions

    logging.info('inverting with {} threads: '.format(threads))
    pool = Pool(
        1
    )  # 17/09/18 If we can get multithreaded inversion in elastix 4.9 we can remove the python multithreading
    try:
        pool.map(_invert_transform_parameters, jobs)

    except KeyboardInterrupt:
        print('terminating inversion')
        pool.terminate()
        pool.join()

    # TODO: Should we replace the need for this invert.yaml?
    reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
    stages_to_invert['registration_directory'] = str(
        reg_dir)  # Doc why we need this
    # Create a yaml config file so that inversions can be run separately
    invert_config = config['inverted_transforms'] / INVERT_CONFIG

    with open(invert_config, 'w') as yf:
        yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
Example #18
                vol -= int(np.round(mean_difference))


if __name__ == '__main__':

    import argparse
    raise SystemExit('This CLI interface needs updating')

    parser = argparse.ArgumentParser()
    parser.add_argument('-w', dest='wt', help='wt dir', required=True)
    parser.add_argument('-m', dest='mut', help='mut dir', required=True)
    parser.add_argument('-o', dest='output', help='output dir', required=True)
    parser.add_argument('-s',
                        dest='starts',
                        help='start indices (x, y, z)',
                        required=True,
                        nargs=3,
                        type=int)
    parser.add_argument('-e',
                        dest='ends',
                        help='end indices (x, y, z)',
                        required=True,
                        nargs=3,
                        type=int)

    args = parser.parse_args()
    wt = common.get_file_paths(args.wt)

    mut = common.get_file_paths(args.mut)
    normalise(wt, mut, args.elx_points, args.starts, args.ends)
Example #19
def batch_invert_transform_parameters(config: Union[Path, LamaConfig],
                                      clobber=True, new_log:bool=False):
    """
    Create new elastix TransformParameter files that can then be used by transformix to invert labelmaps, stats etc

    Parameters
    ----------
    config
        path to original reg pipeline config file

    clobber
        if True, overwrite any inverted parameters already present

    new_log:
        Whether to create a new log file. If called from another module, logging may happen there
    """
    common.test_installation('elastix')

    if isinstance(config, (Path, str)):
        config = LamaConfig(config)

    threads = str(config['threads'])

    if new_log:
        common.init_logging(config / 'invert_transforms.log')

    reg_dirs = get_reg_dirs(config)

    # Get the image basenames from the first stage registration folder (usually rigid)
    # ignore images in non-relevant folders that may be present
    volume_names = [x.stem for x in common.get_file_paths(reg_dirs[0], ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR])]

    inv_outdir = config.mkdir('inverted_transforms')

    stages_to_invert = defaultdict(list)

    jobs: List[Dict] = []

    reg_stage_dir: Path

    for i, vol_id in enumerate(volume_names):

        for reg_stage_dir in reg_dirs:

            if not reg_stage_dir.is_dir():
                logging.error('cannot find {}'.format(reg_stage_dir))
                raise FileNotFoundError(f'Cannot find registration dir {reg_stage_dir}')

            inv_stage_dir = inv_outdir / reg_stage_dir.name

            specimen_stage_reg_dir = reg_stage_dir / vol_id
            specimen_stage_inversion_dir = inv_stage_dir / vol_id

            transform_file = common.getfile_startswith(specimen_stage_reg_dir, ELX_TRANSFORM_NAME)
            parameter_file = common.getfile_startswith(reg_stage_dir, ELX_PARAM_PREFIX)

            # Create the folder to put the specimen inversion parameter files in.
            inv_stage_dir.mkdir(exist_ok=True)

            # Add the stage to the inversion order config (in reverse order), if not already.
            if reg_stage_dir.name not in stages_to_invert['label_propagation_order']:
                stages_to_invert['label_propagation_order'].insert(0, reg_stage_dir.name)

            if clobber:
                common.mkdir_force(specimen_stage_inversion_dir)  # Overwrite any inversion file that exist for a single specimen

            # Each registration directory contains a metadata file, which contains the relative path to the fixed volume
            reg_metadata = cfg_load(specimen_stage_reg_dir / common.INDV_REG_METADATA)
            fixed_volume = (specimen_stage_reg_dir / reg_metadata['fixed_vol']).resolve()

            # Invert the Transform parameters with options for normal image inversion

            job = {
                'specimen_stage_inversion_dir': specimen_stage_inversion_dir,
                'parameter_file': abspath(parameter_file),
                'transform_file': transform_file,
                'fixed_volume': fixed_volume,
                'param_file_output_name': 'inversion_parameters.txt',
                'image_replacements': IMAGE_REPLACEMENTS,
                'label_replacements': LABEL_REPLACEMENTS,
                'image_transform_file': PROPAGATE_IMAGE_TRANSFORM,
                'label_transform_file': PROPAGATE_LABEL_TRANFORM,
                'clobber': clobber,
                'threads': threads
            }

            jobs.append(job)

    # By running the inversion jobs (a single job per registration stage) in a pool we can speed things up a bit
    # If we can get multithreaded inversion in elastix we can remove this python multithreading
    pool = Pool(8)
    try:
        pool.map(_invert_transform_parameters, jobs)

    except KeyboardInterrupt:
        print('terminating inversion')
        pool.terminate()
        pool.join()

    # TODO: Should we replace the need for this invert.yaml?
    reg_dir = Path(os.path.relpath(reg_stage_dir, inv_outdir))
    stages_to_invert['registration_directory'] = str(reg_dir)  # Doc why we need this
    # Create a yaml config file so that inversions can be run separately
    invert_config = config['inverted_transforms'] / PROPAGATE_CONFIG

    with open(invert_config, 'w') as yf:
        yf.write(yaml.dump(dict(stages_to_invert), default_flow_style=False))
Example #20
def pad_volumes(indirs: Iterable[Path],
                max_dims: Tuple,
                outdir: Path,
                clobber: bool,
                filetype: str = 'nrrd'):
    """
    Pad volumes, masks, labels. Output files will have the same name as the original, but be in a new output folder

    Parameters
    ----------
    indirs
        one or more directories containing volumes to pad (Will search subdirectories for volumes)
    max_dims
        dimensions to pad to (z, y, x)
    outdir
        path to output dir
    clobber
        if True, overwrite the input volumes in place instead of writing to outdir
    """

    if clobber and outdir:
        print('Specify either --clobber or an output dir (-o), not both')
        return
    if not clobber and not outdir:
        print('Specify either --clobber or an output dir (-o)')
        return

    if not max_dims:
        max_dims = get_largest_dimensions(indirs)

    print(f'Zero padding to {max_dims}')


    for dir_ in indirs:
        dir_ = Path(dir_)

        if clobber:
            result_dir = dir_
        else:
            result_dir = outdir / dir_.name
            result_dir.mkdir(exist_ok=True, parents=True)

        volpaths = common.get_file_paths(dir_)

        # print('Padding to {} - {} volumes/masks:'.format(str(max_dims), str(len(volpaths))))
        # pad_info = Dict()

        for path in volpaths:

            if clobber:
                outpath = path
            else:
                outpath = result_dir / path.name

            loader = common.LoadImage(path)
            vol = loader.img
            if not vol:
                logging.error('error loading image for padding: {}'.format(
                    loader.error_msg))
                sys.exit()
            vol_dims = vol.GetSize()

            # The voxel differences between the vol dims and the max dims
            diffs = [m - v for m, v in zip(max_dims, vol_dims)]

            # How many pixels to add to the upper bounds of each dimension, divide by two and round down to nearest int
            upper_extend = [d // 2 for d in diffs]

            # In case of differences that cannot be divided evenly by 2, get the remainder to add to the lower bound
            remainders = [d % 2 for d in diffs]

            # Add the remainders to the upper bound extension to get the lower bound extension
            lower_extend = [u + r for u, r in zip(upper_extend, remainders)]

            # if any values are negative, stop. We need all volumes to be the same size
            for ex_val in zip(lower_extend, upper_extend):

                if ex_val[0] < 0 or ex_val[1] < 0:
                    msg = (
                        "\ncan't pad images\n"
                        "{} is larger than the specified volume size\n"
                        "Current vol size:{},\n"
                        "Max vol size: {}"
                        "\nCheck the 'pad_dims' in the config file\n".format(
                            basename(path), str(vol_dims), str(max_dims)))

                    logging.error(msg)
                    raise common.LamaDataException(msg)

            # Pad the volume. New pixels set to zero
            padded_vol = sitk.ConstantPad(vol, upper_extend, lower_extend, 0)
            padded_vol.SetOrigin((0, 0, 0))
            padded_vol.SetSpacing((1, 1, 1))

            sitk.WriteImage(padded_vol, str(outpath), True)
            # pad_info['data'][input_basename]['pad'] = [upper_extend, lower_extend]
    print('Finished padding')
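
A worked sketch of the padding split above: the per-axis difference is floor-halved for the upper bound and the remainder is added to the lower bound, so an odd difference pads one extra voxel on the lower side.

max_dims = (310, 200, 405)                                       # hypothetical target size
vol_dims = (300, 200, 400)                                       # hypothetical volume size

diffs = [m - v for m, v in zip(max_dims, vol_dims)]              # [10, 0, 5]
upper_extend = [d // 2 for d in diffs]                           # [5, 0, 2]
lower_extend = [u + d % 2 for u, d in zip(upper_extend, diffs)]  # [5, 0, 3]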
Example #21
def _overlay_labels(first_stage_reg_dir: Path,
                    inverted_labeldir: Path,
                    out_dir_labels: Path,
                    mask: Path = None):
    """
    Overlay the first registered image (rigid) with the corresponding inverted labels
    It depends on the registered volumes and inverted label maps being named identically
    """
    if mask:
        mask = sitk.GetArrayFromImage(sitk.ReadImage(str(mask)))
        rp = regionprops(mask)
        # Get the largest label. Likely only one from the mask
        mask_props = list(reversed(sorted(rp, key=lambda x: x.area)))[0]
        bbox = mask_props['bbox']

    for vol_path in common.get_file_paths(
            first_stage_reg_dir,
            ignore_folders=[RESOLUTION_IMGS_DIR, IMG_PYRAMID_DIR]):

        vol_reader = common.LoadImage(vol_path)

        if not vol_reader:
            logging.error(f'cannot create qc image from {vol_path}')
            return

        label_path = inverted_labeldir / vol_path.stem / vol_path.name

        if label_path.is_file():
            label_reader = common.LoadImage(label_path)

            if not label_reader:
                logging.error(
                    f'cannot create qc image from label file {label_path}')
                return

            cast_img = sitk.Cast(sitk.RescaleIntensity(vol_reader.img),
                                 sitk.sitkUInt8)
            arr = sitk.GetArrayFromImage(cast_img)
            base = splitext(basename(label_reader.img_path))[0]
            l_arr = label_reader.array

            def sag(idx_):
                slice_sag = np.flipud(arr[:, :, idx_])
                l_slice_sag = np.flipud(l_arr[:, :, idx_])
                sag_dir = out_dir_labels / 'sagittal'
                sag_dir.mkdir(exist_ok=True)
                out_path_sag = sag_dir / f'{base}_{idx_}.png'
                _blend_8bit(slice_sag, l_slice_sag, out_path_sag)

            if mask is None:  # get a few slices from middle
                sag_indxs = np.linspace(0, arr.shape[2], 8, dtype=int)[2:-2]
            else:
                sag_start = bbox[2]
                sag_end = bbox[5]
                sag_indxs = np.linspace(
                    sag_start, sag_end, 6,
                    dtype=int)[1:-1]  # Take the 4 inner slices
            for idx in sag_indxs:
                sag(idx)

            def ax(idx_):
                slice_ax = arr[idx_, :, :]
                l_slice_ax = l_arr[idx_, :, :]
                ax_dir = out_dir_labels / 'axial'
                ax_dir.mkdir(exist_ok=True)
                out_path_ax = ax_dir / f'{base}_{idx_}.png'
                _blend_8bit(slice_ax, l_slice_ax, out_path_ax)

            if mask is None:  # get a few slices from middle
                ax_indxs = np.linspace(0, arr.shape[0], 8, dtype=int)[2:-2]
            else:
                ax_start = bbox[0]
                ax_end = bbox[3]
                ax_indxs = np.linspace(ax_start, ax_end, 6, dtype=int)[1:-1]
            for idx in ax_indxs:
                ax(idx)

            def cor(idx_):
                slice_cor = np.flipud(arr[:, idx_, :])
                l_slice_cor = np.flipud(l_arr[:, idx_, :])
                cor_dir = out_dir_labels / 'coronal'
                cor_dir.mkdir(exist_ok=True)
                out_path_cor = cor_dir / f'{base}_{idx_}.png'
                _blend_8bit(slice_cor, l_slice_cor, out_path_cor)

            if mask is None:  # get a few slices from middle
                cor_indxs = np.linspace(0, arr.shape[1], 8, dtype=int)[2:-2]
            else:
                cor_start = bbox[1]
                cor_end = bbox[4]
                cor_indxs = np.linspace(cor_start, cor_end, 6,
                                        dtype=int)[1:-1]
            for idx in cor_indxs:
                cor(idx)

        else:
            logging.info(
                'No inverted label found. Skipping creation of inverted label-image overlay'
            )
Example #22
    def run(self):

        # If movdir is a directory, get all volume paths from it; otherwise treat it as a file listing the inputs
        if isdir(self.movdir):
            movlist = common.get_file_paths(self.movdir)
        else:
            movlist = common.get_inputs_from_file_list(self.movdir,
                                                       self.config_dir)

        if len(movlist) < 1:
            raise common.LamaDataException("No volumes in {}".format(
                self.movdir))

        for fixed in movlist:  # Todo: change variable name fixed to moving
            tp_file_paths = defaultdict(list)
            full_tp_file_paths = []
            fixed_basename = splitext(basename(fixed))[0]
            fixed_dir = self.paths.make(join(self.stagedir, fixed_basename),
                                        'f')

            for moving in movlist:
                if basename(fixed) == basename(moving):
                    continue
                moving_basename = splitext(basename(moving))[0]
                outdir = join(fixed_dir, moving_basename)
                common.mkdir_force(outdir)

                run_elastix({
                    'mov': moving,
                    'fixed': fixed,
                    'outdir': outdir,
                    'elxparam_file': self.elxparam_file,
                    'threads': self.threads
                })
                # Get the resolution tforms
                tforms = list(
                    sorted([
                        x for x in os.listdir(outdir)
                        if x.startswith(REOLSUTION_TP_PREFIX)
                    ]))
                # get the full tform that spans all resolutions
                full_tp_file_paths.append(join(outdir, FULL_STAGE_TP_FILENAME))

                # Add the tforms to a resolution-specific list so we can generate deformations from any range
                # of deformations later
                for i, tform in enumerate(tforms):
                    tp_file_paths[i].append(join(outdir, tform))

                # add registration metadata
                reg_metadata_path = join(outdir, common.INDV_REG_METADATA)
                fixed_vol_relative = relpath(fixed, outdir)
                reg_metadata = {'fixed_vol': fixed_vol_relative}
                with open(reg_metadata_path, 'w') as fh:
                    fh.write(yaml.dump(reg_metadata, default_flow_style=False))

            for i, files_ in tp_file_paths.items():
                mean_tform_name = "{}{}.txt".format(REOLSUTION_TP_PREFIX, i)
                self.generate_mean_tranform(files_, fixed, fixed_dir,
                                            mean_tform_name, self.filetype)
            self.generate_mean_tranform(full_tp_file_paths, fixed, fixed_dir,
                                        FULL_STAGE_TP_FILENAME, self.filetype)