Example #1
    def display(self, title=None, dir_output=None):
        pd.set_option('display.width', 1000)
        N_trafos, dof = self._transform_params.shape
        if dof == 6:
            params = self._get_scaled_params(self._transform_params)
            cols = self._labels_long[dof]
        else:
            params = self._transform_params
            cols = ["a%d" % (d + 1) for d in range(0, dof)]

        # Append mean and standard deviation rows to match the "Mean"/"Std"
        # row labels added below
        params = np.concatenate((params,
                                 np.mean(params, axis=0).reshape(1, -1),
                                 np.std(params, axis=0).reshape(1, -1)))

        rows = ["Trafo %d" % (d + 1) for d in range(0, N_trafos)]
        rows.append("Mean")
        rows.append("Std")

        df = pd.DataFrame(params, rows, cols)
        print(df)

        if dir_output is not None:
            title = self._replace_string(title)
            filename = "%s.csv" % title
            ph.create_directory(dir_output)
            df.to_csv(os.path.join(dir_output, filename))
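A small hedged sketch: when dir_output is given, the table printed above is also written as a CSV file and can be reloaded with pandas (the path below is a placeholder).

import pandas as pd

# index_col=0 restores the "Trafo ..."/"Mean"/"Std" row labels
df = pd.read_csv("output/title.csv", index_col=0)
print(df)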
Example #2
    def reveal_anonymized_files(self, directory):
        ph.create_directory(directory)

        filenames_revealed = []
        for i in range(0, len(self._filenames)):
            basename_anonymized = os.path.basename(self._filenames[i])
            filename_anonymized = ph.strip_filename_extension(
                basename_anonymized)[0]
            try:
                basename_revealed = self._dictionary[basename_anonymized]
            except KeyError:
                raise IOError(
                    "Dictionary does not match given (anonymized) filenames")
            filename_revealed = "%s_%s" % (filename_anonymized,
                                           basename_revealed)

            path_to_file_reve = os.path.join(directory, filename_revealed)

            cmd = "cp -p "
            cmd += self._filenames[i] + " "
            cmd += path_to_file_reve + " "
            # print(cmd)
            ph.execute_command(cmd)

            filenames_revealed.append(filename_revealed)
        return filenames_revealed
Example #3
def split_labels(path_to_labels, dimension, path_to_output):
    if dimension == 4:
        labels_nib = nib.load(path_to_labels)
        # read the image data via dataobj (get_data() was removed in nibabel 5)
        nda = np.asanyarray(labels_nib.dataobj).astype(np.uint8)
    else:
        labels_sitk = sitk.ReadImage(path_to_labels)
        nda = sitk.GetArrayFromImage(labels_sitk).astype(np.uint8)

    # split labels into separate components
    n_labels = nda.max()
    shape = nda.shape + (n_labels, )
    nda_4d = np.zeros((shape), dtype=np.uint8)
    for label in range(n_labels):
        indices = np.where(nda == label + 1)
        indices += (label * np.ones(len(indices[0]), dtype=np.uint8), )
        nda_4d[indices] = 1

    if dimension == 4:
        labels_4d_nib = nib.Nifti1Image(nda_4d,
                                        affine=labels_nib.affine,
                                        header=labels_nib.header)
        labels_4d_nib.set_data_dtype(np.uint8)
        ph.create_directory(os.path.dirname(path_to_output))
        nib.save(labels_4d_nib, path_to_output)
    else:
        labels_5d_sitk = sitk.GetImageFromArray(nda_4d)
        labels_5d_sitk.SetOrigin(labels_sitk.GetOrigin())
        labels_5d_sitk.SetSpacing(labels_sitk.GetSpacing())
        labels_5d_sitk.SetDirection(labels_sitk.GetDirection())
        sitkh.write_nifti_image_sitk(labels_5d_sitk, path_to_output)
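A minimal usage sketch for split_labels, assuming the nibabel branch (dimension=4); all paths are placeholders.

# Split a 3D label map with values 1..N into a 4D stack of binary masks
split_labels(
    path_to_labels="labels.nii.gz",        # placeholder input
    dimension=4,                           # selects the nibabel code path
    path_to_output="labels_split.nii.gz",  # placeholder output
)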
Example #4
    def create_video(self, path_to_video, dir_input_slices, fps=1):

        dir_output_video = os.path.dirname(path_to_video)
        filename = os.path.basename(path_to_video).split(".")[0]
        path_to_slices = "%s*.png" % os.path.join(dir_input_slices, filename)
        path_to_video = os.path.join(dir_output_video, "%s.mp4" % filename)

        path_to_video_tmp = os.path.join(
            dir_output_video, "%s_tmp.mp4" % filename)

        # Check that the folder containing the slices exists
        if not ph.directory_exists(dir_input_slices):
            raise IOError("Folder '%s' meant to contain exported slices does "
                          "not exist" % dir_input_slices)

        # Create output folder for video
        ph.create_directory(dir_output_video)

        # ---------------Create temp video from exported slices----------------
        cmd_args = []
        cmd_args.append("-monitor")
        cmd_args.append("-delay %d" % (100. / fps))

        cmd_exe = "convert"

        cmd = "%s %s %s %s" % (
            cmd_exe, (" ").join(cmd_args), path_to_slices, path_to_video_tmp)
        flag = ph.execute_command(cmd)
        if flag != 0:
            raise RuntimeError("Unable to create video from slices")

        # ----------------------Use more common codec (?)----------------------
        cmd_args = []
        # overwrite possibly existing image
        cmd_args.append("-y")
        # Define input video to be converted
        cmd_args.append("-i %s" % path_to_video_tmp)
        # Use H.264 codec for video compression of MP4 file
        cmd_args.append("-vcodec libx264")
        # Define used pixel format
        cmd_args.append("-pix_fmt yuv420p")
        # Avoid error message associated to odd rows
        # (https://stackoverflow.com/questions/20847674/ffmpeg-libx264-height-not-divisible-by-2)
        cmd_args.append("-vf 'scale=trunc(iw/2)*2:trunc(ih/2)*2'")
        cmd = "ffmpeg %s %s" % ((" ").join(cmd_args), path_to_video)
        ph.execute_command(cmd)

        # Delete temp video
        os.remove(path_to_video_tmp)
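A hedged usage sketch, assuming exporter is an instance of the (unnamed) class defining create_video above and that the PNG slices were already exported; paths are placeholders. ImageMagick's convert and ffmpeg must be available on the PATH.

# exporter: instance of the class that defines create_video (assumed)
exporter.create_video(
    path_to_video="/tmp/video/brain.mp4",  # final H.264-encoded MP4
    dir_input_slices="/tmp/video/slices",  # folder holding <filename>*.png
    fps=5,                                 # five slices per second
)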
Example #5
    def write_transforms_sitk(self,
                              directory,
                              prefix_filename="Euler3dTransform_"):
        ph.create_directory(directory)
        for i, transform in enumerate(self._transforms_sitk):
            path_to_file = os.path.join(
                directory, "%s%d.tfm" % (prefix_filename, i))
            sitk.WriteTransform(transform, path_to_file)
            if self._verbose:
                ph.print_info("Transform written to %s" % path_to_file)
Example #6
    def write(
        self,
        directory,
        filename=None,
        write_slice=True,
        write_transform=True,
        suffix_mask="_mask",
        prefix_slice="_slice",
        write_transforms_history=False,
    ):

        # Create directory if not existing
        ph.create_directory(directory)

        # Construct filename
        if filename is None:
            filename_out = self._filename + \
                prefix_slice + str(self._slice_number)
        else:
            filename_out = filename + prefix_slice + str(self._slice_number)

        full_file_name = os.path.join(directory, filename_out)

        # Write slice and affine transform
        if write_slice:
            dw.DataWriter.write_image(self.sitk,
                                      "%s.nii.gz" % full_file_name,
                                      verbose=False)

            # Write mask to specified location if given
            if self.sitk_mask is not None:
                nda = sitk.GetArrayFromImage(self.sitk_mask)

                # Write mask if it does not consist of only ones
                if not np.all(nda):
                    dw.DataWriter.write_mask(
                        self.sitk_mask,
                        "%s%s.nii.gz" % (full_file_name, suffix_mask),
                        verbose=False,
                    )

        if write_transform:
            sitk.WriteTransform(
                self.get_motion_correction_transform(),
                full_file_name + ".tfm")

        if write_transforms_history:
            dir_output = os.path.join(directory, "motion_correction_history")
            ph.create_directory(dir_output)
            registration_transforms = self.get_registration_history()[1]
            for i, transform_sitk in enumerate(registration_transforms):
                path_to_transform = os.path.join(
                    dir_output, "%s_%d.tfm" % (filename_out, i))
                sitk.WriteTransform(transform_sitk, path_to_transform)
Example #7
    def write_transform(transform_sitk, path_to_file, verbose=0):

        extension = ph.strip_filename_extension(path_to_file)[1]
        if extension not in ALLOWED_TRANSFORMS and \
                extension not in ALLOWED_TRANSFORMS_DISPLACEMENTS:
            raise IOError("Transform file extension must be of type "
                          "%s (transformation) or %s (displacements)" %
                          (", ".join(ALLOWED_TRANSFORMS),
                           ", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS)))

        if extension in ALLOWED_TRANSFORMS:
            if isinstance(transform_sitk, sitk.Image):
                raise IOError("Cannot convert displacement field (%s) to "
                              "transform (%s)" % (
                                  ", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
                                  ", ".join(ALLOWED_TRANSFORMS),
                              ))

            if isinstance(transform_sitk, sitk.Transform):
                ph.create_directory(os.path.dirname(path_to_file))
                sitk.WriteTransform(transform_sitk, path_to_file)
                if verbose:
                    ph.print_info("Transform written to '%s'" % path_to_file)
            elif isinstance(transform_sitk, np.ndarray):
                ph.write_array_to_file(path_to_file,
                                       transform_sitk,
                                       delimiter=" ",
                                       access_mode="w",
                                       verbose=verbose)
            else:
                raise IOError("Transform must be of type "
                              "sitk.Transform or np.ndarray")
        else:
            if isinstance(transform_sitk, sitk.Transform):
                raise IOError("Cannot convert transform (%s) to "
                              "displacement field (%s)" % (
                                  ", ".join(ALLOWED_TRANSFORMS),
                                  ", ".join(ALLOWED_TRANSFORMS_DISPLACEMENTS),
                              ))
            elif isinstance(transform_sitk, sitk.Image):
                sitkh.write_nifti_image_sitk(image_sitk=transform_sitk,
                                             path_to_file=path_to_file,
                                             verbose=verbose)
            elif isinstance(transform_sitk, nib.nifti1.Nifti1Image):
                ph.create_directory(os.path.dirname(path_to_file))
                nib.save(transform_sitk, path_to_file)
            else:
                raise IOError("Transform must be of type "
                              "sitk.Image or nibabel.nifti1.Nifti1Image")
Example #8
    def write_data(self):

        if self._filename is None:
            raise ValueError("Filename is not set")

        ph.create_directory(os.path.dirname(self._filename))
        header_update = DataWriter._get_header_update(
            description=self._description)

        info = "Write image to '%s'" % self._filename
        vector_image_sitk = sitkh.get_sitk_vector_image_from_components(
            [stack.sitk for stack in self._stacks])
        if self._compress:
            if not "integer" in vector_image_sitk.GetPixelIDTypeAsString():
                vector_image_sitk = sitk.Cast(vector_image_sitk,
                                              sitk.sitkVectorFloat32)
                info += " (float32)"

        ph.print_info("%s ... " % info, newline=False)
        sitkh.write_sitk_vector_image(
            vector_image_sitk,
            self._filename,
            verbose=False,
            header_update=header_update,
        )
        print("done")

        if self._write_mask:
            info = "Write image mask to '%s'" % self._filename
            filename_split = (self._filename).split(".")
            filename = filename_split[0]
            filename += self._suffix_mask + "." + \
                (".").join(filename_split[1:])
            vector_image_sitk = sitkh.get_sitk_vector_image_from_components(
                [stack.sitk_mask for stack in self._stacks])

            if self._compress:
                vector_image_sitk = sitk.Cast(vector_image_sitk,
                                              sitk.sitkVectorUInt8)
                info += " (uint8)"
            ph.print_info("%s ... " % info, newline=False)
            sitkh.write_sitk_vector_image(
                vector_image_sitk,
                filename,
                verbose=False,
                header_update=header_update,
            )
            print("done")
Example #9
    def write_data(self):

        if self._filename is None:
            raise ValueError("Filename is not set")

        ph.create_directory(os.path.dirname(self._filename))

        vector_image_sitk = sitkh.get_sitk_vector_image_from_components(
            [stack.sitk for stack in self._stacks])
        sitkh.write_sitk_vector_image(vector_image_sitk, self._filename)

        if self._write_mask:
            filename_split = (self._filename).split(".")
            filename = filename_split[0]
            filename += self._suffix_mask + "." + \
                (".").join(filename_split[1:])
            vector_image_sitk = sitkh.get_sitk_vector_image_from_components(
                [stack.sitk_mask for stack in self._stacks])
            sitkh.write_sitk_vector_image(vector_image_sitk, filename)
Example #10
    def __init__(
            self,
            fixed=None,
            moving=None,
            cov=None,
            use_fixed_mask=False,
            use_moving_mask=False,
            registration_type="Rigid",
            interpolator="Linear",
            metric="Correlation",
            scales_estimator="PhysicalShift",
            use_multiresolution_framework=False,
            use_verbose=False,
            ANTSradius=20,
            dir_tmp=os.path.join(DIR_TMP, "CppItkRegistration"),
    ):

        SimpleItkRegistration.__init__(
            self,
            fixed=fixed,
            moving=moving,
            use_fixed_mask=use_fixed_mask,
            use_moving_mask=use_moving_mask,
            registration_type=registration_type,
            interpolator=interpolator,
            metric=metric,
            metric_params=None,
            optimizer=None,
            optimizer_params=None,
            scales_estimator=scales_estimator,
            initializer_type=None,
            use_oriented_psf=None,
            use_multiresolution_framework=use_multiresolution_framework,
            shrink_factors=None,
            smoothing_sigmas=None,
            use_verbose=use_verbose,
        )
        self._cov = cov
        self._REGISTRATION_TYPES = ["Rigid", "Affine", "InplaneSimilarity"]
        self._INITIALIZER_TYPES = [None]
        self._SCALES_ESTIMATORS = ["IndexShift", "PhysicalShift", "Jacobian"]

        self._ANTSradius = ANTSradius

        self._use_verbose = use_verbose

        # Temporary output where files are written in order to use ITK from the
        # commandline
        self._dir_tmp = ph.create_directory(dir_tmp, delete_files=False)

        self._run_registration_ = {
            "Rigid": self._run_registration_rigid_affine,
            "Affine": self._run_registration_rigid_affine,
            "InplaneSimilarity": self._run_registration_inplane_similarity_3D
        }
Example #11
    def convert_flirt_to_sitk_transform(
        path_to_flirt_mat,
        path_to_fixed,
        path_to_moving,
        path_to_sitk_transform,
        verbose=0,
    ):

        ph.create_directory(os.path.dirname(path_to_sitk_transform))

        c3d = nipype.interfaces.c3.C3dAffineTool()
        c3d.inputs.reference_file = path_to_fixed
        c3d.inputs.source_file = path_to_moving
        c3d.inputs.transform_file = path_to_flirt_mat
        c3d.inputs.fsl2ras = True
        c3d.inputs.itk_transform = path_to_sitk_transform

        if verbose:
            ph.print_execution(c3d.cmdline)
        c3d.run()
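A hedged usage sketch of the converter above, assuming it is exposed as a static method or module-level function (all paths are placeholders); the nipype C3dAffineTool interface wraps c3d_affine_tool, so Convert3D needs to be installed.

convert_flirt_to_sitk_transform(
    path_to_flirt_mat="flirt.mat",           # FLIRT output matrix
    path_to_fixed="fixed.nii.gz",            # reference image used by FLIRT
    path_to_moving="moving.nii.gz",          # input image used by FLIRT
    path_to_sitk_transform="transform.tfm",  # resulting ITK transform
    verbose=1,
)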
Example #12
    def convert_sitk_to_flirt_transform(
        path_to_sitk_transform,
        path_to_fixed,
        path_to_moving,
        path_to_flirt_mat,
        verbose=0,
    ):

        ph.create_directory(os.path.dirname(path_to_flirt_mat))

        c3d = nipype.interfaces.c3.C3dAffineTool()
        c3d.inputs.reference_file = path_to_fixed
        c3d.inputs.source_file = path_to_moving

        # position of -ras2fsl matters!!
        c3d.inputs.args = "-itk %s -ras2fsl -o %s" % (path_to_sitk_transform,
                                                      path_to_flirt_mat)
        if verbose:
            ph.print_execution(c3d.cmdline)
        c3d.run()
Example #13
    def write_dictionary(self,
                         path_to_file,
                         filename_backup=None,
                         verbose=False):

        directory = os.path.dirname((path_to_file))
        filename, ext = ph.strip_filename_extension(
            os.path.basename(path_to_file))
        ph.create_directory(directory)

        # Write backup file (human readable)
        if filename_backup is None:
            path_to_file_backup = os.path.join(
                directory, "%s_backup_human_readable.txt" % filename)
        else:
            path_to_file_backup = os.path.join(directory, filename_backup)

        # Save randomized dictionary
        with open(path_to_file, 'wb') as f:
            cPickle.dump(self._dictionary, f,
                         protocol=cPickle.HIGHEST_PROTOCOL)

        date = ph.get_current_date()
        time = ph.get_current_time()

        # Write backup in alphabetical order of the (anonymized) keys
        keys = sorted(self._dictionary.keys())
        with open(path_to_file_backup, "w") as file_handle:
            file_handle.write(
                "## Randomized Dictionary " + date + " " + time + "\n")
            for key in keys:
                file_handle.write(key + " : " + self._dictionary[key] + "\n")
                if verbose:
                    print("\t%s : %s" % (key, self._dictionary[key]))

        ph.print_info("Anonymization dictionary written to '%s'" %
                      path_to_file)
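The pickled dictionary written above can be restored later; a minimal sketch assuming Python 3, where cPickle is available as pickle (placeholder path).

import pickle

with open("dictionary.pkl", "rb") as f:  # placeholder path
    dictionary = pickle.load(f)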
Example #14
    def anonymize_files(self, dir_output):
        ph.create_directory(dir_output)

        filenames_in = [os.path.basename(f) for f in self._filenames]

        for i in range(0, len(self._filenames)):
            filename_anonymized = self._identifiers[i]
            filename_original = self._dictionary[self._identifiers[i]]
            try:
                index = filenames_in.index(filename_original)
            except ValueError:
                raise IOError(
                    "Given filenames (--filenames) do not match the ones given in the dictionary"
                )

            path_to_file_anon = os.path.join(dir_output, filename_anonymized)

            cmd = "cp -p "
            cmd += self._filenames[index] + " "
            cmd += path_to_file_anon + " "
            # print(cmd)
            ph.execute_command(cmd)
Example #15
    def export_slices(self, dir_output, filename, begin=None, end=None):
        ph.create_directory(dir_output)

        if begin is None:
            begin = 0
        if end is None:
            end = self._nda.shape[self._axis]

        # Write each slice individually
        for k in range(begin, end):
            if self._axis == 0:
                nda_2d = self._nda[k, :, :]
            elif self._axis == 1:
                nda_2d = self._nda[:, k, :]
            else:
                nda_2d = self._nda[:, :, k]
            # nda_2d = np.swapaxes(nda_2d, 0, 1)
            nda_2d = nda_2d[::-1, :]

            path_to_image = os.path.join(
                dir_output, self._get_filename_slice(filename, k + 1))
            ph.write_image(nda_2d, path_to_image, verbose=True)
Example #16
    def _run(self):

        # Create and delete all possibly existing files in the directory
        ph.create_directory(self._dir_tmp, delete_files=True)

        sitkh.write_nifti_image_sitk(self._fixed_sitk, self._fixed_str)
        sitkh.write_nifti_image_sitk(self._moving_sitk, self._moving_str)

        if self._fixed_sitk_mask is not None:
            sitkh.write_nifti_image_sitk(
                self._fixed_sitk_mask, self._fixed_mask_str)

        if self._moving_sitk_mask is not None:
            sitkh.write_nifti_image_sitk(
                self._moving_sitk_mask, self._moving_mask_str)

        if self._transform_init is not None:
            ph.write_array_to_file(
                self._transform_init_str,
                self._transform_init,
                access_mode="a",
                verbose=0)
Example #17
    def _run(self):

        # Create and delete all possibly existing files in the directory
        ph.create_directory(self._dir_tmp, delete_files=True)

        sitkh.write_nifti_image_sitk(self._fixed_sitk, self._fixed_str)
        sitkh.write_nifti_image_sitk(self._moving_sitk, self._moving_str)

        flt = nipype.interfaces.fsl.FLIRT()
        flt.inputs.in_file = self._moving_str
        flt.inputs.reference = self._fixed_str
        flt.inputs.out_file = self._warped_moving_str
        flt.inputs.out_matrix_file = self._registration_transform_str
        flt.inputs.output_type = "NIFTI_GZ"

        if self._fixed_sitk_mask is not None:
            sitkh.write_nifti_image_sitk(self._fixed_sitk_mask,
                                         self._fixed_mask_str)
            flt.inputs.ref_weight = self._fixed_mask_str

        if self._moving_sitk_mask is not None:
            sitkh.write_nifti_image_sitk(self._moving_sitk_mask,
                                         self._moving_mask_str)
            flt.inputs.in_weight = self._moving_mask_str

        flt.inputs.args = self._options

        # Execute registration
        if self._verbose:
            ph.print_execution(flt.cmdline)
        flt.run()

        # Read warped image
        self._warped_moving_sitk = sitkh.read_nifti_image_sitk(
            self._warped_moving_str)

        # Convert to sitk affine transform
        self._registration_transform_sitk = self._convert_to_sitk_transform()
Example #18
    def run(self):
        ph.create_directory(self._dir_tmp, delete_files=True)

        # Write images
        sitkh.write_nifti_image_sitk(
            self._stack1.sitk,
            self._dir_tmp + self._stack1.get_filename() + ".nii.gz")
        sitkh.write_nifti_image_sitk(
            self._stack2.sitk,
            self._dir_tmp + self._stack2.get_filename() + ".nii.gz")

        cmd = "siena "
        cmd += self._dir_tmp + self._stack1.get_filename() + ".nii.gz "
        cmd += self._dir_tmp + self._stack2.get_filename() + ".nii.gz "
        cmd += "-o " + self._dir_output + " "
        cmd += self._options

        time_start = ph.start_timing()
        ph.execute_command(cmd)
        self._elapsed_time = ph.stop_timing(time_start)

        # Extract measures from report
        self._extract_percentage_brain_volume_change()
Example #19
    def write(self,
              directory,
              filename=None,
              write_transform=False,
              suffix_mask="_mask",
              prefix_slice="_slice"):

        # Create directory if not existing
        ph.create_directory(directory)

        # Construct filename
        if filename is None:
            filename_out = self._filename + \
                prefix_slice + str(self._slice_number)
        else:
            filename_out = filename + prefix_slice + str(self._slice_number)

        full_file_name = os.path.join(directory, filename_out)

        # Write slice and affine transform
        sitkh.write_nifti_image_sitk(self.sitk, full_file_name + ".nii.gz")
        if write_transform:
            sitk.WriteTransform(
                self.get_motion_correction_transform(),
                full_file_name + ".tfm")

        # Write mask to specified location if given
        if self.sitk_mask is not None:
            nda = sitk.GetArrayFromImage(self.sitk_mask)

            # Write mask if it does not consist of only ones
            if not np.all(nda):
                sitkh.write_nifti_image_sitk(
                    self.sitk_mask,
                    full_file_name + "%s.nii.gz" % (suffix_mask))
Example #20
    def _run_bet_for_brain_stripping(self, debug=0):

        filename_out = "image"

        self._dir_tmp = ph.create_directory(self._dir_tmp, delete_files=True)

        path_to_image = os.path.join(self._dir_tmp, filename_out + ".nii.gz")
        path_to_res = os.path.join(self._dir_tmp, filename_out + "_bet.nii.gz")
        path_to_res_mask = os.path.join(self._dir_tmp,
                                        filename_out + "_bet_mask.nii.gz")
        path_to_res_skull = os.path.join(self._dir_tmp,
                                         filename_out + "_bet_skull.nii.gz")

        sitkh.write_nifti_image_sitk(self._sitk, path_to_image)

        bet = nipype.interfaces.fsl.BET()
        bet.inputs.in_file = path_to_image
        bet.inputs.out_file = path_to_res

        options = ""
        if not self._compute_brain_image:
            options += "-n "

        if self._compute_brain_mask:
            options += "-m "

        if self._compute_skull_image:
            options += "-s "

        options += self._bet_options
        bet.inputs.args = options

        if debug:
            print(bet.cmdline)
        bet.run()

        if self._compute_brain_image:
            self._sitk_brain_image = sitkh.read_nifti_image_sitk(
                path_to_res, sitk.sitkFloat64)

        if self._compute_brain_mask:
            self._sitk_brain_mask = sitkh.read_nifti_image_sitk(
                path_to_res_mask, sitk.sitkUInt8)

        if self._compute_skull_image:
            self._sitk_skull_image = sitkh.read_nifti_image_sitk(
                path_to_res_skull)
Example #21
def main():

    input_parser = InputArgparser(description="Convert NIfTI to DICOM image", )
    input_parser.add_filename(required=True)
    input_parser.add_option(
        option_string="--template",
        type=str,
        required=True,
        help="Template DICOM to extract relevant DICOM tags.",
    )
    input_parser.add_dir_output(required=True)
    input_parser.add_label(
        help="Label used for series description of DICOM output.",
        default="SRR_NiftyMIC")
    input_parser.add_argument(
        "--volume",
        "-volume",
        action='store_true',
        help="If given, the output DICOM file is combined as 3D volume")
    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # Prepare for final DICOM output
    ph.create_directory(args.dir_output)

    if args.volume:
        dir_output_2d_slices = os.path.join(DIR_TMP, "dicom_slices")
    else:
        dir_output_2d_slices = os.path.join(args.dir_output, args.label)
    ph.create_directory(dir_output_2d_slices, delete_files=True)

    # read NiftyMIC version (if available)
    data_reader = dr.ImageHeaderReader(args.filename)
    data_reader.read_data()
    niftymic_version = data_reader.get_niftymic_version()
    if niftymic_version is None:
        niftymic_version = "NiftyMIC"
    else:
        niftymic_version = "NiftyMIC-v%s" % niftymic_version

    # Create set of 2D DICOM slices from 3D NIfTI image
    # (correct image orientation!)
    ph.print_title("Create set of 2D DICOM slices from 3D NIfTI image")
    cmd_args = ["nifti2dicom"]
    cmd_args.append("-i '%s'" % args.filename)
    cmd_args.append("-o '%s'" % dir_output_2d_slices)
    cmd_args.append("-d '%s'" % args.template)
    cmd_args.append("--prefix ''")
    cmd_args.append("--seriesdescription '%s'" % args.label)
    cmd_args.append("--accessionnumber '%s'" % ACCESSION_NUMBER)
    cmd_args.append("--seriesnumber '%s'" % SERIES_NUMBER)
    cmd_args.append("--institutionname '%s'" % IMAGE_COMMENTS)

    # Overwrite default "nifti2dicom" tags which would be added otherwise
    # (no deletion/update with empty '' sufficient to overwrite them)
    cmd_args.append("--manufacturersmodelname '%s'" % "NiftyMIC")
    cmd_args.append("--protocolname '%s'" % niftymic_version)

    cmd_args.append("-y")
    ph.execute_command(" ".join(cmd_args))

    if args.volume:
        path_to_output = os.path.join(args.dir_output, "%s.dcm" % args.label)
        # Combine set of 2D DICOM slices to form 3D DICOM image
        # (image orientation stays correct)
        ph.print_title("Combine set of 2D DICOM slices to form 3D DICOM image")
        cmd_args = ["medcon"]
        cmd_args.append("-f '%s'/*.dcm" % dir_output_2d_slices)
        cmd_args.append("-o '%s'" % path_to_output)
        cmd_args.append("-c dicom")
        cmd_args.append("-stack3d")
        cmd_args.append("-n")
        cmd_args.append("-qc")
        cmd_args.append("-w")
        ph.execute_command(" ".join(cmd_args))

        # Update all relevant DICOM tags accordingly
        ph.print_title("Update all relevant DICOM tags accordingly")
        print("")
        dataset_template = pydicom.dcmread(args.template)
        dataset = pydicom.dcmread(path_to_output)

        # Copy tags from template (to guarantee grouping with original data)
        update_dicom_tags = {}
        for tag in COPY_DICOM_TAGS:
            try:
                update_dicom_tags[tag] = getattr(dataset_template, tag)
            except AttributeError:
                update_dicom_tags[tag] = ""

        # Additional tags
        update_dicom_tags["SeriesDescription"] = args.label
        update_dicom_tags["InstitutionName"] = institution_name
        update_dicom_tags["ImageComments"] = IMAGE_COMMENTS
        update_dicom_tags["AccessionNumber"] = ACCESSION_NUMBER
        update_dicom_tags["SeriesNumber"] = SERIES_NUMBER

        for tag in sorted(update_dicom_tags.keys()):
            value = update_dicom_tags[tag]
            setattr(dataset, tag, value)
            ph.print_info("%s: '%s'" % (tag, value))

        dataset.save_as(path_to_output)
        print("")
        ph.print_info("3D DICOM image written to '%s'" % path_to_output)

    else:
        ph.print_info("DICOM images written to '%s'" % dir_output_2d_slices)

    return 0
Example #22
def main():

    input_parser = InputArgparser(
        description="Script to export a side-by-side comparison of originally "
        "acquired and simulated/projected slice given the estimated "
        "volumetric reconstruction."
        "This function takes the result of "
        "simulate_stacks_from_reconstruction.py as input.", )
    input_parser.add_filenames(required=True)
    input_parser.add_dir_output(required=True)
    input_parser.add_option(
        option_string="--prefix-simulated",
        type=str,
        help="Specify the prefix of the simulated stacks to distinguish them "
        "from the original data.",
        default="Simulated_",
    )
    input_parser.add_option(
        option_string="--dir-input-simulated",
        type=str,
        help="Specify the directory where the simulated stacks are. "
        "If not given, it is assumed that they are in the same directory "
        "as the original ones.",
        default=None)
    input_parser.add_option(
        option_string="--resize",
        type=float,
        help="Factor to resize images (otherwise they might be very small "
        "depending on the FOV)",
        default=3)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    # Read original data
    filenames_original = args.filenames
    data_reader = dr.MultipleImagesReader(filenames_original)
    data_reader.read_data()
    stacks_original = data_reader.get_data()

    # Read data simulated from obtained reconstruction
    if args.dir_input_simulated is None:
        dir_input_simulated = os.path.dirname(filenames_original[0])
    else:
        dir_input_simulated = args.dir_input_simulated
    filenames_simulated = [
        os.path.join("%s", "%s%s") %
        (dir_input_simulated, args.prefix_simulated, os.path.basename(f))
        for f in filenames_original
    ]
    data_reader = dr.MultipleImagesReader(filenames_simulated)
    data_reader.read_data()
    stacks_simulated = data_reader.get_data()

    ph.create_directory(args.dir_output)

    for i in range(len(stacks_original)):
        try:
            # Voxel-wise subtraction fails if the image grids do not match
            stacks_original[i].sitk - stacks_simulated[i].sitk
        except RuntimeError:
            raise IOError(
                "Images '%s' and '%s' do not occupy the same space!" %
                (filenames_original[i], filenames_simulated[i]))

    # ---------------------Create side-by-side comparisons---------------------
    ph.print_title("Create side-by-side comparisons")
    intensity_max = 255
    intensity_min = 0
    for i in range(len(stacks_original)):
        ph.print_subtitle("Stack %d/%d" % (i + 1, len(stacks_original)))
        nda_3D_original = sitk.GetArrayFromImage(stacks_original[i].sitk)
        nda_3D_simulated = sitk.GetArrayFromImage(stacks_simulated[i].sitk)

        # Scale uniformly between 0 and 255 according to the simulated stack
        # for export to png
        scale = np.max(nda_3D_simulated)
        nda_3D_original = intensity_max * nda_3D_original / scale
        nda_3D_simulated = intensity_max * nda_3D_simulated / scale

        nda_3D_simulated = np.clip(nda_3D_simulated, intensity_min,
                                   intensity_max)
        nda_3D_original = np.clip(nda_3D_original, intensity_min,
                                  intensity_max)

        filename = stacks_original[i].get_filename()
        path_to_file = os.path.join(args.dir_output, "%s.pdf" % filename)

        # Export side-by-side comparison of each stack to a pdf file
        export_comparison_to_file(nda_3D_original,
                                  nda_3D_simulated,
                                  path_to_file,
                                  resize=args.resize)
Example #23
def main():
    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Perform automatic brain masking using "
        "fetal_brain_seg, part of the MONAIfbs package "
        "(https://github.com/gift-surg/MONAIfbs). ",
    )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks(required=False)
    input_parser.add_dir_output(required=False)
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=0)
    input_parser.add_option(
        option_string="--neuroimage-legacy-seg",
        type=int,
        required=False,
        default=0,
        help="If set to 1, use the legacy method for fetal brain segmentation "
             "i.e. the two-step approach proposed in Ebner, Wang et al "
             "NeuroImage (2020)"
    )

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.neuroimage_legacy_seg:
        try:
            DIR_FETAL_BRAIN_SEG = os.environ["FETAL_BRAIN_SEG"]
        except KeyError as e:
            raise RuntimeError(
                "Environment variable FETAL_BRAIN_SEG is not specified. "
                "Specify the root directory of fetal_brain_seg "
                "(https://github.com/gift-surg/fetal_brain_seg) "
                "using "
                "'export FETAL_BRAIN_SEG=path_to_fetal_brain_seg_dir' "
                "(in bashrc).")
    else:
        try:
            import monaifbs
            DIR_FETAL_BRAIN_SEG = os.path.dirname(monaifbs.__file__)
        except ImportError as e:
            raise RuntimeError(
                "monaifbs not correctly installed. "
                "Please check its installation running "
                "pip install -e MONAIfbs/ "
            )

    print("Using executable from {}".format(DIR_FETAL_BRAIN_SEG))

    if args.filenames_masks is None and args.dir_output is None:
        raise IOError("Either --filenames-masks or --dir-output must be set")

    if args.dir_output is not None:
        args.filenames_masks = [
            os.path.join(args.dir_output, os.path.basename(f))
            for f in args.filenames
        ]

    if len(args.filenames) != len(args.filenames_masks):
        raise IOError("Number of filenames and filenames-masks must match")

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    cd_fetal_brain_seg = "cd %s" % DIR_FETAL_BRAIN_SEG

    for f, m in zip(args.filenames, args.filenames_masks):

        if not ph.file_exists(f):
            raise IOError("File '%s' does not exist" % f)

        # use absolute path for input image
        f = os.path.abspath(f)

        # use absolute path for output image
        dir_output = os.path.dirname(m)
        if not os.path.isabs(dir_output):
            dir_output = os.path.realpath(
                os.path.join(os.getcwd(), dir_output))
            m = os.path.join(dir_output, os.path.basename(m))

        ph.create_directory(dir_output)

        # Change to root directory of fetal_brain_seg
        cmds = [cd_fetal_brain_seg]

        # Run masking independently (Takes longer but ensures that it does
        # not terminate because of provided 'non-brain images')
        cmd_args = ["python fetal_brain_seg.py"]
        cmd_args.append("--input_names '%s'" % f)
        cmd_args.append("--segment_output_names '%s'" % m)
        cmds.append(" ".join(cmd_args))

        # Execute both steps
        cmd = " && ".join(cmds)
        flag = ph.execute_command(cmd)

        if flag != 0:
            ph.print_warning(
                "Error using fetal_brain_seg. \n"
                "Execute '%s' for further investigation" %
                cmd)

        ph.print_info("Fetal brain segmentation written to '%s'" % m)

        if args.verbose:
            ph.show_nifti(f, segmentation=m)

    elapsed_time_total = ph.stop_timing(time_start)

    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time: %s" % (exe_file_info, elapsed_time_total))

    return 0
Example #24
def main():

    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Register an obtained reconstruction (moving) "
        "to a template image/space (fixed) using rigid registration. "
        "The resulting registration can optionally be applied to previously "
        "obtained motion correction slice transforms so that a volumetric "
        "reconstruction is possible in the (standard anatomical) space "
        "defined by the fixed.", )
    input_parser.add_fixed(required=True)
    input_parser.add_moving(required=True)
    input_parser.add_output(help="Path to registration transform (.txt)",
                            required=True)
    input_parser.add_fixed_mask(required=False)
    input_parser.add_moving_mask(required=False)
    input_parser.add_option(
        option_string="--initial-transform",
        type=str,
        help="Path to initial transform. "
        "If not provided, registration will be initialized based on "
        "rigid alignment of eigenbasis of the fixed/moving image masks "
        "using principal component analysis",
        default=None)
    input_parser.add_v2v_method(
        option_string="--method",
        help="Registration method used for the registration.",
        default="RegAladin",
    )
    input_parser.add_argument(
        "--refine-pca",
        "-refine-pca",
        action='store_true',
        help="If given, PCA-based initializations will be refined using "
        "RegAladin registrations.")
    input_parser.add_dir_input_mc()
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=1)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    if not args.output.endswith(".txt"):
        raise IOError("output transformation path must end in '.txt'")

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    fixed = st.Stack.from_filename(file_path=args.fixed,
                                   file_path_mask=args.fixed_mask,
                                   extract_slices=False)
    moving = st.Stack.from_filename(file_path=args.moving,
                                    file_path_mask=args.moving_mask,
                                    extract_slices=False)

    path_to_tmp_output = os.path.join(
        DIR_TMP, ph.append_to_filename(os.path.basename(args.moving),
                                       "_warped"))

    # ---------------------------- Initialization ----------------------------
    if args.initial_transform is None:
        ph.print_title("Estimate initial transform using PCA")

        if args.moving_mask is None or args.fixed_mask is None:
            ph.print_warning("Fixed and moving masks are strongly recommended")
        transform_initializer = tinit.TransformInitializer(
            fixed=fixed,
            moving=moving,
            similarity_measure="NMI",
            refine_pca_initializations=args.refine_pca,
        )
        transform_initializer.run()
        transform_init_sitk = transform_initializer.get_transform_sitk()
    else:
        transform_init_sitk = sitkh.read_transform_sitk(args.initial_transform)
    sitk.WriteTransform(transform_init_sitk, args.output)

    # -------------------Register Reconstruction to Template-------------------
    ph.print_title("Registration")

    if args.method == "RegAladin":

        path_to_transform_regaladin = os.path.join(DIR_TMP,
                                                   "transform_regaladin.txt")

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            args.output, path_to_transform_regaladin)
        ph.execute_command(cmd, verbose=False)

        # Run NiftyReg
        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref '%s'" % args.fixed)
        cmd_args.append("-flo '%s'" % args.moving)
        cmd_args.append("-res '%s'" % path_to_tmp_output)
        cmd_args.append("-inaff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-aff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-rigOnly")
        cmd_args.append("-ln 2")  # seems to perform better for spina bifida
        cmd_args.append("-voff")
        if args.fixed_mask is not None:
            cmd_args.append("-rmask '%s'" % args.fixed_mask)

        # To avoid error "0 correspondences between blocks were found" that can
        # occur for some cases. Also, disable moving mask, as this would be ignored
        # anyway
        cmd_args.append("-noSym")
        # if args.moving_mask is not None:
        #     cmd_args.append("-fmask '%s'" % args.moving_mask)

        ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert RegAladin to SimpleITK transform
        cmd = "simplereg_transform -nreg2sitk '%s' '%s'" % (
            path_to_transform_regaladin, args.output)
        ph.execute_command(cmd, verbose=False)

    else:
        path_to_transform_flirt = os.path.join(DIR_TMP, "transform_flirt.txt")

        # Convert SimpleITK into FLIRT transform
        cmd = "simplereg_transform -sitk2flirt '%s' '%s' '%s' '%s'" % (
            args.output, args.fixed, args.moving, path_to_transform_flirt)
        ph.execute_command(cmd, verbose=False)

        # Define search angle ranges for FLIRT in all three dimensions
        search_angles = [
            "-searchr%s -%d %d" % (x, 180, 180) for x in ["x", "y", "z"]
        ]

        cmd_args = ["flirt"]
        cmd_args.append("-in '%s'" % args.moving)
        cmd_args.append("-ref '%s'" % args.fixed)
        if args.initial_transform is not None:
            cmd_args.append("-init '%s'" % path_to_transform_flirt)
        cmd_args.append("-omat '%s'" % path_to_transform_flirt)
        cmd_args.append("-out '%s'" % path_to_tmp_output)
        cmd_args.append("-dof 6")
        cmd_args.append((" ").join(search_angles))
        if args.moving_mask is not None:
            cmd_args.append("-inweight '%s'" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-refweight '%s'" % args.fixed_mask)
        ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert FLIRT to SimpleITK transform
        cmd = "simplereg_transform -flirt2sitk '%s' '%s' '%s' '%s'" % (
            path_to_transform_flirt, args.fixed, args.moving, args.output)
        ph.execute_command(cmd, verbose=False)

    if args.dir_input_mc is not None:
        ph.print_title("Update Motion-Correction Transformations")
        transform_sitk = sitkh.read_transform_sitk(args.output, inverse=1)

        if args.dir_input_mc.endswith("/"):
            subdir_mc = args.dir_input_mc.split("/")[-2]
        else:
            subdir_mc = args.dir_input_mc.split("/")[-1]
        dir_output_mc = os.path.join(dir_output, subdir_mc)

        ph.create_directory(dir_output_mc, delete_files=True)
        pattern = REGEX_FILENAMES + "[.]tfm"
        p = re.compile(pattern)
        trafos = [t for t in os.listdir(args.dir_input_mc) if p.match(t)]
        for t in trafos:
            path_to_input_transform = os.path.join(args.dir_input_mc, t)
            path_to_output_transform = os.path.join(dir_output_mc, t)
            t_sitk = sitkh.read_transform_sitk(path_to_input_transform)
            t_sitk = sitkh.get_composite_sitk_affine_transform(
                transform_sitk, t_sitk)
            sitk.WriteTransform(t_sitk, path_to_output_transform)
        ph.print_info("%d transformations written to '%s'" %
                      (len(trafos), dir_output_mc))

    if args.verbose:
        ph.show_niftis([args.fixed, path_to_tmp_output])

    elapsed_time_total = ph.stop_timing(time_start)

    # Summary
    ph.print_title("Summary")
    print("Computational Time: %s" % (elapsed_time_total))

    return 0
Example #25
def main():
    parser = argparse.ArgumentParser(description="Create video from volume")

    parser.add_argument(
        '--image',
        required=True,
        type=str,
        help="Path to 3D image (*.nii.gz or *.nii)",
    )
    parser.add_argument(
        '--fps',
        required=False,
        type=float,
        help="Frames per second",
        default=1,
    )
    parser.add_argument(
        '--axis',
        required=False,
        type=int,
        help="Axis to sweep through the volume",
        default=2,
    )
    parser.add_argument(
        '--begin',
        required=False,
        type=int,
        help="Starting slice for video",
        default=None,
    )
    parser.add_argument(
        '--end',
        required=False,
        type=int,
        help="End slice for video",
        default=None,
    )
    parser.add_argument(
        '--output',
        required=True,
        type=str,
        help="Path to output video (*.mp4)",
    )

    args = parser.parse_args()

    image_sitk = sitk.ReadImage(args.image)

    image_nda = sitk.GetArrayFromImage(image_sitk)

    scale = np.max(image_nda)
    filename = os.path.basename(args.output).split(".")[0]
    dir_output = os.path.dirname(args.output)
    dir_output_slices = os.path.join(dir_output, "slices")
    ph.create_directory(dir_output_slices)
    ph.clear_directory(dir_output_slices)

    splitter = vol_split.VolumeSplitter(image_nda, axis=args.axis)
    splitter.rescale_array(scale=scale)
    splitter.export_slices(
        dir_output=dir_output_slices,
        filename=filename,
        begin=args.begin,
        end=args.end,
    )
    splitter.create_video(
        dir_input_slices=dir_output_slices,
        path_to_video=args.output,
        fps=args.fps,
    )

    return 0
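A hedged example invocation of this script (the script name and paths are placeholders):

    python create_video_from_volume.py --image brain.nii.gz --axis 2 --fps 5 --output video/brain.mp4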
Example #26
    def write(
        self,
        directory,
        filename=None,
        write_stack=True,
        write_mask=False,
        write_slices=False,
        write_transforms=False,
        suffix_mask="_mask",
        write_transforms_history=False,
    ):

        # Create directory if not existing
        ph.create_directory(directory)

        # Construct filename
        if filename is None:
            filename = self._filename

        full_file_name = os.path.join(directory, filename)

        # Write file to specified location
        if write_stack:
            dw.DataWriter.write_image(self.sitk, "%s.nii.gz" % full_file_name)

        # Write mask to specified location if given
        if self.sitk_mask is not None:
            # nda = sitk.GetArrayFromImage(self.sitk_mask)

            # Write mask if it does not consist of only ones
            if not self._is_unity_mask and write_mask:
                dw.DataWriter.write_mask(
                    self.sitk_mask,
                    "%s%s.nii.gz" % (full_file_name, suffix_mask))

        if write_transforms:
            stack_transform_sitk = self._history_motion_corrections[-1]
            sitk.WriteTransform(
                stack_transform_sitk,
                os.path.join(directory,
                             self.get_filename() + ".tfm"))

        # Write each separate Slice of stack (if they exist)
        if write_slices or write_transforms:
            try:
                # Check whether slices have been extracted
                if not hasattr(self, '_slices'):
                    raise ValueError(
                        "Error occurred in attempt to write %s.nii.gz: "
                        "No separate slices of object Slice are found" %
                        full_file_name)

                # Write slices
                else:
                    if write_transforms and write_slices:
                        ph.print_info(
                            "Write %s image slices and slice transforms to %s ... "
                            % (self.get_filename(), directory),
                            newline=False)
                    elif write_transforms and not write_slices:
                        ph.print_info("Write %s slice transforms to %s ... " %
                                      (self.get_filename(), directory),
                                      newline=False)
                    else:
                        ph.print_info("Write %s image slices to %s ... " %
                                      (self.get_filename(), directory),
                                      newline=False)
                    for slice in self.get_slices():
                        slice.write(
                            directory=directory,
                            filename=filename,
                            write_transform=write_transforms,
                            write_slice=write_slices,
                            suffix_mask=suffix_mask,
                            write_transforms_history=write_transforms_history,
                        )
                    print("done")

            except ValueError as err:
                print(err)
Example #27
    def show_estimated_transform_parameters(
        self,
        path_to_file=None,
        title="RobustMotionEstimator",
        fullscreen=1,
    ):
        indices, params_nda = self._get_transformation_params_nda(
            self._transforms_sitk)
        robust_params_nda = self._get_transformation_params_nda(
            self.get_robust_transforms_sitk())[1]

        dof = params_nda.shape[0]

        # plt.subplot requires an integer number of rows
        N_rows = int(np.ceil(dof / 2.))
        i_ref_marker = 0

        subpackages = self._get_temporal_packages(indices)

        fig = plt.figure(title, figsize=(15, 10))
        for i_dof in range(dof):
            y1 = params_nda[i_dof, :]
            y2 = robust_params_nda[i_dof, :]

            ax = plt.subplot(N_rows, 2, i_dof + 1)
            ax.plot(
                indices,
                y1,
                marker=ph.MARKERS[i_ref_marker],
                color=ph.COLORS_TABLEAU20[0],
                linestyle="",
                # linestyle=":",
                label="original",
                markerfacecolor="w",
            )

            # print connecting line between subpackage slices
            ls = ["--", ":", "-."]
            for i_p, p in enumerate(subpackages):
                t = sorted(p.keys())

                slices_package = [indices.index(p[t_i]) for t_i in t]
                y = y1[slices_package]
                x = [indices[i] for i in slices_package]
                ax.plot(
                    x,
                    y,
                    marker=".",
                    # color=ph.COLORS_TABLEAU20[2 + i_p],
                    color=[0.7, 0.7, 0.7],
                    linestyle=ls[i_p],
                )
            for i in range(len(y1)):
                ax.plot(
                    [indices[i], indices[i]],
                    [y1[i], y2[i]],
                    linestyle="-",
                    marker="",
                    color=ph.COLORS_TABLEAU20[2],
                )
            ax.plot(
                indices,
                y2,
                marker=ph.MARKERS[i_ref_marker],
                color=ph.COLORS_TABLEAU20[2],
                # linestyle="-",
                linestyle="",
                label="robust",
            )
            ax.set_xticks(indices)
            plt.ylabel(sitkh.TRANSFORM_SITK_DOF_LABELS_LONG[dof][i_dof])
        plt.legend(loc="best")
        plt.xlabel('Slice')
        plt.suptitle(title)

        plt.show(block=False)

        if path_to_file is not None:
            ph.create_directory(os.path.dirname(path_to_file))
            fig.savefig(path_to_file)
            ph.print_info("Figure written to %s" % path_to_file)
        plt.close()
Example #28
def main():

    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Register an obtained reconstruction (moving) "
        "to a template image/space (fixed) using rigid registration. "
        "The resulting registration can optionally be applied to previously "
        "obtained motion correction slice transforms so that a volumetric "
        "reconstruction is possible in the (standard anatomical) space "
        "defined by the fixed.", )
    input_parser.add_fixed(required=True)
    input_parser.add_moving(required=True)
    input_parser.add_output(help="Path to registration transform (.txt)",
                            required=True)
    input_parser.add_fixed_mask()
    input_parser.add_moving_mask()
    input_parser.add_dir_input_mc()
    input_parser.add_search_angle(default=180)
    input_parser.add_option(option_string="--initial-transform",
                            type=str,
                            help="Path to initial transform.",
                            default=None)
    input_parser.add_option(
        option_string="--test-ap-flip",
        type=int,
        help="Turn on/off functionality to run an additional registration "
        "after an AP-flip. Seems to be more robust to find a better "
        "registration outcome in general.",
        default=1)
    input_parser.add_option(
        option_string="--use-flirt",
        type=int,
        help="Turn on/off functionality to use FLIRT for the registration.",
        default=1)
    input_parser.add_option(
        option_string="--use-regaladin",
        type=int,
        help="Turn on/off functionality to use RegAladin for the "
        "registration.",
        default=1)
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=1)
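    # A hypothetical invocation of this script (script name and file paths are
    # placeholders; flag names are inferred from the parser set-up above):
    #   python register_to_template.py \
    #       --fixed template.nii.gz --moving srr.nii.gz \
    #       --output registration.txt --dir-input-mc motion_correction/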

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    debug = 0

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    if not args.use_regaladin and not args.use_flirt:
        raise IOError("Either RegAladin or FLIRT must be activated.")

    if not args.output.endswith(".txt"):
        raise IOError("output transformation path must end in '.txt'")

    dir_output = os.path.dirname(args.output)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    fixed = st.Stack.from_filename(file_path=args.fixed,
                                   file_path_mask=args.fixed_mask,
                                   extract_slices=False)
    moving = st.Stack.from_filename(file_path=args.moving,
                                    file_path_mask=args.moving_mask,
                                    extract_slices=False)

    if args.initial_transform is not None:
        transform_sitk = sitkh.read_transform_sitk(args.initial_transform)
    else:
        transform_sitk = sitk.AffineTransform(fixed.sitk.GetDimension())
    sitk.WriteTransform(transform_sitk, args.output)
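    # args.output now holds the initial (or identity) transform; it is read by
    # the FLIRT/RegAladin conversions below and overwritten with the final
    # registration result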

    path_to_tmp_output = os.path.join(
        DIR_TMP, ph.append_to_filename(os.path.basename(args.moving),
                                       "_warped"))

    # -------------------Register Reconstruction to Template-------------------
    ph.print_title("Register Reconstruction to Template")

    if args.use_flirt:
        path_to_transform_flirt = os.path.join(DIR_TMP, "transform_flirt.txt")

        # Convert SimpleITK into FLIRT transform
        cmd = "simplereg_transform -sitk2flirt %s %s %s %s" % (
            args.output, args.fixed, args.moving, path_to_transform_flirt)
        ph.execute_command(cmd, verbose=False)

        # Define search angle ranges for FLIRT in all three dimensions
        search_angles = [
            "-searchr%s -%d %d" % (x, args.search_angle, args.search_angle)
            for x in ["x", "y", "z"]
        ]
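        # e.g. with --search-angle 180 this yields:
        #   ["-searchrx -180 180", "-searchry -180 180", "-searchrz -180 180"]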

        # flt = nipype.interfaces.fsl.FLIRT()
        # flt.inputs.in_file = args.moving
        # flt.inputs.reference = args.fixed
        # if args.initial_transform is not None:
        #     flt.inputs.in_matrix_file = path_to_transform_flirt
        # flt.inputs.out_matrix_file = path_to_transform_flirt
        # # flt.inputs.output_type = "NIFTI_GZ"
        # flt.inputs.out_file = path_to_tmp_output
        # flt.inputs.args = "-dof 6"
        # flt.inputs.args += " %s" % " ".join(search_angles)
        # if args.moving_mask is not None:
        #     flt.inputs.in_weight = args.moving_mask
        # if args.fixed_mask is not None:
        #     flt.inputs.ref_weight = args.fixed_mask
        # ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        # flt.run()
        # print("done")

        cmd_args = ["flirt"]
        cmd_args.append("-in %s" % args.moving)
        cmd_args.append("-ref %s" % args.fixed)
        if args.initial_transform is not None:
            cmd_args.append("-init %s" % path_to_transform_flirt)
        cmd_args.append("-omat %s" % path_to_transform_flirt)
        cmd_args.append("-out %s" % path_to_tmp_output)
        cmd_args.append("-dof 6")
        cmd_args.append((" ").join(search_angles))
        if args.moving_mask is not None:
            cmd_args.append("-inweight %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-refweight %s" % args.fixed_mask)
        ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert FLIRT to SimpleITK transform
        cmd = "simplereg_transform -flirt2sitk %s %s %s %s" % (
            path_to_transform_flirt, args.fixed, args.moving, args.output)
        ph.execute_command(cmd, verbose=False)

        if debug:
            ph.show_niftis([args.fixed, path_to_tmp_output])

    # Additionally, use RegAladin for more accurate alignment
    # Rationale: FLIRT has better capture range, but RegAladin seems to
    # find better alignment once it is within its capture range.
    if args.use_regaladin:
        path_to_transform_regaladin = os.path.join(DIR_TMP,
                                                   "transform_regaladin.txt")

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            args.output, path_to_transform_regaladin)
        ph.execute_command(cmd, verbose=False)

        # nreg = nipype.interfaces.niftyreg.RegAladin()
        # nreg.inputs.ref_file = args.fixed
        # nreg.inputs.flo_file = args.moving
        # nreg.inputs.res_file = path_to_tmp_output
        # nreg.inputs.in_aff_file = path_to_transform_regaladin
        # nreg.inputs.aff_file = path_to_transform_regaladin
        # nreg.inputs.args = "-rigOnly -voff"
        # if args.moving_mask is not None:
        #     nreg.inputs.fmask_file = args.moving_mask
        # if args.fixed_mask is not None:
        #     nreg.inputs.rmask_file = args.fixed_mask
        # ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        # nreg.run()
        # print("done")

        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref %s" % args.fixed)
        cmd_args.append("-flo %s" % args.moving)
        cmd_args.append("-res %s" % path_to_tmp_output)
        if args.initial_transform is not None or args.use_flirt == 1:
            cmd_args.append("-inaff %s" % path_to_transform_regaladin)
        cmd_args.append("-aff %s" % path_to_transform_regaladin)
        # cmd_args.append("-cog")
        # cmd_args.append("-ln 2")
        cmd_args.append("-rigOnly")
        cmd_args.append("-voff")
        if args.moving_mask is not None:
            cmd_args.append("-fmask %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-rmask %s" % args.fixed_mask)
        ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert RegAladin to SimpleITK transform
        cmd = "simplereg_transform -nreg2sitk %s %s" % (
            path_to_transform_regaladin, args.output)
        ph.execute_command(cmd, verbose=False)

        if debug:
            ph.show_niftis([args.fixed, path_to_tmp_output])

    if args.test_ap_flip:
        path_to_transform_flip = os.path.join(DIR_TMP, "transform_flip.txt")
        path_to_tmp_output_flip = os.path.join(DIR_TMP, "output_flip.nii.gz")

        # Get AP-flip transform
        transform_ap_flip_sitk = get_ap_flip_transform(args.fixed)
        path_to_transform_flip_regaladin = os.path.join(
            DIR_TMP, "transform_flip_regaladin.txt")
        sitk.WriteTransform(transform_ap_flip_sitk, path_to_transform_flip)

        # Compose current transform with AP flip transform
        cmd = "simplereg_transform -c %s %s %s" % (
            args.output, path_to_transform_flip, path_to_transform_flip)
        ph.execute_command(cmd, verbose=False)

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            path_to_transform_flip, path_to_transform_flip_regaladin)
        ph.execute_command(cmd, verbose=False)

        # nreg = nipype.interfaces.niftyreg.RegAladin()
        # nreg.inputs.ref_file = args.fixed
        # nreg.inputs.flo_file = args.moving
        # nreg.inputs.res_file = path_to_tmp_output_flip
        # nreg.inputs.in_aff_file = path_to_transform_flip_regaladin
        # nreg.inputs.aff_file = path_to_transform_flip_regaladin
        # nreg.inputs.args = "-rigOnly -voff"
        # if args.moving_mask is not None:
        #     nreg.inputs.fmask_file = args.moving_mask
        # if args.fixed_mask is not None:
        #     nreg.inputs.rmask_file = args.fixed_mask
        # ph.print_info("Run Registration AP-flipped (RegAladin) ... ",
        #               newline=False)
        # nreg.run()
        # print("done")

        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref %s" % args.fixed)
        cmd_args.append("-flo %s" % args.moving)
        cmd_args.append("-res %s" % path_to_tmp_output_flip)
        cmd_args.append("-inaff %s" % path_to_transform_flip_regaladin)
        cmd_args.append("-aff %s" % path_to_transform_flip_regaladin)
        cmd_args.append("-rigOnly")
        # cmd_args.append("-ln 2")
        cmd_args.append("-voff")
        if args.moving_mask is not None:
            cmd_args.append("-fmask %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-rmask %s" % args.fixed_mask)
        ph.print_info("Run Registration AP-flipped (RegAladin) ... ",
                      newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        if debug:
            ph.show_niftis(
                [args.fixed, path_to_tmp_output, path_to_tmp_output_flip])

        warped_moving = st.Stack.from_filename(path_to_tmp_output,
                                               extract_slices=False)
        warped_moving_flip = st.Stack.from_filename(path_to_tmp_output_flip,
                                                    extract_slices=False)
        fixed = st.Stack.from_filename(args.fixed, args.fixed_mask)

        stacks = [warped_moving, warped_moving_flip]
        image_similarity_evaluator = ise.ImageSimilarityEvaluator(
            stacks=stacks, reference=fixed)
        image_similarity_evaluator.compute_similarities()
        similarities = image_similarity_evaluator.get_similarities()
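        # similarities["NMI"] is ordered like `stacks`, i.e. index 0 refers to
        # the unflipped and index 1 to the AP-flipped registration outcome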

        if similarities["NMI"][1] > similarities["NMI"][0]:
            ph.print_info("AP-flipped outcome better")

            # Convert RegAladin to SimpleITK transform
            cmd = "simplereg_transform -nreg2sitk %s %s" % (
                path_to_transform_flip_regaladin, args.output)
            ph.execute_command(cmd, verbose=False)

            # Copy better outcome
            cmd = "cp -p %s %s" % (path_to_tmp_output_flip, path_to_tmp_output)
            ph.execute_command(cmd, verbose=False)

        else:
            ph.print_info("AP-flip does not improve outcome")

    if args.dir_input_mc is not None:
        transform_sitk = sitkh.read_transform_sitk(args.output, inverse=1)

        subdir_mc = os.path.basename(os.path.normpath(args.dir_input_mc))
        dir_output_mc = os.path.join(dir_output, subdir_mc)

        ph.create_directory(dir_output_mc, delete_files=True)
        pattern = REGEX_FILENAMES + "[.]tfm"
        p = re.compile(pattern)
        trafos = [t for t in os.listdir(args.dir_input_mc) if p.match(t)]
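        # Compose the inverse template registration with each slice
        # motion-correction transform so that subsequent volumetric
        # reconstructions are expressed in the template (fixed) space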
        for t in trafos:
            path_to_input_transform = os.path.join(args.dir_input_mc, t)
            path_to_output_transform = os.path.join(dir_output_mc, t)
            t_sitk = sitkh.read_transform_sitk(path_to_input_transform)
            t_sitk = sitkh.get_composite_sitk_affine_transform(
                transform_sitk, t_sitk)
            sitk.WriteTransform(t_sitk, path_to_output_transform)

    if args.verbose:
        ph.show_niftis([args.fixed, path_to_tmp_output])

    elapsed_time_total = ph.stop_timing(time_start)

    # Summary
    ph.print_title("Summary")
    print("Computational Time: %s" % (elapsed_time_total))

    return 0
Example #29
0
def main():

    time_start = ph.start_timing()

    # Set print options for numpy
    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Volumetric MRI reconstruction framework to reconstruct "
        "an isotropic, high-resolution 3D volume from multiple stacks of 2D "
        "slices with motion correction. The resolution of the computed "
        "Super-Resolution Reconstruction (SRR) is given by the in-plane "
        "spacing of the selected target stack. A region of interest can be "
        "specified by providing a mask for the selected target stack. Only "
        "this region will then be reconstructed by the SRR algorithm which "
        "can substantially reduce the computational time.",
    )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_target_stack(default=None)
    input_parser.add_search_angle(default=45)
    input_parser.add_multiresolution(default=0)
    input_parser.add_shrink_factors(default=[3, 2, 1])
    input_parser.add_smoothing_sigmas(default=[1.5, 1, 0])
    input_parser.add_sigma(default=1)
    input_parser.add_reconstruction_type(default="TK1L2")
    input_parser.add_iterations(default=15)
    input_parser.add_alpha(default=0.015)
    input_parser.add_alpha_first(default=0.2)
    input_parser.add_iter_max(default=10)
    input_parser.add_iter_max_first(default=5)
    input_parser.add_dilation_radius(default=3)
    input_parser.add_extra_frame_target(default=10)
    input_parser.add_bias_field_correction(default=0)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_isotropic_resolution(default=1)
    input_parser.add_log_config(default=1)
    input_parser.add_subfolder_motion_correction()
    input_parser.add_write_motion_correction(default=1)
    input_parser.add_verbose(default=0)
    input_parser.add_two_step_cycles(default=3)
    input_parser.add_use_masks_srr(default=0)
    input_parser.add_boundary_stacks(default=[10, 10, 0])
    input_parser.add_metric(default="Correlation")
    input_parser.add_metric_radius(default=10)
    input_parser.add_reference()
    input_parser.add_reference_mask()
    input_parser.add_outlier_rejection(default=1)
    input_parser.add_threshold_first(default=0.5)
    input_parser.add_threshold(default=0.8)
    input_parser.add_interleave(default=3)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_viewer(default="itksnap")
    input_parser.add_v2v_method(default="RegAladin")
    input_parser.add_argument(
        "--v2v-robust", "-v2v-robust",
        action='store_true',
        help="If given, a more robust volume-to-volume registration step is "
        "performed, i.e. four rigid registrations are performed using four "
        "rigid transform initializations based on "
        "principal component alignment of associated masks."
    )
    input_parser.add_argument(
        "--s2v-hierarchical", "-s2v-hierarchical",
        action='store_true',
        help="If given, a hierarchical approach for the first slice-to-volume "
        "registration cycle is used, i.e. sub-packages defined by the "
        "specified interleave (--interleave) are registered until each "
        "slice is registered independently."
    )
    input_parser.add_argument(
        "--sda", "-sda",
        action='store_true',
        help="If given, the volumetric reconstructions are performed using "
        "Scattered Data Approximation (Vercauteren et al., 2006). "
        "'alpha' is considered the final 'sigma' for the "
        "iterative adjustment. "
        "Recommended value is, e.g., --alpha 0.8"
    )
    input_parser.add_option(
        option_string="--transforms-history",
        type=int,
        help="Write entire history of applied slice motion correction "
        "transformations to motion correction output directory",
        default=0,
    )
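    # A hypothetical invocation of this script (script name and file paths are
    # placeholders; flag names are inferred from the parser set-up above):
    #   python reconstruct_volume.py \
    #       --filenames stack1.nii.gz stack2.nii.gz \
    #       --filenames-masks stack1_mask.nii.gz stack2_mask.nii.gz \
    #       --output srr.nii.gz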

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    rejection_measure = "NCC"
    threshold_v2v = -2  # 0.3
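    # NCC values lie in [-1, 1]; a threshold of -2 therefore disables the
    # early slice outlier rejection before the first volume estimate
    # (see the 'threshold_v2v > -1' check below)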
    debug = False

    if args.v2v_method not in V2V_METHOD_OPTIONS:
        raise ValueError("v2v-method must be in {%s}" % (
            ", ".join(V2V_METHOD_OPTIONS)))

    if all(not args.output.endswith(t) for t in ALLOWED_EXTENSIONS):
        raise ValueError(
            "output filename invalid; allowed extensions are: %s" %
            ", ".join(ALLOWED_EXTENSIONS))

    if args.alpha_first < args.alpha and not args.sda:
        raise ValueError("It must hold alpha-first >= alpha")

    if args.threshold_first > args.threshold:
        raise ValueError("It must hold threshold-first <= threshold")

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=args.filenames_masks,
        suffix_mask=args.suffix_mask,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )

    if len(args.boundary_stacks) != 3:
        raise IOError(
            "Provide exactly three values for '--boundary-stacks' to define "
            "cropping in i-, j-, and k-dimension of the input stacks")

    data_reader.read_data()
    stacks = data_reader.get_data()
    ph.print_info("%d input stacks read for further processing" % len(stacks))

    if all(s.is_unity_mask() for s in stacks):
        ph.print_warning("No mask is provided! "
                         "Generated reconstruction space may be very big!")
        ph.print_warning("Consider using a mask to speed up computations")

        # args.extra_frame_target = 0
        # ph.print_warning("Overwritten: extra-frame-target set to 0")

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        try:
            target_stack_index = args.filenames.index(args.target_stack)
        except ValueError:
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames")

    # ---------------------------Data Preprocessing---------------------------
    ph.print_title("Data Preprocessing")

    segmentation_propagator = segprop.SegmentationPropagation(
        # registration_method=regflirt.FLIRT(use_verbose=args.verbose),
        # registration_method=niftyreg.RegAladin(use_verbose=False),
        dilation_radius=args.dilation_radius,
        dilation_kernel="Ball",
    )

    data_preprocessing = dp.DataPreprocessing(
        stacks=stacks,
        segmentation_propagator=segmentation_propagator,
        use_cropping_to_mask=True,
        use_N4BiasFieldCorrector=args.bias_field_correction,
        target_stack_index=target_stack_index,
        boundary_i=args.boundary_stacks[0],
        boundary_j=args.boundary_stacks[1],
        boundary_k=args.boundary_stacks[2],
        unit="mm",
    )
    data_preprocessing.run()
    time_data_preprocessing = data_preprocessing.get_computational_time()

    # Get preprocessed stacks
    stacks = data_preprocessing.get_preprocessed_stacks()

    # Define reference/target stack for registration and reconstruction
    if args.reference is not None:
        reference = st.Stack.from_filename(
            file_path=args.reference,
            file_path_mask=args.reference_mask,
            extract_slices=False)

    else:
        reference = st.Stack.from_stack(stacks[target_stack_index])

    # ------------------------Volume-to-Volume Registration--------------------
    if len(stacks) > 1:

        if args.v2v_method == "FLIRT":
            # Define search angle ranges for FLIRT in all three dimensions
            search_angles = ["-searchr%s -%d %d" %
                             (x, args.search_angle, args.search_angle)
                             for x in ["x", "y", "z"]]
            options = (" ").join(search_angles)
            # options += " -noresample"

            vol_registration = regflirt.FLIRT(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                options=options,
                use_verbose=False,
            )
        else:
            vol_registration = niftyreg.RegAladin(
                registration_type="Rigid",
                use_fixed_mask=True,
                use_moving_mask=True,
                # options="-ln 2 -voff",
                use_verbose=False,
            )
        v2vreg = pipeline.VolumeToVolumeRegistration(
            stacks=stacks,
            reference=reference,
            registration_method=vol_registration,
            verbose=debug,
            robust=args.v2v_robust,
        )
        v2vreg.run()
        stacks = v2vreg.get_stacks()
        time_registration = v2vreg.get_computational_time()

    else:
        time_registration = ph.get_zero_time()

    # ---------------------------Intensity Correction--------------------------
    if args.intensity_correction:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." % (
                    i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " % (
                    i + 1, stack.get_filename()), newline=False)
            intensity_corrector.set_stack(stack)
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # ---------------------------Create first volume---------------------------
    time_tmp = ph.start_timing()

    # Isotropic resampling to define HR target space
    ph.print_title("Reconstruction Space Generation")
    HR_volume = reference.get_isotropically_resampled_stack(
        resolution=args.isotropic_resolution)
    ph.print_info(
        "Isotropic reconstruction space with %g mm resolution is created" %
        HR_volume.sitk.GetSpacing()[0])

    if args.reference is None:
        # Create joint image mask in target space
        joint_image_mask_builder = imb.JointImageMaskBuilder(
            stacks=stacks,
            target=HR_volume,
            dilation_radius=1,
        )
        joint_image_mask_builder.run()
        HR_volume = joint_image_mask_builder.get_stack()
        ph.print_info(
            "Isotropic reconstruction space is centered around "
            "joint stack masks. ")

        # Crop to space defined by mask (plus extra margin)
        HR_volume = HR_volume.get_cropped_stack_based_on_mask(
            boundary_i=args.extra_frame_target,
            boundary_j=args.extra_frame_target,
            boundary_k=args.extra_frame_target,
            unit="mm",
        )

        # Create first volume
        # If outlier rejection is activated, eliminate obvious outliers early
        # from stack and re-run SDA to get initial volume without them
        ph.print_title("First Estimate of HR Volume")
        if args.outlier_rejection and threshold_v2v > -1:
            ph.print_subtitle("SDA Approximation")
            SDA = sda.ScatteredDataApproximation(
                stacks, HR_volume, sigma=args.sigma)
            SDA.run()
            HR_volume = SDA.get_reconstruction()

            # Identify and reject outliers
            ph.print_subtitle("Eliminate slice outliers (%s < %g)" % (
                rejection_measure, threshold_v2v))
            outlier_rejector = outre.OutlierRejector(
                stacks=stacks,
                reference=HR_volume,
                threshold=threshold_v2v,
                measure=rejection_measure,
                verbose=True,
            )
            outlier_rejector.run()
            stacks = outlier_rejector.get_stacks()

        ph.print_subtitle("SDA Approximation Image")
        SDA = sda.ScatteredDataApproximation(
            stacks, HR_volume, sigma=args.sigma)
        SDA.run()
        HR_volume = SDA.get_reconstruction()

        ph.print_subtitle("SDA Approximation Image Mask")
        SDA = sda.ScatteredDataApproximation(
            stacks, HR_volume, sigma=args.sigma, sda_mask=True)
        SDA.run()
        # HR volume contains updated mask based on SDA
        HR_volume = SDA.get_reconstruction()

        HR_volume.set_filename(SDA.get_setting_specific_filename())

    time_reconstruction = ph.stop_timing(time_tmp)

    if args.verbose:
        tmp = list(stacks)
        tmp.insert(0, HR_volume)
        sitkh.show_stacks(tmp, segmentation=HR_volume, viewer=args.viewer)

    # -----------Two-step Slice-to-Volume Registration-Reconstruction----------
    if args.two_step_cycles > 0:

        # Slice-to-volume registration set-up
        if args.metric == "ANTSNeighborhoodCorrelation":
            metric_params = {"radius": args.metric_radius}
        else:
            metric_params = None
        registration = regsitk.SimpleItkRegistration(
            moving=HR_volume,
            use_fixed_mask=True,
            use_moving_mask=True,
            interpolator="Linear",
            metric=args.metric,
            metric_params=metric_params,
            use_multiresolution_framework=args.multiresolution,
            shrink_factors=args.shrink_factors,
            smoothing_sigmas=args.smoothing_sigmas,
            initializer_type="SelfGEOMETRY",
            optimizer="ConjugateGradientLineSearch",
            optimizer_params={
                "learningRate": 1,
                "numberOfIterations": 100,
                "lineSearchUpperLimit": 2,
            },
            scales_estimator="Jacobian",
            use_verbose=debug,
        )

        # Volumetric reconstruction set-up
        if args.sda:
            recon_method = sda.ScatteredDataApproximation(
                stacks,
                HR_volume,
                sigma=args.sigma,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.sigma, args.alpha]
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1",
                minimizer="lsmr",
                alpha=args.alpha_first,
                iter_max=np.min([args.iter_max_first, args.iter_max]),
                verbose=True,
                use_masks=args.use_masks_srr,
            )
            alpha_range = [args.alpha_first, args.alpha]

        # Define the regularization parameters for the individual
        # reconstruction steps in the two-step cycles
        alphas = np.linspace(
            alpha_range[0], alpha_range[1], args.two_step_cycles)

        # Define outlier rejection threshold after each S2V-reg step
        thresholds = np.linspace(
            args.threshold_first, args.threshold, args.two_step_cycles)
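        # With the defaults (alpha-first=0.2, alpha=0.015,
        # threshold-first=0.5, threshold=0.8, two-step-cycles=3) this yields
        # alphas = [0.2, 0.1075, 0.015] and thresholds = [0.5, 0.65, 0.8];
        # for --sda, alphas interpolate between sigma and alpha instead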

        two_step_s2v_reg_recon = \
            pipeline.TwoStepSliceToVolumeRegistrationReconstruction(
                stacks=stacks,
                reference=HR_volume,
                registration_method=registration,
                reconstruction_method=recon_method,
                cycles=args.two_step_cycles,
                alphas=alphas[0:args.two_step_cycles - 1],
                outlier_rejection=args.outlier_rejection,
                threshold_measure=rejection_measure,
                thresholds=thresholds,
                interleave=args.interleave,
                viewer=args.viewer,
                verbose=args.verbose,
                use_hierarchical_registration=args.s2v_hierarchical,
            )
        two_step_s2v_reg_recon.run()
        HR_volume_iterations = \
            two_step_s2v_reg_recon.get_iterative_reconstructions()
        time_registration += \
            two_step_s2v_reg_recon.get_computational_time_registration()
        time_reconstruction += \
            two_step_s2v_reg_recon.get_computational_time_reconstruction()
        stacks = two_step_s2v_reg_recon.get_stacks()

    # no two-step s2v-registration/reconstruction iterations
    else:
        HR_volume_iterations = []

    # Write motion-correction results
    ph.print_title("Write Motion Correction Results")
    if args.write_motion_correction:
        dir_output_mc = os.path.join(
            dir_output, args.subfolder_motion_correction)
        ph.clear_directory(dir_output_mc)

        for stack in stacks:
            stack.write(
                dir_output_mc,
                write_stack=False,
                write_mask=False,
                write_slices=False,
                write_transforms=True,
                write_transforms_history=args.transforms_history,
            )

        if args.outlier_rejection:
            deleted_slices_dic = {}
            for i, stack in enumerate(stacks):
                deleted_slices = stack.get_deleted_slice_numbers()
                deleted_slices_dic[stack.get_filename()] = deleted_slices

            # check whether any stack was removed entirely
            stacks0 = data_preprocessing.get_preprocessed_stacks()
            if len(stacks) != len(stacks0):
                stacks_remain = [s.get_filename() for s in stacks]
                for stack in stacks0:
                    if stack.get_filename() in stacks_remain:
                        continue

                    # add info that all slices of this stack were rejected
                    deleted_slices = [
                        s.get_slice_number()
                        for s in stack.get_slices()
                    ]
                    deleted_slices_dic[stack.get_filename()] = deleted_slices
                    ph.print_info(
                        "All slices of stack '%s' were rejected entirely. "
                        "Information added." % stack.get_filename())

            ph.write_dictionary_to_json(
                deleted_slices_dic,
                os.path.join(
                    dir_output,
                    args.subfolder_motion_correction,
                    "rejected_slices.json"
                )
            )

    # ---------------------Final Volumetric Reconstruction---------------------
    ph.print_title("Final Volumetric Reconstruction")
    if args.sda:
        recon_method = sda.ScatteredDataApproximation(
            stacks,
            HR_volume,
            sigma=args.alpha,
            use_masks=args.use_masks_srr,
        )
    else:
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            recon_method = pd.PrimalDualSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TV" if args.reconstruction_type == "TVL2" else "huber",
                iterations=args.iterations,
                use_masks=args.use_masks_srr,
            )
        else:
            recon_method = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=HR_volume,
                reg_type="TK1" if args.reconstruction_type == "TK1L2" else "TK0",
                use_masks=args.use_masks_srr,
            )
        recon_method.set_alpha(args.alpha)
        recon_method.set_iter_max(args.iter_max)
        recon_method.set_verbose(True)
    recon_method.run()
    time_reconstruction += recon_method.get_computational_time()
    HR_volume_final = recon_method.get_reconstruction()

    ph.print_subtitle("Final SDA Approximation Image Mask")
    SDA = sda.ScatteredDataApproximation(
        stacks, HR_volume_final, sigma=args.sigma, sda_mask=True)
    SDA.run()
    # HR volume contains updated mask based on SDA
    HR_volume_final = SDA.get_reconstruction()
    time_reconstruction += SDA.get_computational_time()

    elapsed_time_total = ph.stop_timing(time_start)

    # Write SRR result
    filename = recon_method.get_setting_specific_filename()
    HR_volume_final.set_filename(filename)
    dw.DataWriter.write_image(
        HR_volume_final.sitk,
        args.output,
        description=filename)
    dw.DataWriter.write_mask(
        HR_volume_final.sitk_mask,
        ph.append_to_filename(args.output, "_mask"),
        description=SDA.get_setting_specific_filename())

    HR_volume_iterations.insert(0, HR_volume_final)
    for stack in stacks:
        HR_volume_iterations.append(stack)

    if args.verbose:
        sitkh.show_stacks(
            HR_volume_iterations,
            segmentation=HR_volume_final,
            viewer=args.viewer,
        )

    # Summary
    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time for Data Preprocessing: %s" %
          (exe_file_info, time_data_preprocessing))
    print("%s | Computational Time for Registrations: %s" %
          (exe_file_info, time_registration))
    print("%s | Computational Time for Reconstructions: %s" %
          (exe_file_info, time_reconstruction))
    print("%s | Computational Time for Entire Reconstruction Pipeline: %s" %
          (exe_file_info, elapsed_time_total))

    ph.print_line_separator()

    return 0
def main():
    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Perform automatic brain masking using "
        "fetal_brain_seg (https://github.com/gift-surg/fetal_brain_seg). ", )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks(required=False)
    input_parser.add_dir_output(required=False)
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=0)
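    # A hypothetical invocation of this script (script name and paths are
    # placeholders; flag names are inferred from the parser set-up above):
    #   export FETAL_BRAIN_SEG=/path/to/fetal_brain_seg
    #   python segment_fetal_brains.py \
    #       --filenames stack1.nii.gz stack2.nii.gz --dir-output masks/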

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    try:
        DIR_FETAL_BRAIN_SEG = os.environ["FETAL_BRAIN_SEG"]
    except KeyError:
        raise RuntimeError(
            "Environment variable FETAL_BRAIN_SEG is not specified. "
            "Specify the root directory of fetal_brain_seg "
            "(https://github.com/gift-surg/fetal_brain_seg) "
            "using "
            "'export FETAL_BRAIN_SEG=path_to_fetal_brain_seg_dir' "
            "(in bashrc).")

    if args.filenames_masks is None and args.dir_output is None:
        raise IOError("Either --filenames-masks or --dir-output must be set")

    if args.dir_output is not None:
        args.filenames_masks = [
            os.path.join(args.dir_output, os.path.basename(f))
            for f in args.filenames
        ]
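        # e.g. --dir-output masks/ with --filenames data/stack1.nii.gz stores
        # the corresponding brain mask as masks/stack1.nii.gz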

    if len(args.filenames) != len(args.filenames_masks):
        raise IOError("Number of filenames and filenames-masks must match")

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    cd_fetal_brain_seg = "cd %s" % DIR_FETAL_BRAIN_SEG

    for f, m in zip(args.filenames, args.filenames_masks):

        if not ph.file_exists(f):
            raise IOError("File '%s' does not exist" % f)

        # use absolute path for input image
        f = os.path.abspath(f)

        # use absolute path for output image
        dir_output = os.path.dirname(m)
        if not os.path.isabs(dir_output):
            dir_output = os.path.realpath(os.path.join(os.getcwd(),
                                                       dir_output))
            m = os.path.join(dir_output, os.path.basename(m))

        ph.create_directory(dir_output)

        # Change to root directory of fetal_brain_seg
        cmds = [cd_fetal_brain_seg]

        # Run masking for each image independently (takes longer but ensures
        # that processing does not terminate because of a provided
        # 'non-brain image')
        cmd_args = ["python fetal_brain_seg.py"]
        cmd_args.append("--input_names '%s'" % f)
        cmd_args.append("--segment_output_names '%s'" % m)
        cmds.append(" ".join(cmd_args))
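        # The composed command reads, e.g. (illustrative paths):
        #   cd $FETAL_BRAIN_SEG && python fetal_brain_seg.py \
        #       --input_names '/abs/path/stack1.nii.gz' \
        #       --segment_output_names '/abs/path/masks/stack1.nii.gz'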

        # Execute both steps
        cmd = " && ".join(cmds)
        flag = ph.execute_command(cmd)

        if flag != 0:
            ph.print_warning("Error using fetal_brain_seg. \n"
                             "Execute '%s' for further investigation" % cmd)

        ph.print_info("Fetal brain segmentation written to '%s'" % m)

        if args.verbose:
            ph.show_nifti(f, segmentation=m)

    return 0