# Example #1
def main():

    # Set up and parse the command-line interface.
    input_parser = InputArgparser(
        description="Multiply images. "
        "Pixel type is determined by first given image.", )

    input_parser.add_filenames(required=True)
    input_parser.add_output(required=True)
    input_parser.add_verbose(default=0)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if len(args.filenames) < 2:
        raise IOError("At least two images must be provided")

    # Accumulate the voxel-wise product; every further image is cast to the
    # pixel type of the first image before multiplication.
    product_sitk = sitk.ReadImage(args.filenames[0])
    for path in args.filenames[1:]:
        factor_sitk = sitk.Cast(
            sitk.ReadImage(path), product_sitk.GetPixelIDValue())
        product_sitk = product_sitk * factor_sitk

    dw.DataWriter.write_image(product_sitk, args.output)

    # Optionally show the result together with all input images.
    if args.verbose:
        args.filenames.insert(0, args.output)
        ph.show_niftis(args.filenames)
def main():

    # Set up and parse the command-line interface.
    input_parser = InputArgparser(
        description="Show data/slice coverage over specified reconstruction "
        "space.", )

    input_parser.add_filenames(required=True)
    input_parser.add_reconstruction_space(required=True)
    input_parser.add_output(required=True)
    input_parser.add_dir_input_mc()
    input_parser.add_slice_thicknesses()
    input_parser.add_verbose(default=0)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # Read the input stacks (optionally with motion-correction transforms
    # and per-stack slice thicknesses).
    reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        dir_motion_correction=args.dir_input_mc,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )
    reader.read_data()
    input_stacks = reader.get_data()

    # Compute the slice coverage over the given reconstruction space.
    recon_space_sitk = sitk.ReadImage(args.reconstruction_space)
    coverage = sc.SliceCoverage(
        stacks=input_stacks,
        reconstruction_sitk=recon_space_sitk,
    )
    coverage.run()

    # Export the coverage map as a mask image.
    dw.DataWriter.write_mask(coverage.get_coverage_sitk(), args.output)

    if args.verbose:
        ph.show_niftis([
            args.reconstruction_space,
            args.output,
        ])
# Example #3
def main():
    """Register a reconstruction (moving) to a template image/space (fixed).

    A rigid transform is estimated with either RegAladin (NiftyReg) or FLIRT
    (FSL) and written to the path given by ``--output`` (must end in '.txt').
    Unless an initial transform is provided, the registration is initialized
    by a PCA-based rigid alignment of the fixed/moving image masks.
    Optionally, the obtained transform is composed with previously computed
    motion-correction slice transforms so that a volumetric reconstruction
    can be performed in the space defined by the fixed image.

    Returns:
        int: 0 on success.

    Raises:
        IOError: If the output path does not end in '.txt'.
    """

    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Register an obtained reconstruction (moving) "
        "to a template image/space (fixed) using rigid registration. "
        "The resulting registration can optionally be applied to previously "
        "obtained motion correction slice transforms so that a volumetric "
        "reconstruction is possible in the (standard anatomical) space "
        "defined by the fixed.", )
    input_parser.add_fixed(required=True)
    input_parser.add_moving(required=True)
    input_parser.add_output(help="Path to registration transform (.txt)",
                            required=True)
    input_parser.add_fixed_mask(required=False)
    input_parser.add_moving_mask(required=False)
    input_parser.add_option(
        option_string="--initial-transform",
        type=str,
        help="Path to initial transform. "
        "If not provided, registration will be initialized based on "
        "rigid alignment of eigenbasis of the fixed/moving image masks "
        "using principal component analysis",
        default=None)
    input_parser.add_v2v_method(
        option_string="--method",
        help="Registration method used for the registration.",
        default="RegAladin",
    )
    input_parser.add_argument(
        "--refine-pca",
        "-refine-pca",
        action='store_true',
        help="If given, PCA-based initializations will be refined using "
        "RegAladin registrations.")
    input_parser.add_dir_input_mc()
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=1)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # The resulting transform is stored as an ITK text transform file.
    if not args.output.endswith(".txt"):
        raise IOError("output transformation path must end in '.txt'")

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    # Slices are not extracted; the stacks are only needed as whole volumes.
    fixed = st.Stack.from_filename(file_path=args.fixed,
                                   file_path_mask=args.fixed_mask,
                                   extract_slices=False)
    moving = st.Stack.from_filename(file_path=args.moving,
                                    file_path_mask=args.moving_mask,
                                    extract_slices=False)

    # Temporary path where the warped moving image will be written.
    path_to_tmp_output = os.path.join(
        DIR_TMP, ph.append_to_filename(os.path.basename(args.moving),
                                       "_warped"))

    # ---------------------------- Initialization ----------------------------
    if args.initial_transform is None:
        ph.print_title("Estimate initial transform using PCA")

        if args.moving_mask is None or args.fixed_mask is None:
            ph.print_warning("Fixed and moving masks are strongly recommended")
        transform_initializer = tinit.TransformInitializer(
            fixed=fixed,
            moving=moving,
            similarity_measure="NMI",
            refine_pca_initializations=args.refine_pca,
        )
        transform_initializer.run()
        transform_init_sitk = transform_initializer.get_transform_sitk()
    else:
        transform_init_sitk = sitkh.read_transform_sitk(args.initial_transform)
    # Persist the initialization at args.output; the registration below reads
    # it from there and overwrites it with the refined result.
    sitk.WriteTransform(transform_init_sitk, args.output)

    # -------------------Register Reconstruction to Template-------------------
    ph.print_title("Registration")

    if args.method == "RegAladin":

        path_to_transform_regaladin = os.path.join(DIR_TMP,
                                                   "transform_regaladin.txt")

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            args.output, path_to_transform_regaladin)
        ph.execute_command(cmd, verbose=False)

        # Run NiftyReg
        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref '%s'" % args.fixed)
        cmd_args.append("-flo '%s'" % args.moving)
        cmd_args.append("-res '%s'" % path_to_tmp_output)
        cmd_args.append("-inaff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-aff '%s'" % path_to_transform_regaladin)
        cmd_args.append("-rigOnly")
        cmd_args.append("-ln 2")  # seems to perform better for spina bifida
        cmd_args.append("-voff")
        if args.fixed_mask is not None:
            cmd_args.append("-rmask '%s'" % args.fixed_mask)

        # To avoid error "0 correspondences between blocks were found" that can
        # occur for some cases. Also, disable moving mask, as this would be ignored
        # anyway
        cmd_args.append("-noSym")
        # if args.moving_mask is not None:
        #     cmd_args.append("-fmask '%s'" % args.moving_mask)

        ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert RegAladin to SimpleITK transform
        cmd = "simplereg_transform -nreg2sitk '%s' '%s'" % (
            path_to_transform_regaladin, args.output)
        ph.execute_command(cmd, verbose=False)

    else:
        path_to_transform_flirt = os.path.join(DIR_TMP, "transform_flirt.txt")

        # Convert SimpleITK into FLIRT transform
        cmd = "simplereg_transform -sitk2flirt '%s' '%s' '%s' '%s'" % (
            args.output, args.fixed, args.moving, path_to_transform_flirt)
        ph.execute_command(cmd, verbose=False)

        # Define search angle ranges for FLIRT in all three dimensions,
        # e.g. "-searchrx -180 180", i.e. a full rotational search range.
        search_angles = [
            "-searchr%s -%d %d" % (x, 180, 180) for x in ["x", "y", "z"]
        ]

        cmd_args = ["flirt"]
        cmd_args.append("-in '%s'" % args.moving)
        cmd_args.append("-ref '%s'" % args.fixed)
        if args.initial_transform is not None:
            cmd_args.append("-init '%s'" % path_to_transform_flirt)
        cmd_args.append("-omat '%s'" % path_to_transform_flirt)
        cmd_args.append("-out '%s'" % path_to_tmp_output)
        cmd_args.append("-dof 6")  # rigid registration (6 degrees of freedom)
        cmd_args.append((" ").join(search_angles))
        if args.moving_mask is not None:
            cmd_args.append("-inweight '%s'" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-refweight '%s'" % args.fixed_mask)
        ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert FLIRT to SimpleITK transform
        cmd = "simplereg_transform -flirt2sitk '%s' '%s' '%s' '%s'" % (
            path_to_transform_flirt, args.fixed, args.moving, args.output)
        ph.execute_command(cmd, verbose=False)

    # Compose the inverse of the obtained transform with every available
    # motion-correction slice transform and write the updated transforms.
    if args.dir_input_mc is not None:
        ph.print_title("Update Motion-Correction Transformations")
        transform_sitk = sitkh.read_transform_sitk(args.output, inverse=1)

        # Reuse the name of the input motion-correction directory for output.
        if args.dir_input_mc.endswith("/"):
            subdir_mc = args.dir_input_mc.split("/")[-2]
        else:
            subdir_mc = args.dir_input_mc.split("/")[-1]
        dir_output_mc = os.path.join(dir_output, subdir_mc)

        ph.create_directory(dir_output_mc, delete_files=True)
        # Collect all '*.tfm' files in the motion-correction input directory.
        pattern = REGEX_FILENAMES + "[.]tfm"
        p = re.compile(pattern)
        trafos = [t for t in os.listdir(args.dir_input_mc) if p.match(t)]
        for t in trafos:
            path_to_input_transform = os.path.join(args.dir_input_mc, t)
            path_to_output_transform = os.path.join(dir_output_mc, t)
            t_sitk = sitkh.read_transform_sitk(path_to_input_transform)
            t_sitk = sitkh.get_composite_sitk_affine_transform(
                transform_sitk, t_sitk)
            sitk.WriteTransform(t_sitk, path_to_output_transform)
        ph.print_info("%d transformations written to '%s'" %
                      (len(trafos), dir_output_mc))

    if args.verbose:
        ph.show_niftis([args.fixed, path_to_tmp_output])

    elapsed_time_total = ph.stop_timing(time_start)

    # Summary
    ph.print_title("Summary")
    print("Computational Time: %s" % (elapsed_time_total))

    return 0
# Example #4
def main():
    """Register a reconstruction (moving) to a template image/space (fixed).

    A rigid transform is estimated using FLIRT (FSL) and/or RegAladin
    (NiftyReg) — FLIRT first (larger capture range), then RegAladin for
    refinement — and written to ``--output`` (must end in '.txt').
    Optionally, an additional registration after an AP-flip is performed and
    kept if it yields a higher NMI similarity against the fixed image.
    Previously obtained motion-correction slice transforms can be updated
    with the resulting transform.

    Returns:
        int: 0 on success.

    Raises:
        IOError: If neither FLIRT nor RegAladin is activated, or if the
            output path does not end in '.txt'.
    """

    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Register an obtained reconstruction (moving) "
        "to a template image/space (fixed) using rigid registration. "
        "The resulting registration can optionally be applied to previously "
        "obtained motion correction slice transforms so that a volumetric "
        "reconstruction is possible in the (standard anatomical) space "
        "defined by the fixed.", )
    input_parser.add_fixed(required=True)
    input_parser.add_moving(required=True)
    input_parser.add_output(help="Path to registration transform (.txt)",
                            required=True)
    input_parser.add_fixed_mask()
    input_parser.add_moving_mask()
    input_parser.add_dir_input_mc()
    input_parser.add_search_angle(default=180)
    input_parser.add_option(option_string="--initial-transform",
                            type=str,
                            help="Path to initial transform.",
                            default=None)
    input_parser.add_option(
        option_string="--test-ap-flip",
        type=int,
        help="Turn on/off functionality to run an additional registration "
        "after an AP-flip. Seems to be more robust to find a better "
        "registration outcome in general.",
        default=1)
    input_parser.add_option(
        option_string="--use-flirt",
        type=int,
        help="Turn on/off functionality to use FLIRT for the registration.",
        default=1)
    input_parser.add_option(
        option_string="--use-regaladin",
        type=int,
        help="Turn on/off functionality to use RegAladin for the "
        "registration.",
        default=1)
    input_parser.add_verbose(default=0)
    input_parser.add_log_config(default=1)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # Set to 1 to visualize intermediate registration results.
    debug = 0

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    if not args.use_regaladin and not args.use_flirt:
        raise IOError("Either RegAladin or FLIRT must be activated.")

    # The resulting transform is stored as an ITK text transform file.
    if not args.output.endswith(".txt"):
        raise IOError("output transformation path must end in '.txt'")

    # NOTE(review): dir_output is not explicitly created here (unlike the
    # sibling registration script) — presumably it already exists; verify.
    dir_output = os.path.dirname(args.output)

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")
    # Slices are not extracted; the stacks are only needed as whole volumes.
    fixed = st.Stack.from_filename(file_path=args.fixed,
                                   file_path_mask=args.fixed_mask,
                                   extract_slices=False)
    moving = st.Stack.from_filename(file_path=args.moving,
                                    file_path_mask=args.moving_mask,
                                    extract_slices=False)

    # Initialize args.output with the initial transform (or identity); the
    # registration steps below read it from there and overwrite it.
    if args.initial_transform is not None:
        transform_sitk = sitkh.read_transform_sitk(args.initial_transform)
    else:
        transform_sitk = sitk.AffineTransform(fixed.sitk.GetDimension())
    sitk.WriteTransform(transform_sitk, args.output)

    # Temporary path where the warped moving image will be written.
    path_to_tmp_output = os.path.join(
        DIR_TMP, ph.append_to_filename(os.path.basename(args.moving),
                                       "_warped"))

    # -------------------Register Reconstruction to Template-------------------
    ph.print_title("Register Reconstruction to Template")

    if args.use_flirt:
        path_to_transform_flirt = os.path.join(DIR_TMP, "transform_flirt.txt")

        # Convert SimpleITK into FLIRT transform
        cmd = "simplereg_transform -sitk2flirt %s %s %s %s" % (
            args.output, args.fixed, args.moving, path_to_transform_flirt)
        ph.execute_command(cmd, verbose=False)

        # Define search angle ranges for FLIRT in all three dimensions,
        # e.g. "-searchrx -180 180".
        search_angles = [
            "-searchr%s -%d %d" % (x, args.search_angle, args.search_angle)
            for x in ["x", "y", "z"]
        ]

        # flt = nipype.interfaces.fsl.FLIRT()
        # flt.inputs.in_file = args.moving
        # flt.inputs.reference = args.fixed
        # if args.initial_transform is not None:
        #     flt.inputs.in_matrix_file = path_to_transform_flirt
        # flt.inputs.out_matrix_file = path_to_transform_flirt
        # # flt.inputs.output_type = "NIFTI_GZ"
        # flt.inputs.out_file = path_to_tmp_output
        # flt.inputs.args = "-dof 6"
        # flt.inputs.args += " %s" % " ".join(search_angles)
        # if args.moving_mask is not None:
        #     flt.inputs.in_weight = args.moving_mask
        # if args.fixed_mask is not None:
        #     flt.inputs.ref_weight = args.fixed_mask
        # ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        # flt.run()
        # print("done")

        cmd_args = ["flirt"]
        cmd_args.append("-in %s" % args.moving)
        cmd_args.append("-ref %s" % args.fixed)
        if args.initial_transform is not None:
            cmd_args.append("-init %s" % path_to_transform_flirt)
        cmd_args.append("-omat %s" % path_to_transform_flirt)
        cmd_args.append("-out %s" % path_to_tmp_output)
        cmd_args.append("-dof 6")  # rigid registration (6 degrees of freedom)
        cmd_args.append((" ").join(search_angles))
        if args.moving_mask is not None:
            cmd_args.append("-inweight %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-refweight %s" % args.fixed_mask)
        ph.print_info("Run Registration (FLIRT) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert FLIRT to SimpleITK transform
        cmd = "simplereg_transform -flirt2sitk %s %s %s %s" % (
            path_to_transform_flirt, args.fixed, args.moving, args.output)
        ph.execute_command(cmd, verbose=False)

        if debug:
            ph.show_niftis([args.fixed, path_to_tmp_output])

    # Additionally, use RegAladin for more accurate alignment
    # Rationale: FLIRT has better capture range, but RegAladin seems to
    # find better alignment once it is within its capture range.
    if args.use_regaladin:
        path_to_transform_regaladin = os.path.join(DIR_TMP,
                                                   "transform_regaladin.txt")

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            args.output, path_to_transform_regaladin)
        ph.execute_command(cmd, verbose=False)

        # nreg = nipype.interfaces.niftyreg.RegAladin()
        # nreg.inputs.ref_file = args.fixed
        # nreg.inputs.flo_file = args.moving
        # nreg.inputs.res_file = path_to_tmp_output
        # nreg.inputs.in_aff_file = path_to_transform_regaladin
        # nreg.inputs.aff_file = path_to_transform_regaladin
        # nreg.inputs.args = "-rigOnly -voff"
        # if args.moving_mask is not None:
        #     nreg.inputs.fmask_file = args.moving_mask
        # if args.fixed_mask is not None:
        #     nreg.inputs.rmask_file = args.fixed_mask
        # ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        # nreg.run()
        # print("done")

        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref %s" % args.fixed)
        cmd_args.append("-flo %s" % args.moving)
        cmd_args.append("-res %s" % path_to_tmp_output)
        # Only feed an initial affine if one is actually available.
        if args.initial_transform is not None or args.use_flirt == 1:
            cmd_args.append("-inaff %s" % path_to_transform_regaladin)
        cmd_args.append("-aff %s" % path_to_transform_regaladin)
        # cmd_args.append("-cog")
        # cmd_args.append("-ln 2")
        cmd_args.append("-rigOnly")
        cmd_args.append("-voff")
        if args.moving_mask is not None:
            cmd_args.append("-fmask %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-rmask %s" % args.fixed_mask)
        ph.print_info("Run Registration (RegAladin) ... ", newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        # Convert RegAladin to SimpleITK transform
        cmd = "simplereg_transform -nreg2sitk %s %s" % (
            path_to_transform_regaladin, args.output)
        ph.execute_command(cmd, verbose=False)

        if debug:
            ph.show_niftis([args.fixed, path_to_tmp_output])

    # Try an anterior-posterior-flipped initialization as well and keep
    # whichever result scores the higher NMI similarity.
    if args.test_ap_flip:
        path_to_transform_flip = os.path.join(DIR_TMP, "transform_flip.txt")
        path_to_tmp_output_flip = os.path.join(DIR_TMP, "output_flip.nii.gz")

        # Get AP-flip transform
        transform_ap_flip_sitk = get_ap_flip_transform(args.fixed)
        path_to_transform_flip_regaladin = os.path.join(
            DIR_TMP, "transform_flip_regaladin.txt")
        sitk.WriteTransform(transform_ap_flip_sitk, path_to_transform_flip)

        # Compose current transform with AP flip transform
        cmd = "simplereg_transform -c %s %s %s" % (
            args.output, path_to_transform_flip, path_to_transform_flip)
        ph.execute_command(cmd, verbose=False)

        # Convert SimpleITK to RegAladin transform
        cmd = "simplereg_transform -sitk2nreg %s %s" % (
            path_to_transform_flip, path_to_transform_flip_regaladin)
        ph.execute_command(cmd, verbose=False)

        # nreg = nipype.interfaces.niftyreg.RegAladin()
        # nreg.inputs.ref_file = args.fixed
        # nreg.inputs.flo_file = args.moving
        # nreg.inputs.res_file = path_to_tmp_output_flip
        # nreg.inputs.in_aff_file = path_to_transform_flip_regaladin
        # nreg.inputs.aff_file = path_to_transform_flip_regaladin
        # nreg.inputs.args = "-rigOnly -voff"
        # if args.moving_mask is not None:
        #     nreg.inputs.fmask_file = args.moving_mask
        # if args.fixed_mask is not None:
        #     nreg.inputs.rmask_file = args.fixed_mask
        # ph.print_info("Run Registration AP-flipped (RegAladin) ... ",
        #               newline=False)
        # nreg.run()
        # print("done")

        cmd_args = ["reg_aladin"]
        cmd_args.append("-ref %s" % args.fixed)
        cmd_args.append("-flo %s" % args.moving)
        cmd_args.append("-res %s" % path_to_tmp_output_flip)
        cmd_args.append("-inaff %s" % path_to_transform_flip_regaladin)
        cmd_args.append("-aff %s" % path_to_transform_flip_regaladin)
        cmd_args.append("-rigOnly")
        # cmd_args.append("-ln 2")
        cmd_args.append("-voff")
        if args.moving_mask is not None:
            cmd_args.append("-fmask %s" % args.moving_mask)
        if args.fixed_mask is not None:
            cmd_args.append("-rmask %s" % args.fixed_mask)
        ph.print_info("Run Registration AP-flipped (RegAladin) ... ",
                      newline=False)
        ph.execute_command(" ".join(cmd_args), verbose=False)
        print("done")

        if debug:
            ph.show_niftis(
                [args.fixed, path_to_tmp_output, path_to_tmp_output_flip])

        # Compare both warped results against the fixed image via NMI.
        warped_moving = st.Stack.from_filename(path_to_tmp_output,
                                               extract_slices=False)
        warped_moving_flip = st.Stack.from_filename(path_to_tmp_output_flip,
                                                    extract_slices=False)
        fixed = st.Stack.from_filename(args.fixed, args.fixed_mask)

        stacks = [warped_moving, warped_moving_flip]
        image_similarity_evaluator = ise.ImageSimilarityEvaluator(
            stacks=stacks, reference=fixed)
        image_similarity_evaluator.compute_similarities()
        similarities = image_similarity_evaluator.get_similarities()

        if similarities["NMI"][1] > similarities["NMI"][0]:
            ph.print_info("AP-flipped outcome better")

            # Convert RegAladin to SimpleITK transform
            cmd = "simplereg_transform -nreg2sitk %s %s" % (
                path_to_transform_flip_regaladin, args.output)
            ph.execute_command(cmd, verbose=False)

            # Copy better outcome
            cmd = "cp -p %s %s" % (path_to_tmp_output_flip, path_to_tmp_output)
            ph.execute_command(cmd, verbose=False)

        else:
            ph.print_info("AP-flip does not improve outcome")

    # Compose the inverse of the obtained transform with every available
    # motion-correction slice transform and write the updated transforms.
    if args.dir_input_mc is not None:
        transform_sitk = sitkh.read_transform_sitk(args.output, inverse=1)

        # Reuse the name of the input motion-correction directory for output.
        if args.dir_input_mc.endswith("/"):
            subdir_mc = args.dir_input_mc.split("/")[-2]
        else:
            subdir_mc = args.dir_input_mc.split("/")[-1]
        dir_output_mc = os.path.join(dir_output, subdir_mc)

        ph.create_directory(dir_output_mc, delete_files=True)
        # Collect all '*.tfm' files in the motion-correction input directory.
        pattern = REGEX_FILENAMES + "[.]tfm"
        p = re.compile(pattern)
        trafos = [t for t in os.listdir(args.dir_input_mc) if p.match(t)]
        for t in trafos:
            path_to_input_transform = os.path.join(args.dir_input_mc, t)
            path_to_output_transform = os.path.join(dir_output_mc, t)
            t_sitk = sitkh.read_transform_sitk(path_to_input_transform)
            t_sitk = sitkh.get_composite_sitk_affine_transform(
                transform_sitk, t_sitk)
            sitk.WriteTransform(t_sitk, path_to_output_transform)

    if args.verbose:
        ph.show_niftis([args.fixed, path_to_tmp_output])

    elapsed_time_total = ph.stop_timing(time_start)

    # Summary
    ph.print_title("Summary")
    print("Computational Time: %s" % (elapsed_time_total))

    return 0
# Example #5
def main():
    """Perform bias field correction on a single image using N4ITK.

    Reads the input image (and optional mask), runs N4ITK bias field
    correction with the given convergence threshold, spline order, Wiener
    filter noise estimate, and bias field FWHM, then writes the corrected
    image to the requested output path.

    Returns:
        int: 0 on success.

    Raises:
        ValueError: If the output filename does not end in one of the
            allowed extensions.
    """

    time_start = ph.start_timing()

    np.set_printoptions(precision=3)

    input_parser = InputArgparser(
        description="Perform Bias Field correction using N4ITK.", )
    input_parser.add_filename(required=True)
    input_parser.add_output(required=True)
    input_parser.add_filename_mask()
    input_parser.add_option(
        option_string="--convergence-threshold",
        type=float,
        help="Specify the convergence threshold.",
        default=1e-6,
    )
    input_parser.add_option(
        option_string="--spline-order",
        type=int,
        help="Specify the spline order defining the bias field estimate.",
        default=3,
    )
    input_parser.add_option(
        option_string="--wiener-filter-noise",
        type=float,
        help="Specify the noise estimate defining the Wiener filter.",
        default=0.11,
    )
    input_parser.add_option(
        option_string="--bias-field-fwhm",
        type=float,
        help="Specify the full width at half maximum parameter characterizing "
        "the width of the Gaussian deconvolution.",
        default=0.15,
    )
    input_parser.add_log_config(default=1)
    input_parser.add_verbose(default=0)

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    # Validate the output extension before doing any work. Built-in all()
    # replaces np.alltrue, which was deprecated and removed in NumPy 2.0.
    if all(not args.output.endswith(t) for t in ALLOWED_EXTENSIONS):
        raise ValueError(
            "output filename invalid; allowed extensions are: %s" %
            ", ".join(ALLOWED_EXTENSIONS))

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # Read data
    stack = st.Stack.from_filename(
        file_path=args.filename,
        file_path_mask=args.filename_mask,
        extract_slices=False,
    )

    # Perform Bias Field Correction
    # ph.print_title("Perform Bias Field Correction")
    bias_field_corrector = n4itk.N4BiasFieldCorrection(
        stack=stack,
        use_mask=args.filename_mask is not None,
        convergence_threshold=args.convergence_threshold,
        spline_order=args.spline_order,
        wiener_filter_noise=args.wiener_filter_noise,
        bias_field_fwhm=args.bias_field_fwhm,
    )
    ph.print_info("N4ITK Bias Field Correction ... ", newline=False)
    bias_field_corrector.run_bias_field_correction()
    stack_corrected = bias_field_corrector.get_bias_field_corrected_stack()
    print("done")

    dw.DataWriter.write_image(stack_corrected.sitk, args.output)

    elapsed_time = ph.stop_timing(time_start)

    if args.verbose:
        ph.show_niftis([args.filename, args.output])

    ph.print_title("Summary")
    exe_file_info = os.path.basename(os.path.abspath(__file__)).split(".")[0]
    print("%s | Computational Time for Bias Field Correction: %s" %
          (exe_file_info, elapsed_time))

    return 0
def main():
    """Run the volumetric MRI reconstruction pipeline.

    Reads multiple (motion-corrected or static) low-resolution stacks,
    optionally performs linear intensity correction against a target stack,
    defines the reconstruction space (from the target stack or a
    user-provided image), and computes an isotropic, high-resolution 3D
    volume using either Scattered Data Approximation (--sda) or a
    regularized SRR solver (TK1L2, TVL2 or HuberL2).

    Returns:
        int: 0 on success.

    Raises:
        IOError: If an unknown --reconstruction-type is given.
        ValueError: If the output filename extension is not allowed, or
            --target-stack does not match any of --filenames.
    """
    time_start = ph.start_timing()

    # Set print options for numpy
    np.set_printoptions(precision=3)

    # Read input
    input_parser = InputArgparser(
        description="Volumetric MRI reconstruction framework to reconstruct "
        "an isotropic, high-resolution 3D volume from multiple "
        "motion-corrected (or static) stacks of low-resolution slices.", )
    input_parser.add_filenames(required=True)
    input_parser.add_filenames_masks()
    input_parser.add_dir_input_mc()
    input_parser.add_output(required=True)
    input_parser.add_suffix_mask(default="_mask")
    input_parser.add_target_stack(default=None)
    input_parser.add_extra_frame_target(default=10)
    input_parser.add_isotropic_resolution(default=None)
    input_parser.add_intensity_correction(default=1)
    input_parser.add_reconstruction_space(default=None)
    input_parser.add_minimizer(default="lsmr")
    input_parser.add_iter_max(default=10)
    input_parser.add_reconstruction_type(default="TK1L2")
    input_parser.add_data_loss(default="linear")
    input_parser.add_data_loss_scale(default=1)
    input_parser.add_alpha(default=0.01  # TK1L2
                           # default=0.006  #TVL2, HuberL2
                           )
    input_parser.add_rho(default=0.5)
    input_parser.add_tv_solver(default="PD")
    input_parser.add_pd_alg_type(default="ALG2")
    input_parser.add_iterations(default=15)
    input_parser.add_log_config(default=1)
    input_parser.add_use_masks_srr(default=0)
    input_parser.add_slice_thicknesses(default=None)
    input_parser.add_verbose(default=0)
    input_parser.add_viewer(default="itksnap")
    input_parser.add_argument(
        "--mask",
        "-mask",
        action='store_true',
        help="If given, input images are interpreted as image masks. "
        "Obtained volumetric reconstruction will be exported in uint8 format.")
    input_parser.add_argument(
        "--sda",
        "-sda",
        action='store_true',
        help="If given, the volume is reconstructed using "
        "Scattered Data Approximation (Vercauteren et al., 2006). "
        "--alpha is considered the value for the standard deviation then. "
        "Recommended value is, e.g., --alpha 0.8")

    args = input_parser.parse_args()
    input_parser.print_arguments(args)

    if args.reconstruction_type not in ["TK1L2", "TVL2", "HuberL2"]:
        raise IOError("Reconstruction type unknown")

    # str.endswith accepts a tuple of suffixes; this replaces the former
    # np.alltrue-based check (np.alltrue was deprecated and removed in
    # NumPy 2.0).
    if not args.output.endswith(tuple(ALLOWED_EXTENSIONS)):
        raise ValueError("output filename '%s' invalid; "
                         "allowed image extensions are: %s" %
                         (args.output, ", ".join(ALLOWED_EXTENSIONS)))

    dir_output = os.path.dirname(args.output)
    ph.create_directory(dir_output)

    if args.log_config:
        input_parser.log_config(os.path.abspath(__file__))

    # Paths shown to the user at the end. Only populated/consumed when
    # --verbose is set, but initialized unconditionally so the later
    # insert() calls can never hit a NameError.
    show_niftis = []
    # show_niftis = [f for f in args.filenames]

    # --------------------------------Read Data--------------------------------
    ph.print_title("Read Data")

    # In mask mode the input images themselves serve as their own masks
    if args.mask:
        filenames_masks = args.filenames
    else:
        filenames_masks = args.filenames_masks

    data_reader = dr.MultipleImagesReader(
        file_paths=args.filenames,
        file_paths_masks=filenames_masks,
        suffix_mask=args.suffix_mask,
        dir_motion_correction=args.dir_input_mc,
        stacks_slice_thicknesses=args.slice_thicknesses,
    )
    data_reader.read_data()
    stacks = data_reader.get_data()

    ph.print_info("%d input stacks read for further processing" % len(stacks))

    # Specify target stack for intensity correction and reconstruction space
    if args.target_stack is None:
        target_stack_index = 0
    else:
        filenames = ["%s.nii.gz" % s.get_filename() for s in stacks]
        filename_target_stack = os.path.basename(args.target_stack)
        try:
            target_stack_index = filenames.index(filename_target_stack)
        except ValueError as e:
            # Chain the original lookup failure for easier debugging
            raise ValueError(
                "--target-stack must correspond to an image as provided by "
                "--filenames") from e

    # ---------------------------Intensity Correction--------------------------
    # Linearly match each stack's intensities to the target stack (skipped
    # entirely in mask mode, where intensities carry no meaning).
    if args.intensity_correction and not args.mask:
        ph.print_title("Intensity Correction")
        intensity_corrector = ic.IntensityCorrection()
        intensity_corrector.use_individual_slice_correction(False)
        intensity_corrector.use_stack_mask(True)
        intensity_corrector.use_reference_mask(True)
        intensity_corrector.use_verbose(False)

        for i, stack in enumerate(stacks):
            if i == target_stack_index:
                ph.print_info("Stack %d (%s): Reference image. Skipped." %
                              (i + 1, stack.get_filename()))
                continue
            else:
                ph.print_info("Stack %d (%s): Intensity Correction ... " %
                              (i + 1, stack.get_filename()),
                              newline=False)
            intensity_corrector.set_stack(stack)
            # Resample the reference onto the current stack's grid so the
            # linear fit compares voxels at identical positions
            intensity_corrector.set_reference(
                stacks[target_stack_index].get_resampled_stack(
                    resampling_grid=stack.sitk,
                    interpolator="NearestNeighbor",
                ))
            intensity_corrector.run_linear_intensity_correction()
            stacks[i] = intensity_corrector.get_intensity_corrected_stack()
            print("done (c1 = %g) " %
                  intensity_corrector.get_intensity_correction_coefficients())

    # -------------------------Volumetric Reconstruction-----------------------
    ph.print_title("Volumetric Reconstruction")

    # Reconstruction space is given isotropically resampled target stack
    if args.reconstruction_space is None:
        recon0 = stacks[target_stack_index].get_isotropically_resampled_stack(
            resolution=args.isotropic_resolution,
            extra_frame=args.extra_frame_target)
        recon0 = recon0.get_cropped_stack_based_on_mask(
            boundary_i=args.extra_frame_target,
            boundary_j=args.extra_frame_target,
            boundary_k=args.extra_frame_target,
            unit="mm",
        )

    # Reconstruction space was provided by user
    else:
        recon0 = st.Stack.from_filename(args.reconstruction_space,
                                        extract_slices=False)

        # Change resolution for isotropic resolution if provided by user
        if args.isotropic_resolution is not None:
            recon0 = recon0.get_isotropically_resampled_stack(
                args.isotropic_resolution)

        # Use image information of selected target stack as recon0 serves
        # as initial value for reconstruction
        recon0 = stacks[target_stack_index].get_resampled_stack(recon0.sitk)
        recon0 = recon0.get_stack_multiplied_with_mask()

    ph.print_info("Reconstruction space defined with %s mm3 resolution" %
                  " x ".join(["%.2f" % s for s in recon0.sitk.GetSpacing()]))

    if args.sda:
        # SDA path: --alpha is interpreted as the Gaussian standard deviation
        ph.print_title("Compute SDA reconstruction")
        SDA = sda.ScatteredDataApproximation(stacks,
                                             recon0,
                                             sigma=args.alpha,
                                             sda_mask=args.mask)
        SDA.run()
        recon = SDA.get_reconstruction()
        if args.mask:
            dw.DataWriter.write_mask(recon.sitk_mask, args.output)
        else:
            dw.DataWriter.write_image(recon.sitk, args.output)

        if args.verbose:
            show_niftis.insert(0, args.output)

    else:
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            # Cheap TK1 solve (few iterations) only serves as initial value
            # for the subsequent TV/Huber solver
            ph.print_title("Compute Initial value for %s" %
                           args.reconstruction_type)
            SRR0 = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=recon0,
                alpha=args.alpha,
                iter_max=min(5, args.iter_max),
                reg_type="TK1",
                minimizer="lsmr",
                data_loss="linear",
                use_masks=args.use_masks_srr,
                # verbose=args.verbose,
            )
        else:
            ph.print_title("Compute %s reconstruction" %
                           args.reconstruction_type)
            SRR0 = tk.TikhonovSolver(
                stacks=stacks,
                reconstruction=recon0,
                alpha=args.alpha,
                iter_max=args.iter_max,
                reg_type="TK1",
                minimizer=args.minimizer,
                data_loss=args.data_loss,
                data_loss_scale=args.data_loss_scale,
                use_masks=args.use_masks_srr,
                # verbose=args.verbose,
            )
        SRR0.run()

        recon = SRR0.get_reconstruction()

        # For TV/Huber, the TK1 result is only an intermediate output and is
        # written under a suffixed filename
        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            output = ph.append_to_filename(args.output, "_initTK1L2")
        else:
            output = args.output

        if args.mask:
            mask_estimator = bm.BinaryMaskFromMaskSRREstimator(recon.sitk)
            mask_estimator.run()
            mask_sitk = mask_estimator.get_mask_sitk()
            dw.DataWriter.write_mask(mask_sitk, output)
        else:
            dw.DataWriter.write_image(recon.sitk, output)

        if args.verbose:
            show_niftis.insert(0, output)

        if args.reconstruction_type in ["TVL2", "HuberL2"]:
            ph.print_title("Compute %s reconstruction" %
                           args.reconstruction_type)
            if args.tv_solver == "ADMM":
                SRR = admm.ADMMSolver(
                    stacks=stacks,
                    reconstruction=st.Stack.from_stack(
                        SRR0.get_reconstruction()),
                    minimizer=args.minimizer,
                    alpha=args.alpha,
                    iter_max=args.iter_max,
                    rho=args.rho,
                    data_loss=args.data_loss,
                    iterations=args.iterations,
                    use_masks=args.use_masks_srr,
                    verbose=args.verbose,
                )

            else:
                SRR = pd.PrimalDualSolver(
                    stacks=stacks,
                    reconstruction=st.Stack.from_stack(
                        SRR0.get_reconstruction()),
                    minimizer=args.minimizer,
                    alpha=args.alpha,
                    iter_max=args.iter_max,
                    iterations=args.iterations,
                    alg_type=args.pd_alg_type,
                    reg_type="TV"
                    if args.reconstruction_type == "TVL2" else "huber",
                    data_loss=args.data_loss,
                    use_masks=args.use_masks_srr,
                    verbose=args.verbose,
                )
            SRR.run()
            recon = SRR.get_reconstruction()

            if args.mask:
                mask_estimator = bm.BinaryMaskFromMaskSRREstimator(recon.sitk)
                mask_estimator.run()
                mask_sitk = mask_estimator.get_mask_sitk()
                dw.DataWriter.write_mask(mask_sitk, args.output)

            else:
                dw.DataWriter.write_image(recon.sitk, args.output)

            if args.verbose:
                show_niftis.insert(0, args.output)

    if args.verbose:
        ph.show_niftis(show_niftis, viewer=args.viewer)

    ph.print_line_separator()

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    print("Computational Time for Volumetric Reconstruction: %s" %
          (elapsed_time))

    return 0
示例#7
0
def main():

    time_start = ph.start_timing()

    flag_individual_cases_only = 1

    flag_batch_script = 0
    batch_ctr = [32]

    flag_correct_bias_field = 0
    # flag_correct_intensities = 0

    flag_collect_segmentations = 0
    flag_select_images_segmentations = 0

    flag_reconstruct_volume_subject_space = 0
    flag_reconstruct_volume_subject_space_irtk = 0
    flag_reconstruct_volume_subject_space_show_comparison = 0
    flag_register_to_template = 0
    flag_register_to_template_irtk = 0
    flag_show_srr_template_space = 0
    flag_reconstruct_volume_template_space = 0
    flag_collect_volumetric_reconstruction_results = 0
    flag_show_volumetric_reconstruction_results = 0

    flag_rsync_stuff = 0

    # Analysis
    flag_remove_failed_cases_for_analysis = 1
    flag_postop = 2  # 0... preop, 1...postop, 2... pre+postop

    flag_evaluate_image_similarities = 0
    flag_analyse_image_similarities = 1

    flag_evaluate_slice_residual_similarities = 0
    flag_analyse_slice_residual_similarities = 0

    flag_analyse_stacks = 0
    flag_analyse_qualitative_assessment = 0

    flag_collect_data_blinded_analysis = 0
    flag_anonymize_data_blinded_analysis = 0

    provide_comparison = 0
    intensity_correction = 1
    isotropic_resolution = 0.75
    alpha = 0.02
    outlier_rejection = 1
    threshold = 0.7
    threshold_first = 0.6

    # metric = "ANTSNeighborhoodCorrelation"
    # metric_radius = 5
    # multiresolution = 0

    prefix_srr = "srr_"
    prefix_srr_qa = "masked_"

    # ----------------------------------Set Up---------------------------------
    if flag_correct_bias_field:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT, "BiasFieldCorrection")
    elif flag_reconstruct_volume_subject_space:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionSubjectSpace")
    elif flag_register_to_template:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionRegisterToTemplate")
    elif flag_reconstruct_volume_template_space:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT,
                                 "VolumetricReconstructionTemplateSpace")
    else:
        dir_batch = os.path.join(utils.DIR_BATCH_ROOT, "foo")
    file_prefix_batch = os.path.join(dir_batch, "command")

    if flag_batch_script:
        verbose = 0
    else:
        verbose = 1

    data_reader = dr.ExcelSheetDataReader(utils.EXCEL_FILE)
    data_reader.read_data()
    cases = data_reader.get_data()

    if flag_analyse_qualitative_assessment:
        data_reader = dr.ExcelSheetQualitativeAssessmentReader(utils.QA_FILE)
        data_reader.read_data()
        qualitative_assessment = data_reader.get_data()

        statistical_evaluation = se.StatisticalEvaluation(
            qualitative_assessment)
        statistical_evaluation.run_tests(ref="seg_manual")
        ph.exit()

    cases_similarities = []
    cases_stacks = []

    if flag_individual_cases_only:
        N_cases = len(INDIVIDUAL_CASE_IDS)
    else:
        N_cases = len(cases.keys())

    i_case = 0
    for case_id in sorted(cases.keys()):
        if flag_individual_cases_only and case_id not in INDIVIDUAL_CASE_IDS:
            continue
        if not flag_analyse_image_similarities and \
                not flag_analyse_slice_residual_similarities:
            i_case += 1
            ph.print_title("%d/%d: %s" % (i_case, N_cases, case_id))

        if flag_rsync_stuff:
            dir_output = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="template_space", seg_mode="")

            dir_input = re.sub("Volumes/spina/",
                               "Volumes/medic-volumetric_res/SpinaBifida/",
                               dir_output)
            cmd = "rsync -avuhn --exclude 'motion_correction' %sseg_manual %s" % (
                dir_input, dir_output)
            ph.print_execution(cmd)
            # ph.execute_command(cmd)

        # -------------------------Correct Bias Field--------------------------
        if flag_correct_bias_field:
            filenames = utils.get_filenames_preprocessing_bias_field(case_id)
            paths_to_filenames = [
                os.path.join(utils.get_directory_case_original(case_id), f)
                for f in filenames
            ]
            dir_output = utils.get_directory_case_preprocessing(
                case_id, stage="01_N4ITK")

            # no image found matching the pattern
            if len(paths_to_filenames) == 0:
                continue

            cmd_args = []
            cmd_args.append("--filenames %s" % " ".join(paths_to_filenames))
            cmd_args.append("--dir-output %s" % dir_output)
            cmd_args.append("--prefix-output ''")
            cmd = "niftymic_correct_bias_field %s" % (" ").join(cmd_args)

            ph.execute_command(cmd,
                               flag_print_to_file=flag_batch_script,
                               path_to_file="%s%d.txt" %
                               (file_prefix_batch, ph.add_one(batch_ctr)))

        # # Skip case in case segmentations have not been provided yet
        # if not ph.directory_exists(utils.get_directory_case_segmentation(
        #         case_id, utils.SEGMENTATION_INIT, SEG_MODES[0])):
        #     continue

        # ------------------------Collect Segmentations------------------------
        if flag_collect_segmentations:
            # Skip case in case segmentations have been collected already
            if ph.directory_exists(
                    utils.get_directory_case_segmentation(
                        case_id, utils.SEGMENTATION_SELECTED, SEG_MODES[0])):
                ph.print_info("skipped")
                continue

            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_INIT)

            for i_seg_mode, seg_mode in enumerate(SEG_MODES):
                directory_selected = utils.get_directory_case_segmentation(
                    case_id, utils.SEGMENTATION_SELECTED, seg_mode)
                ph.create_directory(directory_selected)
                paths_to_filenames_init = [
                    os.path.join(
                        utils.get_directory_case_segmentation(
                            case_id, utils.SEGMENTATION_INIT, seg_mode), f)
                    for f in filenames
                ]
                paths_to_filenames_selected = [
                    os.path.join(directory_selected, f) for f in filenames
                ]
                for i in range(len(filenames)):
                    cmd = "cp -p %s %s" % (paths_to_filenames_init[i],
                                           paths_to_filenames_selected[i])
                    # ph.print_execution(cmd)
                    ph.execute_command(cmd)

        if flag_select_images_segmentations:
            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_SELECTED)
            paths_to_filenames = [
                os.path.join(
                    utils.get_directory_case_preprocessing(case_id,
                                                           stage="01_N4ITK"),
                    f) for f in filenames
            ]
            paths_to_filenames_masks = [
                os.path.join(
                    utils.get_directory_case_segmentation(
                        case_id, utils.SEGMENTATION_SELECTED, "seg_manual"), f)
                for f in filenames
            ]
            for i in range(len(filenames)):
                ph.show_niftis(
                    [paths_to_filenames[i]],
                    segmentation=paths_to_filenames_masks[i],
                    # viewer="fsleyes",
                )
                ph.pause()
                ph.killall_itksnap()

        # # -------------------------Correct Intensities-----------------------
        # if flag_correct_intensities:
        #     filenames = utils.get_segmented_image_filenames(case_id)
        #     paths_to_filenames_bias = [os.path.join(
        #         utils.get_directory_case_preprocessing(
        #             case_id, stage="01_N4ITK"), f) for f in filenames]
        #     print paths_to_filenames_bias

        # -----------------Reconstruct Volume in Subject Space-----------------
        if flag_reconstruct_volume_subject_space:

            filenames = utils.get_segmented_image_filenames(
                case_id, subfolder=utils.SEGMENTATION_SELECTED)
            # filenames = filenames[0:2]

            paths_to_filenames = [
                os.path.join(
                    utils.get_directory_case_preprocessing(case_id,
                                                           stage="01_N4ITK"),
                    f) for f in filenames
            ]

            # Estimate target stack
            target_stack_index = utils.get_target_stack_index(
                case_id, utils.SEGMENTATION_SELECTED, "seg_auto", filenames)

            for i, seg_mode in enumerate(SEG_MODES):
                # Get mask filenames
                paths_to_filenames_masks = [
                    os.path.join(
                        utils.get_directory_case_segmentation(
                            case_id, utils.SEGMENTATION_SELECTED, seg_mode), f)
                    for f in filenames
                ]

                if flag_reconstruct_volume_subject_space_irtk:
                    if seg_mode != "seg_manual":
                        continue
                    utils.export_irtk_call_to_workstation(
                        case_id=case_id,
                        filenames=filenames,
                        seg_mode=seg_mode,
                        isotropic_resolution=isotropic_resolution,
                        target_stack_index=target_stack_index,
                        kernel_mask_dilation=(15, 15, 4))

                else:
                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="subject_space",
                        seg_mode=seg_mode)
                    # dir_output = "/tmp/foo"

                    cmd_args = []
                    cmd_args.append("--filenames %s" %
                                    " ".join(paths_to_filenames))
                    cmd_args.append("--filenames-masks %s" %
                                    " ".join(paths_to_filenames_masks))
                    cmd_args.append("--dir-output %s" % dir_output)
                    cmd_args.append("--use-masks-srr 0")
                    cmd_args.append("--isotropic-resolution %f" %
                                    isotropic_resolution)
                    cmd_args.append("--target-stack-index %d" %
                                    target_stack_index)
                    cmd_args.append("--intensity-correction %d" %
                                    intensity_correction)
                    cmd_args.append("--outlier-rejection %d" %
                                    outlier_rejection)
                    cmd_args.append("--threshold-first %f" % threshold_first)
                    cmd_args.append("--threshold %f" % threshold)
                    # cmd_args.append("--metric %s" % metric)
                    # cmd_args.append("--multiresolution %d" % multiresolution)
                    # cmd_args.append("--metric-radius %s" % metric_radius)
                    # if i > 0:
                    #     cmd_args.append("--reconstruction-space %s" % (
                    #         utils.get_path_to_recon(
                    #             utils.get_directory_case_recon_seg_mode(
                    #                 case_id, "seg_manual"))))
                    # cmd_args.append("--two-step-cycles 0")
                    cmd_args.append("--verbose %d" % verbose)
                    cmd_args.append("--provide-comparison %d" %
                                    provide_comparison)
                    # cmd_args.append("--iter-max 1")

                    cmd = "niftymic_reconstruct_volume %s" % (
                        " ").join(cmd_args)

                    ph.execute_command(
                        cmd,
                        flag_print_to_file=flag_batch_script,
                        path_to_file="%s%d.txt" %
                        (file_prefix_batch, ph.add_one(batch_ctr)))

        if flag_reconstruct_volume_subject_space_show_comparison:
            recon_paths = []
            for seg_mode in SEG_MODES:
                path_to_recon = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="subject_space",
                        seg_mode=seg_mode))
                recon_paths.append(path_to_recon)
            recon_path_irtk = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="subject_space",
                    seg_mode="IRTK"), "IRTK_SRR.nii.gz")
            show_modes = list(SEG_MODES)
            if ph.file_exists(recon_path_irtk):
                recon_paths.append(recon_path_irtk)
                show_modes.append("irtk")
            ph.show_niftis(recon_paths)
            ph.print_info("Sequence: %s" % (" -- ").join(show_modes))
            ph.pause()
            ph.killall_itksnap()

        # -------------------------Register to template------------------------
        if flag_register_to_template:
            for seg_mode in SEG_MODES:

                cmd_args = []
                # register seg_auto-recon to template space
                if seg_mode == "seg_auto":

                    path_to_recon = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode))

                    template_stack_estimator = \
                        tse.TemplateStackEstimator.from_mask(
                            ph.append_to_filename(path_to_recon, "_mask"))
                    path_to_reference = \
                        template_stack_estimator.get_path_to_template()

                    dir_input_motion_correction = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode), "motion_correction")

                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode)
                    # dir_output = "/home/mebner/tmp"
                    # # ------- DELETE -----
                    # dir_output = re.sub("data", "foo+1", dir_output)
                    # dir_output = re.sub(
                    #     "volumetric_reconstruction/20180126/template_space/seg_auto",
                    #     "", dir_output)
                    # # -------
                    # cmd_args.append("--use-fixed-mask 1")
                    cmd_args.append("--use-moving-mask 1")

                    # HACK
                    path_to_initial_transform = os.path.join(
                        utils.DIR_INPUT_ROOT_DATA, case_id,
                        "volumetric_reconstruction", "20180126",
                        "template_space", "seg_manual",
                        "registration_transform_sitk.txt")
                    cmd_args.append("--initial-transform %s" %
                                    path_to_initial_transform)
                    cmd_args.append("--use-flirt 0")
                    cmd_args.append("--use-regaladin 1")
                    cmd_args.append("--test-ap-flip 0")

                # register remaining recons to registered seg_auto-recon
                else:
                    path_to_reference = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="template_space",
                            seg_mode="seg_auto"),
                        suffix="ResamplingToTemplateSpace",
                    )
                    path_to_initial_transform = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="template_space",
                            seg_mode="seg_auto"),
                        "registration_transform_sitk.txt")

                    path_to_recon = utils.get_path_to_recon(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode))
                    dir_input_motion_correction = os.path.join(
                        utils.get_directory_case_recon_seg_mode(
                            case_id=case_id,
                            recon_space="subject_space",
                            seg_mode=seg_mode), "motion_correction")
                    dir_output = utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode)

                    cmd_args.append("--use-fixed-mask 0")
                    cmd_args.append("--use-moving-mask 0")
                    cmd_args.append("--initial-transform %s" %
                                    path_to_initial_transform)
                    cmd_args.append("--use-flirt 0")
                    cmd_args.append("--use-regaladin 1")
                    cmd_args.append("--test-ap-flip 0")

                cmd_args.append("--moving %s" % path_to_recon)
                cmd_args.append("--fixed %s" % path_to_reference)
                cmd_args.append("--dir-input %s" % dir_input_motion_correction)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--write-transform 1")
                cmd_args.append("--verbose %d" % verbose)
                cmd = "niftymic_register_image %s" % (" ").join(cmd_args)

                ph.execute_command(cmd,
                                   flag_print_to_file=flag_batch_script,
                                   path_to_file="%s%d.txt" %
                                   (file_prefix_batch, ph.add_one(batch_ctr)))

        if flag_register_to_template_irtk:
            dir_input = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="subject_space", seg_mode="IRTK")
            dir_output = utils.get_directory_case_recon_seg_mode(
                case_id=case_id, recon_space="template_space", seg_mode="IRTK")
            path_to_recon = os.path.join(dir_input, "IRTK_SRR.nii.gz")
            path_to_reference = utils.get_path_to_recon(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="seg_manual"),
                suffix="ResamplingToTemplateSpace",
            )
            path_to_initial_transform = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="seg_manual"), "registration_transform_sitk.txt")

            cmd_args = []
            cmd_args.append("--fixed %s" % path_to_reference)
            cmd_args.append("--moving %s" % path_to_recon)
            cmd_args.append("--initial-transform %s" %
                            path_to_initial_transform)
            cmd_args.append("--use-fixed-mask 0")
            cmd_args.append("--use-moving-mask 0")
            cmd_args.append("--use-flirt 0")
            cmd_args.append("--use-regaladin 1")
            cmd_args.append("--test-ap-flip 0")
            cmd_args.append("--dir-output %s" % dir_output)
            cmd_args.append("--verbose %d" % verbose)
            cmd = "niftymic_register_image %s" % (" ").join(cmd_args)
            ph.execute_command(cmd)

        if flag_show_srr_template_space:
            recon_paths = []
            show_modes = list(SEG_MODES)
            # show_modes.append("IRTK")
            for seg_mode in show_modes:
                dir_input = utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode=seg_mode)
                # # ------- DELETE -----
                # dir_input = re.sub("data", "foo+1", dir_input)
                # dir_input = re.sub(
                #     "volumetric_reconstruction/20180126/template_space/seg_auto",
                #     "", dir_input)
                # # -------
                path_to_recon_space = utils.get_path_to_recon(
                    dir_input,
                    suffix="ResamplingToTemplateSpace",
                )
                recon_paths.append(path_to_recon_space)
            ph.show_niftis(recon_paths)
            ph.print_info("Sequence: %s" % (" -- ").join(show_modes))
            ph.pause()
            ph.killall_itksnap()

        # -----------------Reconstruct Volume in Template Space----------------
        if flag_reconstruct_volume_template_space:
            for seg_mode in SEG_MODES:
                path_to_recon_space = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode),
                    suffix="ResamplingToTemplateSpace",
                )
                dir_input = os.path.join(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode), "motion_correction")
                dir_output = utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode=seg_mode)
                # dir_output = os.path.join("/tmp/spina/template_space/%s-%s" % (
                #     case_id, seg_mode))

                cmd_args = []
                cmd_args.append("--dir-input %s" % dir_input)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--reconstruction-space %s" %
                                path_to_recon_space)
                cmd_args.append("--alpha %s" % alpha)
                cmd_args.append("--verbose %s" % verbose)
                cmd_args.append("--use-masks-srr 0")

                # cmd_args.append("--minimizer L-BFGS-B")
                # cmd_args.append("--alpha 0.006")
                # cmd_args.append("--reconstruction-type HuberL2")
                # cmd_args.append("--data-loss arctan")
                # cmd_args.append("--iterations 5")
                # cmd_args.append("--data-loss-scale 0.7")

                cmd = "niftymic_reconstruct_volume_from_slices %s" % \
                    (" ").join(cmd_args)
                ph.execute_command(cmd,
                                   flag_print_to_file=flag_batch_script,
                                   path_to_file="%s%d.txt" %
                                   (file_prefix_batch, ph.add_one(batch_ctr)))

        # ----------------Collect SRR results in Template Space----------------
        if flag_collect_volumetric_reconstruction_results:
            directory = utils.get_directory_case_recon_summary(case_id)
            ph.create_directory(directory)

            # clear potentially existing files
            cmd = "rm -f %s/*.nii.gz" % (directory)
            ph.execute_command(cmd)

            # Collect SRRs
            for seg_mode in SEG_MODES:
                path_to_recon_src = utils.get_path_to_recon(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode), )
                path_to_recon = os.path.join(
                    directory, "%s%s.nii.gz" % (prefix_srr, seg_mode))

                cmd = "cp -p %s %s" % (path_to_recon_src, path_to_recon)
                ph.execute_command(cmd)

            # Collect IRTK recon
            path_to_recon_src = os.path.join(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="template_space",
                    seg_mode="IRTK"),
                "IRTK_SRR_LinearResamplingToTemplateSpace.nii.gz")

            path_to_recon = os.path.join(directory,
                                         "%s%s.nii.gz" % (prefix_srr, "irtk"))

            cmd = "cp -p %s %s" % (path_to_recon_src, path_to_recon)
            ph.execute_command(cmd)

            # Collect evaluation mask
            path_to_recon = utils.get_path_to_recon(
                utils.get_directory_case_recon_seg_mode(
                    case_id=case_id,
                    recon_space="subject_space",
                    seg_mode="seg_auto"))

            template_stack_estimator = \
                tse.TemplateStackEstimator.from_mask(
                    ph.append_to_filename(path_to_recon, "_mask"))
            path_to_template = \
                template_stack_estimator.get_path_to_template()
            path_to_template_mask_src = ph.append_to_filename(
                path_to_template, "_mask_dil")
            path_to_template_mask = "%s/" % directory

            cmd = "cp -p %s %s" % (path_to_template_mask_src,
                                   path_to_template_mask)
            ph.execute_command(cmd)

        if flag_show_volumetric_reconstruction_results:
            dir_output = utils.get_directory_case_recon_summary(case_id)
            paths_to_recons = []
            for seg_mode in RECON_MODES:
                path_to_recon = os.path.join(
                    dir_output, "%s%s.nii.gz" % (prefix_srr, seg_mode))
                paths_to_recons.append(path_to_recon)
            path_to_mask = "%s/STA*.nii.gz" % dir_output
            cmd = ph.show_niftis(paths_to_recons, segmentation=path_to_mask)
            sitkh.write_executable_file([cmd], dir_output=dir_output)
            ph.pause()
            ph.killall_itksnap()

        # ---------------------Evaluate Image Similarities---------------------
        if flag_evaluate_image_similarities:
            dir_input = utils.get_directory_case_recon_summary(case_id)
            dir_output = utils.get_directory_case_recon_similarities(case_id)
            paths_to_recons = []
            for seg_mode in ["seg_auto", "detect", "irtk"]:
                path_to_recon = os.path.join(
                    dir_input, "%s%s.nii.gz" % (prefix_srr, seg_mode))
                paths_to_recons.append(path_to_recon)
            path_to_reference = os.path.join(
                dir_input, "%s%s.nii.gz" % (prefix_srr, "seg_manual"))
            path_to_reference_mask = utils.get_path_to_mask(dir_input)

            cmd_args = []
            cmd_args.append("--filenames %s" % " ".join(paths_to_recons))
            cmd_args.append("--reference %s" % path_to_reference)
            cmd_args.append("--reference-mask %s" % path_to_reference_mask)
            # cmd_args.append("--verbose 1")
            cmd_args.append("--dir-output %s" % dir_output)

            exe = re.sub("pyc", "py",
                         os.path.abspath(evaluate_image_similarity.__file__))
            cmd_args.insert(0, exe)

            # clear potentially existing files
            cmd = "rm -f %s/*.txt" % (dir_output)
            ph.execute_command(cmd)

            cmd = "python %s" % " ".join(cmd_args)
            ph.execute_command(cmd)

        # -----------------Evaluate Slice Residual Similarities----------------
        if flag_evaluate_slice_residual_similarities:

            path_to_reference_mask = utils.get_path_to_mask(
                utils.get_directory_case_recon_summary(case_id))

            dir_output_root = \
                utils.get_directory_case_slice_residual_similarities(case_id)

            # clear potentially existing files
            # cmd = "rm -f %s/*.txt" % (dir_output_root)
            # ph.execute_command(cmd)

            for seg_mode in SEG_MODES:
                dir_input = os.path.join(
                    utils.get_directory_case_recon_seg_mode(
                        case_id=case_id,
                        recon_space="template_space",
                        seg_mode=seg_mode,
                    ), "motion_correction")
                path_to_reference = os.path.join(
                    utils.get_directory_case_recon_summary(case_id),
                    "%s%s.nii.gz" % (prefix_srr, seg_mode))
                dir_output = os.path.join(dir_output_root, seg_mode)

                cmd_args = []
                cmd_args.append("--dir-input %s" % dir_input)
                cmd_args.append("--reference %s" % path_to_reference)
                cmd_args.append("--reference-mask %s" % path_to_reference_mask)
                cmd_args.append("--use-reference-mask 1")
                cmd_args.append("--use-slice-masks 0")
                # cmd_args.append("--verbose 1")
                cmd_args.append("--dir-output %s" % dir_output)

                exe = re.sub("pyc", "py", os.path.abspath(esrs.__file__))
                cmd_args.insert(0, exe)

                cmd = "python %s" % " ".join(cmd_args)
                ph.execute_command(cmd)

        # Collect data for blinded analysis
        if flag_collect_data_blinded_analysis:
            if flag_remove_failed_cases_for_analysis and case_id in RECON_FAILED_CASE_IDS:
                continue

            dir_input = utils.get_directory_case_recon_summary(case_id)
            # pattern = "STA([0-9]+)[_]mask.nii.gz"
            pattern = "STA([0-9]+)[_]mask_dil.nii.gz"
            p = re.compile(pattern)
            gw = [
                p.match(f).group(1) for f in os.listdir(dir_input)
                if p.match(f)
            ][0]

            dir_output = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "open"), case_id)

            exe = re.sub("pyc", "py", os.path.abspath(mswm.__file__))

            recons = []

            for seg_mode in RECON_MODES:
                path_to_recon = os.path.join(
                    dir_input, "%s%s.nii.gz" % (prefix_srr, seg_mode))

                cmd_args = []
                cmd_args.append("--filename %s" % path_to_recon)
                cmd_args.append("--gestational-age %s" % gw)
                cmd_args.append("--dir-output %s" % dir_output)
                cmd_args.append("--prefix-output %s" % prefix_srr_qa)
                cmd_args.append("--verbose 0")
                cmd_args.insert(0, exe)

                cmd = "python %s" % " ".join(cmd_args)
                # ph.execute_command(cmd)

                recon = "%s%s" % (prefix_srr_qa,
                                  os.path.basename(path_to_recon))
                recons.append(recon)
            ph.write_show_niftis_exe(recons, dir_output)

        if flag_anonymize_data_blinded_analysis:
            dir_input = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "open"), case_id)
            dir_output_dictionaries = utils.get_directory_anonymized_dictionares(
                case_id)
            dir_output_anonymized_images = os.path.join(
                utils.get_directory_blinded_analysis(case_id, "anonymized"),
                case_id)

            if not ph.directory_exists(dir_input):
                continue
            ph.create_directory(dir_output_dictionaries)
            ph.create_directory(dir_output_anonymized_images)

            data_anonymizer = da.DataAnonymizer()
            # Create random dictionary (only required once)
            # data_anonymizer.set_prefix_identifiers("%s_" % case_id)
            # data_anonymizer.read_nifti_filenames_from_directory(dir_input)
            # data_anonymizer.generate_identifiers()
            # data_anonymizer.generate_randomized_dictionary()
            # data_anonymizer.write_dictionary(
            #     dir_output_dictionaries, "dictionary_%s" % case_id)

            # Read dictionary
            data_anonymizer.read_dictionary(dir_output_dictionaries,
                                            "dictionary_%s" % case_id)

            # Anonymize files
            if 0:
                ph.clear_directory(dir_output_anonymized_images)
                data_anonymizer.anonymize_files(dir_input,
                                                dir_output_anonymized_images)

                # Write executable script
                filenames = [
                    "%s.nii.gz" % f
                    for f in sorted(data_anonymizer.get_identifiers())
                ]
                ph.write_show_niftis_exe(filenames,
                                         dir_output_anonymized_images)

            # Reveal anonymized files
            if 1:
                filenames = data_anonymizer.reveal_anonymized_files(
                    dir_output_anonymized_images)
                filenames = sorted(["%s" % f for f in filenames])
                ph.write_show_niftis_exe(filenames,
                                         dir_output_anonymized_images)

            # Reveal additional, original files
            # data_anonymizer.reveal_original_files(dir_output)

            # relative_directory = re.sub(
            #     utils.get_directory_blinded_analysis(case_id, "anonymized"),
            #     ".",
            #     dir_output_anonymized_images)
            # paths_to_filenames = [os.path.join(
            #     relative_directory, f) for f in filenames]

        # ---------------------Analyse Image Similarities---------------------
        if flag_analyse_image_similarities or \
                flag_analyse_slice_residual_similarities or \
                flag_analyse_stacks:
            if flag_remove_failed_cases_for_analysis:
                if case_id in RECON_FAILED_CASE_IDS:
                    continue
            if cases[case_id]['postrep'] == flag_postop or flag_postop == 2:
                cases_similarities.append(case_id)
                cases_stacks.append(
                    utils.get_segmented_image_filenames(
                        case_id,
                        # subfolder=utils.SEGMENTATION_INIT,
                        subfolder=utils.SEGMENTATION_SELECTED,
                    ))

        dir_output_analysis = os.path.join(
            # "/Users/mebner/UCL/UCL/Publications",
            "/home/mebner/Dropbox/UCL/Publications",
            "2018_MICCAI/brain_reconstruction_paper")

    if flag_analyse_image_similarities:
        dir_inputs = []
        filename = "image_similarities_postop%d.txt" % flag_postop
        for case_id in cases_similarities:
            dir_inputs.append(
                utils.get_directory_case_recon_similarities(case_id))
        cmd_args = []
        cmd_args.append("--dir-inputs %s" % " ".join(dir_inputs))
        cmd_args.append("--dir-output %s" % dir_output_analysis)
        cmd_args.append("--filename %s" % filename)

        exe = re.sub("pyc", "py",
                     os.path.abspath(src.analyse_image_similarities.__file__))
        cmd_args.insert(0, exe)

        cmd = "python %s" % " ".join(cmd_args)
        ph.execute_command(cmd)

    if flag_analyse_slice_residual_similarities:
        dir_inputs = []
        filename = "slice_residuals_postop%d.txt" % flag_postop
        for case_id in cases_similarities:
            dir_inputs.append(
                utils.get_directory_case_slice_residual_similarities(case_id))
        cmd_args = []
        cmd_args.append("--dir-inputs %s" % " ".join(dir_inputs))
        cmd_args.append("--subfolder %s" % " ".join(SEG_MODES))
        cmd_args.append("--dir-output %s" % dir_output_analysis)
        cmd_args.append("--filename %s" % filename)

        exe = re.sub(
            "pyc", "py",
            os.path.abspath(src.analyse_slice_residual_similarities.__file__))
        cmd_args.insert(0, exe)

        cmd = "python %s" % " ".join(cmd_args)
        # print len(cases_similarities)
        # print cases_similarities
        ph.execute_command(cmd)

    if flag_analyse_stacks:
        cases_stacks_N = [len(s) for s in cases_stacks]
        ph.print_subtitle("%d cases -- Number of stacks" % len(cases_stacks))
        ph.print_info("min: %g" % np.min(cases_stacks_N))
        ph.print_info("mean: %g" % np.mean(cases_stacks_N))
        ph.print_info("median: %g" % np.median(cases_stacks_N))
        ph.print_info("max: %g" % np.max(cases_stacks_N))

    elapsed_time = ph.stop_timing(time_start)
    ph.print_title("Summary")
    print("Computational Time for Pipeline: %s" % (elapsed_time))

    return 0
# 示例#8 (example separator, votes: 0)
def main():
    """Resample a moving image onto the grid of a fixed image.

    Parses command-line arguments, configures a ``simplereg`` Resampler
    (optional transform, interpolator, spacing, padding and grid
    extension/reduction) and writes the resampled image to the requested
    output path. Passing ``--fixed same`` resamples the moving image
    within its own image space.

    Returns:
        int: 0 on success.
    """
    parser = argparse.ArgumentParser(
        description="Resample image.",
        epilog="Author: Michael Ebner ([email protected])",
    )
    parser.add_argument(
        "-m",
        "--moving",
        help="Path to moving image",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-f",
        "--fixed",
        help="Path to fixed image. "
        "Can be 'same' if fixed image space is identical to the moving image "
        "space.",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-o",
        "--output",
        help="Path to resampled image",
        type=str,
        required=True,
    )
    parser.add_argument(
        "-t",
        "--transform",
        help="Path to (SimpleITK) transformation (.txt) or displacement "
        "field (.nii.gz) to be applied",
        type=str,
        required=False,
    )
    parser.add_argument(
        "-i",
        "--interpolator",
        help="Interpolator for image resampling. Can be either name (%s) "
        "or order (0, 1)" % (", ".join(ALLOWED_INTERPOLATORS)),
        type=str,
        required=False,
        default="Linear",
        # default="BSpline",  # might cause problems for some images
    )
    parser.add_argument(
        "-p",
        "--padding",
        help="Padding value",
        type=int,
        required=False,
        default=0,
    )
    parser.add_argument(
        "-s",
        "--spacing",
        help="Set spacing for resampling grid in fixed image space",
        nargs="+",
        type=float,
        default=None,
    )
    parser.add_argument(
        "-atg",
        "--add-to-grid",
        help="Additional grid extension/reduction in each direction of each "
        "axis in millimeter. If scalar, changes are applied uniformly to grid",
        nargs="+",
        type=float,
        default=None,
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Turn on/off verbose output",
        type=int,
        required=False,
        default=0,
    )
    args = parser.parse_args()

    # 'same' is shorthand for resampling onto the moving image's own grid.
    if args.fixed == "same":
        args.fixed = args.moving

    resampler = simplereg.resampler.Resampler(
        path_to_fixed=args.fixed,
        path_to_moving=args.moving,
        path_to_transform=args.transform,
        interpolator=args.interpolator,
        spacing=args.spacing,
        padding=args.padding,
        add_to_grid=args.add_to_grid,
        verbose=args.verbose,
    )
    resampler.run()
    resampler.write_image(args.output)

    # Open the viewer for visual comparison of output vs. fixed image.
    if args.verbose:
        ph.show_niftis([
            args.output,
            args.fixed,
        ])

    return 0