Example #1
def main():
    
    # Parse input arguments and get sanitised version
    sargs = mm_parse_inputs.SanitisedArgs(mm_parse_inputs.parse_input_arguments())
    
    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()
    
    mmfn.check_create_directories([sargs.output_directory])
    
    # Set default parameters
    if sargs.number is not None:
        iterations = sargs.number
    else:
        iterations = 200
        
    fwhm = 0.15			    # default 0.15
    subsampleFactor = 4	    # default 4; recommend 2 for in vivo mouse brains with resolution >100 micrometres.
    nlevels = 4             # default 4
    conv = 0.001			# default 0.001
    nhistbins = 256		    # default 200

    # Get list of files (use file_name_filter)
    nifti_files_list = sorted(glob.glob(os.path.join(sargs.input_directory, sargs.file_name_filter + '.nii*')))
    print("  Processing {0} files: \n   - - -\n   \t{1}\n   - - -\n  ...".format(len(nifti_files_list), '\n\t'.join([ str(item) for item in nifti_files_list ])))
    
    TASKS = [(nk, sargs, nf_path, iterations, fwhm, subsampleFactor, nlevels, conv, nhistbins) for nf_path in nifti_files_list]
    # Set the number of parallel processes to use
    pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() // 2))
    # The _star function will unpack TASKS to use in the actual function
    # Using pool.map because we do care about the order of the results.
    all_output_paths = pool.map(nuc_n4_debias_star, TASKS)
    pool.close()
    pool.join()
    
    # Serial alternative (would overwrite the parallel results if enabled):
    # all_output_paths = []
    # for nf_path in nifti_files_list:
    #     all_output_paths.append(nuc_n4_debias(nk, sargs, nf_path, iterations, fwhm, subsampleFactor, nlevels, conv, nhistbins))
    
    print(" all_output_paths: {0}".format(all_output_paths))
    
    print("  Copying headers from original images to bias-corrected images ...")
    
    for counter, nifti_file in enumerate(nifti_files_list):
        print "  Processing {0} / {1}: {2} ...".format((counter + 1), len(nifti_files_list), nifti_file)
        
        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(sargs.output_directory, original_nf_name, path_only=True)
        
        print("Bias corrected result path: '{0}'".format(bias_corrected_path))
        
        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path), original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)
    
    print("  Bias field correction completed; files saved to: \n{0}".format('\n\t'.join([ str(item) for item in all_output_paths ])))
Example #2
def go(mm):
    
    args = mm.args
    
    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()
    
    # Get list of files
    input_files_list = sorted(glob.glob(os.path.join(args.input_directory, args.input_name_filter + '.nii*')))
    print("  Processing {0} files: \n   - - -\n   \t{1}\n   - - -\n  ...".format(len(input_files_list), '\n\t'.join([ str(item) for item in input_files_list ])))
    
    TASKS = [(nk, args, f_path, args.iterations, args.fwhm, args.subsample, args.nlevels, args.convergence, args.nhistbins) for f_path in input_files_list]
    
    if args.parallel:
    
        # all_output_paths = []
        # print("[nuc_n4_debias] + TASKS: {0}".format([nuc_n4_debias] + TASKS))
        # for task in TASKS:
            # all_output_paths.append(nuc_n4_debias(*task))
            # mmfn.function_star([nuc_n4_debias] + task)
        
        import multiprocessing
        
        # Set the number of parallel processes to use
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        # Prepend the function to each task, so function_star() can unpack and call it
        all_output_paths = pool.map(mmfn.function_star, [[nuc_n4_debias] + list(task) for task in TASKS])
        pool.close()
        pool.join()
        
    else:
    
        all_output_paths = []
        for task in TASKS:
            all_output_paths.append(nuc_n4_debias(*task))
        
        print(" All output images: \n{0}".format(all_output_paths))
        
        print("  Copying headers from original images to bias-corrected images ...")
    
    for counter, nifti_file in enumerate(input_files_list):
        print "  Processing {0} / {1}: {2} ...".format((counter + 1), len(input_files_list), nifti_file)
        
        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(args.output_directory, original_nf_name, path_only=True)
        
        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path), original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)
    
    print("  Bias field correction completed; files saved to: \n{0}".format('\n\t'.join([ str(item) for item in all_output_paths ])))
Example #3
def go(mm):

    args = mm.args

    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()

    # Get list of files
    input_files_list = sorted(
        glob.glob(
            os.path.join(args.input_directory,
                         args.input_name_filter + '.nii*')))
    print(
        "  Processing {0} files: \n   - - -\n   \t{1}\n   - - -\n  ...".format(
            len(input_files_list),
            '\n\t'.join([str(item) for item in input_files_list])))

    TASKS = [(nk, args, f_path, args.iterations, args.fwhm, args.subsample,
              args.nlevels, args.convergence, args.nhistbins)
             for f_path in input_files_list]

    if args.parallel:

        # all_output_paths = []
        # print("[nuc_n4_debias] + TASKS: {0}".format([nuc_n4_debias] + TASKS))
        # for task in TASKS:
        #     all_output_paths.append(nuc_n4_debias(*task))
        #     mmfn.function_star([nuc_n4_debias] + task)

        import multiprocessing

        # Set the number of parallel processes to use
        pool = multiprocessing.Pool(multiprocessing.cpu_count())
        # Prepend the function to each task, so function_star() can unpack and call it
        all_output_paths = pool.map(
            mmfn.function_star,
            [[nuc_n4_debias] + list(task) for task in TASKS])
        pool.close()
        pool.join()

    else:

        all_output_paths = []
        for task in TASKS:
            all_output_paths.append(nuc_n4_debias(*task))

        print(" All output images: \n{0}".format(all_output_paths))

        print(
            "  Copying headers from original images to bias-corrected images ..."
        )

    for counter, nifti_file in enumerate(input_files_list):
        print "  Processing {0} / {1}: {2} ...".format((counter + 1),
                                                       len(input_files_list),
                                                       nifti_file)

        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(
            args.output_directory, original_nf_name, path_only=True)

        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path),
                                       original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)

    print("  Bias field correction completed; files saved to: \n{0}".format(
        '\n\t'.join([str(item) for item in all_output_paths])))
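Note: mmfn.copy_header is also an external helper. One plausible nibabel-based sketch, assuming it simply rebuilds the bias-corrected image with the original image's affine and header so that geometry information survives the correction step (assumed behaviour, not the original mmfn code):

import nibabel as nib

# Hypothetical sketch of mmfn.copy_header: keep the voxel data of the
# bias-corrected image but carry over the geometry of the original image.
def copy_header(corrected_img, original_img):
    return nib.Nifti1Image(corrected_img.get_fdata(),
                           original_img.affine,
                           header=original_img.header)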
Example #4
def main():

    # Parse input arguments and get sanitised version
    sargs = mm_parse_inputs.SanitisedArgs(
        mm_parse_inputs.parse_input_arguments())

    # Enable calling NiftyReg and NiftySeg
    nk = mm_niftk.MM_Niftk()

    mmfn.check_create_directories([sargs.output_directory])

    # Set default parameters
    if sargs.number is not None:
        iterations = sargs.number
    else:
        iterations = 200

    fwhm = 0.15  # default 0.15
    subsampleFactor = 4  # default 4; recommend 2 for in vivo mouse brains with resolution >100 micrometres.
    nlevels = 4  # default 4
    conv = 0.001  # default 0.001
    nhistbins = 256  # default 200

    # Get list of files (use file_name_filter)
    nifti_files_list = sorted(
        glob.glob(
            os.path.join(sargs.input_directory,
                         sargs.file_name_filter + '.nii*')))
    print(
        "  Processing {0} files: \n   - - -\n   \t{1}\n   - - -\n  ...".format(
            len(nifti_files_list),
            '\n\t'.join([str(item) for item in nifti_files_list])))

    TASKS = [(nk, sargs, nf_path, iterations, fwhm, subsampleFactor, nlevels,
              conv, nhistbins) for nf_path in nifti_files_list]
    # Set the number of parallel processes to use
    pool = multiprocessing.Pool(max(1, multiprocessing.cpu_count() // 2))
    # The _star function will unpack TASKS to use in the actual function
    # Using pool.map because we do care about the order of the results.
    all_output_paths = pool.map(nuc_n4_debias_star, TASKS)
    pool.close()
    pool.join()

    # Serial alternative (would overwrite the parallel results if enabled):
    # all_output_paths = []
    # for nf_path in nifti_files_list:
    #     all_output_paths.append(
    #         nuc_n4_debias(nk, sargs, nf_path, iterations, fwhm,
    #                       subsampleFactor, nlevels, conv, nhistbins))

    print(" all_output_paths: {0}".format(all_output_paths))

    print(
        "  Copying headers from original images to bias-corrected images ...")

    for counter, nifti_file in enumerate(nifti_files_list):
        print "  Processing {0} / {1}: {2} ...".format((counter + 1),
                                                       len(nifti_files_list),
                                                       nifti_file)

        original_nf_name = os.path.basename(nifti_file).split(os.extsep)[0]
        original_nifti = nib.load(nifti_file)
        bias_corrected_path = mmfn.get_corresponding_file(
            sargs.output_directory, original_nf_name, path_only=True)

        print("Bias corrected result path: '{0}'".format(bias_corrected_path))

        updated_nii = mmfn.copy_header(nib.load(bias_corrected_path),
                                       original_nifti)
        # This will overwrite the bias corrected files
        nib.save(updated_nii, bias_corrected_path)

    print("  Bias field correction completed; files saved to: \n{0}".format(
        '\n\t'.join([str(item) for item in all_output_paths])))
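Note: mmfn.get_corresponding_file, used to pair each original image with its bias-corrected output, is not shown either. A minimal sketch, assuming it looks up the file in the output directory whose name begins with the original file's basename (the signature and matching rule are assumptions for illustration only):

import glob
import os
import nibabel as nib

# Hypothetical sketch of mmfn.get_corresponding_file: find the output NIfTI
# in `directory` whose filename starts with `name`; return the path or the
# loaded image depending on path_only.
def get_corresponding_file(directory, name, path_only=False):
    matches = sorted(glob.glob(os.path.join(directory, name + '*.nii*')))
    if not matches:
        raise IOError("No file matching '{0}' found in '{1}'".format(name, directory))
    return matches[0] if path_only else nib.load(matches[0])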